diff --git a/.gitignore b/.gitignore index 0c39aa20b6ba..be92dfa89957 100644 --- a/.gitignore +++ b/.gitignore @@ -7,38 +7,40 @@ # command after changing this file, to see if there are # any tracked files which get ignored after the change. # -# Normal rules +# Normal rules (sorted alphabetically) # .* +*.a +*.bin +*.bz2 +*.c.[012]*.* +*.dtb +*.dtb.S +*.dwo +*.elf +*.gcno +*.gz +*.i +*.ko +*.ll +*.lst +*.lz4 +*.lzma +*.lzo +*.mod.c *.o *.o.* -*.a +*.order +*.patch *.s -*.ko *.so *.so.dbg -*.mod.c -*.i -*.lst +*.su *.symtypes -*.order -*.elf -*.bin *.tar -*.gz -*.bz2 -*.lzma *.xz -*.lz4 -*.lzo -*.patch -*.gcno -*.ll -modules.builtin Module.symvers -*.dwo -*.su -*.c.[012]*.* +modules.builtin # # Top-level generic files @@ -53,6 +55,11 @@ Module.symvers /System.map /Module.markers +# +# RPM spec file (make rpm-pkg) +# +/*.spec + # # Debian directory (make deb-pkg) # @@ -115,3 +122,6 @@ all.config # Kdevelop4 *.kdev4 + +# fetched Android config fragments +kernel/configs/android-*.cfg diff --git a/Documentation/ABI/testing/configfs-usb-gadget-dvctrace b/Documentation/ABI/testing/configfs-usb-gadget-dvctrace new file mode 100644 index 000000000000..6391096ac151 --- /dev/null +++ b/Documentation/ABI/testing/configfs-usb-gadget-dvctrace @@ -0,0 +1,9 @@ +What: /config/usb-gadget//functions/dvctrace./source_dev +Date: Mar 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) The name of the source device paired with this function + instance. If, upon creation of the instance, a source device + named exists and is free, that source device will be + associated with the current instance; otherwise the first free + source device will be used. diff --git a/Documentation/ABI/testing/sysfs-bus-dvctrace b/Documentation/ABI/testing/sysfs-bus-dvctrace new file mode 100644 index 000000000000..bf2b5bb2144a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-dvctrace @@ -0,0 +1,68 @@ +What: /sys/bus/dvctrace +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: Groups the devices and drivers registered to + the dvc-trace bus. + +What: /sys/bus/dvctrace/devices//status +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) The status of a dvc-trace source device with + respect to a USB function driver. + Free - The device is free + Reserved - The device is reserved by a USB + function but not in use. + In use - The device is used by a USB function. + +What: /sys/bus/dvctrace/devices//protocol +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) The protocol id of a dvc-trace source device; + this will be used in function driver interface + descriptors (u8). According to the USB debug class + specification the protocol id is vendor specific. + +What: /sys/bus/dvctrace/devices//descriptors +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Hex-dump of the descriptors provided by the + source device. + e.g. A debug class output connection descriptor + 09 24 02 04 03 00 00 00 00 + ll tt ss xx xx xx xx xx ii + | | | +- iConnection string id. + | | +- Descriptor sub-type DC_OUTPUT_CONNECTION + | +- Descriptor type (USB_DT_CS_INTERFACE) + +- Descriptor length + Writing: + - is not allowed while the device is Reserved or In Use. + - will replace all the descriptors currently present. + - will remove any strings previously provided. + - should use the same format. + - accepts multiple descriptors separated by space or '\n'.
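For illustration, a single output connection descriptor such as the one above could be written back from a shell roughly as follows (the source device name dvc0 is a hypothetical placeholder; use the actual device listed under /sys/bus/dvctrace/devices/):

		$ echo "09 24 02 04 03 00 00 00 00" > /sys/bus/dvctrace/devices/dvc0/descriptors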
+ +What: /sys/bus/dvctrace/devices//strings +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Currently set USB descriptor strings in + .: string format. + . identifies the location where + the string id is needed. + e.g. Having the same debug class output connection descriptor + as the first descriptor. + 09 24 02 04 03 00 00 00 00 + ll tt ss xx xx xx xx xx ii + +- iConnection string id. + 0.8: My output connection - will identify the string associated + with this descriptor. + Writing: + - is not allowed while the device is Reserved or In Use. + - will replace all the strings currently present. + - should use the same format. + - accepts multiple strings separated by ";" or '\n'. + e.g. "0.4: first string; 1.4: second string" diff --git a/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith b/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith new file mode 100644 index 000000000000..0dda3aefb89c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith @@ -0,0 +1,68 @@ +What: /sys/bus/dvctrace/devices/dvcith-/msc +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) Symbolic link to the Intel Trace Hub MSC + (Memory Storage Controller) sub-device used to get tracing data. + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_min_transfer +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Window transfer watermark; the driver will queue a + new transfer only if at least bytes + of trace data are available. Since on every switch ~48 bytes + of trace data are generated, this should not be set below this + threshold. + Default: 2048 + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_retry_timeout +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Read retry interval. If, by the time the last USB transfer + is complete, there is no new data to be sent, the driver will + sleep ms before checking again. + Default: 2 ms + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_max_retry +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) The maximum number of retries to be done before triggering a switch + and sending the currently available data regardless of the + available size. + Default: 150 + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_proc_type +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Data process type. During DvC tracing the MSC is set up in + Multi Window mode (check the Intel Trace Hub Developer's Manual for + details). This attribute specifies what the dvc-trace data stream + should contain. + Available values are: + - 1 - Full blocks, + - 2 - Trimmed blocks (Block header + STP data) + - 3 - STP data only. + Default: 3. + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_transfer_type +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Data transfer type. This attribute specifies how the trace data + is queued in the USB requests. + Available values are: + - 1 - Auto, + - 2 - SG-List, + - 3 - Linear buffer. + Default: 1. + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_stats +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) Provides statistical information regarding the latest + trace session. Available if CONFIG_INTEL_TH_MSU_DVC_DEBUG is enabled.
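As a rough usage sketch (the instance name dvcith-0 and the values below are hypothetical), the transfer parameters above could be tuned from a shell before a trace session is started:

		$ echo 4096 > /sys/bus/dvctrace/devices/dvcith-0/mdd_min_transfer
		$ echo 3 > /sys/bus/dvctrace/devices/dvcith-0/mdd_proc_type
		$ cat /sys/bus/dvctrace/devices/dvcith-0/mdd_stats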
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 7eead5f97e02..64e65450f483 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -32,7 +32,7 @@ Description: Description of the physical chip / device for device X. Typically a part number. -What: /sys/bus/iio/devices/iio:deviceX/timestamp_clock +What: /sys/bus/iio/devices/iio:deviceX/current_timestamp_clock KernelVersion: 4.5 Contact: linux-iio@vger.kernel.org Description: diff --git a/Documentation/ABI/testing/sysfs-class-dual-role-usb b/Documentation/ABI/testing/sysfs-class-dual-role-usb new file mode 100644 index 000000000000..a900fd75430c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-dual-role-usb @@ -0,0 +1,71 @@ +What: /sys/class/dual_role_usb/.../ +Date: June 2015 +Contact: Badhri Jagan Sridharan +Description: + Provides a generic interface to monitor and change + the state of dual role USB ports. The name here + refers to the name mentioned in the + dual_role_phy_desc that is passed while registering + the dual_role_phy_instance through + devm_dual_role_instance_register. + +What: /sys/class/dual_role_usb/.../supported_modes +Date: June 2015 +Contact: Badhri Jagan Sridharan +Description: + This is a static node; once initialized it + is not expected to change during runtime. "dfp" + refers to "downstream facing port" i.e. the port can + only act as host. "ufp" refers to "upstream + facing port" i.e. the port can only act as device. + "dfp ufp" refers to "dual role port" i.e. the port + can either be a host port or a device port. + +What: /sys/class/dual_role_usb/.../mode +Date: June 2015 +Contact: Badhri Jagan Sridharan +Description: + The mode node refers to the current mode in which the + port is operating: "dfp" for host ports, "ufp" for device + ports, and "none" when no cable is connected. + + On devices where the USB mode is software-controllable, + userspace can change the mode by writing "dfp" or "ufp". + On devices where the USB mode is fixed in hardware, + this attribute is read-only. + +What: /sys/class/dual_role_usb/.../power_role +Date: June 2015 +Contact: Badhri Jagan Sridharan +Description: + The power_role node mentions whether the port + is "sink"ing or "source"ing power. "none" if + the port is not connected. + + On devices implementing USB Power Delivery, + userspace can control the power role by writing "sink" or + "source". On devices without USB-PD, this attribute is + read-only. + +What: /sys/class/dual_role_usb/.../data_role +Date: June 2015 +Contact: Badhri Jagan Sridharan +Description: + The data_role node mentions whether the port + is acting as "host" or "device" for the USB data connection. + "none" if there is no active data link. + + On devices implementing USB Power Delivery, userspace + can control the data role by writing "host" or "device". + On devices without USB-PD, this attribute is read-only. + +What: /sys/class/dual_role_usb/.../powers_vconn +Date: June 2015 +Contact: Badhri Jagan Sridharan +Description: + The powers_vconn node mentions whether the port + is supplying power for the VCONN pin. + + On devices with software control of VCONN, + userspace can disable the power supply to VCONN by writing "n", + or enable the power supply by writing "y".
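As an illustrative sketch of the ABI above (the port name otg_default is hypothetical; use whatever name the platform registered through devm_dual_role_instance_register), userspace could inspect a port and, where software control is supported, switch it to host mode:

		$ cat /sys/class/dual_role_usb/otg_default/supported_modes
		dfp ufp
		$ cat /sys/class/dual_role_usb/otg_default/mode
		ufp
		$ echo dfp > /sys/class/dual_role_usb/otg_default/mode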
diff --git a/Documentation/ABI/testing/sysfs-class-rpmb b/Documentation/ABI/testing/sysfs-class-rpmb new file mode 100644 index 000000000000..75d70654df83 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-rpmb @@ -0,0 +1,47 @@ +What: /sys/class/rpmb/ +Date: Jun 2017 +KernelVersion: 4.12 +Contact: Tomas Winkler +Description: + The rpmb/ class sub-directory belongs to the RPMB device class. + + A few storage technologies such as EMMC, UFS, and NVMe support + a Replay Protected Memory Block (RPMB) hardware partition with + a common protocol and frame layout. + Such a partition provides authenticated and replay protected access, + and hence is suitable as secure storage. + +What: /sys/class/rpmb/rpmbN/ +Date: Jun 2017 +KernelVersion: 4.12 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN directory is created for + each registered RPMB device. + +What: /sys/class/rpmb/rpmbN/type +Date: Jun 2017 +KernelVersion: 4.12 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/type file contains the device's + underlying storage type technology: EMMC, UFS, SIM. + +What: /sys/class/rpmb/rpmbN/id +Date: Jun 2017 +KernelVersion: 4.12 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/id file contains the unique device id + in binary form, as defined by the underlying storage device. + In case of multiple RPMB devices a user can use it to determine + the correct device. + The content can be parsed according to the storage device type. + +What: /sys/class/rpmb/rpmbN/reliable_wr_cnt +Date: Jun 2017 +KernelVersion: 4.12 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/reliable_wr_cnt file contains the + number of blocks that can be reliably written in a single request. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index f3d5817c4ef0..258902db14bf 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -373,3 +373,19 @@ Contact: Linux kernel mailing list Description: information about CPUs heterogeneity. cpu_capacity: capacity of cpu#. + +What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + /sys/devices/system/cpu/vulnerabilities/spectre_v2 +Date: January 2018 +Contact: Linux kernel mailing list +Description: Information about CPU vulnerabilities + + The files are named after the code names of CPU + vulnerabilities. The output of those files reflects the + state of the CPUs in the system. Possible output values: + + "Not affected" CPU is not affected by the vulnerability + "Vulnerable" CPU is affected and no mitigation in effect + "Mitigation: $M" CPU is affected and mitigation $M is in effect diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 11b7f4ebea7c..d870b5514d15 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -51,6 +51,18 @@ Description: Controls the dirty page count condition for the in-place-update policies. +What: /sys/fs/f2fs//min_hot_blocks +Date: March 2017 +Contact: "Jaegeuk Kim" +Description: + Controls the dirty page count condition for redefining hot data. + +What: /sys/fs/f2fs//min_ssr_sections +Date: October 2017 +Contact: "Chao Yu" +Description: + Controls the free section threshold to trigger SSR allocation.
+ What: /sys/fs/f2fs//max_small_discards Date: November 2013 Contact: "Jaegeuk Kim" @@ -102,6 +114,12 @@ Contact: "Jaegeuk Kim" Description: Controls the idle timing. +What: /sys/fs/f2fs//iostat_enable +Date: August 2017 +Contact: "Chao Yu" +Description: + Controls to enable/disable IO stat. + What: /sys/fs/f2fs//ra_nid_pages Date: October 2015 Contact: "Chao Yu" @@ -122,6 +140,12 @@ Contact: "Shuoran Liu" Description: Shows total written kbytes issued to disk. +What: /sys/fs/f2fs//feature +Date: July 2017 +Contact: "Jaegeuk Kim" +Description: + Shows all enabled features in current device. + What: /sys/fs/f2fs//inject_rate Date: May 2016 Contact: "Sheng Yong" @@ -138,7 +162,18 @@ What: /sys/fs/f2fs//reserved_blocks Date: June 2017 Contact: "Chao Yu" Description: - Controls current reserved blocks in system. + Controls target reserved blocks in system, the threshold + is soft, it could exceed current available user space. + +What: /sys/fs/f2fs//current_reserved_blocks +Date: October 2017 +Contact: "Yunlong Song" +Contact: "Chao Yu" +Description: + Shows current reserved blocks in system, it may be temporarily + smaller than target_reserved_blocks, but will gradually + increase to target_reserved_blocks when more free blocks are + freed by user later. What: /sys/fs/f2fs//gc_urgent Date: August 2017 @@ -151,3 +186,9 @@ Date: August 2017 Contact: "Jaegeuk Kim" Description: Controls sleep time of GC urgent mode + +What: /sys/fs/f2fs//readdir_ra +Date: November 2017 +Contact: "Sheng Yong" +Description: + Controls readahead inode block in readdir. diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons new file mode 100644 index 000000000000..acb19b91c192 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons @@ -0,0 +1,16 @@ +What: /sys/kernel/wakeup_reasons/last_resume_reason +Date: February 2014 +Contact: Ruchi Kandoi +Description: + The /sys/kernel/wakeup_reasons/last_resume_reason is + used to report wakeup reasons after system exited suspend. + +What: /sys/kernel/wakeup_reasons/last_suspend_time +Date: March 2015 +Contact: jinqian +Description: + The /sys/kernel/wakeup_reasons/last_suspend_time is + used to report time spent in last suspend cycle. It contains + two numbers (in seconds) separated by space. First number is + the time spent in suspend and resume processes. Second number + is the time spent in sleep state. \ No newline at end of file diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 05496622b4ef..ba54696ed976 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -837,6 +837,9 @@ dis_ucode_ldr [X86] Disable the microcode loader. + dm= [DM] Allows early creation of a device-mapper device. + See Documentation/device-mapper/boot.txt. + dma_debug=off If the kernel is compiled with DMA_API_DEBUG support, this option disables the debugging code at boot. @@ -1841,13 +1844,6 @@ Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, the default is off. - kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode - Valid arguments: 0, 1, 2 - kmemcheck=0 (disabled) - kmemcheck=1 (enabled) - kmemcheck=2 (one-shot mode) - Default: 2 (one-shot mode) - kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs. Default is 0 (don't ignore, but inject #GP) @@ -2599,6 +2595,11 @@ nosmt [KNL,S390] Disable symmetric multithreading (SMT). Equivalent to smt=1. 
+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 + (indirect branch prediction) vulnerability. System may + allow data leaks with this option, which is equivalent + to spectre_v2=off. + noxsave [BUGS=X86] Disables x86 extended register state save and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. @@ -2713,8 +2714,6 @@ norandmaps Don't use address space randomization. Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space - noreplace-paravirt [X86,IA-64,PV_OPS] Don't patch paravirt_ops - noreplace-smp [X86-32,SMP] Don't replace SMP instructions with UP alternatives @@ -3253,6 +3252,21 @@ pt. [PARIDE] See Documentation/blockdev/paride.txt. + pti= [X86_64] Control Page Table Isolation of user and + kernel address spaces. Disabling this feature + removes hardening, but improves performance of + system calls and interrupts. + + on - unconditionally enable + off - unconditionally disable + auto - kernel detects whether your CPU model is + vulnerable to issues that PTI mitigates + + Not specifying this option is equivalent to pti=auto. + + nopti [X86_64] + Equivalent to pti=off + pty.legacy_count= [KNL] Number of legacy pty's. Overwrites compiled-in default number. @@ -3639,6 +3653,9 @@ reboot_cpu is s[mp]#### with #### being the processor to be used for rebooting. + reboot_panic= [KNL] + Same as reboot parameter above but only in case of panic. + relax_domain_level= [KNL, SMP] Set scheduler's default relax_domain_level. See Documentation/cgroup-v1/cpusets.txt. @@ -3893,6 +3910,29 @@ sonypi.*= [HW] Sony Programmable I/O Control Device driver See Documentation/laptops/sonypi.txt + spectre_v2= [X86] Control mitigation of Spectre variant 2 + (indirect branch speculation) vulnerability. + + on - unconditionally enable + off - unconditionally disable + auto - kernel detects whether your CPU model is + vulnerable + + Selecting 'on' will, and 'auto' may, choose a + mitigation method at run time according to the + CPU, the available microcode, the setting of the + CONFIG_RETPOLINE configuration option, and the + compiler with which the kernel was built. + + Specific mitigations can also be selected manually: + + retpoline - replace indirect branches + retpoline,generic - google's original retpoline + retpoline,amd - AMD-specific minimal thunk + + Not specifying this option is equivalent to + spectre_v2=auto. + spia_io_base= [HW,MTD] spia_fio_base= spia_pedr= diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 66e8ce14d23d..f3d0d316d5f1 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -71,6 +71,7 @@ stable kernels. | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | | | | | | -| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | +| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | +| Qualcomm Tech. 
| Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | diff --git a/Documentation/conf.py b/Documentation/conf.py index 63857d33778c..b9cadc9817f7 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -404,6 +404,8 @@ 'The kernel development community', 'manual'), ('userspace-api/index', 'userspace-api.tex', 'The Linux kernel user-space API guide', 'The kernel development community', 'manual'), + ('rpmb/index', 'rpmb.tex', 'Linux RPMB Subsystem Documentation', + 'The kernel development community', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst index a81787cd47d7..e313925fb0fa 100644 --- a/Documentation/dev-tools/index.rst +++ b/Documentation/dev-tools/index.rst @@ -21,7 +21,6 @@ whole; patches welcome! kasan ubsan kmemleak - kmemcheck gdb-kernel-debugging kgdb kselftest diff --git a/Documentation/dev-tools/kmemcheck.rst b/Documentation/dev-tools/kmemcheck.rst deleted file mode 100644 index 7f3d1985de74..000000000000 --- a/Documentation/dev-tools/kmemcheck.rst +++ /dev/null @@ -1,733 +0,0 @@ -Getting started with kmemcheck -============================== - -Vegard Nossum - - -Introduction ------------- - -kmemcheck is a debugging feature for the Linux Kernel. More specifically, it -is a dynamic checker that detects and warns about some uses of uninitialized -memory. - -Userspace programmers might be familiar with Valgrind's memcheck. The main -difference between memcheck and kmemcheck is that memcheck works for userspace -programs only, and kmemcheck works for the kernel only. The implementations -are of course vastly different. Because of this, kmemcheck is not as accurate -as memcheck, but it turns out to be good enough in practice to discover real -programmer errors that the compiler is not able to find through static -analysis. - -Enabling kmemcheck on a kernel will probably slow it down to the extent that -the machine will not be usable for normal workloads such as e.g. an -interactive desktop. kmemcheck will also cause the kernel to use about twice -as much memory as normal. For this reason, kmemcheck is strictly a debugging -feature. - - -Downloading ------------ - -As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel. - - -Configuring and compiling -------------------------- - -kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of -configuration variables must have specific settings in order for the kmemcheck -menu to even appear in "menuconfig". These are: - -- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n`` - This option is located under "General setup" / "Optimize for size". - - Without this, gcc will use certain optimizations that usually lead to - false positive warnings from kmemcheck. An example of this is a 16-bit - field in a struct, where gcc may load 32 bits, then discard the upper - 16 bits. kmemcheck sees only the 32-bit load, and may trigger a - warning for the upper 16 bits (if they're uninitialized). - -- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y`` - This option is located under "General setup" / "Choose SLAB - allocator". - -- ``CONFIG_FUNCTION_TRACER=n`` - This option is located under "Kernel hacking" / "Tracers" / "Kernel - Function Tracer" - - When function tracing is compiled in, gcc emits a call to another - function at the beginning of every function. This means that when the - page fault handler is called, the ftrace framework will be called - before kmemcheck has had a chance to handle the fault. 
If ftrace then - modifies memory that was tracked by kmemcheck, the result is an - endless recursive page fault. - -- ``CONFIG_DEBUG_PAGEALLOC=n`` - This option is located under "Kernel hacking" / "Memory Debugging" - / "Debug page memory allocations". - -In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is also -located under "Kernel hacking". With this, you will be able to get line number -information from the kmemcheck warnings, which is extremely valuable in -debugging a problem. This option is not mandatory, however, because it slows -down the compilation process and produces a much bigger kernel image. - -Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory -Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows -a description of the kmemcheck configuration variables: - -- ``CONFIG_KMEMCHECK`` - This must be enabled in order to use kmemcheck at all... - -- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT`` - This option controls the status of kmemcheck at boot-time. "Enabled" - will enable kmemcheck right from the start, "disabled" will boot the - kernel as normal (but with the kmemcheck code compiled in, so it can - be enabled at run-time after the kernel has booted), and "one-shot" is - a special mode which will turn kmemcheck off automatically after - detecting the first use of uninitialized memory. - - If you are using kmemcheck to actively debug a problem, then you - probably want to choose "enabled" here. - - The one-shot mode is mostly useful in automated test setups because it - can prevent floods of warnings and increase the chances of the machine - surviving in case something is really wrong. In other cases, the one- - shot mode could actually be counter-productive because it would turn - itself off at the very first error -- in the case of a false positive - too -- and this would come in the way of debugging the specific - problem you were interested in. - - If you would like to use your kernel as normal, but with a chance to - enable kmemcheck in case of some problem, it might be a good idea to - choose "disabled" here. When kmemcheck is disabled, most of the run- - time overhead is not incurred, and the kernel will be almost as fast - as normal. - -- ``CONFIG_KMEMCHECK_QUEUE_SIZE`` - Select the maximum number of error reports to store in an internal - (fixed-size) buffer. Since errors can occur virtually anywhere and in - any context, we need a temporary storage area which is guaranteed not - to generate any other page faults when accessed. The queue will be - emptied as soon as a tasklet may be scheduled. If the queue is full, - new error reports will be lost. - - The default value of 64 is probably fine. If some code produces more - than 64 errors within an irqs-off section, then the code is likely to - produce many, many more, too, and these additional reports seldom give - any more information (the first report is usually the most valuable - anyway). - - This number might have to be adjusted if you are not using serial - console or similar to capture the kernel log. If you are using the - "dmesg" command to save the log, then getting a lot of kmemcheck - warnings might overflow the kernel log itself, and the earlier reports - will get lost in that way instead. Try setting this to 10 or so on - such a setup. - -- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT`` - Select the number of shadow bytes to save along with each entry of the - error-report queue. 
These bytes indicate what parts of an allocation - are initialized, uninitialized, etc. and will be displayed when an - error is detected to help the debugging of a particular problem. - - The number entered here is actually the logarithm of the number of - bytes that will be saved. So if you pick for example 5 here, kmemcheck - will save 2^5 = 32 bytes. - - The default value should be fine for debugging most problems. It also - fits nicely within 80 columns. - -- ``CONFIG_KMEMCHECK_PARTIAL_OK`` - This option (when enabled) works around certain GCC optimizations that - produce 32-bit reads from 16-bit variables where the upper 16 bits are - thrown away afterwards. - - The default value (enabled) is recommended. This may of course hide - some real errors, but disabling it would probably produce a lot of - false positives. - -- ``CONFIG_KMEMCHECK_BITOPS_OK`` - This option silences warnings that would be generated for bit-field - accesses where not all the bits are initialized at the same time. This - may also hide some real bugs. - - This option is probably obsolete, or it should be replaced with - the kmemcheck-/bitfield-annotations for the code in question. The - default value is therefore fine. - -Now compile the kernel as usual. - - -How to use ----------- - -Booting -~~~~~~~ - -First some information about the command-line options. There is only one -option specific to kmemcheck, and this is called "kmemcheck". It can be used -to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT`` -option. Its possible settings are: - -- ``kmemcheck=0`` (disabled) -- ``kmemcheck=1`` (enabled) -- ``kmemcheck=2`` (one-shot mode) - -If SLUB debugging has been enabled in the kernel, it may take precedence over -kmemcheck in such a way that the slab caches which are under SLUB debugging -will not be tracked by kmemcheck. In order to ensure that this doesn't happen -(even though it shouldn't by default), use SLUB's boot option ``slub_debug``, -like this: ``slub_debug=-`` - -In fact, this option may also be used for fine-grained control over SLUB vs. -kmemcheck. For example, if the command line includes -``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only -for the "dentry" slab cache, and with kmemcheck tracking all the other -caches. This is advanced usage, however, and is not generally recommended. - - -Run-time enable/disable -~~~~~~~~~~~~~~~~~~~~~~~ - -When the kernel has booted, it is possible to enable or disable kmemcheck at -run-time. WARNING: This feature is still experimental and may cause false -positive warnings to appear. Therefore, try not to use this. If you find that -it doesn't work properly (e.g. you see an unreasonable amount of warnings), I -will be happy to take bug reports. - -Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.:: - - $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck - -The numbers are the same as for the ``kmemcheck=`` command-line option. 
- - -Debugging -~~~~~~~~~ - -A typical report will look something like this:: - - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024) - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - ^ - - Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A - RIP: 0010:[] [] __dequeue_signal+0xc8/0x190 - RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002 - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009 - RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84 - RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000 - R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e - R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8 - FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000 - CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 - CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0 - DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 - DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400 - [] dequeue_signal+0x8e/0x170 - [] get_signal_to_deliver+0x98/0x390 - [] do_notify_resume+0xad/0x7d0 - [] int_signal+0x12/0x17 - [] 0xffffffffffffffff - -The single most valuable information in this report is the RIP (or EIP on 32- -bit) value. This will help us pinpoint exactly which instruction that caused -the warning. - -If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to do -is give this address to the addr2line program, like this:: - - $ addr2line -e vmlinux -i ffffffff8104ede8 - arch/x86/include/asm/string_64.h:12 - include/asm-generic/siginfo.h:287 - kernel/signal.c:380 - kernel/signal.c:410 - -The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:** -This must be the vmlinux of the kernel that produced the warning in the -first place! If not, the line number information will almost certainly be -wrong. - -The "``-i``" tells addr2line to also print the line numbers of inlined -functions. In this case, the flag was very important, because otherwise, -it would only have printed the first line, which is just a call to -``memcpy()``, which could be called from a thousand places in the kernel, and -is therefore not very useful. These inlined functions would not show up in -the stack trace above, simply because the kernel doesn't load the extra -debugging information. This technique can of course be used with ordinary -kernel oopses as well. - -In this case, it's the caller of ``memcpy()`` that is interesting, and it can be -found in ``include/asm-generic/siginfo.h``, line 287:: - - 281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) - 282 { - 283 if (from->si_code < 0) - 284 memcpy(to, from, sizeof(*to)); - 285 else - 286 /* _sigchld is currently the largest know union member */ - 287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); - 288 } - -Since this was a read (kmemcheck usually warns about reads only, though it can -warn about writes to unallocated or freed memory as well), it was probably the -"from" argument which contained some uninitialized bytes. Following the chain -of calls, we move upwards to see where "from" was allocated or initialized, -``kernel/signal.c``, line 380:: - - 359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) - 360 { - ... 
- 367 list_for_each_entry(q, &list->list, list) { - 368 if (q->info.si_signo == sig) { - 369 if (first) - 370 goto still_pending; - 371 first = q; - ... - 377 if (first) { - 378 still_pending: - 379 list_del_init(&first->list); - 380 copy_siginfo(info, &first->info); - 381 __sigqueue_free(first); - ... - 392 } - 393 } - -Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The -variable ``first`` was found on a list -- passed in as the second argument to -``collect_signal()``. We continue our journey through the stack, to figure out -where the item on "list" was allocated or initialized. We move to line 410:: - - 395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - 396 siginfo_t *info) - 397 { - ... - 410 collect_signal(sig, pending, info); - ... - 414 } - -Now we need to follow the ``pending`` pointer, since that is being passed on to -``collect_signal()`` as ``list``. At this point, we've run out of lines from the -"addr2line" output. Not to worry, we just paste the next addresses from the -kmemcheck stack dump, i.e.:: - - [] dequeue_signal+0x8e/0x170 - [] get_signal_to_deliver+0x98/0x390 - [] do_notify_resume+0xad/0x7d0 - [] int_signal+0x12/0x17 - - $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \ - ffffffff8100b87d ffffffff8100c7b5 - kernel/signal.c:446 - kernel/signal.c:1806 - arch/x86/kernel/signal.c:805 - arch/x86/kernel/signal.c:871 - arch/x86/kernel/entry_64.S:694 - -Remember that since these addresses were found on the stack and not as the -RIP value, they actually point to the _next_ instruction (they are return -addresses). This becomes obvious when we look at the code for line 446:: - - 422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) - 423 { - ... - 431 signr = __dequeue_signal(&tsk->signal->shared_pending, - 432 mask, info); - 433 /* - 434 * itimer signal ? - 435 * - 436 * itimers are process shared and we restart periodic - 437 * itimers in the signal delivery path to prevent DoS - 438 * attacks in the high resolution timer case. This is - 439 * compliant with the old way of self restarting - 440 * itimers, as the SIGALRM is a legacy signal and only - 441 * queued once. Changing the restart behaviour to - 442 * restart the timer in the signal dequeue path is - 443 * reducing the timer noise on heavy loaded !highres - 444 * systems too. - 445 */ - 446 if (unlikely(signr == SIGALRM)) { - ... - 489 } - -So instead of looking at 446, we should be looking at 431, which is the line -that executes just before 446. Here we see that what we are looking for is -``&tsk->signal->shared_pending``. - -Our next task is now to figure out which function that puts items on this -``shared_pending`` list. A crude, but efficient tool, is ``git grep``:: - - $ git grep -n 'shared_pending' kernel/ - ... - kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending; - kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending; - ... - -There were more results, but none of them were related to list operations, -and these were the only assignments. We inspect the line numbers more closely -and find that this is indeed where items are being added to the list:: - - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - 817 int group) - 818 { - ... - 828 pending = group ? &t->signal->shared_pending : &t->pending; - ... 
- 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - 852 (is_si_special(info) || - 853 info->si_code >= 0))); - 854 if (q) { - 855 list_add_tail(&q->list, &pending->list); - ... - 890 } - -and:: - - 1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) - 1310 { - .... - 1339 pending = group ? &t->signal->shared_pending : &t->pending; - 1340 list_add_tail(&q->list, &pending->list); - .... - 1347 } - -In the first case, the list element we are looking for, ``q``, is being -returned from the function ``__sigqueue_alloc()``, which looks like an -allocation function. Let's take a look at it:: - - 187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, - 188 int override_rlimit) - 189 { - 190 struct sigqueue *q = NULL; - 191 struct user_struct *user; - 192 - 193 /* - 194 * We won't get problems with the target's UID changing under us - 195 * because changing it requires RCU be used, and if t != current, the - 196 * caller must be holding the RCU readlock (by way of a spinlock) and - 197 * we use RCU protection here - 198 */ - 199 user = get_uid(__task_cred(t)->user); - 200 atomic_inc(&user->sigpending); - 201 if (override_rlimit || - 202 atomic_read(&user->sigpending) <= - 203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) - 204 q = kmem_cache_alloc(sigqueue_cachep, flags); - 205 if (unlikely(q == NULL)) { - 206 atomic_dec(&user->sigpending); - 207 free_uid(user); - 208 } else { - 209 INIT_LIST_HEAD(&q->list); - 210 q->flags = 0; - 211 q->user = user; - 212 } - 213 - 214 return q; - 215 } - -We see that this function initializes ``q->list``, ``q->flags``, and -``q->user``. It seems that now is the time to look at the definition of -``struct sigqueue``, e.g.:: - - 14 struct sigqueue { - 15 struct list_head list; - 16 int flags; - 17 siginfo_t info; - 18 struct user_struct *user; - 19 }; - -And, you might remember, it was a ``memcpy()`` on ``&first->info`` that -caused the warning, so this makes perfect sense. It also seems reasonable -to assume that it is the caller of ``__sigqueue_alloc()`` that has the -responsibility of filling out (initializing) this member. - -But just which fields of the struct were uninitialized? Let's look at -kmemcheck's report again:: - - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024) - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - ^ - -These first two lines are the memory dump of the memory object itself, and -the shadow bytemap, respectively. The memory object itself is in this case -``&first->info``. Just beware that the start of this dump is NOT the start -of the object itself! The position of the caret (^) corresponds with the -address of the read (ffff88003e4a2024). - -The shadow bytemap dump legend is as follows: - -- i: initialized -- u: uninitialized -- a: unallocated (memory has been allocated by the slab layer, but has not - yet been handed off to anybody) -- f: freed (memory has been allocated by the slab layer, but has been freed - by the previous owner) - -In order to figure out where (relative to the start of the object) the -uninitialized memory was located, we have to look at the disassembly. 
For -that, we'll need the RIP address again:: - - RIP: 0010:[] [] __dequeue_signal+0xc8/0x190 - - $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8: - ffffffff8104edc8: mov %r8,0x8(%r8) - ffffffff8104edcc: test %r10d,%r10d - ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168> - ffffffff8104edd5: mov %rax,%rdx - ffffffff8104edd8: mov $0xc,%ecx - ffffffff8104eddd: mov %r13,%rdi - ffffffff8104ede0: mov $0x30,%eax - ffffffff8104ede5: mov %rdx,%rsi - ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi) - ffffffff8104edea: test $0x2,%al - ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0> - ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi) - ffffffff8104edf0: test $0x1,%al - ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5> - ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi) - ffffffff8104edf5: mov %r8,%rdi - ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free> - -As expected, it's the "``rep movsl``" instruction from the ``memcpy()`` -that causes the warning. We know about ``REP MOVSL`` that it uses the register -``RCX`` to count the number of remaining iterations. By taking a look at the -register dump again (from the kmemcheck report), we can figure out how many -bytes were left to copy:: - - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009 - -By looking at the disassembly, we also see that ``%ecx`` is being loaded -with the value ``$0xc`` just before (ffffffff8104edd8), so we are very -lucky. Keep in mind that this is the number of iterations, not bytes. And -since this is a "long" operation, we need to multiply by 4 to get the -number of bytes. So this means that the uninitialized value was encountered -at 4 * (0xc - 0x9) = 12 bytes from the start of the object. - -We can now try to figure out which field of the "``struct siginfo``" that -was not initialized. This is the beginning of the struct:: - - 40 typedef struct siginfo { - 41 int si_signo; - 42 int si_errno; - 43 int si_code; - 44 - 45 union { - .. - 92 } _sifields; - 93 } siginfo_t; - -On 64-bit, the int is 4 bytes long, so it must the union member that has -not been initialized. We can verify this using gdb:: - - $ gdb vmlinux - ... - (gdb) p &((struct siginfo *) 0)->_sifields - $1 = (union {...} *) 0x10 - -Actually, it seems that the union member is located at offset 0x10 -- which -means that gcc has inserted 4 bytes of padding between the members ``si_code`` -and ``_sifields``. We can now get a fuller picture of the memory dump:: - - _----------------------------=> si_code - / _--------------------=> (padding) - | / _------------=> _sifields(._kill._pid) - | | / _----=> _sifields(._kill._uid) - | | | / - -------|-------|-------|-------| - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - -This allows us to realize another important fact: ``si_code`` contains the -value 0x80. Remember that x86 is little endian, so the first 4 bytes -"80000000" are really the number 0x00000080. With a bit of research, we -find that this is actually the constant ``SI_KERNEL`` defined in -``include/asm-generic/siginfo.h``:: - - 144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */ - -This macro is used in exactly one place in the x86 kernel: In ``send_signal()`` -in ``kernel/signal.c``:: - - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - 817 int group) - 818 { - ... - 828 pending = group ? &t->signal->shared_pending : &t->pending; - ... 
- 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - 852 (is_si_special(info) || - 853 info->si_code >= 0))); - 854 if (q) { - 855 list_add_tail(&q->list, &pending->list); - 856 switch ((unsigned long) info) { - ... - 865 case (unsigned long) SEND_SIG_PRIV: - 866 q->info.si_signo = sig; - 867 q->info.si_errno = 0; - 868 q->info.si_code = SI_KERNEL; - 869 q->info.si_pid = 0; - 870 q->info.si_uid = 0; - 871 break; - ... - 890 } - -Not only does this match with the ``.si_code`` member, it also matches the place -we found earlier when looking for where siginfo_t objects are enqueued on the -``shared_pending`` list. - -So to sum up: It seems that it is the padding introduced by the compiler -between two struct fields that is uninitialized, and this gets reported when -we do a ``memcpy()`` on the struct. This means that we have identified a false -positive warning. - -Normally, kmemcheck will not report uninitialized accesses in ``memcpy()`` calls -when both the source and destination addresses are tracked. (Instead, we copy -the shadow bytemap as well). In this case, the destination address clearly -was not tracked. We can dig a little deeper into the stack trace from above:: - - arch/x86/kernel/signal.c:805 - arch/x86/kernel/signal.c:871 - arch/x86/kernel/entry_64.S:694 - -And we clearly see that the destination siginfo object is located on the -stack:: - - 782 static void do_signal(struct pt_regs *regs) - 783 { - 784 struct k_sigaction ka; - 785 siginfo_t info; - ... - 804 signr = get_signal_to_deliver(&info, &ka, regs, NULL); - ... - 854 } - -And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as the -destination argument. - -Now, even though we didn't find an actual error here, the example is still a -good one, because it shows how one would go about to find out what the report -was all about. - - -Annotating false positives -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are a few different ways to make annotations in the source code that -will keep kmemcheck from checking and reporting certain allocations. Here -they are: - -- ``__GFP_NOTRACK_FALSE_POSITIVE`` - This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()`` - (therefore also to other functions that end up calling one of - these) to indicate that the allocation should not be tracked - because it would lead to a false positive report. This is a "big - hammer" way of silencing kmemcheck; after all, even if the false - positive pertains to particular field in a struct, for example, we - will now lose the ability to find (real) errors in other parts of - the same struct. - - Example:: - - /* No warnings will ever trigger on accessing any part of x */ - x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE); - -- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and - ``kmemcheck_annotate_bitfield(ptr, name)`` - The first two of these three macros can be used inside struct - definitions to signal, respectively, the beginning and end of a - bitfield. Additionally, this will assign the bitfield a name, which - is given as an argument to the macros. - - Having used these markers, one can later use - kmemcheck_annotate_bitfield() at the point of allocation, to indicate - which parts of the allocation is part of a bitfield. 
- - Example:: - - struct foo { - int x; - - kmemcheck_bitfield_begin(flags); - int flag_a:1; - int flag_b:1; - kmemcheck_bitfield_end(flags); - - int y; - }; - - struct foo *x = kmalloc(sizeof *x); - - /* No warnings will trigger on accessing the bitfield of x */ - kmemcheck_annotate_bitfield(x, flags); - - Note that ``kmemcheck_annotate_bitfield()`` can be used even before the - return value of ``kmalloc()`` is checked -- in other words, passing NULL - as the first argument is legal (and will do nothing). - - -Reporting errors ----------------- - -As we have seen, kmemcheck will produce false positive reports. Therefore, it -is not very wise to blindly post kmemcheck warnings to mailing lists and -maintainers. Instead, I encourage maintainers and developers to find errors -in their own code. If you get a warning, you can try to work around it, try -to figure out if it's a real error or not, or simply ignore it. Most -developers know their own code and will quickly and efficiently determine the -root cause of a kmemcheck report. This is therefore also the most efficient -way to work with kmemcheck. - -That said, we (the kmemcheck maintainers) will always be on the lookout for -false positives that we can annotate and silence. So whatever you find, -please drop us a note privately! Kernel configs and steps to reproduce (if -available) are of course a great help too. - -Happy hacking! - - -Technical description ---------------------- - -kmemcheck works by marking memory pages non-present. This means that whenever -somebody attempts to access the page, a page fault is generated. The page -fault handler notices that the page was in fact only hidden, and so it calls -on the kmemcheck code to make further investigations. - -When the investigations are completed, kmemcheck "shows" the page by marking -it present (as it would be under normal circumstances). This way, the -interrupted code can continue as usual. - -But after the instruction has been executed, we should hide the page again, so -that we can catch the next access too! Now kmemcheck makes use of a debugging -feature of the processor, namely single-stepping. When the processor has -finished the one instruction that generated the memory access, a debug -exception is raised. From here, we simply hide the page again and continue -execution, this time with the single-stepping feature turned off. - -kmemcheck requires some assistance from the memory allocator in order to work. -The memory allocator needs to - - 1. Tell kmemcheck about newly allocated pages and pages that are about to - be freed. This allows kmemcheck to set up and tear down the shadow memory - for the pages in question. The shadow memory stores the status of each - byte in the allocation proper, e.g. whether it is initialized or - uninitialized. - - 2. Tell kmemcheck which parts of memory should be marked uninitialized. - There are actually a few more states, such as "not yet allocated" and - "recently freed". - -If a slab cache is set up using the SLAB_NOTRACK flag, it will never return -memory that can take page faults because of kmemcheck. - -If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still -request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags. -This does not prevent the page faults from occurring, however, but marks the -object in question as being initialized so that no warnings will ever be -produced for this object. - -Currently, the SLAB and SLUB allocators are supported by kmemcheck. 
diff --git a/Documentation/device-mapper/boot.txt b/Documentation/device-mapper/boot.txt new file mode 100644 index 000000000000..adcaad5e5e32 --- /dev/null +++ b/Documentation/device-mapper/boot.txt @@ -0,0 +1,42 @@ +Boot time creation of mapped devices +==================================== + +It is possible to configure a device mapper device to act as the root +device for your system in two ways. + +The first is to build an initial ramdisk which boots to a minimal +userspace which configures the device, then pivot_root(8) into it. + +For simple device mapper configurations, it is possible to boot directly +using the following kernel command line: + +dm="<name> <uuid> <ro>,table line 1,...,table line n" + +name = the name to associate with the device + after boot, udev, if used, will use that name to label + the device node. +uuid = may be 'none' or the UUID desired for the device. +ro = may be "ro" or "rw". If "ro", the device and device table will be + marked read-only. + +Each table line may be as normal when using the dmsetup tool except for +two variations: +1. Any use of commas will be interpreted as a newline +2. Quotation marks cannot be escaped and cannot be used without + terminating the dm= argument. + +Unless renamed by udev, the device node created will be dm-0 as the +first minor number for the device-mapper is used during early creation. + +Example +======= + +- Booting to a linear array made up of user-mode Linux block devices: + + dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \ + root=/dev/dm-0 + +This will boot to a rw dm-linear target of 8192 sectors split across two +block devices identified by their major:minor numbers. After boot, udev +will rename this target to /dev/mapper/lroot (depending on the rules). +No uuid was assigned.
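For comparison, a sketch of the equivalent userspace setup: after boot, the two comma-separated table lines from the dm= example above correspond to the following dmsetup table, one target per line:

		# dmsetup create lroot <<EOF
		0 4096 linear 98:16 0
		4096 4096 linear 98:32 0
		EOF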
diff --git a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt similarity index 84% rename from Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt rename to Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt index 7175dc3740ac..ed34253d9fb1 100644 --- a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt +++ b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt @@ -2,7 +2,7 @@ Toppoly TD028TTEC1 Panel ======================== Required properties: -- compatible: "toppoly,td028ttec1" +- compatible: "tpo,td028ttec1" Optional properties: - label: a symbolic name for the panel @@ -14,7 +14,7 @@ Example ------- lcd-panel: td028ttec1@0 { - compatible = "toppoly,td028ttec1"; + compatible = "tpo,td028ttec1"; reg = <0>; spi-max-frequency = <100000>; spi-cpol; diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt index a122723907ac..99acc712f83a 100644 --- a/Documentation/devicetree/bindings/dma/snps-dma.txt +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt @@ -64,6 +64,6 @@ Example: reg = <0xe0000000 0x1000>; interrupts = <0 35 0x4>; dmas = <&dmahost 12 0 1>, - <&dmahost 13 0 1 0>; + <&dmahost 13 1 0>; dma-names = "rx", "rx"; }; diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt index 07a250498fbb..f569db58f64a 100644 --- a/Documentation/devicetree/bindings/hwmon/jc42.txt +++ b/Documentation/devicetree/bindings/hwmon/jc42.txt @@ -34,6 +34,10 @@ Required properties: - reg: I2C address +Optional properties: +- smbus-timeout-disable: When set, the smbus timeout function will be disabled. + This is not supported on all chips. + Example: temp-sensor@1a { diff --git a/Documentation/devicetree/bindings/misc/memory-state-time.txt b/Documentation/devicetree/bindings/misc/memory-state-time.txt new file mode 100644 index 000000000000..c99a506c030d --- /dev/null +++ b/Documentation/devicetree/bindings/misc/memory-state-time.txt @@ -0,0 +1,8 @@ +Memory bandwidth and frequency state tracking + +Required properties: +- compatible : should be: + "memory-state-time" +- freq-tbl: Should contain entries with each frequency in Hz. +- bw-buckets: Should contain upper-bound limits for each bandwidth bucket in Mbps. + Must match the framework power_profile.xml for the device. diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt new file mode 100644 index 000000000000..c6b82511ae8a --- /dev/null +++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt @@ -0,0 +1,8 @@ +Binding for MIPS Cluster Power Controller (CPC). + +This binding allows a system to specify where the CPC registers are +located. + +Required properties: +compatible : Should be "mti,mips-cpc". +regs: Should describe the address & size of the CPC register region. 
diff --git a/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt new file mode 100644 index 000000000000..2ceb202462cb --- /dev/null +++ b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt @@ -0,0 +1,378 @@ +=========================================================== +Energy cost bindings for Energy Aware Scheduling +=========================================================== + +=========================================================== +1 - Introduction +=========================================================== + +This note specifies bindings required for energy-aware scheduling +(EAS)[1]. Historically, the scheduler's primary objective has been +performance. EAS aims to provide an alternative objective - energy +efficiency. EAS relies on a simple platform energy cost model to +guide scheduling decisions. The model only considers the CPU +subsystem. + +This note is aligned with the definition of the layout of physical +CPUs in the system as described in the ARM topology binding +description [2]. The concept is applicable to any system so long as +the cost model data is provided for those processing elements in +that system's topology that EAS is required to service. + +Processing elements refer to hardware threads, CPUs and clusters of +related CPUs in increasing order of hierarchy. + +EAS requires two key cost metrics - busy costs and idle costs. Busy +costs comprise of a list of compute capacities for the processing +element in question and the corresponding power consumption at that +capacity. Idle costs comprise of a list of power consumption values +for each idle state [C-state] that the processing element supports. +For a detailed description of these metrics, their derivation and +their use see [3]. + +These cost metrics are required for processing elements in all +scheduling domain levels that EAS is required to service. + +=========================================================== +2 - energy-costs node +=========================================================== + +Energy costs for the processing elements in scheduling domains that +EAS is required to service are defined in the energy-costs node +which acts as a container for the actual per processing element cost +nodes. A single energy-costs node is required for a given system. + +- energy-costs node + + Usage: Required + + Description: The energy-costs node is a container node and + it's sub-nodes describe costs for each processing element at + all scheduling domain levels that EAS is required to + service. + + Node name must be "energy-costs". + + The energy-costs node's parent node must be the cpus node. + + The energy-costs node's child nodes can be: + + - one or more cost nodes. + + Any other configuration is considered invalid. + +The energy-costs node can only contain a single type of child node +whose bindings are described in paragraph 4. + +=========================================================== +3 - energy-costs node child nodes naming convention +=========================================================== + +energy-costs child nodes must follow a naming convention where the +node name must be "thread-costN", "core-costN", "cluster-costN" +depending on whether the costs in the node are for a thread, core or +cluster. N (where N = {0, 1, ...}) is the node number and has no +bearing to the OS' logical thread, core or cluster index. 
+ +=========================================================== +4 - cost node bindings +=========================================================== + +Bindings for cost nodes are defined as follows: + +- system-cost node + + Description: Optional. Must be declared within an energy-costs + node. A system should contain no more than one system-cost node. + + Systems with no modelled system cost should not provide this + node. + + The system-cost node name must be "system-costN" as + described in 3 above. + + A system-cost node must be a leaf node with no children. + + Properties for system-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +- cluster-cost node + + Description: must be declared within an energy-costs node. A + system can contain multiple clusters and each cluster + serviced by EAS must have a corresponding cluster-costs + node. + + The cluster-cost node name must be "cluster-costN" as + described in 3 above. + + A cluster-cost node must be a leaf node with no children. + + Properties for cluster-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +- core-cost node + + Description: must be declared within an energy-costs node. A + system can contain multiple cores and each core serviced by + EAS must have a corresponding core-cost node. + + The core-cost node name must be "core-costN" as described in + 3 above. + + A core-cost node must be a leaf node with no children. + + Properties for core-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +- thread-cost node + + Description: must be declared within an energy-costs node. A + system can contain cores with multiple hardware threads and + each thread serviced by EAS must have a corresponding + thread-cost node. + + The core-cost node name must be "core-costN" as described in + 3 above. + + A core-cost node must be a leaf node with no children. + + Properties for thread-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +=========================================================== +5 - Cost node properties +========================================================== + +All cost node types must have only the following properties: + +- busy-cost-data + + Usage: required + Value type: An array of 2-item tuples. Each item is of type + u32. + Definition: The first item in the tuple is the capacity + value as described in [3]. The second item in the tuple is + the energy cost value as described in [3]. + +- idle-cost-data + + Usage: required + Value type: An array of 1-item tuples. The item is of type + u32. + Definition: The item in the tuple is the energy cost value + as described in [3]. + +=========================================================== +4 - Extensions to the cpu node +=========================================================== + +The cpu node is extended with a property that establishes the +connection between the processing element represented by the cpu +node and the cost-nodes associated with this processing element. + +The connection is expressed in line with the topological hierarchy +that this processing element belongs to starting with the level in +the hierarchy that this processing element itself belongs to through +to the highest level that EAS is required to service. The +connection cannot be sparse and must be contiguous from the +processing element's level through to the highest desired level. 
The +highest desired level must be the same for all processing elements. + +Example: Given that a cpu node may represent a thread that is a part +of a core, this property may contain multiple elements which +associate the thread with cost nodes describing the costs for the +thread itself, the core the thread belongs to, the cluster the core +belongs to and so on. The elements must be ordered from the lowest +level nodes to the highest desired level that EAS must service. The +highest desired level must be the same for all cpu nodes. The +elements must not be sparse: there must be elements for the current +thread, the next level of hierarchy (core) and so on without any +'holes'. + +Example: Given that a cpu node may represent a core that is a part +of a cluster of related cpus this property may contain multiple +elements which associate the core with cost nodes describing the +costs for the core itself, the cluster the core belongs to and so +on. The elements must be ordered from the lowest level nodes to the +highest desired level that EAS must service. The highest desired +level must be the same for all cpu nodes. The elements must not be +sparse: there must be elements for the current thread, the next +level of hierarchy (core) and so on without any 'holes'. + +If the system comprises of hierarchical clusters of clusters, this +property will contain multiple associations with the relevant number +of cluster elements in hierarchical order. + +Property added to the cpu node: + +- sched-energy-costs + + Usage: required + Value type: List of phandles + Definition: a list of phandles to specific cost nodes in the + energy-costs parent node that correspond to the processing + element represented by this cpu node in hierarchical order + of topology. + + The order of phandles in the list is significant. The first + phandle is to the current processing element's own cost + node. Subsequent phandles are to higher hierarchical level + cost nodes up until the maximum level that EAS is to + service. + + All cpu nodes must have the same highest level cost node. + + The phandle list must not be sparsely populated with handles + to non-contiguous hierarchical levels. See commentary above + for clarity. + + Any other configuration is invalid. + +=========================================================== +5 - Example dts +=========================================================== + +Example 1 (ARM 64-bit, 6-cpu system, two clusters of cpus, one +cluster of 2 Cortex-A57 cpus, one cluster of 4 Cortex-A53 cpus): + +cpus { + #address-cells = <2>; + #size-cells = <0>; + . + . + . 
+ A57_0: cpu@0 { + compatible = "arm,cortex-a57","arm,armv8"; + reg = <0x0 0x0>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A57_L2>; + clocks = <&scpi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; + }; + + A57_1: cpu@1 { + compatible = "arm,cortex-a57","arm,armv8"; + reg = <0x0 0x1>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A57_L2>; + clocks = <&scpi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; + }; + + A53_0: cpu@100 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x100>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + A53_1: cpu@101 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x101>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + A53_2: cpu@102 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x102>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + A53_3: cpu@103 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x103>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + energy-costs { + CPU_COST_0: core-cost0 { + busy-cost-data = < + 417 168 + 579 251 + 744 359 + 883 479 + 1024 616 + >; + idle-cost-data = < + 15 + 0 + >; + }; + CPU_COST_1: core-cost1 { + busy-cost-data = < + 235 33 + 302 46 + 368 61 + 406 76 + 447 93 + >; + idle-cost-data = < + 6 + 0 + >; + }; + CLUSTER_COST_0: cluster-cost0 { + busy-cost-data = < + 417 24 + 579 32 + 744 43 + 883 49 + 1024 64 + >; + idle-cost-data = < + 65 + 24 + >; + }; + CLUSTER_COST_1: cluster-cost1 { + busy-cost-data = < + 235 26 + 303 30 + 368 39 + 406 47 + 447 57 + >; + idle-cost-data = < + 56 + 17 + >; + }; + }; +}; + +=============================================================================== +[1] https://lkml.org/lkml/2015/5/12/728 +[2] Documentation/devicetree/bindings/topology.txt +[3] Documentation/scheduler/sched-energy.txt diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt index dad3b2ec66d4..aeb6db4e35c3 100644 --- a/Documentation/devicetree/bindings/serial/8250.txt +++ b/Documentation/devicetree/bindings/serial/8250.txt @@ -24,6 +24,7 @@ Required properties: - "ti,da830-uart" - "aspeed,ast2400-vuart" - "aspeed,ast2500-vuart" + - "nuvoton,npcm750-uart" - "serial" if the port type is unknown. - reg : offset and length of the register set for the device. - interrupts : should contain uart interrupt. 
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt index 6ca6b9e582a0..d740989eb569 100644 --- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt +++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt @@ -20,16 +20,16 @@ Required Properties: (CMT1 on sh73a0 and r8a7740) This is a fallback for the above renesas,cmt-48-* entries. - - "renesas,cmt0-r8a73a4" for the 32-bit CMT0 device included in r8a73a4. - - "renesas,cmt1-r8a73a4" for the 48-bit CMT1 device included in r8a73a4. - - "renesas,cmt0-r8a7790" for the 32-bit CMT0 device included in r8a7790. - - "renesas,cmt1-r8a7790" for the 48-bit CMT1 device included in r8a7790. - - "renesas,cmt0-r8a7791" for the 32-bit CMT0 device included in r8a7791. - - "renesas,cmt1-r8a7791" for the 48-bit CMT1 device included in r8a7791. - - "renesas,cmt0-r8a7793" for the 32-bit CMT0 device included in r8a7793. - - "renesas,cmt1-r8a7793" for the 48-bit CMT1 device included in r8a7793. - - "renesas,cmt0-r8a7794" for the 32-bit CMT0 device included in r8a7794. - - "renesas,cmt1-r8a7794" for the 48-bit CMT1 device included in r8a7794. + - "renesas,r8a73a4-cmt0" for the 32-bit CMT0 device included in r8a73a4. + - "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4. + - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790. + - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790. + - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791. + - "renesas,r8a7791-cmt1" for the 48-bit CMT1 device included in r8a7791. + - "renesas,r8a7793-cmt0" for the 32-bit CMT0 device included in r8a7793. + - "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793. + - "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794. + - "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794. - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2. - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2. @@ -46,7 +46,7 @@ Required Properties: Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes cmt0: timer@ffca0000 { - compatible = "renesas,cmt0-r8a7790", "renesas,rcar-gen2-cmt0"; + compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0"; reg = <0 0xffca0000 0 0x1004>; interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>, <0 142 IRQ_TYPE_LEVEL_HIGH>; @@ -55,7 +55,7 @@ Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes }; cmt1: timer@e6130000 { - compatible = "renesas,cmt1-r8a7790", "renesas,rcar-gen2-cmt1"; + compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1"; reg = <0 0xe6130000 0 0x1004>; interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>, <0 121 IRQ_TYPE_LEVEL_HIGH>, diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt new file mode 100644 index 000000000000..18329d39487e --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt @@ -0,0 +1,8 @@ +Trusty fiq debugger interface + +Provides a single fiq for the fiq debugger. + +Required properties: +- compatible: compatible = "android,trusty-fiq-v1-*"; where * is a serial port. + +Must be a child of the node that provides fiq support ("android,trusty-fiq-v1"). 
diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt new file mode 100644 index 000000000000..de810b955bc9 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt @@ -0,0 +1,8 @@ +Trusty fiq interface + +Trusty provides fiq emulation. + +Required properties: +- compatible: "android,trusty-fiq-v1" + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt new file mode 100644 index 000000000000..5aefeb8e536f --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt @@ -0,0 +1,67 @@ +Trusty irq interface + +Trusty requires non-secure irqs to be forwarded to the secure OS. + +Required properties: +- compatible: "android,trusty-irq-v1" + +Optional properties: + +- interrupt-templates: is an optional property that works together + with "interrupt-ranges" to specify secure side to kernel IRQs mapping. + + It is a list of entries, each one of which defines a group of interrupts + having common properties, and has the following format: + < phandle irq_id_pos [templ_data]> + phandle - phandle of interrupt controller this template is for + irq_id_pos - the position of irq id in interrupt specifier array + for interrupt controller referenced by phandle. + templ_data - is an array of u32 values (could be empty) in the same + format as interrupt specifier for interrupt controller + referenced by phandle but with omitted irq id field. + +- interrupt-ranges: list of entries that specifies secure side to kernel + IRQs mapping. + + Each entry in the "interrupt-ranges" list has the following format: + + beg - first entry in this range + end - last entry in this range + templ_idx - index of entry in "interrupt-templates" property + that must be used as a template for all interrupts + in this range + +Example: +{ + gic: interrupt-controller@50041000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + interrupt-controller; + ... + }; + ... + IPI: interrupt-controller { + compatible = "android,CustomIPI"; + #interrupt-cells = <1>; + interrupt-controller; + }; + ... + trusty { + compatible = "android,trusty-smc-v1"; + ranges; + #address-cells = <2>; + #size-cells = <2>; + + irq { + compatible = "android,trusty-irq-v1"; + interrupt-templates = <&IPI 0>, + <&gic 1 GIC_PPI 0>, + <&gic 1 GIC_SPI 0>; + interrupt-ranges = < 0 15 0>, + <16 31 1>, + <32 223 2>; + }; + } +} + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt new file mode 100644 index 000000000000..1b39ad317c67 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt @@ -0,0 +1,6 @@ +Trusty smc interface + +Trusty is running in secure mode on the same (arm) cpu(s) as the current os. + +Required properties: +- compatible: "android,trusty-smc-v1" diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt index ce02cebac26a..464ddf7b509a 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.txt +++ b/Documentation/devicetree/bindings/usb/usb-device.txt @@ -11,7 +11,7 @@ Required properties: be used, but a device adhering to this binding may leave out all except for usbVID,PID. 
- reg: the port number which this device is connecting to, the range - is 1-31. + is 1-255. Example: diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index 2d80b60eeabe..7a69b8b47b97 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt @@ -12,6 +12,7 @@ Required properties: - "renesas,xhci-r8a7793" for r8a7793 SoC - "renesas,xhci-r8a7795" for r8a7795 SoC - "renesas,xhci-r8a7796" for r8a7796 SoC + - "renesas,xhci-r8a77965" for r8a77965 SoC - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 compatible device - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device - "xhci-platform" (deprecated) diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst index a0dc2879a152..4a18ef9997c0 100644 --- a/Documentation/driver-api/pm/devices.rst +++ b/Documentation/driver-api/pm/devices.rst @@ -328,7 +328,10 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. After the ``->prepare`` callback method returns, no new children may be registered below the device. The method may also prepare the device or driver in some way for the upcoming system power transition, but it - should not put the device into a low-power state. + should not put the device into a low-power state. Moreover, if the + device supports runtime power management, the ``->prepare`` callback + method must not update its state in case it is necessary to resume it + from runtime suspend later on. For devices supporting runtime power management, the return value of the prepare callback can be used to indicate to the PM core that it may @@ -356,6 +359,16 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. the appropriate low-power state, depending on the bus type the device is on, and they may enable wakeup events. + However, for devices supporting runtime power management, the + ``->suspend`` methods provided by subsystems (bus types and PM domains + in particular) must follow an additional rule regarding what can be done + to the devices before their drivers' ``->suspend`` methods are called. + Namely, they can only resume the devices from runtime suspend by + calling :c:func:`pm_runtime_resume` for them, if that is necessary, and + they must not update the state of the devices in any other way at that + time (in case the drivers need to resume the devices from runtime + suspend in their ``->suspend`` methods). + 3. For a number of devices it is convenient to split suspend into the "quiesce device" and "save device state" phases, in which cases ``suspend_late`` is meant to do the latter. It is always executed after @@ -729,6 +742,16 @@ state temporarily, for example so that its system wakeup capability can be disabled. This all depends on the hardware and the design of the subsystem and device driver in question. +If it is necessary to resume a device from runtime suspend during a system-wide +transition into a sleep state, that can be done by calling +:c:func:`pm_runtime_resume` for it from the ``->suspend`` callback (or its +couterpart for transitions related to hibernation) of either the device's driver +or a subsystem responsible for it (for example, a bus type or a PM domain). 
+That is guaranteed to work by the requirement that subsystems must not change +the state of devices (possibly except for resuming them from runtime suspend) +from their ``->prepare`` and ``->suspend`` callbacks (or equivalent) *before* +invoking device drivers' ``->suspend`` callbacks (or equivalent). + During system-wide resume from a sleep state it's easiest to put devices into the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`. Refer to that document for more information regarding this particular issue as diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt index 5a8f7f4d2bca..7449893dc039 100644 --- a/Documentation/filesystems/ext4.txt +++ b/Documentation/filesystems/ext4.txt @@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs data_err=abort Abort the journal if an error occurs in a file data buffer in ordered mode. -grpid Give objects the same group ID as their creator. +grpid New objects have the group ID of their parent. bsdgroups nogrpid (*) New objects have the group ID of their creator. diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index adba21b5ada7..99ca8e30a4ca 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -396,6 +396,8 @@ is not associated with a file: [stack] = the stack of the main process [vdso] = the "virtual dynamic shared object", the kernel system call handler + [anon:] = an anonymous mapping that has been + named by userspace or if empty, the mapping is anonymous. @@ -424,6 +426,7 @@ KernelPageSize: 4 kB MMUPageSize: 4 kB Locked: 0 kB VmFlags: rd ex mr mw me dw +Name: name from userspace the first of these lines shows the same information as is displayed for the mapping in /proc/PID/maps. The remaining lines show the size of the mapping @@ -496,6 +499,9 @@ Note that there is no guarantee that every flag and associated mnemonic will be present in all further kernel releases. Things get changed, the flags may be vanished or the reverse -- new added. +The "Name" field will only be present on a mapping that has been named by +userspace, and will show the name passed in by userspace. + This file is only present if the CONFIG_MMU kernel configuration option is enabled. diff --git a/Documentation/index.rst b/Documentation/index.rst index cb7f1ba5b3b1..380868d12704 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -75,6 +75,7 @@ needed). 
sound/index crypto/index filesystems/index + rpmb/index Architecture-specific documentation ----------------------------------- diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 3e3fdae5f3ed..c195b51d9ab0 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -324,6 +324,7 @@ Code Seq#(hex) Include File Comments 0xB3 00 linux/mmc/ioctl.h 0xB4 00-0F linux/gpio.h 0xB5 00-0F uapi/linux/rpmsg.h +0xB5 80-8F linux/uapi/linux/rpmb.h 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h 0xCA 80-BF uapi/scsi/cxlflash_ioctl.h diff --git a/Documentation/misc-devices/mei/dal/dal.txt b/Documentation/misc-devices/mei/dal/dal.txt new file mode 100644 index 000000000000..09801e725a0b --- /dev/null +++ b/Documentation/misc-devices/mei/dal/dal.txt @@ -0,0 +1,126 @@ +Intel(R) Dynamic Application Loader (Intel(R) DAL) +=================================================== + +Introduction +============ + +Intel Dynamic Application Loader (Intel DAL) is a service that runs +on the Intel Management Engine (Intel ME). It provides the ability to +install, run, and interact with Trusted Applets - TAs, +written in subset of Java) in a secure environment. +--- +installing applet int JHI and running them on Intel ME. + +There are two interfaces to Intel DAL from the operating system. +One from the user space, called JHI and one from kernel space, called KDI. +User space applications can install and uninstall TAs, and both kernel +and user space applications can communicate with installed TAs. + +Intel DAL Linux Kernel Driver +================ +The driver supports both user space clients and kernel space clients. + +For user space clients: +----------------------- +For each DAL FW client (IVM, SDM and RTM) the driver exposes a char device +called /dev/dal{i}, while i is 0-2 respectively. + +The user space interface allows sending raw messages from user-space +to DAL FW client, without any modifications. +The driver will send back to the user the raw messages which was received back. +Usually this interface is used by JHI - the DAL SW (for more information +search dynamic-application-loader-host-interface in github) +The messages are sent by using the char device 'write' function, +and received by using the 'read' function in accordance. + +For kernel space clients: +------------------------- +The driver expose api in file, to allow a kernel space client +using DAL. + +dal_uuid_to_bin - convert uuid string to bin + Input uuid is in either hyphenless or standard format + Arguments: + uuid_str: uuid string + uuid: output param to hold uuid bin + + Return: + 0 on success + <0 on failure + +dal_create_session - create session to an installed trusted application. + Arguments: + session_handle: output param to hold the session handle + ta_id: trusted application (ta) id + acp_pkg acp file of the ta + acp_pkg_len: acp file length + init_param: init parameters to the session (optional) + init_param_len: length of the init parameters + + Return: + 0 on success + <0 on system failure + >0 on DAL FW failure + +dal_send_and_receive - send and receive data to/from ta + Arguments: + session_handle: session handle + command_id: command id + input: message to be sent + input_len: sent message size + output: An output parameter to hold a pointer + to the buffer which will contain the received + message. 
+ This buffer is allocated by the driver and freed + by the user + output_len: An input and output param - + - input: the expected maximum length + of the received message. + - output: size of the received message + response_code: An output parameter to hold the return + value from the applet + + Return: + 0 on success + <0 on system failure + >0 on DAL FW failure + +dal_close_session - close ta session + Arguments: +session_handle: session handle + + Return: + 0 on success + <0 on system failure + >0 on DAL FW failure + +dal_set_ta_exclusive_access - set client to be owner of the ta, + so no one else (especially user space client) + will be able to open session to it + Arguments: + ta_id: trusted application (ta) id + + Return: + 0 on success + -ENODEV when the device can't be found + -ENOMEM on memory allocation failure + -EPERM when ta is owned by another client + -EEXIST when ta is already owned by current client + +dal_unset_ta_exclusive_access - unset client from owning ta + Arguments: + ta_id: trusted application (ta) id + + Return: + 0 on success + -ENODEV when the device can't be found + -ENOENT when ta isn't found in exclusiveness ta list + -EPERM when ta is owned by another client + +dal_get_version_info - return DAL driver version + Arguments: + version_info: output param to hold DAL driver version information + + Return: + 0 on success + -EINVAL on incorrect input diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 77f4de59dc9c..57f55bf0b906 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max min: Minimal size of receive buffer used by TCP sockets. It is guaranteed to each TCP socket, even under moderate memory pressure. - Default: 1 page + Default: 4K default: initial size of receive buffer used by TCP sockets. This value overrides net.core.rmem_default used by other protocols. @@ -608,6 +608,16 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER initial value when the blackhole issue goes away. By default, it is set to 1hr. +tcp_fwmark_accept - BOOLEAN + If set, incoming connections to listening sockets that do not have a + socket mark will set the mark of the accepting socket to the fwmark of + the incoming SYN packet. This will cause all packets on that connection + (starting from the first SYNACK) to be sent with that fwmark. The + listening socket's mark is unchanged. Listening sockets that already + have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are + unaffected. + Default: 0 + tcp_syn_retries - INTEGER Number of times initial SYNs for an active TCP connection attempt will be retransmitted. Should not be higher than 127. Default value @@ -666,7 +676,7 @@ tcp_window_scaling - BOOLEAN tcp_wmem - vector of 3 INTEGERs: min, default, max min: Amount of memory reserved for send buffers for TCP sockets. Each TCP socket has rights to use it due to fact of its birth. - Default: 1 page + Default: 4K default: initial size of send buffer used by TCP sockets. This value overrides net.core.wmem_default used by other protocols. 
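To make the tcp_fwmark_accept behaviour above concrete, here is a small user-space sketch (not part of this patch) that accepts a connection on an unmarked listening socket and reads back the mark the kernel copied from the incoming SYN. It assumes net.ipv4.tcp_fwmark_accept has been set to 1 and that incoming packets carry an fwmark (for example set by an iptables MARK rule); error handling is omitted for brevity.

	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_MARK
	#define SO_MARK 36	/* fallback if the libc headers predate SO_MARK */
	#endif

	static void show_accepted_mark(int listen_fd)
	{
		unsigned int mark = 0;
		socklen_t len = sizeof(mark);
		int conn_fd = accept(listen_fd, NULL, NULL);

		/*
		 * With tcp_fwmark_accept enabled and no SO_MARK set on the
		 * listening socket, the accepted socket inherits the fwmark
		 * of the SYN.  Reading it back may need CAP_NET_ADMIN on
		 * some kernels.
		 */
		getsockopt(conn_fd, SOL_SOCKET, SO_MARK, &mark, &len);
		printf("accepted socket fwmark: %u\n", mark);
	}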
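Stepping back to the Intel DAL kernel-space interface documented earlier in this patch, the call sequence is easier to follow as code. The sketch below strings the documented entry points together for a hypothetical kernel client; the exact C prototypes, the uuid and session-handle types, and the applet data are assumptions made for illustration and are not taken from the driver sources.

	#include <linux/types.h>
	#include <linux/uuid.h>
	#include <linux/slab.h>

	/* Prototypes assumed from the descriptions above, not from a header. */
	static int example_dal_client(const char *ta_id_str,
				      const void *acp_pkg, size_t acp_pkg_len)
	{
		uuid_t ta_id;
		u64 session_handle;
		u8 cmd_in[16] = { 0 };
		u8 *cmd_out = NULL;
		size_t out_len = 256;	/* expected maximum reply size */
		int response_code = 0;
		int ret;

		ret = dal_uuid_to_bin(ta_id_str, &ta_id);
		if (ret)
			return ret;

		ret = dal_create_session(&session_handle, &ta_id,
					 acp_pkg, acp_pkg_len, NULL, 0);
		if (ret)
			return ret;

		ret = dal_send_and_receive(session_handle, 1 /* command id */,
					   cmd_in, sizeof(cmd_in),
					   &cmd_out, &out_len, &response_code);

		/* The output buffer is allocated by the driver, freed by us. */
		kfree(cmd_out);

		dal_close_session(session_handle);
		return ret;
	}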
diff --git a/Documentation/rpmb/conf.py b/Documentation/rpmb/conf.py new file mode 100644 index 000000000000..15430a0b3a08 --- /dev/null +++ b/Documentation/rpmb/conf.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8; mode: python -*- + +project = "Linux RPMB Subsystem" + +tags.add("subproject") diff --git a/Documentation/rpmb/index.rst b/Documentation/rpmb/index.rst new file mode 100644 index 000000000000..876a2603e4b5 --- /dev/null +++ b/Documentation/rpmb/index.rst @@ -0,0 +1,18 @@ +.. -*- coding: utf-8; mode: rst -*- + +============================================== +Replay Protected Memory Block (RPMB) subsystem +============================================== + +.. toctree:: + + introduction + simulation-device.rst + rpmb-tool.rst + +.. only:: subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/rpmb/introduction.rst b/Documentation/rpmb/introduction.rst new file mode 100644 index 000000000000..f4ded7d18e55 --- /dev/null +++ b/Documentation/rpmb/introduction.rst @@ -0,0 +1,102 @@ +.. -*- coding: utf-8; mode: rst -*- + +============= +Introduction: +============= + +Few storage technologies such is EMMC, UFS, and NVMe support RPMB +hardware partition with common protocol and frame layout. +The RPMB partition `cannot` be accessed via standard block layer, +but by a set of specific commands: + +WRITE, READ, GET_WRITE_COUNTER, and PROGRAM_KEY. + +The commands and the data are embedded within :c:type:`rpmb_frame `. + +An RPMB partition provides authenticated and replay protected access, +hence it is suitable as a secure storage. + +In-kernel API +------------- +The RPMB layer aims to provide in-kernel API for Trusted Execution +Environment (TEE) devices that are capable to securely compute the block +frame signature. In case a TEE device wish to store a replay protected +data, it creates an RPMB frame with requested data and computes HMAC of +the frame, then it requests the storage device via RPMB layer to store +the data. + +The layer provides two APIs, for :c:func:`rpmb_req_cmd()` for issuing one of RPMB +specific commands and :c:func:`rpmb_seq_cmd()` for issuing of raw RPMB protocol +frames, which is close to the functionality provided by emmc multi ioctl +interface. + +.. c:function:: int rpmb_cmd_req(struct rpmb_dev *rdev, struct rpmb_data *data); + +.. c:function:: int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds); + + +A TEE driver can claim the RPMB interface, for example, via +:c:func:`class_interface_register`: + +.. code-block:: c + + struct class_interface tee_rpmb_intf = { + .class = &rpmb_class; + .add_dev = rpmb_add_device; + .remove_dev = rpmb_remove_device; + } + class_interface_register(&tee_rpmb_intf); + + +RPMB device registeration +---------------------------- + +A storage device registers its RPMB hardware (eMMC) partition or RPMB +W-LUN (UFS) with the RPMB layer :c:func:`rpmb_dev_register` providing +an implementation for :c:func:`rpmb_seq_cmd()` handler. The interface +enables sending sequence of RPMB standard frames. + +.. code-block:: c + + struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .type = RPMB_TYPE_EMMC, + ... + } + rpmb_dev_register(disk_to_dev(part_md->disk), &mmc_rpmb_dev_ops); + + +User space API +-------------- + +A parallel user space API is provided via /dev/rpmbX character +device with two IOCTL commands. 
+Simplified one, ``RPMB_IOC_REQ_CMD``, were read result cycles is performed +by the framework on behalf the user and second, ``RPMB_IOC_SEQ_CMD`` where +the whole RPMB sequence, including ``RESULT_READ`` is supplied by the caller. +The latter is intended for easier adjusting of the applications that +use ``MMC_IOC_MULTI_CMD`` ioctl, such as +https://android.googlesource.com/trusty/app/storage/ + +.. code-block:: c + + struct rpmb_ioc_req_cmd ireq; + int ret; + + ireq.req_type = RPMB_WRITE_DATA; + rpmb_ioc_cmd_set(ireq.icmd, RPMB_F_WRITE, frames_in, cnt_in); + rpmb_ioc_cmd_set(ireq.ocmd, 0, frames_out, cnt_out); + + ret = ioctl(fd, RPMB_IOC_REQ_CMD, &ireq); + + +API +--- +.. kernel-doc:: include/linux/rpmb.h + +.. kernel-doc:: drivers/char/rpmb/core.c + +.. kernel-doc:: include/uapi/linux/rpmb.h + +.. kernel-doc:: drivers/char/rpmb/cdev.c + diff --git a/Documentation/rpmb/rpmb-tool.rst b/Documentation/rpmb/rpmb-tool.rst new file mode 100644 index 000000000000..148eb0df4750 --- /dev/null +++ b/Documentation/rpmb/rpmb-tool.rst @@ -0,0 +1,19 @@ +========== +RPMB Tool +========== + +There is a sample rpmb tool under tools/rpmb/ directory that exercises +the RPMB devices via RPMB character devices interface (/dev/rpmbX) + +.. code-block:: none + + rpmb [-v] [-r|-s] + + rpmb program-key [- + rpmb write-counter [KEY_FILE] + rpmb write-blocks
+ rpmb read-blocks <RPMB_DEVICE> <ADDRESS> <BLOCK_COUNT> <OUTPUT_FILE>
[KEY_FILE] + + rpmb -v/--verbose: runs in verbose mode + rpmb -s/--sequence: use RPMB_IOC_SEQ_CMD + rpmb -r/--request: use RPMB_IOC_REQ_CMD diff --git a/Documentation/rpmb/simulation-device.rst b/Documentation/rpmb/simulation-device.rst new file mode 100644 index 000000000000..9192a78a71d4 --- /dev/null +++ b/Documentation/rpmb/simulation-device.rst @@ -0,0 +1,19 @@ +====================== +RPMB Simulation Device +====================== + +RPMB partition simulation device is a virtual device that +provides simulation of the RPMB protocol and uses kernel memory +as storage. + +This driver cannot promise any real security, it is suitable for testing +of the RPMB subsystem it self and mostly it was found useful for testing of +RPMB applications prior to RPMB key provisioning/programming as +The RPMB key programming can be performed only once in the life time +of the storage device. + +Implementation: +--------------- + +.. kernel-doc:: drivers/char/rpmb/rpmb_sim.c + diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt new file mode 100644 index 000000000000..dab2f9088b33 --- /dev/null +++ b/Documentation/scheduler/sched-energy.txt @@ -0,0 +1,362 @@ +Energy cost model for energy-aware scheduling (EXPERIMENTAL) + +Introduction +============= + +The basic energy model uses platform energy data stored in sched_group_energy +data structures attached to the sched_groups in the sched_domain hierarchy. The +energy cost model offers two functions that can be used to guide scheduling +decisions: + +1. static unsigned int sched_group_energy(struct energy_env *eenv) +2. static int energy_diff(struct energy_env *eenv) + +sched_group_energy() estimates the energy consumed by all cpus in a specific +sched_group including any shared resources owned exclusively by this group of +cpus. Resources shared with other cpus are excluded (e.g. later level caches). + +energy_diff() estimates the total energy impact of a utilization change. That +is, adding, removing, or migrating utilization (tasks). + +Both functions use a struct energy_env to specify the scenario to be evaluated: + + struct energy_env { + struct sched_group *sg_top; + struct sched_group *sg_cap; + int cap_idx; + int util_delta; + int src_cpu; + int dst_cpu; + int energy; + }; + +sg_top: sched_group to be evaluated. Not used by energy_diff(). + +sg_cap: sched_group covering the cpus in the same frequency domain. Set by +sched_group_energy(). + +cap_idx: Capacity state to be used for energy calculations. Set by +find_new_capacity(). + +util_delta: Amount of utilization to be added, removed, or migrated. + +src_cpu: Source cpu from where 'util_delta' utilization is removed. Should be +-1 if no source (e.g. task wake-up). + +dst_cpu: Destination cpu where 'util_delta' utilization is added. Should be -1 +if utilization is removed (e.g. terminating tasks). + +energy: Result of sched_group_energy(). + +The metric used to represent utilization is the actual per-entity running time +averaged over time using a geometric series. Very similar to the existing +per-entity load-tracking, but _not_ scaled by task priority and capped by the +capacity of the cpu. The latter property does mean that utilization may +underestimate the compute requirements for task on fully/over utilized cpus. +The greatest potential for energy savings without affecting performance too much +is scenarios where the system isn't fully utilized. 
If the system is deemed +fully utilized load-balancing should be done with task load (includes task +priority) instead in the interest of fairness and performance. + + +Background and Terminology +=========================== + +To make it clear from the start: + +energy = [joule] (resource like a battery on powered devices) +power = energy/time = [joule/second] = [watt] + +The goal of energy-aware scheduling is to minimize energy, while still getting +the job done. That is, we want to maximize: + + performance [inst/s] + -------------------- + power [W] + +which is equivalent to minimizing: + + energy [J] + ----------- + instruction + +while still getting 'good' performance. It is essentially an alternative +optimization objective to the current performance-only objective for the +scheduler. This alternative considers two objectives: energy-efficiency and +performance. Hence, there needs to be a user controllable knob to switch the +objective. Since it is early days, this is currently a sched_feature +(ENERGY_AWARE). + +The idea behind introducing an energy cost model is to allow the scheduler to +evaluate the implications of its decisions rather than applying energy-saving +techniques blindly that may only have positive effects on some platforms. At +the same time, the energy cost model must be as simple as possible to minimize +the scheduler latency impact. + +Platform topology +------------------ + +The system topology (cpus, caches, and NUMA information, not peripherals) is +represented in the scheduler by the sched_domain hierarchy which has +sched_groups attached at each level that covers one or more cpus (see +sched-domains.txt for more details). To add energy awareness to the scheduler +we need to consider power and frequency domains. + +Power domain: + +A power domain is a part of the system that can be powered on/off +independently. Power domains are typically organized in a hierarchy where you +may be able to power down just a cpu or a group of cpus along with any +associated resources (e.g. shared caches). Powering up a cpu means that all +power domains it is a part of in the hierarchy must be powered up. Hence, it is +more expensive to power up the first cpu that belongs to a higher level power +domain than powering up additional cpus in the same high level domain. Two +level power domain hierarchy example: + + Power source + +-------------------------------+----... +per group PD G G + | +----------+ | + +--------+-------| Shared | (other groups) +per-cpu PD G G | resource | + | | +----------+ + +-------+ +-------+ + | CPU 0 | | CPU 1 | + +-------+ +-------+ + +Frequency domain: + +Frequency domains (P-states) typically cover the same group of cpus as one of +the power domain levels. That is, there might be several smaller power domains +sharing the same frequency (P-state) or there might be a power domain spanning +multiple frequency domains. + +From a scheduling point of view there is no need to know the actual frequencies +[Hz]. All the scheduler cares about is the compute capacity available at the +current state (P-state) the cpu is in and any other available states. For that +reason, and to also factor in any cpu micro-architecture differences, compute +capacity scaling states are called 'capacity states' in this document. For SMP +systems this is equivalent to P-states. For mixed micro-architecture systems +(like ARM big.LITTLE) it is P-states scaled according to the micro-architecture +performance relative to the other cpus in the system. 
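As a worked illustration of capacity scaling (with invented numbers, not measurements): if a little cpu at its highest P-state completes a benchmark at roughly 44% of the rate of a big cpu at its highest P-state, and the big cpu's top capacity is normalised to 1024, then

	capacity(little, top P-state) ~= 0.44 * 1024 ~= 450

which is in line with the 447 vs 1024 top capacities used in the sched-energy-costs example bindings. Lower P-states of each cpu get correspondingly smaller capacity values. These figures are illustrative only; real values come from the platform characterization described later in this document.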
+ +Energy modelling: +------------------ + +Due to the hierarchical nature of the power domains, the most obvious way to +model energy costs is therefore to associate power and energy costs with +domains (groups of cpus). Energy costs of shared resources are associated with +the group of cpus that share the resources, only the cost of powering the +cpu itself and any private resources (e.g. private L1 caches) is associated +with the per-cpu groups (lowest level). + +For example, for an SMP system with per-cpu power domains and a cluster level +(group of cpus) power domain we get the overall energy costs to be: + + energy = energy_cluster + n * energy_cpu + +where 'n' is the number of cpus powered up and energy_cluster is the cost paid +as soon as any cpu in the cluster is powered up. + +The power and frequency domains can naturally be mapped onto the existing +sched_domain hierarchy and sched_groups by adding the necessary data to the +existing data structures. + +The energy model considers energy consumption from two contributors (shown in +the illustration below): + +1. Busy energy: Energy consumed while a cpu and the higher level groups that it +belongs to are busy running tasks. Busy energy is associated with the state of +the cpu, not an event. The time the cpu spends in this state varies. Thus, the +most obvious platform parameter for this contribution is busy power +(energy/time). + +2. Idle energy: Energy consumed while a cpu and higher level groups that it +belongs to are idle (in a C-state). Like busy energy, idle energy is associated +with the state of the cpu. Thus, the platform parameter for this contribution +is idle power (energy/time). + +Energy consumed during transitions from an idle-state (C-state) to a busy state +(P-state) or going the other way is ignored by the model to simplify the energy +model calculations. + + + Power + ^ + | busy->idle idle->busy + | transition transition + | + | _ __ + | / \ / \__________________ + |______________/ \ / + | \ / + | Busy \ Idle / Busy + | low P-state \____________/ high P-state + | + +------------------------------------------------------------> time + +Busy |--------------| |-----------------| + +Wakeup |------| |------| + +Idle |------------| + + +The basic algorithm +==================== + +The basic idea is to determine the total energy impact when utilization is +added or removed by estimating the impact at each level in the sched_domain +hierarchy starting from the bottom (sched_group contains just a single cpu). +The energy cost comes from busy time (sched_group is awake because one or more +cpus are busy) and idle time (in an idle-state). Energy model numbers account +for energy costs associated with all cpus in the sched_group as a group. + + for_each_domain(cpu, sd) { + sg = sched_group_of(cpu) + energy_before = curr_util(sg) * busy_power(sg) + + (1-curr_util(sg)) * idle_power(sg) + energy_after = new_util(sg) * busy_power(sg) + + (1-new_util(sg)) * idle_power(sg) + energy_diff += energy_before - energy_after + + } + + return energy_diff + +{curr, new}_util: The cpu utilization at the lowest level and the overall +non-idle time for the entire group for higher levels. Utilization is in the +range 0.0 to 1.0 in the pseudo-code. + +busy_power: The power consumption of the sched_group. + +idle_power: The power consumption of the sched_group when idle. + +Note: It is a fundamental assumption that the utilization is (roughly) scale +invariant. 
Task utilization tracking factors in any frequency scaling and +performance scaling differences due to difference cpu microarchitectures such +that task utilization can be used across the entire system. + + +Platform energy data +===================== + +struct sched_group_energy can be attached to sched_groups in the sched_domain +hierarchy and has the following members: + +cap_states: + List of struct capacity_state representing the supported capacity states + (P-states). struct capacity_state has two members: cap and power, which + represents the compute capacity and the busy_power of the state. The + list must be ordered by capacity low->high. + +nr_cap_states: + Number of capacity states in cap_states list. + +idle_states: + List of struct idle_state containing idle_state power cost for each + idle-state supported by the system orderd by shallowest state first. + All states must be included at all level in the hierarchy, i.e. a + sched_group spanning just a single cpu must also include coupled + idle-states (cluster states). In addition to the cpuidle idle-states, + the list must also contain an entry for the idling using the arch + default idle (arch_idle_cpu()). Despite this state may not be a true + hardware idle-state it is considered the shallowest idle-state in the + energy model and must be the first entry. cpus may enter this state + (possibly 'active idling') if cpuidle decides not enter a cpuidle + idle-state. Default idle may not be used when cpuidle is enabled. + In this case, it should just be a copy of the first cpuidle idle-state. + +nr_idle_states: + Number of idle states in idle_states list. + +There are no unit requirements for the energy cost data. Data can be normalized +with any reference, however, the normalization must be consistent across all +energy cost data. That is, one bogo-joule/watt must be the same quantity for +data, but we don't care what it is. + +A recipe for platform characterization +======================================= + +Obtaining the actual model data for a particular platform requires some way of +measuring power/energy. There isn't a tool to help with this (yet). This +section provides a recipe for use as reference. It covers the steps used to +characterize the ARM TC2 development platform. This sort of measurements is +expected to be done anyway when tuning cpuidle and cpufreq for a given +platform. + +The energy model needs two types of data (struct sched_group_energy holds +these) for each sched_group where energy costs should be taken into account: + +1. Capacity state information + +A list containing the compute capacity and power consumption when fully +utilized attributed to the group as a whole for each available capacity state. +At the lowest level (group contains just a single cpu) this is the power of the +cpu alone without including power consumed by resources shared with other cpus. +It basically needs to fit the basic modelling approach described in "Background +and Terminology" section: + + energy_system = energy_shared + n * energy_cpu + +for a system containing 'n' busy cpus. Only 'energy_cpu' should be included at +the lowest level. 'energy_shared' is included at the next level which +represents the group of cpus among which the resources are shared. + +This model is, of course, a simplification of reality. Thus, power/energy +attributions might not always exactly represent how the hardware is designed. +Also, busy power is likely to depend on the workload. 
It is therefore +recommended to use a representative mix of workloads when characterizing the +capacity states. + +If the group has no capacity scaling support, the list will contain a single +state where power is the busy power attributed to the group. The capacity +should be set to a default value (1024). + +When frequency domains include multiple power domains, the group representing +the frequency domain and all child groups share capacity states. This must be +indicated by setting the SD_SHARE_CAP_STATES sched_domain flag. All groups at +all levels that share the capacity state must have the list of capacity states +with the power set to the contribution of the individual group. + +2. Idle power information + +Stored in the idle_states list. The power number is the group idle power +consumption in each idle state as well when the group is idle but has not +entered an idle-state ('active idle' as mentioned earlier). Due to the way the +energy model is defined, the idle power of the deepest group idle state can +alternatively be accounted for in the parent group busy power. In that case the +group idle state power values are offset such that the idle power of the +deepest state is zero. It is less intuitive, but it is easier to measure as +idle power consumed by the group and the busy/idle power of the parent group +cannot be distinguished without per group measurement points. + +Measuring capacity states and idle power: + +The capacity states' capacity and power can be estimated by running a benchmark +workload at each available capacity state. By restricting the benchmark to run +on subsets of cpus it is possible to extrapolate the power consumption of +shared resources. + +ARM TC2 has two clusters of two and three cpus respectively. Each cluster has a +shared L2 cache. TC2 has on-chip energy counters per cluster. Running a +benchmark workload on just one cpu in a cluster means that power is consumed in +the cluster (higher level group) and a single cpu (lowest level group). Adding +another benchmark task to another cpu increases the power consumption by the +amount consumed by the additional cpu. Hence, it is possible to extrapolate the +cluster busy power. + +For platforms that don't have energy counters or equivalent instrumentation +built-in, it may be possible to use an external DAQ to acquire similar data. + +If the benchmark includes some performance score (for example sysbench cpu +benchmark), this can be used to record the compute capacity. + +Measuring idle power requires insight into the idle state implementation on the +particular platform. Specifically, if the platform has coupled idle-states (or +package states). To measure non-coupled per-cpu idle-states it is necessary to +keep one cpu busy to keep any shared resources alive to isolate the idle power +of the cpu from idle/busy power of the shared resources. The cpu can be tricked +into different per-cpu idle states by disabling the other states. Based on +various combinations of measurements with specific cpus busy and disabling +idle-states it is possible to extrapolate the idle-state power. 
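A worked example of the busy-power extrapolation described above, using invented numbers purely for illustration: suppose the per-cluster energy counters report an average of 300mW with one cpu in the cluster running the benchmark and 500mW with two cpus running it, both at the same capacity state. With the model used throughout this document,

	P(n busy cpus) = P_cluster + n * P_cpu

the two measurements give

	P_cpu     = P(2 busy) - P(1 busy) = 500mW - 300mW = 200mW
	P_cluster = P(1 busy) - P_cpu    = 300mW - 200mW = 100mW

so 200mW is recorded as the busy power of the per-cpu (lowest level) group and 100mW as the busy power attributed to the cluster level group at that capacity state. Idle power can be separated in a similar way by keeping one cpu busy while the others idle, as described above.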
diff --git a/Documentation/scheduler/sched-tune.txt b/Documentation/scheduler/sched-tune.txt new file mode 100644 index 000000000000..5df0ea361311 --- /dev/null +++ b/Documentation/scheduler/sched-tune.txt @@ -0,0 +1,413 @@ + Central, scheduler-driven, power-performance control + (EXPERIMENTAL) + +Abstract +======== + +The topic of a single simple power-performance tunable, that is wholly +scheduler centric, and has well defined and predictable properties has come up +on several occasions in the past [1,2]. With techniques such as a scheduler +driven DVFS [3], we now have a good framework for implementing such a tunable. +This document describes the overall ideas behind its design and implementation. + + +Table of Contents +================= + +1. Motivation +2. Introduction +3. Signal Boosting Strategy +4. OPP selection using boosted CPU utilization +5. Per task group boosting +6. Per-task wakeup-placement-strategy Selection +7. Question and Answers + - What about "auto" mode? + - What about boosting on a congested system? + - How CPUs are boosted when we have tasks with multiple boost values? +8. References + + +1. Motivation +============= + +Sched-DVFS [3] was a new event-driven cpufreq governor which allows the +scheduler to select the optimal DVFS operating point (OPP) for running a task +allocated to a CPU. Later, the cpufreq maintainers introduced a similar +governor, schedutil. The introduction of schedutil also enables running +workloads at the most energy efficient OPPs. + +However, sometimes it may be desired to intentionally boost the performance of +a workload even if that could imply a reasonable increase in energy +consumption. For example, in order to reduce the response time of a task, we +may want to run the task at a higher OPP than the one that is actually required +by it's CPU bandwidth demand. + +This last requirement is especially important if we consider that one of the +main goals of the utilization-driven governor component is to replace all +currently available CPUFreq policies. Since sched-DVFS and schedutil are event +based, as opposed to the sampling driven governors we currently have, they are +already more responsive at selecting the optimal OPP to run tasks allocated to +a CPU. However, just tracking the actual task load demand may not be enough +from a performance standpoint. For example, it is not possible to get +behaviors similar to those provided by the "performance" and "interactive" +CPUFreq governors. + +This document describes an implementation of a tunable, stacked on top of the +utilization-driven governors which extends their functionality to support task +performance boosting. + +By "performance boosting" we mean the reduction of the time required to +complete a task activation, i.e. the time elapsed from a task wakeup to its +next deactivation (e.g. because it goes back to sleep or it terminates). For +example, if we consider a simple periodic task which executes the same workload +for 5[s] every 20[s] while running at a certain OPP, a boosted execution of +that task must complete each of its activations in less than 5[s]. + +A previous attempt [5] to introduce such a boosting feature has not been +successful mainly because of the complexity of the proposed solution. Previous +versions of the approach described in this document exposed a single simple +interface to user-space. 
This single tunable knob allowed the tuning of +system wide scheduler behaviours ranging from energy efficiency at one end +through to incremental performance boosting at the other end. This first +tunable affects all tasks. However, that is not useful for Android products +so in this version only a more advanced extension of the concept is provided +which uses CGroups to boost the performance of only selected tasks while using +the energy efficient default for all others. + +The rest of this document introduces in more details the proposed solution +which has been named SchedTune. + + +2. Introduction +=============== + +SchedTune exposes a simple user-space interface provided through a new +CGroup controller 'stune' which provides two power-performance tunables +per group: + + //schedtune.prefer_idle + //schedtune.boost + +The CGroup implementation permits arbitrary user-space defined task +classification to tune the scheduler for different goals depending on the +specific nature of the task, e.g. background vs interactive vs low-priority. + +More details are given in section 5. + +2.1 Boosting +============ + +The boost value is expressed as an integer in the range [-100..0..100]. + +A value of 0 (default) configures the CFS scheduler for maximum energy +efficiency. This means that sched-DVFS runs the tasks at the minimum OPP +required to satisfy their workload demand. + +A value of 100 configures scheduler for maximum performance, which translates +to the selection of the maximum OPP on that CPU. + +A value of -100 configures scheduler for minimum performance, which translates +to the selection of the minimum OPP on that CPU. + +The range between -100, 0 and 100 can be set to satisfy other scenarios suitably. +For example to satisfy interactive response or depending on other system events +(battery level etc). + +The overall design of the SchedTune module is built on top of "Per-Entity Load +Tracking" (PELT) signals and sched-DVFS by introducing a bias on the Operating +Performance Point (OPP) selection. + +Each time a task is allocated on a CPU, cpufreq is given the opportunity to tune +the operating frequency of that CPU to better match the workload demand. The +selection of the actual OPP being activated is influenced by the boost value +for the task CGroup. + +This simple biasing approach leverages existing frameworks, which means minimal +modifications to the scheduler, and yet it allows to achieve a range of +different behaviours all from a single simple tunable knob. + +In EAS schedulers, we use boosted task and CPU utilization for energy +calculation and energy-aware task placement. + +2.2 prefer_idle +=============== + +This is a flag which indicates to the scheduler that userspace would like +the scheduler to focus on energy or to focus on performance. + +A value of 0 (default) signals to the CFS scheduler that tasks in this group +can be placed according to the energy-aware wakeup strategy. + +A value of 1 signals to the CFS scheduler that tasks in this group should be +placed to minimise wakeup latency. + +The value is combined with the boost value - task placement will not be +boost aware however CPU OPP selection is still boost aware. + +Android platforms typically use this flag for application tasks which the +user is currently interacting with. + + +3. 
Signal Boosting Strategy +=========================== + +The whole PELT machinery works based on the value of a few load tracking signals +which basically track the CPU bandwidth requirements for tasks and the capacity +of CPUs. The basic idea behind the SchedTune knob is to artificially inflate +some of these load tracking signals to make a task or RQ appears more demanding +that it actually is. + +Which signals have to be inflated depends on the specific "consumer". However, +independently from the specific (signal, consumer) pair, it is important to +define a simple and possibly consistent strategy for the concept of boosting a +signal. + +A boosting strategy defines how the "abstract" user-space defined +sched_cfs_boost value is translated into an internal "margin" value to be added +to a signal to get its inflated value: + + margin := boosting_strategy(sched_cfs_boost, signal) + boosted_signal := signal + margin + +Different boosting strategies were identified and analyzed before selecting the +one found to be most effective. + +Signal Proportional Compensation (SPC) +-------------------------------------- + +In this boosting strategy the sched_cfs_boost value is used to compute a +margin which is proportional to the complement of the original signal. +When a signal has a maximum possible value, its complement is defined as +the delta from the actual value and its possible maximum. + +Since the tunable implementation uses signals which have SCHED_LOAD_SCALE as +the maximum possible value, the margin becomes: + + margin := sched_cfs_boost * (SCHED_LOAD_SCALE - signal) + +Using this boosting strategy: +- a 100% sched_cfs_boost means that the signal is scaled to the maximum value +- each value in the range of sched_cfs_boost effectively inflates the signal in + question by a quantity which is proportional to the maximum value. + +For example, by applying the SPC boosting strategy to the selection of the OPP +to run a task it is possible to achieve these behaviors: + +- 0% boosting: run the task at the minimum OPP required by its workload +- 100% boosting: run the task at the maximum OPP available for the CPU +- 50% boosting: run at the half-way OPP between minimum and maximum + +Which means that, at 50% boosting, a task will be scheduled to run at half of +the maximum theoretically achievable performance on the specific target +platform. + +A graphical representation of an SPC boosted signal is represented in the +following figure where: + a) "-" represents the original signal + b) "b" represents a 50% boosted signal + c) "p" represents a 100% boosted signal + + + ^ + | SCHED_LOAD_SCALE + +-----------------------------------------------------------------+ + |pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp + | + | boosted_signal + | bbbbbbbbbbbbbbbbbbbbbbbb + | + | original signal + | bbbbbbbbbbbbbbbbbbbbbbbb+----------------------+ + | | + |bbbbbbbbbbbbbbbbbb | + | | + | | + | | + | +-----------------------+ + | | + | | + | | + |------------------+ + | + | + +-----------------------------------------------------------------------> + +The plot above shows a ramped load signal (titled 'original_signal') and it's +boosted equivalent. For each step of the original signal the boosted signal +corresponding to a 50% boost is midway from the original signal and the upper +bound. Boosting by 100% generates a boosted signal which is always saturated to +the upper bound. + + +4. 
OPP selection using boosted CPU utilization
+==============================================
+
+It is worth calling out that the implementation does not introduce any new load
+signals. Instead, it provides an API to tune existing signals. This tuning is
+done on demand and only in scheduler code paths where it is sensible to do so.
+The new API calls are defined to return either the default signal or a boosted
+one, depending on the value of sched_cfs_boost. This is a clean and
+non-invasive modification of the existing code paths.
+
+The signal representing a CPU's utilization is boosted according to the
+previously described SPC boosting strategy. To sched-DVFS, this allows a CPU
+(i.e. a CFS run-queue) to appear more utilized than it actually is.
+
+Thus, with sched_cfs_boost enabled, we have the following main functions to
+get the current utilization of a CPU:
+
+  cpu_util()
+  boosted_cpu_util()
+
+The new boosted_cpu_util() is similar to the first but returns a boosted
+utilization signal which is a function of the sched_cfs_boost value.
+
+This function is used in the CFS scheduler code paths where sched-DVFS needs to
+decide the OPP at which to run a CPU.
+For example, this allows selecting the highest OPP for a CPU which has
+the boost value set to 100%.
+
+
+5. Per task group boosting
+==========================
+
+On battery-powered devices there are usually many long-running background
+services which need energy-efficient scheduling. On the other hand, some
+applications are more performance sensitive and require an interactive
+response and/or maximum performance, regardless of the energy cost.
+
+To better serve such scenarios, the SchedTune implementation has an extension
+that provides a more fine-grained boosting interface.
+
+A new CGroup controller, namely "schedtune", can be enabled which allows task
+groups with different boosting values to be defined and configured.
+Tasks that require special performance can be put into separate CGroups.
+The value of the boost associated with the tasks in such a group can be
+specified using a single knob exposed by the CGroup controller:
+
+  schedtune.boost
+
+This knob allows the definition of a boost value that is to be used for
+SPC boosting of all tasks attached to this group.
+
+The current schedtune controller implementation is deliberately simple and has
+these main characteristics:
+
+  1) It is only possible to create one-level-deep hierarchies
+
+    The root control group defines the system-wide boost value to be applied
+    by default to all tasks. Its direct subgroups are named "boost groups" and
+    they define the boost value for a specific set of tasks.
+    Further nested subgroups are not allowed since they do not have a sensible
+    meaning from a user-space standpoint.
+
+  2) It is possible to define only a limited number of "boost groups"
+
+    This number is defined at compile time and by default configured to 16.
+    This is a design decision motivated by two main reasons:
+    a) In a real system we do not expect utilization scenarios with more than a
+       few boost groups. For example, a reasonable collection of groups could
+       be just "background", "interactive" and "performance".
+    b) It simplifies the implementation considerably, especially for the code
+       which has to compute the per-CPU boosting when there are multiple
+       RUNNABLE tasks with different boost values.
+
+Such a simple design should allow servicing the main utilization scenarios
+identified so far.
It provides a simple interface which can be used to manage the
+power-performance of all tasks or only selected tasks.
+Moreover, this interface can be easily integrated by user-space run-times (e.g.
+Android, ChromeOS) to implement a QoS solution for task boosting based on task
+classification, which has been a long-standing requirement.
+
+Setup and usage
+---------------
+
+0. Use a kernel with CONFIG_SCHED_TUNE support enabled
+
+1. Check that the "schedtune" CGroup controller is available:
+
+   root@linaro-nano:~# cat /proc/cgroups
+   #subsys_name   hierarchy   num_cgroups   enabled
+   cpuset         0           1             1
+   cpu            0           1             1
+   schedtune      0           1             1
+
+2. Mount a tmpfs to create the CGroups mount point (Optional)
+
+   root@linaro-nano:~# sudo mount -t tmpfs cgroups /sys/fs/cgroup
+
+3. Mount the "schedtune" controller
+
+   root@linaro-nano:~# mkdir /sys/fs/cgroup/stune
+   root@linaro-nano:~# sudo mount -t cgroup -o schedtune stune /sys/fs/cgroup/stune
+
+4. Create task groups and configure their specific boost value (Optional)
+
+   For example, here we create a "performance" boost group configured to boost
+   all its tasks to 100%:
+
+   root@linaro-nano:~# mkdir /sys/fs/cgroup/stune/performance
+   root@linaro-nano:~# echo 100 > /sys/fs/cgroup/stune/performance/schedtune.boost
+
+5. Move tasks into the boost group
+
+   For example, the following moves the task with PID $TASKPID (and all its
+   threads) into the "performance" boost group:
+
+   root@linaro-nano:~# echo $TASKPID > /sys/fs/cgroup/stune/performance/cgroup.procs
+
+This simple configuration allows only the threads of the $TASKPID task to run,
+when needed, at the highest OPP on the most capable CPU of the system.
+
+
+6. Per-task wakeup-placement-strategy Selection
+===============================================
+
+Many devices have a number of CFS tasks in use which require an absolute
+minimum wakeup latency, and many tasks for which wakeup latency is not
+important.
+
+For touch-driven environments, removing additional wakeup latency can be
+critical.
+
+When you use the SchedTune CGroup controller, you have access to a second
+parameter which allows a group to be marked such that energy-aware task
+placement is bypassed for tasks belonging to that group.
+
+prefer_idle=0 (default - use energy-aware task placement if available)
+prefer_idle=1 (never use energy-aware task placement for these tasks)
+
+Since the regular wakeup task placement algorithm in CFS is biased for
+performance, this has the effect of restoring minimum wakeup latency
+for the desired tasks whilst still allowing energy-aware wakeup placement
+to save energy for other tasks.
+
+
+7. Question and Answers
+=======================
+
+What about "auto" mode?
+-----------------------
+
+The 'auto' mode as described in [5] can be implemented by interfacing SchedTune
+with some suitable user-space element. This element could use the exposed
+system-wide or cgroup based interface.
+
+How are multiple groups of tasks with different boost values managed?
+---------------------------------------------------------------------
+
+The current SchedTune implementation keeps track of the boosted RUNNABLE tasks
+on a CPU. The CPU utilization seen by the scheduler-driven cpufreq governors
+(and used to select an appropriate OPP) is boosted with a value which is the
+maximum of the boost values of the currently RUNNABLE tasks in its RQ.
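+
+The following is a minimal, illustrative sketch of this per-CPU "max boost"
+aggregation. It is not the actual kernel implementation; the structure and
+function names used here are hypothetical:
+
+	/* Hypothetical sketch only - not the in-kernel SchedTune code. */
+	#define BOOSTGROUPS_COUNT 16	/* matches the default compile-time limit */
+
+	struct boost_groups {
+		int boost[BOOSTGROUPS_COUNT];	/* boost value of each boost group      */
+		int tasks[BOOSTGROUPS_COUNT];	/* RUNNABLE tasks per group on this CPU */
+	};
+
+	/* Boost to apply to this CPU's utilization signal. */
+	static int schedtune_cpu_boost(const struct boost_groups *bg)
+	{
+		int idx, active = 0, boost_max = 0;
+
+		for (idx = 0; idx < BOOSTGROUPS_COUNT; idx++) {
+			/* Only groups with RUNNABLE tasks contribute. */
+			if (bg->tasks[idx] <= 0)
+				continue;
+			if (!active || bg->boost[idx] > boost_max)
+				boost_max = bg->boost[idx];
+			active = 1;
+		}
+
+		/* No boosted RUNNABLE tasks: fall back to the default (0). */
+		return boost_max;
+	}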
+ +This allows cpufreq to boost a CPU only while there are boosted tasks ready +to run and switch back to the energy efficient mode as soon as the last boosted +task is dequeued. + + +8. References +============= +[1] http://lwn.net/Articles/552889 +[2] http://lkml.org/lkml/2012/5/18/91 +[3] http://lkml.org/lkml/2015/6/26/620 diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt new file mode 100644 index 000000000000..e9e6cbae2841 --- /dev/null +++ b/Documentation/speculation.txt @@ -0,0 +1,90 @@ +This document explains potential effects of speculation, and how undesirable +effects can be mitigated portably using common APIs. + +=========== +Speculation +=========== + +To improve performance and minimize average latencies, many contemporary CPUs +employ speculative execution techniques such as branch prediction, performing +work which may be discarded at a later stage. + +Typically speculative execution cannot be observed from architectural state, +such as the contents of registers. However, in some cases it is possible to +observe its impact on microarchitectural state, such as the presence or +absence of data in caches. Such state may form side-channels which can be +observed to extract secret information. + +For example, in the presence of branch prediction, it is possible for bounds +checks to be ignored by code which is speculatively executed. Consider the +following code: + + int load_array(int *array, unsigned int index) + { + if (index >= MAX_ARRAY_ELEMS) + return 0; + else + return array[index]; + } + +Which, on arm64, may be compiled to an assembly sequence such as: + + CMP , #MAX_ARRAY_ELEMS + B.LT less + MOV , #0 + RET + less: + LDR , [, ] + RET + +It is possible that a CPU mis-predicts the conditional branch, and +speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This +value will subsequently be discarded, but the speculated load may affect +microarchitectural state which can be subsequently measured. + +More complex sequences involving multiple dependent memory accesses may +result in sensitive information being leaked. Consider the following +code, building on the prior example: + + int load_dependent_arrays(int *arr1, int *arr2, int index) + { + int val1, val2, + + val1 = load_array(arr1, index); + val2 = load_array(arr2, val1); + + return val2; + } + +Under speculation, the first call to load_array() may return the value +of an out-of-bounds address, while the second call will influence +microarchitectural state dependent on this value. This may provide an +arbitrary read primitive. + +==================================== +Mitigating speculation side-channels +==================================== + +The kernel provides a generic API to ensure that bounds checks are +respected even under speculation. Architectures which are affected by +speculation-based side-channels are expected to implement these +primitives. + +The array_index_nospec() helper in can be used to +prevent information from being leaked via side-channels. + +A call to array_index_nospec(index, size) returns a sanitized index +value that is bounded to [0, size) even under cpu speculation +conditions. 
+ +This can be used to protect the earlier load_array() example: + + int load_array(int *array, unsigned int index) + { + if (index >= MAX_ARRAY_ELEMS) + return 0; + else { + index = array_index_nospec(index, MAX_ARRAY_ELEMS); + return array[index]; + } + } diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index 39aa9e8697cc..fbedcc39460b 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py @@ -36,8 +36,7 @@ from docutils import nodes, statemachine from docutils.statemachine import ViewList -from docutils.parsers.rst import directives -from sphinx.util.compat import Directive +from docutils.parsers.rst import directives, Directive from sphinx.ext.autodoc import AutodocReporter __version__ = '1.0' diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 694968c7523c..b757d6eb365b 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -653,7 +653,8 @@ allowed to execute. perf_event_paranoid: Controls use of the performance events system by unprivileged -users (without CAP_SYS_ADMIN). The default value is 2. +users (without CAP_SYS_ADMIN). The default value is 3 if +CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise. -1: Allow use of (almost) all events by all users Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK @@ -661,6 +662,7 @@ users (without CAP_SYS_ADMIN). The default value is 2. Disallow raw tracepoint access by users without CAP_SYS_ADMIN >=1: Disallow CPU event access by users without CAP_SYS_ADMIN >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN +>=3: Disallow all event access by users without CAP_SYS_ADMIN ============================================================== diff --git a/Documentation/trace/events-power.txt b/Documentation/trace/events-power.txt index 21d514ced212..4d817d5acc40 100644 --- a/Documentation/trace/events-power.txt +++ b/Documentation/trace/events-power.txt @@ -25,6 +25,7 @@ cpufreq. cpu_idle "state=%lu cpu_id=%lu" cpu_frequency "state=%lu cpu_id=%lu" +cpu_frequency_limits "min=%lu max=%lu cpu_id=%lu" A suspend event is used to indicate the system going in and out of the suspend mode: diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index d4601df6e72e..f2fcbb7a70c6 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -2407,6 +2407,35 @@ will produce: 1) 1.449 us | } +You can disable the hierarchical function call formatting and instead print a +flat list of function entry and return events. This uses the format described +in the Output Formatting section and respects all the trace options that +control that formatting. Hierarchical formatting is the default. + + hierachical: echo nofuncgraph-flat > trace_options + flat: echo funcgraph-flat > trace_options + + ie: + + # tracer: function_graph + # + # entries-in-buffer/entries-written: 68355/68355 #P:2 + # + # _-----=> irqs-off + # / _----=> need-resched + # | / _---=> hardirq/softirq + # || / _--=> preempt-depth + # ||| / delay + # TASK-PID CPU# |||| TIMESTAMP FUNCTION + # | | | |||| | | + sh-1806 [001] d... 198.843443: graph_ent: func=_raw_spin_lock + sh-1806 [001] d... 198.843445: graph_ent: func=__raw_spin_lock + sh-1806 [001] d..1 198.843447: graph_ret: func=__raw_spin_lock + sh-1806 [001] d..1 198.843449: graph_ret: func=_raw_spin_lock + sh-1806 [001] d..1 198.843451: graph_ent: func=_raw_spin_unlock_irqrestore + sh-1806 [001] d... 
198.843453: graph_ret: func=_raw_spin_unlock_irqrestore + + You might find other useful features for this tracer in the following "dynamic ftrace" section such as tracing only specific functions or tasks. diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX index af0d23968ee7..9d616fca64bd 100644 --- a/Documentation/virtual/00-INDEX +++ b/Documentation/virtual/00-INDEX @@ -9,3 +9,6 @@ kvm/ - Kernel Virtual Machine. See also http://linux-kvm.org uml/ - User Mode Linux, builds/runs Linux kernel as a userspace program. + +acrn/ + - ACRN Project. See also http://xxxx diff --git a/Documentation/virtual/acrn/00-INDEX b/Documentation/virtual/acrn/00-INDEX new file mode 100644 index 000000000000..5beb50eef9e1 --- /dev/null +++ b/Documentation/virtual/acrn/00-INDEX @@ -0,0 +1,8 @@ +00-INDEX + - this file. +index.rst + - Index. +vhm.rst + - virtio and hypervisor service module (VHM) APIs. +vbs.rst + - virtio and backend service (VBS) APIs. diff --git a/Documentation/virtual/acrn/conf.py b/Documentation/virtual/acrn/conf.py new file mode 100644 index 000000000000..ed247df22700 --- /dev/null +++ b/Documentation/virtual/acrn/conf.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8; mode: python -*- + +project = "ACRN Project" + +tags.add("subproject") diff --git a/Documentation/virtual/acrn/index.rst b/Documentation/virtual/acrn/index.rst new file mode 100644 index 000000000000..3630d4fe3207 --- /dev/null +++ b/Documentation/virtual/acrn/index.rst @@ -0,0 +1,17 @@ +.. -*- coding: utf-8; mode: rst -*- + +============================= +ACRN Project +============================= + +.. toctree:: + + vbs.rst + vhm.rst + +.. only:: subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/virtual/acrn/vbs.rst b/Documentation/virtual/acrn/vbs.rst new file mode 100644 index 000000000000..40a0683a1c0b --- /dev/null +++ b/Documentation/virtual/acrn/vbs.rst @@ -0,0 +1,20 @@ +================================ +Virtio and Backend Service (VBS) +================================ + +The Virtio and Backend Service (VBS) in part of ACRN Project. + +The VBS can be further divided into two parts: VBS in user space (VBS-U) +and VBS in kernel space (VBS-K). + +Example: +-------- +A reference driver for VBS-K can be found at :c:type:`struct vbs_rng`. + +.. kernel-doc:: drivers/vbs/vbs_rng.c + +APIs: +----- + +.. kernel-doc:: include/linux/vbs/vbs.h +.. kernel-doc:: include/linux/vbs/vq.h diff --git a/Documentation/virtual/acrn/vhm.rst b/Documentation/virtual/acrn/vhm.rst new file mode 100644 index 000000000000..d3235eab9eb3 --- /dev/null +++ b/Documentation/virtual/acrn/vhm.rst @@ -0,0 +1,14 @@ +================================== +Virtio and Hypervisor Module (VHM) +================================== + +The Virtio and Hypervisor service Module (VHM) in part of +ACRN Project. + +APIs: +----- + +.. kernel-doc:: include/linux/vhm/acrn_vhm_ioreq.h +.. kernel-doc:: include/linux/vhm/acrn_vhm_mm.h +.. kernel-doc:: include/linux/vhm/vhm_ioctl_defs.h +.. kernel-doc:: include/linux/vhm/vhm_vm_mngt.h diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index 3c65feb83010..a81c97a4b4a5 100644 --- a/Documentation/virtual/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt @@ -54,6 +54,10 @@ KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit || || before enabling paravirtualized || || spinlock support. 
------------------------------------------------------------------------------ +KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit + || || can be enabled by setting bit 2 + || || when writing to msr 0x4b564d02 +------------------------------------------------------------------------------ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side || || per-cpu warps are expected in || || kvmclock. diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index 1ebecc115dc6..f3f0d57ced8e 100644 --- a/Documentation/virtual/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt @@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02 when asynchronous page faults are enabled on the vcpu 0 when disabled. Bit 1 is 1 if asynchronous page faults can be injected when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults - are delivered to L1 as #PF vmexits. + are delivered to L1 as #PF vmexits. Bit 2 can be set only if + KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID. First 4 byte of 64 byte memory location will be written to by the hypervisor at the time of asynchronous page fault (APF) diff --git a/Documentation/x86/orc-unwinder.txt b/Documentation/x86/orc-unwinder.txt index af0c9a4c65a6..cd4b29be29af 100644 --- a/Documentation/x86/orc-unwinder.txt +++ b/Documentation/x86/orc-unwinder.txt @@ -4,7 +4,7 @@ ORC unwinder Overview -------- -The kernel CONFIG_ORC_UNWINDER option enables the ORC unwinder, which is +The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is similar in concept to a DWARF unwinder. The difference is that the format of the ORC data is much simpler than DWARF, which in turn allows the ORC unwinder to be much simpler and faster. diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt new file mode 100644 index 000000000000..5cd58439ad2d --- /dev/null +++ b/Documentation/x86/pti.txt @@ -0,0 +1,186 @@ +Overview +======== + +Page Table Isolation (pti, previously known as KAISER[1]) is a +countermeasure against attacks on the shared user/kernel address +space such as the "Meltdown" approach[2]. + +To mitigate this class of attacks, we create an independent set of +page tables for use only when running userspace applications. When +the kernel is entered via syscalls, interrupts or exceptions, the +page tables are switched to the full "kernel" copy. When the system +switches back to user mode, the user copy is used again. + +The userspace page tables contain only a minimal amount of kernel +data: only what is needed to enter/exit the kernel such as the +entry/exit functions themselves and the interrupt descriptor table +(IDT). There are a few strictly unnecessary things that get mapped +such as the first C function when entering an interrupt (see +comments in pti.c). + +This approach helps to ensure that side-channel attacks leveraging +the paging structures do not function when PTI is enabled. It can be +enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time. +Once enabled at compile-time, it can be disabled at boot with the +'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt). + +Page Table Management +===================== + +When PTI is enabled, the kernel manages two sets of page tables. +The first set is very similar to the single set which is present in +kernels without PTI. This includes a complete mapping of userspace +that the kernel can use for things like copy_to_user(). 
+ +Although _complete_, the user portion of the kernel page tables is +crippled by setting the NX bit in the top level. This ensures +that any missed kernel->user CR3 switch will immediately crash +userspace upon executing its first instruction. + +The userspace page tables map only the kernel data needed to enter +and exit the kernel. This data is entirely contained in the 'struct +cpu_entry_area' structure which is placed in the fixmap which gives +each CPU's copy of the area a compile-time-fixed virtual address. + +For new userspace mappings, the kernel makes the entries in its +page tables like normal. The only difference is when the kernel +makes entries in the top (PGD) level. In addition to setting the +entry in the main kernel PGD, a copy of the entry is made in the +userspace page tables' PGD. + +This sharing at the PGD level also inherently shares all the lower +layers of the page tables. This leaves a single, shared set of +userspace page tables to manage. One PTE to lock, one set of +accessed bits, dirty bits, etc... + +Overhead +======== + +Protection against side-channel attacks is important. But, +this protection comes at a cost: + +1. Increased Memory Use + a. Each process now needs an order-1 PGD instead of order-0. + (Consumes an additional 4k per process). + b. The 'cpu_entry_area' structure must be 2MB in size and 2MB + aligned so that it can be mapped by setting a single PMD + entry. This consumes nearly 2MB of RAM once the kernel + is decompressed, but no space in the kernel image itself. + +2. Runtime Cost + a. CR3 manipulation to switch between the page table copies + must be done at interrupt, syscall, and exception entry + and exit (it can be skipped when the kernel is interrupted, + though.) Moves to CR3 are on the order of a hundred + cycles, and are required at every entry and exit. + b. A "trampoline" must be used for SYSCALL entry. This + trampoline depends on a smaller set of resources than the + non-PTI SYSCALL entry code, so requires mapping fewer + things into the userspace page tables. The downside is + that stacks must be switched at entry time. + c. Global pages are disabled for all kernel structures not + mapped into both kernel and userspace page tables. This + feature of the MMU allows different processes to share TLB + entries mapping the kernel. Losing the feature means more + TLB misses after a context switch. The actual loss of + performance is very small, however, never exceeding 1%. + d. Process Context IDentifiers (PCID) is a CPU feature that + allows us to skip flushing the entire TLB when switching page + tables by setting a special bit in CR3 when the page tables + are changed. This makes switching the page tables (at context + switch, or kernel entry/exit) cheaper. But, on systems with + PCID support, the context switch code must flush both the user + and kernel entries out of the TLB. The user PCID TLB flush is + deferred until the exit to userspace, minimizing the cost. + See intel.com/sdm for the gory PCID/INVPCID details. + e. The userspace page tables must be populated for each new + process. Even without PTI, the shared kernel mappings + are created by copying top-level (PGD) entries into each + new process. But, with PTI, there are now *two* kernel + mappings: one in the kernel page tables that maps everything + and one for the entry/exit structures. At fork(), we need to + copy both. + f. 
In addition to the fork()-time copying, there must also + be an update to the userspace PGD any time a set_pgd() is done + on a PGD used to map userspace. This ensures that the kernel + and userspace copies always map the same userspace + memory. + g. On systems without PCID support, each CR3 write flushes + the entire TLB. That means that each syscall, interrupt + or exception flushes the TLB. + h. INVPCID is a TLB-flushing instruction which allows flushing + of TLB entries for non-current PCIDs. Some systems support + PCIDs, but do not support INVPCID. On these systems, addresses + can only be flushed from the TLB for the current PCID. When + flushing a kernel address, we need to flush all PCIDs, so a + single kernel address flush will require a TLB-flushing CR3 + write upon the next use of every PCID. + +Possible Future Work +==================== +1. We can be more careful about not actually writing to CR3 + unless its value is actually changed. +2. Allow PTI to be enabled/disabled at runtime in addition to the + boot-time switching. + +Testing +======== + +To test stability of PTI, the following test procedure is recommended, +ideally doing all of these in parallel: + +1. Set CONFIG_DEBUG_ENTRY=y +2. Run several copies of all of the tools/testing/selftests/x86/ tests + (excluding MPX and protection_keys) in a loop on multiple CPUs for + several minutes. These tests frequently uncover corner cases in the + kernel entry code. In general, old kernels might cause these tests + themselves to crash, but they should never crash the kernel. +3. Run the 'perf' tool in a mode (top or record) that generates many + frequent performance monitoring non-maskable interrupts (see "NMI" + in /proc/interrupts). This exercises the NMI entry/exit code which + is known to trigger bugs in code paths that did not expect to be + interrupted, including nested NMIs. Using "-c" boosts the rate of + NMIs, and using two -c with separate counters encourages nested NMIs + and less deterministic behavior. + + while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done + +4. Launch a KVM virtual machine. +5. Run 32-bit binaries on systems supporting the SYSCALL instruction. + This has been a lightly-tested code path and needs extra scrutiny. + +Debugging +========= + +Bugs in PTI cause a few different signatures of crashes +that are worth noting here. + + * Failures of the selftests/x86 code. Usually a bug in one of the + more obscure corners of entry_64.S + * Crashes in early boot, especially around CPU bringup. Bugs + in the trampoline code or mappings cause these. + * Crashes at the first interrupt. Caused by bugs in entry_64.S, + like screwing up a page table switch. Also caused by + incorrectly mapping the IRQ handler entry code. + * Crashes at the first NMI. The NMI code is separate from main + interrupt handlers and can have bugs that do not affect + normal interrupts. Also caused by incorrectly mapping NMI + code. NMIs that interrupt the entry code must be very + careful and can be the cause of crashes that show up when + running perf. + * Kernel crashes at the first exit to userspace. entry_64.S + bugs, or failing to map some of the exit code. + * Crashes at first interrupt that interrupts userspace. The paths + in entry_64.S that return to userspace are sometimes separate + from the ones that return to the kernel. + * Double faults: overflowing the kernel stack because of page + faults upon page faults. 
Caused by touching non-pti-mapped + data in the entry code, or forgetting to switch to kernel + CR3 before calling into C functions which are not pti-mapped. + * Userspace segfaults early in boot, sometimes manifesting + as mount(8) failing to mount the rootfs. These have + tended to be TLB invalidation issues. Usually invalidating + the wrong PCID, or otherwise missing an invalidation. + +1. https://gruss.cc/files/kaiser.pdf +2. https://meltdownattack.com/meltdown.pdf diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index b0798e281aa6..ea91cb61a602 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -1,6 +1,4 @@ - - Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm @@ -14,13 +12,17 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... + vaddr_end for KASLR +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping +fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable) -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space (variable) +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Virtual memory map with 5 level page tables: @@ -29,26 +31,31 @@ Virtual memory map with 5 level page tables: hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory -ff90000000000000 - ff91ffffffffffff (=49 bits) hole -ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space +ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI +ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... -ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB) +ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) +... unused hole ... + vaddr_end for KASLR +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping ... unused hole ... ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Architecture defines a 64-bit virtual address. Implementations can support less. 
Currently supported are 48- and 57-bit virtual addresses. Bits 63 -through to the most-significant implemented bit are set to either all ones -or all zero. This causes hole between user space and kernel addresses. +through to the most-significant implemented bit are sign extended. +This causes hole between user space and kernel addresses if you interpret them +as unsigned. The direct mapping covers all memory in the system up to the highest memory address (this means in some cases it can also include PCI memory @@ -58,19 +65,15 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of the processes using the page fault handler, with init_top_pgt as reference. -Current X86-64 implementations support up to 46 bits of address space (64 TB), -which is our current limit. This expands into MBZ space in the page tables. - We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual memory window (this size is arbitrary, it can be raised later if needed). The mappings are not part of any other kernel PGD and are only available during EFI runtime calls. -The module mapping space size changes based on the CONFIG requirements for the -following fixmap section. - Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all physical memory, vmalloc/ioremap space and virtual memory map are randomized. Their order is preserved but their base will be offset early at boot time. --Andi Kleen, Jul 2004 +Be very careful vs. KASLR when changing anything here. The KASLR address +range must not overlap with anything except the KASAN shadow area, which is +correct as KASAN disables KASLR. diff --git a/MAINTAINERS b/MAINTAINERS index 2811a211632c..5201db8bf7f1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4839,6 +4839,21 @@ S: Maintained F: drivers/media/usb/dvb-usb-v2/dvb_usb* F: drivers/media/usb/dvb-usb-v2/usb_urb.c +DVC_TRACE BUS DRIVER +M: Traian Schiau +S: Maintained +F: drivers/bus/dvctrace.c +F: include/linux/dvctrace.h +F: Documentation/ABI/testing/sysfs-bus-dvctrace + +DVC_TRACE USB_GADGET DRIVER +M: Traian Schiau +S: Maintained +F: drivers/usb/gadget/function/f_dvctrace.c +F: drivers/usb/gadget/function/u_dvctrace.h +F: include/linux/usb/debug.h +F: Documentation/ABI/testing/configfs-usb-gadget-dvctrace + DYNAMIC DEBUG M: Jason Baron S: Maintained @@ -7110,6 +7125,12 @@ S: Supported F: Documentation/trace/intel_th.txt F: drivers/hwtracing/intel_th/ +INTEL(R) TRACE HUB TO USB-DVC.TRACE +M: Traian Schiau +S: Supported +F: drivers/hwtracing/intel_th/msu-dvc.c +F: Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith + INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) M: Ning Sun L: tboot-devel@lists.sourceforge.net @@ -7670,16 +7691,6 @@ F: include/linux/kdb.h F: include/linux/kgdb.h F: kernel/debug/ -KMEMCHECK -M: Vegard Nossum -M: Pekka Enberg -S: Maintained -F: Documentation/dev-tools/kmemcheck.rst -F: arch/x86/include/asm/kmemcheck.h -F: arch/x86/mm/kmemcheck/ -F: include/linux/kmemcheck.h -F: mm/kmemcheck.c - KMEMLEAK M: Catalin Marinas S: Maintained @@ -9011,6 +9022,7 @@ MIPS GENERIC PLATFORM M: Paul Burton L: linux-mips@linux-mips.org S: Supported +F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt F: arch/mips/generic/ F: arch/mips/tools/generic-board-config.sh @@ -10054,7 +10066,7 @@ M: Stephen Boyd L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git -F: drivers/base/power/opp/ +F: drivers/opp/ F: include/linux/pm_opp.h F: Documentation/power/opp.txt F: 
Documentation/devicetree/bindings/opp/ @@ -11573,6 +11585,17 @@ F: include/net/rose.h F: include/uapi/linux/rose.h F: net/rose/ +RPMB SUBSYSTEM +M: Tomas Winkler +L: linux-kernel@vger.kernel.org +S: Supported +F: drivers/char/rpmb/* +F: include/uapi/linux/rpmb.h +F: include/linux/rpmb.h +F: Documentation/ABI/testing/sysfs-class-rpmb +F: Documentation/rpmb.rst +F: tools/rpmb/ + RTL2830 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org diff --git a/Makefile b/Makefile index ccd981892ef2..c6a9d5c603b6 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 0 +SUBLEVEL = 35 EXTRAVERSION = -NAME = Fearless Coyote +NAME = Petit Gorille # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -11,6 +11,10 @@ NAME = Fearless Coyote # Comments in this file are targeted only to the developer, do not # expect to learn how to build the kernel reading this file. +# That's our default target when none is given on the command line +PHONY := _all +_all: + # o Do not use make's built-in rules and variables # (this increases performance and avoids hard-to-debug behaviour); # o Look for make include files relative to root of kernel src @@ -117,10 +121,6 @@ ifeq ("$(origin O)", "command line") KBUILD_OUTPUT := $(O) endif -# That's our default target when none is given on the command line -PHONY := _all -_all: - # Cancel implicit rules on top Makefile $(CURDIR)/Makefile Makefile: ; @@ -187,15 +187,6 @@ ifeq ("$(origin M)", "command line") KBUILD_EXTMOD := $(M) endif -# If building an external module we do not care about the all: rule -# but instead _all depend on modules -PHONY += all -ifeq ($(KBUILD_EXTMOD),) -_all: all -else -_all: modules -endif - ifeq ($(KBUILD_SRC),) # building in the source tree srctree := . @@ -207,6 +198,9 @@ else srctree := $(KBUILD_SRC) endif endif + +export KBUILD_CHECKSRC KBUILD_EXTMOD KBUILD_SRC + objtree := . src := $(srctree) obj := $(objtree) @@ -215,6 +209,74 @@ VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD)) export srctree objtree VPATH +# To make sure we do not include .config for any of the *config targets +# catch them early, and hand them over to scripts/kconfig/Makefile +# It is allowed to specify more targets when calling make, including +# mixing *config targets and build targets. +# For example 'make oldconfig all'. +# Detect when mixed targets is specified, and make a second invocation +# of make so .config is not included in this case either (for *config). 
+ +version_h := include/generated/uapi/linux/version.h +old_version_h := include/linux/version.h + +no-dot-config-targets := clean mrproper distclean \ + cscope gtags TAGS tags help% %docs check% coccicheck \ + $(version_h) headers_% archheaders archscripts \ + kernelversion %src-pkg + +config-targets := 0 +mixed-targets := 0 +dot-config := 1 + +ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) + ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) + dot-config := 0 + endif +endif + +ifeq ($(KBUILD_EXTMOD),) + ifneq ($(filter config %config,$(MAKECMDGOALS)),) + config-targets := 1 + ifneq ($(words $(MAKECMDGOALS)),1) + mixed-targets := 1 + endif + endif +endif +# install and modules_install need also be processed one by one +ifneq ($(filter install,$(MAKECMDGOALS)),) + ifneq ($(filter modules_install,$(MAKECMDGOALS)),) + mixed-targets := 1 + endif +endif + +ifeq ($(mixed-targets),1) +# =========================================================================== +# We're called with mixed targets (*config and build targets). +# Handle them one by one. + +PHONY += $(MAKECMDGOALS) __build_one_by_one + +$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one + @: + +__build_one_by_one: + $(Q)set -e; \ + for i in $(MAKECMDGOALS); do \ + $(MAKE) -f $(srctree)/Makefile $$i; \ + done + +else + +# We need some generic definitions (do not try to remake the file). +scripts/Kbuild.include: ; +include scripts/Kbuild.include + +# Read KERNELRELEASE from include/config/kernel.release (if it exists) +KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) +KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) +export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION + # SUBARCH tells the usermode build what the underlying arch is. That is set # first, and if a usermode build is happening, the "ARCH=um" on the command # line overrides the setting of ARCH below. If a native build is happening, @@ -312,40 +374,6 @@ HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \ -Wno-missing-field-initializers -fno-delete-null-pointer-checks endif -# Decide whether to build built-in, modular, or both. -# Normally, just do built-in. - -KBUILD_MODULES := -KBUILD_BUILTIN := 1 - -# If we have only "make modules", don't compile built-in objects. -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. - -ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) -endif - -# If we have "make modules", compile modules -# in addition to whatever we do anyway. -# Just "make" or "make all" shall build modules as well - -ifneq ($(filter all _all modules,$(MAKECMDGOALS)),) - KBUILD_MODULES := 1 -endif - -ifeq ($(MAKECMDGOALS),) - KBUILD_MODULES := 1 -endif - -export KBUILD_MODULES KBUILD_BUILTIN -export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD - -# We need some generic definitions (do not try to remake the file). -scripts/Kbuild.include: ; -include scripts/Kbuild.include - # Make variables (CC, etc...) 
AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld @@ -373,9 +401,6 @@ LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = LDFLAGS_vmlinux = -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) -CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) - # Use USERINCLUDE when you must reference the UAPI directories only. USERINCLUDE := \ @@ -394,34 +419,28 @@ LINUXINCLUDE := \ -I$(objtree)/include \ $(USERINCLUDE) -KBUILD_CPPFLAGS := -D__KERNEL__ - +KBUILD_AFLAGS := -D__ASSEMBLY__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -std=gnu89 $(call cc-option,-fno-PIE) - - + -std=gnu89 +KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := -KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds +GCC_PLUGINS_CFLAGS := -# Read KERNELRELEASE from include/config/kernel.release (if it exists) -KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) -KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) - -export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL @@ -463,73 +482,23 @@ ifneq ($(KBUILD_SRC),) $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) endif -# Support for using generic headers in asm-generic -PHONY += asm-generic uapi-asm-generic -asm-generic: uapi-asm-generic - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ - src=asm obj=arch/$(SRCARCH)/include/generated/asm -uapi-asm-generic: - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ - src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm - -# To make sure we do not include .config for any of the *config targets -# catch them early, and hand them over to scripts/kconfig/Makefile -# It is allowed to specify more targets when calling make, including -# mixing *config targets and build targets. -# For example 'make oldconfig all'. -# Detect when mixed targets is specified, and make a second invocation -# of make so .config is not included in this case either (for *config). 
- -version_h := include/generated/uapi/linux/version.h -old_version_h := include/linux/version.h - -no-dot-config-targets := clean mrproper distclean \ - cscope gtags TAGS tags help% %docs check% coccicheck \ - $(version_h) headers_% archheaders archscripts \ - kernelversion %src-pkg - -config-targets := 0 -mixed-targets := 0 -dot-config := 1 - -ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) - ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) - dot-config := 0 - endif +ifeq ($(cc-name),clang) +ifneq ($(CROSS_COMPILE),) +CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) +GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) endif - -ifeq ($(KBUILD_EXTMOD),) - ifneq ($(filter config %config,$(MAKECMDGOALS)),) - config-targets := 1 - ifneq ($(words $(MAKECMDGOALS)),1) - mixed-targets := 1 - endif - endif +ifneq ($(GCC_TOOLCHAIN),) +CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) endif -# install and modules_install need also be processed one by one -ifneq ($(filter install,$(MAKECMDGOALS)),) - ifneq ($(filter modules_install,$(MAKECMDGOALS)),) - mixed-targets := 1 - endif +KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) +KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) endif -ifeq ($(mixed-targets),1) -# =========================================================================== -# We're called with mixed targets (*config and build targets). -# Handle them one by one. - -PHONY += $(MAKECMDGOALS) __build_one_by_one - -$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one - @: - -__build_one_by_one: - $(Q)set -e; \ - for i in $(MAKECMDGOALS); do \ - $(MAKE) -f $(srctree)/Makefile $$i; \ - done +RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register +RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk +RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) +export RETPOLINE_CFLAGS -else ifeq ($(config-targets),1) # =========================================================================== # *config targets only - make sure prerequisites are updated, and descend @@ -552,6 +521,44 @@ else # Build targets only - this includes vmlinux, arch specific targets, clean # targets and others. In general all targets except *config targets. +# If building an external module we do not care about the all: rule +# but instead _all depend on modules +PHONY += all +ifeq ($(KBUILD_EXTMOD),) +_all: all +else +_all: modules +endif + +# Decide whether to build built-in, modular, or both. +# Normally, just do built-in. + +KBUILD_MODULES := +KBUILD_BUILTIN := 1 + +# If we have only "make modules", don't compile built-in objects. +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. + +ifeq ($(MAKECMDGOALS),modules) + KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) +endif + +# If we have "make modules", compile modules +# in addition to whatever we do anyway. 
+# Just "make" or "make all" shall build modules as well + +ifneq ($(filter all _all modules,$(MAKECMDGOALS)),) + KBUILD_MODULES := 1 +endif + +ifeq ($(MAKECMDGOALS),) + KBUILD_MODULES := 1 +endif + +export KBUILD_MODULES KBUILD_BUILTIN + ifeq ($(KBUILD_EXTMOD),) # Additional helpers built in scripts/ # Carefully list dependencies so we do not try to build scripts twice @@ -622,6 +629,12 @@ endif # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) +KBUILD_AFLAGS += $(call cc-option,-fno-PIE) +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) +CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) +export CFLAGS_GCOV CFLAGS_KCOV + # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default # values of the respective KBUILD_* variables ARCH_CPPFLAGS := @@ -698,7 +711,8 @@ KBUILD_CFLAGS += $(stackp-flag) ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) +CLANG_TRIPLE ?= $(CROSS_COMPILE) +CLANG_TARGET := --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) endif ifneq ($(GCC_TOOLCHAIN),) @@ -711,6 +725,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) +KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the @@ -801,6 +816,18 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +# clang sets -fmerge-all-constants by default as optimization, but this +# is non-conforming behavior for C and in fact breaks the kernel, so we +# need to disable it here generally. +KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants) + +# for gcc -fno-merge-all-constants disables everything, but it is fine +# to have actual conforming behavior enabled. +KBUILD_CFLAGS += $(call cc-option,-fmerge-constants) + +# Make sure -fstack-check isn't enabled (like gentoo apparently did) +KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) + # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) @@ -934,8 +961,8 @@ ifdef CONFIG_STACK_VALIDATION ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else - ifdef CONFIG_ORC_UNWINDER - $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") + ifdef CONFIG_UNWINDER_ORC + $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") else $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") endif @@ -1071,6 +1098,15 @@ prepare0: archprepare gcc-plugins # All the preparing.. 
prepare: prepare0 prepare-objtool +# Support for using generic headers in asm-generic +PHONY += asm-generic uapi-asm-generic +asm-generic: uapi-asm-generic + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ + src=asm obj=arch/$(SRCARCH)/include/generated/asm +uapi-asm-generic: + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ + src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm + PHONY += prepare-objtool prepare-objtool: $(objtool_target) diff --git a/arch/Kconfig b/arch/Kconfig index 057370a0ac4e..400b9e1b2f27 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -91,7 +91,7 @@ config STATIC_KEYS_SELFTEST config OPTPROBES def_bool y depends on KPROBES && HAVE_OPTPROBES - depends on !PREEMPT + select TASKS_RCU if PREEMPT config KPROBES_ON_FTRACE def_bool y diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index d2e4da93e68c..ca3322536f72 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -20,8 +20,8 @@ "3: .subsection 2\n" \ "4: br 1b\n" \ " .previous\n" \ - EXC(1b,3b,%1,$31) \ - EXC(2b,3b,%1,$31) \ + EXC(1b,3b,$31,%1) \ + EXC(2b,3b,$31,%1) \ : "=&r" (oldval), "=&r"(ret) \ : "r" (uaddr), "r"(oparg) \ : "memory") @@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "3: .subsection 2\n" "4: br 1b\n" " .previous\n" - EXC(1b,3b,%0,$31) - EXC(2b,3b,%0,$31) + EXC(1b,3b,$31,%0) + EXC(2b,3b,$31,%0) : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) : "memory"); diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c index 8e9a41966881..5476279329a6 100644 --- a/arch/alpha/kernel/console.c +++ b/arch/alpha/kernel/console.c @@ -21,6 +21,7 @@ struct pci_controller *pci_vga_hose; static struct resource alpha_vga = { .name = "alpha-vga+", + .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ce3a675c0c4b..75a5c35a2067 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -964,8 +964,8 @@ static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return copy_to_user(o, &(struct timeval32){ - .tv_sec = o->tv_sec, - .tv_usec = o->tv_usec}, + .tv_sec = i->tv_sec, + .tv_usec = i->tv_usec}, sizeof(struct timeval32)); } diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h index 26231601630e..f332d88ffaff 100644 --- a/arch/alpha/kernel/pci_impl.h +++ b/arch/alpha/kernel/pci_impl.h @@ -144,7 +144,8 @@ struct pci_iommu_arena }; #if defined(CONFIG_ALPHA_SRM) && \ - (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) + (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \ + defined(CONFIG_ALPHA_AVANTI)) # define NEED_SRM_SAVE_RESTORE #else # undef NEED_SRM_SAVE_RESTORE diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 74bfb1f2d68e..3a885253f486 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp, application calling fork. */ if (clone_flags & CLONE_SETTLS) childti->pcb.unique = regs->r20; + else + regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */ childti->pcb.usp = usp ?: rdusp(); *childregs = *regs; childregs->r0 = 0; childregs->r19 = 0; childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. 
*/ - regs->r20 = 0; stack = ((struct switch_stack *) regs) - 1; *childstack = *stack; childstack->r26 = (unsigned long) ret_from_fork; diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index 37bd6d9b8eb9..a6bdc1da47ad 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -102,6 +102,15 @@ sio_pci_route(void) alpha_mv.sys.sio.route_tab); } +static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev) +{ + if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && + (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + return false; + + return true; +} + static unsigned int __init sio_collect_irq_levels(void) { @@ -110,8 +119,7 @@ sio_collect_irq_levels(void) /* Iterate through the devices, collecting IRQ levels. */ for_each_pci_dev(dev) { - if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && - (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + if (!sio_pci_dev_irq_needs_level(dev)) continue; if (dev->irq) @@ -120,8 +128,7 @@ sio_collect_irq_levels(void) return level_bits; } -static void __init -sio_fixup_irq_levels(unsigned int level_bits) +static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset) { unsigned int old_level_bits; @@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits) */ old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); - level_bits |= (old_level_bits & 0x71ff); + if (reset) + old_level_bits &= 0x71ff; + + level_bits |= old_level_bits; outb((level_bits >> 0) & 0xff, 0x4d0); outb((level_bits >> 8) & 0xff, 0x4d1); } +static inline void +sio_fixup_irq_levels(unsigned int level_bits) +{ + __sio_fixup_irq_levels(level_bits, true); +} + static inline int noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { @@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; int irq = COMMON_TABLE_LOOKUP, tmp; tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); - return irq >= 0 ? tmp : -1; + + irq = irq >= 0 ? 
tmp : -1; + + /* Fixup IRQ level if an actual IRQ mapping is detected */ + if (sio_pci_dev_irq_needs_level(dev) && irq >= 0) + __sio_fixup_irq_levels(1 << irq, false); + + return irq; } static inline int diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 4bd99a7b1c41..f43bd05dede2 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) for(i=0; i < kstack_depth_to_print; i++) { if (((long) stack & (THREAD_SIZE-1)) == 0) break; - if (i && ((i % 4) == 0)) - printk("\n "); - printk("%016lx ", *stack++); + if ((i % 4) == 0) { + if (i) + pr_cont("\n"); + printk(" "); + } else { + pr_cont(" "); + } + pr_cont("%016lx", *stack++); } - printk("\n"); + pr_cont("\n"); dik_show_trace(sp); } diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore index 5246969a20c5..c4c5fd529c25 100644 --- a/arch/arc/boot/.gitignore +++ b/arch/arc/boot/.gitignore @@ -1,2 +1 @@ -*.dtb* uImage diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index f35974ee7264..c9173c02081c 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) return 0; __asm__ __volatile__( + " mov lp_count, %5 \n" " lp 3f \n" "1: ldb.ab %3, [%2, 1] \n" " breq.d %3, 0, 3f \n" @@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .word 1b, 4b \n" " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) - : "g"(-EFAULT), "l"(count) - : "memory"); + : "g"(-EFAULT), "r"(count) + : "lp_count", "lp_start", "lp_end", "memory"); return res; } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d1346a160760..858638134bfa 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1826,6 +1826,15 @@ config XEN help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. +config ARM_FLUSH_CONSOLE_ON_RESTART + bool "Force flush the console on restart" + help + If the console is locked while the system is rebooted, the messages + in the temporary logbuffer would not have propogated to all the + console drivers. This option forces the console lock to be + released if it failed to be acquired, which will cause all the + pending messages to be flushed. + endmenu menu "Boot options" @@ -1854,6 +1863,21 @@ config DEPRECATED_PARAM_STRUCT This was deprecated in 2001 and announced to live on for 5 years. Some old boot loaders still use this way. +config BUILD_ARM_APPENDED_DTB_IMAGE + bool "Build a concatenated zImage/dtb by default" + depends on OF + help + Enabling this option will cause a concatenated zImage and list of + DTBs to be built by default (instead of a standalone zImage.) + The image will built in arch/arm/boot/zImage-dtb + +config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES + string "Default dtb names" + depends on BUILD_ARM_APPENDED_DTB_IMAGE + help + Space separated list of names of dtbs to append when + building a concatenated zImage-dtb. + # Compressed boot loader in ROM. Yes, we really want to ask about # TEXT and BSS so we preserve their values in the config files. 
config ZBOOT_ROM_TEXT diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 36ae4454554c..bc805d702d82 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -303,6 +303,8 @@ libs-y := arch/arm/lib/ $(libs-y) boot := arch/arm/boot ifeq ($(CONFIG_XIP_KERNEL),y) KBUILD_IMAGE := $(boot)/xipImage +else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y) +KBUILD_IMAGE := $(boot)/zImage-dtb else KBUILD_IMAGE := $(boot)/zImage endif @@ -356,6 +358,9 @@ ifeq ($(CONFIG_VDSO),y) $(Q)$(MAKE) $(build)=arch/arm/vdso $@ endif +zImage-dtb: vmlinux scripts dtbs + $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ + # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore index 3c79f85975aa..ad7a0253ea96 100644 --- a/arch/arm/boot/.gitignore +++ b/arch/arm/boot/.gitignore @@ -4,3 +4,4 @@ xipImage bootpImage uImage *.dtb +zImage-dtb \ No newline at end of file diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 50f8d1be7fcb..da75630c440d 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -16,6 +16,7 @@ OBJCOPYFLAGS :=-O binary -R .comment -S ifneq ($(MACHINE),) include $(MACHINE)/Makefile.boot endif +include $(srctree)/arch/arm/boot/dts/Makefile # Note: the following conditions must always be true: # ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) @@ -29,6 +30,14 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS targets := Image zImage xipImage bootpImage uImage +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) + ifeq ($(CONFIG_XIP_KERNEL),y) $(obj)/xipImage: vmlinux FORCE @@ -55,6 +64,10 @@ $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) +$(obj)/zImage-dtb: $(obj)/zImage $(DTB_OBJS) FORCE + $(call if_changed,cat) + @echo ' Kernel: $@ is ready' + endif ifneq ($(LOADADDR),) diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 8a756870c238..5b9e2d4bc1b3 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -794,6 +794,8 @@ __armv7_mmu_cache_on: bic r6, r6, #1 << 31 @ 32-bit translation system bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer + mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs + mcr p15, 0, r0, c7, c5, 4 @ ISB mcrne p15, 0, r1, c3, c0, 0 @ load domain access control mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index eff87a344566..86e591cc2567 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -1074,5 +1074,15 @@ endif dtstree := $(srctree)/$(src) dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) -always := $(dtb-y) +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif + +targets += dtbs dtbs_install +targets += $(DTB_LIST) + +always := $(DTB_LIST) clean-files := *.dtb diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index e5b061469bf8..4714a59fd86d 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -927,7 +927,8 @@ reg = <0x48038000 0x2000>, <0x46000000 0x400000>; reg-names = "mpu", "dat"; - 
interrupts = <80>, <81>; + interrupts = , + ; interrupt-names = "tx", "rx"; status = "disabled"; dmas = <&edma 8 2>, @@ -941,7 +942,8 @@ reg = <0x4803C000 0x2000>, <0x46400000 0x400000>; reg-names = "mpu", "dat"; - interrupts = <82>, <83>; + interrupts = , + ; interrupt-names = "tx", "rx"; status = "disabled"; dmas = <&edma 10 2>, diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts index 9e92d480576b..3b9a94c274a7 100644 --- a/arch/arm/boot/dts/am437x-cm-t43.dts +++ b/arch/arm/boot/dts/am437x-cm-t43.dts @@ -301,8 +301,8 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>; - dmas = <&edma 16 - &edma 17>; + dmas = <&edma 16 0 + &edma 17 0>; dma-names = "tx0", "rx0"; flash: w25q64cvzpig@0 { diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi index 7b8d90b7aeea..29b636fce23f 100644 --- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi @@ -150,11 +150,6 @@ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>; }; -&charlcd { - interrupt-parent = <&intc>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; -}; - &serial0 { interrupt-parent = <&intc>; interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts index f53e89d63477..c971cc93f42d 100644 --- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts +++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts @@ -16,7 +16,7 @@ bootargs = "console=ttyS4,115200 earlyprintk"; }; - memory { + memory@80000000 { reg = <0x80000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index dff66974feed..d5f5e92e7488 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -85,7 +85,7 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; @@ -93,7 +93,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x20600 0x20>; interrupts = ; + IRQ_TYPE_EDGE_RISING)>; clocks = <&periph_clk>; }; diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index 3bc50849d013..b8bde13de90a 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -141,10 +141,6 @@ status = "okay"; }; -&sata { - status = "okay"; -}; - &qspi { bspi-sel = <0>; flash: m25p80@0 { diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index d94d14b3c745..6a44b8021702 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -177,10 +177,6 @@ status = "okay"; }; -&sata { - status = "okay"; -}; - &srab { compatible = "brcm,bcm58625-srab", "brcm,nsp-srab"; status = "okay"; diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts index b024a65c6e27..f64aab450315 100644 --- a/arch/arm/boot/dts/dra76-evm.dts +++ b/arch/arm/boot/dts/dra76-evm.dts @@ -148,6 +148,7 @@ compatible = "ti,tps65917"; reg = <0x58>; ti,system-power-controller; + ti,palmas-override-powerhold; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index bceb919ac637..65602cd51a4e 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -395,7 +395,7 @@ reg = <0>; vdd3-supply = <&lcd_vdd3_reg>; vci-supply = <&ldo25_reg>; - reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>; power-on-delay= <50>; 
reset-delay = <100>; init-delay = <100>; diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi index 7eab4bc07cec..7628bbb02324 100644 --- a/arch/arm/boot/dts/exynos5410.dtsi +++ b/arch/arm/boot/dts/exynos5410.dtsi @@ -333,7 +333,6 @@ &rtc { clocks = <&clock CLK_RTC>; clock-names = "rtc"; - interrupt-parent = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 8bf0d89cdd35..2e516f4985e4 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -433,15 +433,6 @@ clock-names = "ipg", "per"; }; - srtc: srtc@53fa4000 { - compatible = "fsl,imx53-rtc", "fsl,imx25-rtc"; - reg = <0x53fa4000 0x4000>; - interrupts = <24>; - interrupt-parent = <&tzic>; - clocks = <&clks IMX5_CLK_SRTC_GATE>; - clock-names = "ipg"; - }; - iomuxc: iomuxc@53fa8000 { compatible = "fsl,imx53-iomuxc"; reg = <0x53fa8000 0x4000>; diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts index cf2f5240e176..27cc913ca0f5 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts @@ -53,7 +53,8 @@ }; pinctrl: pin-controller@10000 { - pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>; + pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header + &pmx_gpio_header_gpo>; pinctrl-names = "default"; pmx_uart0: pmx-uart0 { @@ -85,11 +86,16 @@ * ground. */ pmx_gpio_header: pmx-gpio-header { - marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28", + marvell,pins = "mpp17", "mpp29", "mpp28", "mpp35", "mpp34", "mpp40"; marvell,function = "gpio"; }; + pmx_gpio_header_gpo: pxm-gpio-header-gpo { + marvell,pins = "mpp7"; + marvell,function = "gpo"; + }; + pmx_gpio_init: pmx-init { marvell,pins = "mpp38"; marvell,function = "gpio"; diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts index 38faa90007d7..2fa5eb4bd402 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts @@ -72,7 +72,8 @@ }; &gpmc { - ranges = <1 0 0x08000000 0x1000000>; /* CS1: 16MB for LAN9221 */ + ranges = <0 0 0x30000000 0x1000000 /* CS0: 16MB for NAND */ + 1 0 0x2c000000 0x1000000>; /* CS1: 16MB for LAN9221 */ ethernet@gpmc { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index 26cce4d18405..e262fa9ef334 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -37,7 +37,7 @@ }; &gpmc { - ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */ + ranges = <0 0 0x30000000 0x1000000>; /* CS0: 16MB for NAND */ nand@0,0 { compatible = "ti,omap2-nand"; @@ -97,6 +97,8 @@ }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <2600000>; twl: twl@48 { @@ -121,7 +123,7 @@ &mmc3 { interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; - pinctrl-0 = <&mmc3_pins>; + pinctrl-0 = <&mmc3_pins &wl127x_gpio>; pinctrl-names = "default"; vmmc-supply = <&wl12xx_vmmc>; non-removable; @@ -132,8 +134,8 @@ wlcore: wlcore@2 { compatible = "ti,wl1273"; reg = <2>; - interrupt-parent = <&gpio5>; - interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */ + interrupt-parent = <&gpio1>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */ ref-clock-frequency = <26000000>; }; }; @@ -157,8 +159,6 @@ OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */ OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) 
/* sdmmc2_dat6.sdmmc3_dat2 */ OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */ - OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */ - OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */ OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */ >; @@ -217,7 +217,12 @@ >; }; - + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + >; + }; }; &omap3_pmx_wkup { @@ -228,6 +233,12 @@ OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4) /* sys_boot2.gpio_4 */ >; }; + wl127x_gpio: pinmux_wl127x_gpio_pin { + pinctrl-single,pins = < + OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */ + OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ + >; + }; }; &omap3_pmx_core2 { diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 6d89736c7b44..cf22b35f0a28 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -104,6 +104,8 @@ }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <2600000>; twl: twl@48 { @@ -211,6 +213,12 @@ OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ >; }; + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + >; + }; }; &uart2 { diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts index 52b3ed10283a..e2bc731079be 100644 --- a/arch/arm/boot/dts/lpc3250-ea3250.dts +++ b/arch/arm/boot/dts/lpc3250-ea3250.dts @@ -156,8 +156,8 @@ uda1380: uda1380@18 { compatible = "nxp,uda1380"; reg = <0x18>; - power-gpio = <&gpio 0x59 0>; - reset-gpio = <&gpio 0x51 0>; + power-gpio = <&gpio 3 10 0>; + reset-gpio = <&gpio 3 2 0>; dac-clk = "wspll"; }; diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts index fd95e2b10357..b7bd3a110a8d 100644 --- a/arch/arm/boot/dts/lpc3250-phy3250.dts +++ b/arch/arm/boot/dts/lpc3250-phy3250.dts @@ -81,8 +81,8 @@ uda1380: uda1380@18 { compatible = "nxp,uda1380"; reg = <0x18>; - power-gpio = <&gpio 0x59 0>; - reset-gpio = <&gpio 0x51 0>; + power-gpio = <&gpio 3 10 0>; + reset-gpio = <&gpio 3 2 0>; dac-clk = "wspll"; }; diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts index 940875316d0f..67b4de0e3439 100644 --- a/arch/arm/boot/dts/ls1021a-qds.dts +++ b/arch/arm/boot/dts/ls1021a-qds.dts @@ -215,7 +215,7 @@ reg = <0x2a>; VDDA-supply = <®_3p3v>; VDDIO-supply = <®_3p3v>; - clocks = <&sys_mclk 1>; + clocks = <&sys_mclk>; }; }; }; diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts index a8b148ad1dd2..44715c8ef756 100644 --- a/arch/arm/boot/dts/ls1021a-twr.dts +++ b/arch/arm/boot/dts/ls1021a-twr.dts @@ -187,7 +187,7 @@ reg = <0x0a>; VDDA-supply = <®_3p3v>; VDDIO-supply = <®_3p3v>; - clocks = <&sys_mclk 1>; + clocks = <&sys_mclk>; }; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 9319e1f0f1d8..379b4a03cfe2 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -155,7 +155,7 @@ 
}; esdhc: esdhc@1560000 { - compatible = "fsl,esdhc"; + compatible = "fsl,ls1021a-esdhc", "fsl,esdhc"; reg = <0x0 0x1560000 0x0 0x10000>; interrupts = ; clock-frequency = <0>; diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi index afe12e5b51f9..f936000f0699 100644 --- a/arch/arm/boot/dts/mt2701.dtsi +++ b/arch/arm/boot/dts/mt2701.dtsi @@ -593,6 +593,7 @@ compatible = "mediatek,mt2701-hifsys", "syscon"; reg = <0 0x1a000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; usb0: usb@1a1c0000 { @@ -677,6 +678,7 @@ compatible = "mediatek,mt2701-ethsys", "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; eth: ethernet@1b100000 { diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index ec8a07415cb3..36983a7d7cfd 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -753,6 +753,7 @@ "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; eth: ethernet@1b100000 { diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts index 688a86378cee..7bf5aa2237c9 100644 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts @@ -204,7 +204,7 @@ bus-width = <4>; max-frequency = <50000000>; cap-sd-highspeed; - cd-gpios = <&pio 261 0>; + cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>; vmmc-supply = <&mt6323_vmch_reg>; vqmmc-supply = <&mt6323_vio18_reg>; }; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 64d00f5893a6..28d10abd8b04 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -354,7 +354,7 @@ elm: elm@48078000 { compatible = "ti,am3352-elm"; reg = <0x48078000 0x2000>; - interrupts = <4>; + interrupts = ; ti,hwmods = "elm"; status = "disabled"; }; @@ -861,14 +861,12 @@ usbhsohci: ohci@4a064800 { compatible = "ti,ohci-omap3"; reg = <0x4a064800 0x400>; - interrupt-parent = <&gic>; interrupts = ; }; usbhsehci: ehci@4a064c00 { compatible = "ti,ehci-omap"; reg = <0x4a064c00 0x400>; - interrupt-parent = <&gic>; interrupts = ; }; }; diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index 0ce0b278e1cb..25c3a10d669c 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -278,6 +278,12 @@ }; }; + cec_clock: cec-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <12000000>; + }; + hdmi-out { compatible = "hdmi-connector"; type = "a"; @@ -642,12 +648,6 @@ }; }; - cec_clock: cec-clock { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <12000000>; - }; - hdmi@39 { compatible = "adi,adv7511w"; reg = <0x39>; diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi index 99cfae875e12..5eae4776ffde 100644 --- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi +++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi @@ -110,26 +110,6 @@ }; }; -&cpu0 { - cpu0-supply = <&vdd_cpu>; - operating-points = < - /* KHz uV */ - 1800000 1400000 - 1608000 1350000 - 1512000 1300000 - 1416000 1200000 - 1200000 1100000 - 1008000 1050000 - 816000 1000000 - 696000 950000 - 600000 900000 - 408000 900000 - 312000 900000 - 216000 900000 - 126000 900000 - >; -}; - &emmc { status = "okay"; bus-width = <8>; diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 726c5d0dbd5b..b290a5abb901 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi @@ 
-463,6 +463,7 @@ compatible = "samsung,exynos4210-ohci"; reg = <0xec300000 0x100>; interrupts = <23>; + interrupt-parent = <&vic1>; clocks = <&clocks CLK_USB_HOST>; clock-names = "usbhost"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts index 84101e4eebbf..0f5f379323a8 100644 --- a/arch/arm/boot/dts/spear1310-evb.dts +++ b/arch/arm/boot/dts/spear1310-evb.dts @@ -349,7 +349,7 @@ spi0: spi@e0100000 { status = "okay"; num-cs = <3>; - cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; + cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>; stmpe610@0 { compatible = "st,stmpe610"; diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 5f347054527d..d4dbc4098653 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -142,8 +142,8 @@ reg = <0xb4100000 0x1000>; interrupts = <0 105 0x4>; status = "disabled"; - dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */ - <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */ + dmas = <&dwdma0 12 0 1>, + <&dwdma0 13 1 0>; dma-names = "tx", "rx"; }; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 17ea0abcdbd7..086b4b333249 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -100,7 +100,7 @@ reg = <0xb2800000 0x1000>; interrupts = <0 29 0x4>; status = "disabled"; - dmas = <&dwdma0 0 0 0 0>; + dmas = <&dwdma0 0 0 0>; dma-names = "data"; }; @@ -290,8 +290,8 @@ #size-cells = <0>; interrupts = <0 31 0x4>; status = "disabled"; - dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */ - <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */ + dmas = <&dwdma0 4 0 0>, + <&dwdma0 5 0 0>; dma-names = "tx", "rx"; }; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index 6b32d20acc9f..00166eb9be86 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -194,6 +194,7 @@ rtc: rtc@fc900000 { compatible = "st,spear600-rtc"; reg = <0xfc900000 0x1000>; + interrupt-parent = <&vic0>; interrupts = <10>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 68aab50a73ab..733678b75b88 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -750,6 +750,7 @@ reg = <0x10120000 0x1000>; interrupt-names = "combined"; interrupts = <14>; + interrupt-parent = <&vica>; clocks = <&clcdclk>, <&hclkclcd>; clock-names = "clcdclk", "apb_pclk"; status = "disabled"; diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi index fa149837df14..11fdecd9312e 100644 --- a/arch/arm/boot/dts/stih407.dtsi +++ b/arch/arm/boot/dts/stih407.dtsi @@ -8,6 +8,7 @@ */ #include "stih407-clock.dtsi" #include "stih407-family.dtsi" +#include / { soc { sti-display-subsystem { @@ -122,7 +123,7 @@ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; reset-names = "hdmi"; resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; ddc = <&hdmiddc>; diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 21fe72b183d8..96eed0dc08b8 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi @@ -9,6 +9,7 @@ #include "stih410-clock.dtsi" #include "stih407-family.dtsi" #include "stih410-pinctrl.dtsi" +#include / { aliases { bdisp0 = &bdisp0; @@ -213,7 +214,7 @@ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3>; + hdmi,hpd-gpio = <&pio5 3 
GPIO_ACTIVE_LOW>; reset-names = "hdmi"; resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; ddc = <&hdmiddc>; diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts index 51e6f1d21c32..b2758dd8ce43 100644 --- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts +++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts @@ -42,7 +42,6 @@ /dts-v1/; #include "sun6i-a31s.dtsi" -#include "sunxi-common-regulators.dtsi" #include / { @@ -99,6 +98,7 @@ pinctrl-0 = <&gmac_pins_rgmii_a>, <&gmac_phy_reset_pin_bpi_m2>; phy = <&phy1>; phy-mode = "rgmii"; + phy-supply = <®_dldo1>; snps,reset-gpio = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */ snps,reset-active-low; snps,reset-delays-us = <0 10000 30000>; @@ -118,7 +118,7 @@ &mmc0 { pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_bpi_m2>; - vmmc-supply = <®_vcc3v0>; + vmmc-supply = <®_dcdc1>; bus-width = <4>; cd-gpios = <&pio 0 4 GPIO_ACTIVE_HIGH>; /* PA4 */ cd-inverted; @@ -132,7 +132,7 @@ &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins_a>; - vmmc-supply = <®_vcc3v0>; + vmmc-supply = <®_aldo1>; mmc-pwrseq = <&mmc2_pwrseq>; bus-width = <4>; non-removable; @@ -163,6 +163,8 @@ reg = <0x68>; interrupt-parent = <&nmi_intc>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + eldoin-supply = <®_dcdc1>; + x-powers,drive-vbus-en; }; }; @@ -193,7 +195,28 @@ #include "axp22x.dtsi" +®_aldo1 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi"; +}; + +®_aldo2 { + regulator-always-on; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-name = "vcc-gmac"; +}; + +®_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "avcc"; +}; + ®_dc5ldo { + regulator-always-on; regulator-min-microvolt = <700000>; regulator-max-microvolt = <1320000>; regulator-name = "vdd-cpus"; @@ -233,6 +256,40 @@ regulator-name = "vcc-dram"; }; +®_dldo1 { + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-mac"; +}; + +®_dldo2 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-name = "avdd-csi"; +}; + +®_dldo3 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pb"; +}; + +®_eldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vdd-csi"; + status = "okay"; +}; + +®_ldo_io1 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc-pm-cpus"; + status = "okay"; +}; + &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index a4c7713edfcd..c5c365f35baa 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -41,6 +41,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_A15 &CLUSTER_COST_A15>; }; cpu1: cpu@1 { @@ -50,6 +51,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_A15 &CLUSTER_COST_A15>; }; cpu2: cpu@2 { @@ -59,6 +61,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + 
sched-energy-costs = <&CPU_COST_A7 &CLUSTER_COST_A7>; }; cpu3: cpu@3 { @@ -68,6 +71,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + sched-energy-costs = <&CPU_COST_A7 &CLUSTER_COST_A7>; }; cpu4: cpu@4 { @@ -77,6 +81,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + sched-energy-costs = <&CPU_COST_A7 &CLUSTER_COST_A7>; }; idle-states { @@ -96,6 +101,77 @@ min-residency-us = <2500>; }; }; + + energy-costs { + CPU_COST_A15: core-cost0 { + busy-cost-data = < + 426 2021 + 512 2312 + 597 2756 + 682 3125 + 768 3524 + 853 3846 + 938 5177 + 1024 6997 + >; + idle-cost-data = < + 0 + 0 + 0 + >; + }; + CPU_COST_A7: core-cost1 { + busy-cost-data = < + 150 187 + 172 275 + 215 334 + 258 407 + 301 447 + 344 549 + 387 761 + 430 1024 + >; + idle-cost-data = < + 0 + 0 + 0 + >; + }; + CLUSTER_COST_A15: cluster-cost0 { + busy-cost-data = < + 426 7920 + 512 8165 + 597 8172 + 682 8195 + 768 8265 + 853 8446 + 938 11426 + 1024 15200 + >; + idle-cost-data = < + 70 + 70 + 25 + >; + }; + CLUSTER_COST_A7: cluster-cost1 { + busy-cost-data = < + 150 2967 + 172 2792 + 215 2810 + 258 2815 + 301 2919 + 344 2847 + 387 3917 + 430 4905 + >; + idle-cost-data = < + 25 + 25 + 10 + >; + }; + }; }; memory@80000000 { diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index e5ad0708849a..f07f47943bf3 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -15,3 +15,7 @@ config SHARP_PARAM config SHARP_SCOOP bool + +config FIQ_GLUE + bool + select FIQ diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 70b4a14ed993..10b506469c95 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -5,6 +5,7 @@ obj-y += firmware.o +obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o obj-$(CONFIG_SA1111) += sa1111.o obj-$(CONFIG_DMABOUNCE) += dmabounce.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c index 4c10c6452678..f4dc1714a79e 100644 --- a/arch/arm/common/bL_switcher_dummy_if.c +++ b/arch/arm/common/bL_switcher_dummy_if.c @@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = { &bL_switcher_fops }; module_misc_device(bL_switcher_device); + +MODULE_AUTHOR("Nicolas Pitre "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface"); diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S new file mode 100644 index 000000000000..24b42cec4813 --- /dev/null +++ b/arch/arm/common/fiq_glue.S @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include + + .text + + .global fiq_glue_end + + /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ + +ENTRY(fiq_glue) + /* store pc, cpsr from previous mode, reserve space for spsr */ + mrs r12, spsr + sub lr, lr, #4 + subs r10, #1 + bne nested_fiq + + str r12, [sp, #-8]! + str lr, [sp, #-4]! 
+ + /* store r8-r14 from previous mode */ + sub sp, sp, #(7 * 4) + stmia sp, {r8-r14}^ + nop + + /* store r0-r7 from previous mode */ + stmfd sp!, {r0-r7} + + /* setup func(data,regs) arguments */ + mov r0, r9 + mov r1, sp + mov r3, r8 + + mov r7, sp + + /* Get sp and lr from non-user modes */ + and r4, r12, #MODE_MASK + cmp r4, #USR_MODE + beq fiq_from_usr_mode + + mov r7, sp + orr r4, r4, #(PSR_I_BIT | PSR_F_BIT) + msr cpsr_c, r4 + str sp, [r7, #(4 * 13)] + str lr, [r7, #(4 * 14)] + mrs r5, spsr + str r5, [r7, #(4 * 17)] + + cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + /* use fiq stack if we reenter this mode */ + subne sp, r7, #(4 * 3) + +fiq_from_usr_mode: + msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + mov r2, sp + sub sp, r7, #12 + stmfd sp!, {r2, ip, lr} + /* call func(data,regs) */ + blx r3 + ldmfd sp, {r2, ip, lr} + mov sp, r2 + + /* restore/discard saved state */ + cmp r4, #USR_MODE + beq fiq_from_usr_mode_exit + + msr cpsr_c, r4 + ldr sp, [r7, #(4 * 13)] + ldr lr, [r7, #(4 * 14)] + msr spsr_cxsf, r5 + +fiq_from_usr_mode_exit: + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + + ldmfd sp!, {r0-r7} + ldr lr, [sp, #(4 * 7)] + ldr r12, [sp, #(4 * 8)] + add sp, sp, #(10 * 4) +exit_fiq: + msr spsr_cxsf, r12 + add r10, #1 + cmp r11, #0 + moveqs pc, lr + bx r11 /* jump to custom fiq return function */ + +nested_fiq: + orr r12, r12, #(PSR_F_BIT) + b exit_fiq + +fiq_glue_end: + +ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */ + stmfd sp!, {r4} + mrs r4, cpsr + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + movs r8, r0 + mov r9, r1 + mov sp, r2 + mov r11, r3 + moveq r10, #0 + movne r10, #1 + msr cpsr_c, r4 + ldmfd sp!, {r4} + bx lr + diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c new file mode 100644 index 000000000000..8cb1b611c6d5 --- /dev/null +++ b/arch/arm/common/fiq_glue_setup.c @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +extern unsigned char fiq_glue, fiq_glue_end; +extern void fiq_glue_setup(void *func, void *data, void *sp, + fiq_return_handler_t fiq_return_handler); + +static struct fiq_handler fiq_debbuger_fiq_handler = { + .name = "fiq_glue", +}; +DEFINE_PER_CPU(void *, fiq_stack); +static struct fiq_glue_handler *current_handler; +static fiq_return_handler_t fiq_return_handler; +static DEFINE_MUTEX(fiq_glue_lock); + +static void fiq_glue_setup_helper(void *info) +{ + struct fiq_glue_handler *handler = info; + fiq_glue_setup(handler->fiq, handler, + __get_cpu_var(fiq_stack) + THREAD_START_SP, + fiq_return_handler); +} + +int fiq_glue_register_handler(struct fiq_glue_handler *handler) +{ + int ret; + int cpu; + + if (!handler || !handler->fiq) + return -EINVAL; + + mutex_lock(&fiq_glue_lock); + if (fiq_stack) { + ret = -EBUSY; + goto err_busy; + } + + for_each_possible_cpu(cpu) { + void *stack; + stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); + if (WARN_ON(!stack)) { + ret = -ENOMEM; + goto err_alloc_fiq_stack; + } + per_cpu(fiq_stack, cpu) = stack; + } + + ret = claim_fiq(&fiq_debbuger_fiq_handler); + if (WARN_ON(ret)) + goto err_claim_fiq; + + current_handler = handler; + on_each_cpu(fiq_glue_setup_helper, handler, true); + set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); + + mutex_unlock(&fiq_glue_lock); + return 0; + +err_claim_fiq: +err_alloc_fiq_stack: + for_each_possible_cpu(cpu) { + __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); + per_cpu(fiq_stack, cpu) = NULL; + } +err_busy: + mutex_unlock(&fiq_glue_lock); + return ret; +} + +static void fiq_glue_update_return_handler(void (*fiq_return)(void)) +{ + fiq_return_handler = fiq_return; + if (current_handler) + on_each_cpu(fiq_glue_setup_helper, current_handler, true); +} + +int fiq_glue_set_return_handler(void (*fiq_return)(void)) +{ + int ret; + + mutex_lock(&fiq_glue_lock); + if (fiq_return_handler) { + ret = -EBUSY; + goto err_busy; + } + fiq_glue_update_return_handler(fiq_return); + ret = 0; +err_busy: + mutex_unlock(&fiq_glue_lock); + + return ret; +} +EXPORT_SYMBOL(fiq_glue_set_return_handler); + +int fiq_glue_clear_return_handler(void (*fiq_return)(void)) +{ + int ret; + + mutex_lock(&fiq_glue_lock); + if (WARN_ON(fiq_return_handler != fiq_return)) { + ret = -EINVAL; + goto err_inval; + } + fiq_glue_update_return_handler(NULL); + ret = 0; +err_inval: + mutex_unlock(&fiq_glue_lock); + + return ret; +} +EXPORT_SYMBOL(fiq_glue_clear_return_handler); + +/** + * fiq_glue_resume - Restore fiqs after suspend or low power idle states + * + * This must be called before calling local_fiq_enable after returning from a + * power state where the fiq mode registers were lost. If a driver provided + * a resume hook when it registered the handler it will be called. 
+ */ + +void fiq_glue_resume(void) +{ + if (!current_handler) + return; + fiq_glue_setup(current_handler->fiq, current_handler, + __get_cpu_var(fiq_stack) + THREAD_START_SP, + fiq_return_handler); + if (current_handler->resume) + current_handler->resume(current_handler); +} + diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 6c7b06854fce..51936bde1eb2 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -826,28 +826,6 @@ static int locomo_match(struct device *_dev, struct device_driver *_drv) return dev->devid == drv->devid; } -static int locomo_bus_suspend(struct device *dev, pm_message_t state) -{ - struct locomo_dev *ldev = LOCOMO_DEV(dev); - struct locomo_driver *drv = LOCOMO_DRV(dev->driver); - int ret = 0; - - if (drv && drv->suspend) - ret = drv->suspend(ldev, state); - return ret; -} - -static int locomo_bus_resume(struct device *dev) -{ - struct locomo_dev *ldev = LOCOMO_DEV(dev); - struct locomo_driver *drv = LOCOMO_DRV(dev->driver); - int ret = 0; - - if (drv && drv->resume) - ret = drv->resume(ldev); - return ret; -} - static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); @@ -875,8 +853,6 @@ struct bus_type locomo_bus_type = { .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, - .suspend = locomo_bus_suspend, - .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 8c2a2619971b..f1d7834990ec 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -244,7 +244,7 @@ CONFIG_USB_STORAGE_ONETOUCH=m CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m +CONFIG_USB_UAS=y CONFIG_USB_DWC3=y CONFIG_USB_DWC2=y CONFIG_USB_HSIC_USB3503=y diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig new file mode 100644 index 000000000000..461a85a02764 --- /dev/null +++ b/arch/arm/configs/ranchu_defconfig @@ -0,0 +1,314 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_ARCH_MMAP_RND_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_VIRT=y +CONFIG_ARM_KERNMEM_PERMS=y +CONFIG_SMP=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_VFP=y +CONFIG_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y 
+CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +# 
CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y +CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_VIRTUALIZATION=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 5caaf971fb50..df433abfcb02 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -10,6 +10,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=8 CONFIG_AEABI=y CONFIG_HIGHMEM=y +CONFIG_CMA=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CPU_FREQ=y @@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y # CONFIG_WIRELESS is not set CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMA_CMA=y CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_AHCI_SUNXI=y diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index b8e69fe282b8..925d1364727a 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -121,4 
+121,10 @@ config CRYPTO_CHACHA20_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 +config CRYPTO_SPECK_NEON + tristate "NEON accelerated Speck cipher algorithms" + depends on KERNEL_MODE_NEON + select CRYPTO_BLKCIPHER + select CRYPTO_SPECK + endif diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 30ef8e291271..3304e671918d 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o +obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o @@ -53,7 +54,9 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +speck-neon-y := speck-neon-core.o speck-neon-glue.o +ifdef REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ cmd_perl = $(PERL) $(<) > $(@) @@ -62,5 +65,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl $(call cmd,perl) +endif .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 1b0e0e86ee9c..96e62ec105d0 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { @@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32c", .base.cra_driver_name = "crc32c-arm-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S new file mode 100644 index 000000000000..3c1e203e53b9 --- /dev/null +++ b/arch/arm/crypto/speck-neon-core.S @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Author: Eric Biggers + */ + +#include + + .text + .fpu neon + + // arguments + ROUND_KEYS .req r0 // const {u64,u32} *round_keys + NROUNDS .req r1 // int nrounds + DST .req r2 // void *dst + SRC .req r3 // const void *src + NBYTES .req r4 // unsigned int nbytes + TWEAK .req r5 // void *tweak + + // registers which hold the data being encrypted/decrypted + X0 .req q0 + X0_L .req d0 + X0_H .req d1 + Y0 .req q1 + Y0_H .req d3 + X1 .req q2 + X1_L .req d4 + X1_H .req d5 + Y1 .req q3 + Y1_H .req d7 + X2 .req q4 + X2_L .req d8 + X2_H .req d9 + Y2 .req q5 + Y2_H .req d11 + X3 .req q6 + X3_L .req d12 + X3_H .req d13 + Y3 .req q7 + Y3_H .req d15 + + // the round key, duplicated in all lanes + ROUND_KEY .req q8 + ROUND_KEY_L .req d16 + ROUND_KEY_H .req d17 + + // index vector for vtbl-based 8-bit rotates + ROTATE_TABLE .req d18 + + // multiplication table for updating XTS tweaks + GF128MUL_TABLE .req d19 + GF64MUL_TABLE .req d19 + + // current XTS tweak value(s) + TWEAKV .req q10 + TWEAKV_L .req d20 + TWEAKV_H .req d21 + + TMP0 .req q12 + TMP0_L .req d24 + TMP0_H .req d25 + 
TMP1 .req q13 + TMP2 .req q14 + TMP3 .req q15 + + .align 4 +.Lror64_8_table: + .byte 1, 2, 3, 4, 5, 6, 7, 0 +.Lror32_8_table: + .byte 1, 2, 3, 0, 5, 6, 7, 4 +.Lrol64_8_table: + .byte 7, 0, 1, 2, 3, 4, 5, 6 +.Lrol32_8_table: + .byte 3, 0, 1, 2, 7, 4, 5, 6 +.Lgf128mul_table: + .byte 0, 0x87 + .fill 14 +.Lgf64mul_table: + .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b + .fill 12 + +/* + * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time + * + * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for + * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes + * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. + * + * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because + * the vtbl approach is faster on some processors and the same speed on others. + */ +.macro _speck_round_128bytes n + + // x = ror(x, 8) + vtbl.8 X0_L, {X0_L}, ROTATE_TABLE + vtbl.8 X0_H, {X0_H}, ROTATE_TABLE + vtbl.8 X1_L, {X1_L}, ROTATE_TABLE + vtbl.8 X1_H, {X1_H}, ROTATE_TABLE + vtbl.8 X2_L, {X2_L}, ROTATE_TABLE + vtbl.8 X2_H, {X2_H}, ROTATE_TABLE + vtbl.8 X3_L, {X3_L}, ROTATE_TABLE + vtbl.8 X3_H, {X3_H}, ROTATE_TABLE + + // x += y + vadd.u\n X0, Y0 + vadd.u\n X1, Y1 + vadd.u\n X2, Y2 + vadd.u\n X3, Y3 + + // x ^= k + veor X0, ROUND_KEY + veor X1, ROUND_KEY + veor X2, ROUND_KEY + veor X3, ROUND_KEY + + // y = rol(y, 3) + vshl.u\n TMP0, Y0, #3 + vshl.u\n TMP1, Y1, #3 + vshl.u\n TMP2, Y2, #3 + vshl.u\n TMP3, Y3, #3 + vsri.u\n TMP0, Y0, #(\n - 3) + vsri.u\n TMP1, Y1, #(\n - 3) + vsri.u\n TMP2, Y2, #(\n - 3) + vsri.u\n TMP3, Y3, #(\n - 3) + + // y ^= x + veor Y0, TMP0, X0 + veor Y1, TMP1, X1 + veor Y2, TMP2, X2 + veor Y3, TMP3, X3 +.endm + +/* + * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time + * + * This is the inverse of _speck_round_128bytes(). + */ +.macro _speck_unround_128bytes n + + // y ^= x + veor TMP0, Y0, X0 + veor TMP1, Y1, X1 + veor TMP2, Y2, X2 + veor TMP3, Y3, X3 + + // y = ror(y, 3) + vshr.u\n Y0, TMP0, #3 + vshr.u\n Y1, TMP1, #3 + vshr.u\n Y2, TMP2, #3 + vshr.u\n Y3, TMP3, #3 + vsli.u\n Y0, TMP0, #(\n - 3) + vsli.u\n Y1, TMP1, #(\n - 3) + vsli.u\n Y2, TMP2, #(\n - 3) + vsli.u\n Y3, TMP3, #(\n - 3) + + // x ^= k + veor X0, ROUND_KEY + veor X1, ROUND_KEY + veor X2, ROUND_KEY + veor X3, ROUND_KEY + + // x -= y + vsub.u\n X0, Y0 + vsub.u\n X1, Y1 + vsub.u\n X2, Y2 + vsub.u\n X3, Y3 + + // x = rol(x, 8); + vtbl.8 X0_L, {X0_L}, ROTATE_TABLE + vtbl.8 X0_H, {X0_H}, ROTATE_TABLE + vtbl.8 X1_L, {X1_L}, ROTATE_TABLE + vtbl.8 X1_H, {X1_H}, ROTATE_TABLE + vtbl.8 X2_L, {X2_L}, ROTATE_TABLE + vtbl.8 X2_H, {X2_H}, ROTATE_TABLE + vtbl.8 X3_L, {X3_L}, ROTATE_TABLE + vtbl.8 X3_H, {X3_H}, ROTATE_TABLE +.endm + +.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp + + // Load the next source block + vld1.8 {\dst_reg}, [SRC]! + + // Save the current tweak in the tweak buffer + vst1.8 {TWEAKV}, [\tweak_buf:128]! + + // XOR the next source block with the current tweak + veor \dst_reg, TWEAKV + + /* + * Calculate the next tweak by multiplying the current one by x, + * modulo p(x) = x^128 + x^7 + x^2 + x + 1. + */ + vshr.u64 \tmp, TWEAKV, #63 + vshl.u64 TWEAKV, #1 + veor TWEAKV_H, \tmp\()_L + vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H + veor TWEAKV_L, \tmp\()_H +.endm + +.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp + + // Load the next two source blocks + vld1.8 {\dst_reg}, [SRC]! + + // Save the current two tweaks in the tweak buffer + vst1.8 {TWEAKV}, [\tweak_buf:128]! 
+ + // XOR the next two source blocks with the current two tweaks + veor \dst_reg, TWEAKV + + /* + * Calculate the next two tweaks by multiplying the current ones by x^2, + * modulo p(x) = x^64 + x^4 + x^3 + x + 1. + */ + vshr.u64 \tmp, TWEAKV, #62 + vshl.u64 TWEAKV, #2 + vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L + vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H + veor TWEAKV, \tmp +.endm + +/* + * _speck_xts_crypt() - Speck-XTS encryption/decryption + * + * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer + * using Speck-XTS, specifically the variant with a block size of '2n' and round + * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and + * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a + * nonzero multiple of 128. + */ +.macro _speck_xts_crypt n, decrypting + push {r4-r7} + mov r7, sp + + /* + * The first four parameters were passed in registers r0-r3. Load the + * additional parameters, which were passed on the stack. + */ + ldr NBYTES, [sp, #16] + ldr TWEAK, [sp, #20] + + /* + * If decrypting, modify the ROUND_KEYS parameter to point to the last + * round key rather than the first, since for decryption the round keys + * are used in reverse order. + */ +.if \decrypting +.if \n == 64 + add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3 + sub ROUND_KEYS, #8 +.else + add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2 + sub ROUND_KEYS, #4 +.endif +.endif + + // Load the index vector for vtbl-based 8-bit rotates +.if \decrypting + ldr r12, =.Lrol\n\()_8_table +.else + ldr r12, =.Lror\n\()_8_table +.endif + vld1.8 {ROTATE_TABLE}, [r12:64] + + // One-time XTS preparation + + /* + * Allocate stack space to store 128 bytes worth of tweaks. For + * performance, this space is aligned to a 16-byte boundary so that we + * can use the load/store instructions that declare 16-byte alignment. + */ + sub sp, #128 + bic sp, #0xf + +.if \n == 64 + // Load first tweak + vld1.8 {TWEAKV}, [TWEAK] + + // Load GF(2^128) multiplication table + ldr r12, =.Lgf128mul_table + vld1.8 {GF128MUL_TABLE}, [r12:64] +.else + // Load first tweak + vld1.8 {TWEAKV_L}, [TWEAK] + + // Load GF(2^64) multiplication table + ldr r12, =.Lgf64mul_table + vld1.8 {GF64MUL_TABLE}, [r12:64] + + // Calculate second tweak, packing it together with the first + vshr.u64 TMP0_L, TWEAKV_L, #63 + vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L + vshl.u64 TWEAKV_H, TWEAKV_L, #1 + veor TWEAKV_H, TMP0_L +.endif + +.Lnext_128bytes_\@: + + /* + * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak + * values, and save the tweaks on the stack for later. Then + * de-interleave the 'x' and 'y' elements of each block, i.e. make it so + * that the X[0-3] registers contain only the second halves of blocks, + * and the Y[0-3] registers contain only the first halves of blocks. + * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
+ */ + mov r12, sp +.if \n == 64 + _xts128_precrypt_one X0, r12, TMP0 + _xts128_precrypt_one Y0, r12, TMP0 + _xts128_precrypt_one X1, r12, TMP0 + _xts128_precrypt_one Y1, r12, TMP0 + _xts128_precrypt_one X2, r12, TMP0 + _xts128_precrypt_one Y2, r12, TMP0 + _xts128_precrypt_one X3, r12, TMP0 + _xts128_precrypt_one Y3, r12, TMP0 + vswp X0_L, Y0_H + vswp X1_L, Y1_H + vswp X2_L, Y2_H + vswp X3_L, Y3_H +.else + _xts64_precrypt_two X0, r12, TMP0 + _xts64_precrypt_two Y0, r12, TMP0 + _xts64_precrypt_two X1, r12, TMP0 + _xts64_precrypt_two Y1, r12, TMP0 + _xts64_precrypt_two X2, r12, TMP0 + _xts64_precrypt_two Y2, r12, TMP0 + _xts64_precrypt_two X3, r12, TMP0 + _xts64_precrypt_two Y3, r12, TMP0 + vuzp.32 Y0, X0 + vuzp.32 Y1, X1 + vuzp.32 Y2, X2 + vuzp.32 Y3, X3 +.endif + + // Do the cipher rounds + + mov r12, ROUND_KEYS + mov r6, NROUNDS + +.Lnext_round_\@: +.if \decrypting +.if \n == 64 + vld1.64 ROUND_KEY_L, [r12] + sub r12, #8 + vmov ROUND_KEY_H, ROUND_KEY_L +.else + vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12] + sub r12, #4 +.endif + _speck_unround_128bytes \n +.else +.if \n == 64 + vld1.64 ROUND_KEY_L, [r12]! + vmov ROUND_KEY_H, ROUND_KEY_L +.else + vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]! +.endif + _speck_round_128bytes \n +.endif + subs r6, r6, #1 + bne .Lnext_round_\@ + + // Re-interleave the 'x' and 'y' elements of each block +.if \n == 64 + vswp X0_L, Y0_H + vswp X1_L, Y1_H + vswp X2_L, Y2_H + vswp X3_L, Y3_H +.else + vzip.32 Y0, X0 + vzip.32 Y1, X1 + vzip.32 Y2, X2 + vzip.32 Y3, X3 +.endif + + // XOR the encrypted/decrypted blocks with the tweaks we saved earlier + mov r12, sp + vld1.8 {TMP0, TMP1}, [r12:128]! + vld1.8 {TMP2, TMP3}, [r12:128]! + veor X0, TMP0 + veor Y0, TMP1 + veor X1, TMP2 + veor Y1, TMP3 + vld1.8 {TMP0, TMP1}, [r12:128]! + vld1.8 {TMP2, TMP3}, [r12:128]! + veor X2, TMP0 + veor Y2, TMP1 + veor X3, TMP2 + veor Y3, TMP3 + + // Store the ciphertext in the destination buffer + vst1.8 {X0, Y0}, [DST]! + vst1.8 {X1, Y1}, [DST]! + vst1.8 {X2, Y2}, [DST]! + vst1.8 {X3, Y3}, [DST]! + + // Continue if there are more 128-byte chunks remaining, else return + subs NBYTES, #128 + bne .Lnext_128bytes_\@ + + // Store the next tweak +.if \n == 64 + vst1.8 {TWEAKV}, [TWEAK] +.else + vst1.8 {TWEAKV_L}, [TWEAK] +.endif + + mov sp, r7 + pop {r4-r7} + bx lr +.endm + +ENTRY(speck128_xts_encrypt_neon) + _speck_xts_crypt n=64, decrypting=0 +ENDPROC(speck128_xts_encrypt_neon) + +ENTRY(speck128_xts_decrypt_neon) + _speck_xts_crypt n=64, decrypting=1 +ENDPROC(speck128_xts_decrypt_neon) + +ENTRY(speck64_xts_encrypt_neon) + _speck_xts_crypt n=32, decrypting=0 +ENDPROC(speck64_xts_encrypt_neon) + +ENTRY(speck64_xts_decrypt_neon) + _speck_xts_crypt n=32, decrypting=1 +ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c new file mode 100644 index 000000000000..f012c3ea998f --- /dev/null +++ b/arch/arm/crypto/speck-neon-glue.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Note: the NIST recommendation for XTS only specifies a 128-bit block size, + * but a 64-bit version (needed for Speck64) is fairly straightforward; the math + * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial + * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004: + * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes + * OCB and PMAC"), represented as 0x1B. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The assembly functions only handle multiples of 128 bytes */ +#define SPECK_NEON_CHUNK_SIZE 128 + +/* Speck128 */ + +struct speck128_xts_tfm_ctx { + struct speck128_tfm_ctx main_key; + struct speck128_tfm_ctx tweak_key; +}; + +asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck128_xts_crypt(struct skcipher_request *req, + speck128_crypt_one_t crypt_one, + speck128_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + le128 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + le128_xor((le128 *)dst, (const le128 *)src, &tweak); + (*crypt_one)(&ctx->main_key, dst, dst); + le128_xor((le128 *)dst, (const le128 *)dst, &tweak); + gf128mul_x_ble(&tweak, &tweak); + + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck128_xts_encrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_encrypt, + speck128_xts_encrypt_neon); +} + +static int speck128_xts_decrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_decrypt, + speck128_xts_decrypt_neon); +} + +static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck128_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +/* Speck64 */ + +struct speck64_xts_tfm_ctx { + struct speck64_tfm_ctx main_key; + struct speck64_tfm_ctx tweak_key; +}; + +asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, + const void *, unsigned int, void *); + 
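For reference, the scalar tail loop in __speck128_xts_crypt() above advances the tweak with gf128mul_x_ble(), which is the same multiply-by-x step the NEON helpers apply eight blocks at a time using the 0x87 entry of .Lgf128mul_table. Below is a minimal reference sketch of that update, assuming the tweak is held as two little-endian 64-bit halves on a little-endian machine (the "ble" convention); the struct and function names are invented for illustration and are not part of the patch. Speck64-XTS performs the analogous update in GF(2^64) with the 0x1B constant.

/*
 * Sketch only: multiply a 128-bit XTS tweak by x modulo
 * x^128 + x^7 + x^2 + x + 1, treating the 16-byte tweak as a
 * little-endian integer split into two 64-bit halves.
 */
#include <stdint.h>

struct xts_tweak {			/* hypothetical type, for illustration */
	uint64_t lo;			/* bits 0..63 of the tweak   */
	uint64_t hi;			/* bits 64..127 of the tweak */
};

static void xts_tweak_mul_x(struct xts_tweak *t)
{
	uint64_t carry = t->hi >> 63;			/* bit 127 shifted out */

	t->hi = (t->hi << 1) | (t->lo >> 63);		/* shift the 128-bit value left by one */
	t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);	/* fold the carry back in as x^7 + x^2 + x + 1 */
}

The 0x87 reduction constant is exactly the non-zero byte of .Lgf128mul_table; the GF(2^64) table uses 0x1B the same way for the 64-bit block size.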
+static __always_inline int +__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, + speck64_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + __le64 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + *(__le64 *)dst = *(__le64 *)src ^ tweak; + (*crypt_one)(&ctx->main_key, dst, dst); + *(__le64 *)dst ^= tweak; + tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ + ((tweak & cpu_to_le64(1ULL << 63)) ? + 0x1B : 0)); + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck64_xts_encrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_encrypt, + speck64_xts_encrypt_neon); +} + +static int speck64_xts_decrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_decrypt, + speck64_xts_decrypt_neon); +} + +static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck64_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +static struct skcipher_alg speck_algs[] = { + { + .base.cra_name = "xts(speck128)", + .base.cra_driver_name = "xts-speck128-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK128_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK128_128_KEY_SIZE, + .max_keysize = 2 * SPECK128_256_KEY_SIZE, + .ivsize = SPECK128_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck128_xts_setkey, + .encrypt = speck128_xts_encrypt, + .decrypt = speck128_xts_decrypt, + }, { + .base.cra_name = "xts(speck64)", + .base.cra_driver_name = "xts-speck64-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK64_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK64_96_KEY_SIZE, + .max_keysize = 2 * SPECK64_128_KEY_SIZE, + .ivsize = SPECK64_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck64_xts_setkey, + .encrypt = speck64_xts_encrypt, + .decrypt = speck64_xts_decrypt, + } +}; + +static int __init speck_neon_module_init(void) +{ + if (!(elf_hwcap & HWCAP_NEON)) + return -ENODEV; + return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_neon_module_exit(void) +{ + crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + 
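Once crypto_register_skciphers() above succeeds, "xts(speck128)" and "xts(speck64)" become resolvable through the normal skcipher API (this is how dm-crypt or fscrypt would reach them). The fragment below is a hypothetical in-kernel caller, given purely as a usage sketch rather than as part of the patch: the function name, buffer size and random key/IV are invented, and it requests a synchronous implementation by masking out CRYPTO_ALG_ASYNC, which this driver provides.

/* Usage sketch (hypothetical caller, not part of the patch). */
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int speck_xts_encrypt_one_sector(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	u8 key[64];			/* main key || tweak key (two Speck128/256 keys) */
	u8 iv[16] = {};			/* XTS tweak, e.g. little-endian sector number */
	void *buf;
	int err;

	/* Ask for a synchronous "xts(speck128)" implementation. */
	tfm = crypto_alloc_skcipher("xts(speck128)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(4096, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	get_random_bytes(key, sizeof(key));	/* placeholder key for the sketch */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, 4096);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, 4096, iv);
	err = crypto_skcipher_encrypt(req);	/* synchronous, so no completion wait needed */

	skcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}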
+module_init(speck_neon_module_init); +module_exit(speck_neon_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("xts(speck128)"); +MODULE_ALIAS_CRYPTO("xts-speck128-neon"); +MODULE_ALIAS_CRYPTO("xts(speck64)"); +MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index ad301f107dd2..bc8d4bbd82e2 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -518,4 +518,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro bug, msg, line +#ifdef CONFIG_THUMB2_KERNEL +1: .inst 0xde02 +#else +1: .inst 0xe7f001f2 +#endif +#ifdef CONFIG_DEBUG_BUGVERBOSE + .pushsection .rodata.str, "aMS", %progbits, 1 +2: .asciz "\msg" + .popsection + .pushsection __bug_table, "aw" + .align 2 + .word 1b, 2b + .hword \line + .popsection +#endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 0722ec6be692..6821f1249300 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -7,7 +7,6 @@ #include #include #include -#include #include #define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 8c5ca92a87a9..51f0d2417fa9 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -113,8 +113,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the base location for PIE (ET_DYN with INTERP) loads. */ -#define ELF_ET_DYN_BASE 0x400000UL +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h new file mode 100644 index 000000000000..a9e244f9f197 --- /dev/null +++ b/arch/arm/include/asm/fiq_glue.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_FIQ_GLUE_H +#define __ASM_FIQ_GLUE_H + +struct fiq_glue_handler { + void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp); + void (*resume)(struct fiq_glue_handler *h); +}; +typedef void (*fiq_return_handler_t)(void); + +int fiq_glue_register_handler(struct fiq_glue_handler *handler); +int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return); +int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return); + +#ifdef CONFIG_FIQ_GLUE +void fiq_glue_resume(void); +#else +static inline void fiq_glue_resume(void) {} +#endif + +#endif diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index 74e51d6bd93f..f8712e3c29cf 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -189,8 +189,6 @@ struct locomo_driver { unsigned int devid; int (*probe)(struct locomo_dev *); int (*remove)(struct locomo_dev *); - int (*suspend)(struct locomo_dev *, pm_message_t); - int (*resume)(struct locomo_dev *); }; #define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv) diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index c8781450905b..3ab8b3781bfe 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -161,8 +161,7 @@ #else #define VTTBR_X (5 - KVM_T0SZ) #endif -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT _AC(48, ULL) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4a879f6ff13b..31fbb9285f62 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -293,4 +293,10 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +static inline bool kvm_arm_harden_branch_predictor(void) +{ + /* No way to detect it yet, pretend it is not there. */ + return false; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index fa6f2174276b..eb46fc81a440 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void) return 8; } +static inline void *kvm_get_hyp_vector(void) +{ + return kvm_ksym_ref(__kvm_hyp_vector); +} + +static inline int kvm_map_vectors(void) +{ + return 0; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 6bda945d31fa..000000000000 --- a/arch/arm/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __ARM_KVM_PSCI_H__ -#define __ARM_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index b2902a5cd780..2d7344f0e208 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) static inline void clean_pte_table(pte_t *pte) { diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index e9c9a117bd25..c7cdbb43ae7c 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -126,8 +126,7 @@ extern unsigned long profile_pc(struct pt_regs *regs); /* * kprobe-based event tracer support */ -#include -#include +#include #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) extern int regs_query_register_offset(const char *name); diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index f59ab9bcbaf9..2a786f54d8b8 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -25,6 +25,17 @@ void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); +#include + +/* Replace task scheduler's default frequency-invariant accounting */ +#define arch_scale_freq_capacity topology_get_freq_scale + +/* Replace task scheduler's default cpu-invariant accounting */ +#define arch_scale_cpu_capacity topology_get_cpu_scale + +/* Enable topology flag updates */ +#define arch_update_cpu_topology topology_update_cpu_topology + #else static inline void init_cpu_topology(void) { } diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index d523cd8439a3..0f07579af472 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -300,6 +300,8 @@ mov r2, sp ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr lr, [r2, #\offset + S_PC]! @ get pc + tst r1, #PSR_I_BIT | 0x0f + bne 1f msr spsr_cxsf, r1 @ save in spsr_svc #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -314,6 +316,7 @@ @ after ldm {}^ add sp, sp, #\offset + PT_REGS_SIZE movs pc, lr @ return & move spsr_svc into cpsr +1: bug "Returning to usermode but unexpected PSR bits set?", \@ #elif defined(CONFIG_CPU_V7M) @ V7M restore. 
@ Note that we don't need to do clrex here as clearing the local @@ -329,6 +332,8 @@ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP + tst r1, #PSR_I_BIT | 0x0f + bne 1f msr spsr_cxsf, r1 @ save in spsr_svc @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -341,6 +346,7 @@ .endif add sp, sp, #PT_REGS_SIZE - S_SP movs pc, lr @ return & move spsr_svc into cpsr +1: bug "Returning to usermode but unexpected PSR bits set?", \@ #endif /* !CONFIG_THUMB2_KERNEL */ .endm diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index caa0dbe3dc61..923a725ab9b5 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -141,6 +141,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) { + if (user_mode(regs)) + return -1; kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; @@ -148,6 +150,8 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) { + if (user_mode(regs)) + return -1; compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d96714e1858c..4b675a80f523 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -94,6 +94,77 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } +/* + * dump a block of kernel memory from around the given address + */ +static void show_data(unsigned long addr, int nbytes, const char *name) +{ + int i, j; + int nlines; + u32 *p; + + /* + * don't attempt to dump non-kernel addresses or + * values that are probably just small negative numbers + */ + if (addr < PAGE_OFFSET || addr > -256UL) + return; + + printk("\n%s: %#lx:\n", name, addr); + + /* + * round address down to a 32 bit boundary + * and always dump a multiple of 32 bytes + */ + p = (u32 *)(addr & ~(sizeof(u32) - 1)); + nbytes += (addr & (sizeof(u32) - 1)); + nlines = (nbytes + 31) / 32; + + + for (i = 0; i < nlines; i++) { + /* + * just display low 16 bits of address to keep + * each line of the dump < 80 characters + */ + printk("%04lx ", (unsigned long)p & 0xffff); + for (j = 0; j < 8; j++) { + u32 data; + if (probe_kernel_address(p, data)) { + printk(" ********"); + } else { + printk(" %08x", data); + } + ++p; + } + printk("\n"); + } +} + +static void show_extra_register_data(struct pt_regs *regs, int nbytes) +{ + mm_segment_t fs; + + fs = get_fs(); + set_fs(KERNEL_DS); + show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC"); + show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR"); + show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP"); + show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP"); + show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP"); + show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0"); + show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1"); + show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2"); + show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3"); + show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4"); + show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5"); + show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6"); + show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7"); + show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8"); + show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9"); + show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10"); + set_fs(fs); +} + void __show_regs(struct pt_regs *regs) { unsigned long flags; @@ -185,6 +256,8 @@ void 
__show_regs(struct pt_regs *regs) printk("Control: %08x%s\n", ctrl, buf); } #endif + + show_extra_register_data(regs, 128); } void show_regs(struct pt_regs * regs) diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 3b2aa9a9fe26..c74249136f18 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -6,6 +6,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include #include #include #include @@ -125,6 +126,31 @@ void machine_power_off(void) pm_power_off(); } +#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART +void arm_machine_flush_console(void) +{ + printk("\n"); + pr_emerg("Restarting %s\n", linux_banner); + if (console_trylock()) { + console_unlock(); + return; + } + + mdelay(50); + + local_irq_disable(); + if (!console_trylock()) + pr_emerg("arm_restart: Console was locked! Busting\n"); + else + pr_emerg("arm_restart: Console was locked!\n"); + console_unlock(); +} +#else +void arm_machine_flush_console(void) +{ +} +#endif + /* * Restart requires that the secondary CPUs stop performing any activity * while the primary CPU resets the system. Systems with a single CPU can @@ -141,6 +167,10 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); + /* Flush the console to make sure all the relevant messages make it + * out to the console drivers */ + arm_machine_flush_console(); + if (arm_pm_restart) arm_pm_restart(reboot_mode, cmd); else diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 24ac3cab411d..a86e1057bd36 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -25,11 +25,49 @@ #include #include #include +#include #include #include #include +/* sd energy functions */ +static inline +const struct sched_group_energy * const cpu_core_energy(int cpu) +{ + struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0]; + unsigned long capacity; + int max_cap_idx; + + if (!sge) { + pr_warn("Invalid sched_group_energy for CPU%d\n", cpu); + return NULL; + } + + max_cap_idx = sge->nr_cap_states - 1; + capacity = sge->cap_states[max_cap_idx].cap; + + printk_deferred("cpu=%d set cpu scale %lu from energy model\n", + cpu, capacity); + + topology_set_cpu_scale(cpu, capacity); + + return sge; +} + +static inline +const struct sched_group_energy * const cpu_cluster_energy(int cpu) +{ + struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1]; + + if (!sge) { + pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu); + return NULL; + } + + return sge; +} + /* * cpu capacity scale management */ @@ -169,10 +207,26 @@ static void __init parse_dt_topology(void) */ static void update_cpu_capacity(unsigned int cpu) { - if (!cpu_capacity(cpu) || cap_from_dt) - return; + const struct sched_group_energy *sge; + unsigned long capacity; - topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity); + sge = cpu_core_energy(cpu); + + if (sge) { + int max_cap_idx; + + max_cap_idx = sge->nr_cap_states - 1; + capacity = sge->cap_states[max_cap_idx].cap; + + printk_deferred("cpu=%d set cpu scale %lu from energy model\n", + cpu, capacity); + } else { + if (!cpu_capacity(cpu) || cap_from_dt) + return; + capacity = cpu_capacity(cpu) / middle_capacity; + } + + topology_set_cpu_scale(cpu, capacity); pr_info("CPU%u: update cpu_capacity %lu\n", cpu, topology_get_cpu_scale(NULL, cpu)); @@ -278,23 +332,37 @@ void store_cpu_topology(unsigned int cpuid) update_cpu_capacity(cpuid); + topology_detect_flags(); + pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr 
%x\n", cpuid, cpu_topology[cpuid].thread_id, cpu_topology[cpuid].core_id, cpu_topology[cpuid].socket_id, mpidr); } +#ifdef CONFIG_SCHED_MC +static int core_flags(void) +{ + return cpu_core_flags() | topology_core_flags(); +} + static inline int cpu_corepower_flags(void) { - return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; + return topology_core_flags() + | SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; +} +#endif + +static int cpu_flags(void) +{ + return topology_cpu_flags(); } static struct sched_domain_topology_level arm_topology[] = { #ifdef CONFIG_SCHED_MC - { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) }, - { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, + { cpu_coregroup_mask, core_flags, cpu_core_energy, SD_INIT_NAME(MC) }, #endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { cpu_cpu_mask, cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) }, { NULL, }, }; @@ -322,4 +390,6 @@ void __init init_cpu_topology(void) /* Set scheduler topology descriptor */ set_sched_topology(arm_topology); + + init_sched_energy_costs(); } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0fcd82f01388..b8dc3b516f93 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -790,7 +790,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index cf8bf6bf87c4..910bd8dabb3c 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include "trace.h" @@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... 
+ */ + vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 5638ce0c9524..63d6b404d88e 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING KVM=../../../../virt/kvm +CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) + obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o @@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o +CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE) + obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o +CFLAGS_switch.o += $(CFLAGS_ARMV7VE) obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index 111bda8cdebd..be4b8b0a40ad 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c @@ -20,6 +20,10 @@ #include +/* + * gcc before 4.9 doesn't understand -march=armv7ve, so we have to + * trick the assembler. + */ __asm__(".arch_extension virt"); void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 1712f132b80d..b83fdc06286a 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -85,7 +85,11 @@ .pushsection .text.fixup,"ax" .align 4 9001: mov r4, #-EFAULT +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + ldr r5, [sp, #9*4] @ *err_ptr +#else ldr r5, [sp, #8*4] @ *err_ptr +#endif str r4, [r5] ldmia sp, {r1, r2} @ retrieve dst, len add r2, r2, r1 diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index 9b49867154bf..63fa79f9f121 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig @@ -42,7 +42,7 @@ config MACH_ARMADA_375 depends on ARCH_MULTI_V7 select ARMADA_370_XP_IRQ select ARM_ERRATA_720789 - select ARM_ERRATA_753970 + select PL310_ERRATA_753970 select ARM_GIC select ARMADA_375_CLK select HAVE_ARM_SCU @@ -58,7 +58,7 @@ config MACH_ARMADA_38X bool "Marvell Armada 380/385 boards" depends on ARCH_MULTI_V7 select ARM_ERRATA_720789 - select ARM_ERRATA_753970 + select PL310_ERRATA_753970 select ARM_GIC select ARM_GLOBAL_TIMER select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index 5ac122e88f67..fa7f308c9027 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void) return omap_secure_memblock_base; } +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) +u32 omap3_save_secure_ram(void __iomem *addr, int size) +{ + u32 ret; + u32 param[5]; + + if (size != OMAP3_SAVE_SECURE_RAM_SZ) + return OMAP3_SAVE_SECURE_RAM_SZ; + + param[0] = 4; /* Number of arguments */ + param[1] = __pa(addr); /* Physical address for saving */ + param[2] = 0; + param[3] = 1; + param[4] = 1; + + ret = save_secure_ram_context(__pa(param)); + + return ret; +} +#endif + /** * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls * @idx: The PPA API index diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index bae263fba640..c509cde71f93 100644 --- 
a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -31,6 +31,8 @@ /* Maximum Secure memory storage size */ #define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K) +#define OMAP3_SAVE_SECURE_RAM_SZ 0x803F + /* Secure low power HAL API index */ #define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a #define OMAP4_HAL_SAVEHW_INDEX 0x1b @@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs); extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs); extern phys_addr_t omap_secure_ram_mempool_base(void); extern int omap_secure_ram_reserve_memblock(void); +extern u32 save_secure_ram_context(u32 args_pa); +extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size); extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs, u32 arg1, u32 arg2, u32 arg3, u32 arg4); diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index c3276436b0ae..c12e7b572a41 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1656,6 +1656,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = { .main_clk = "mmchs3_fck", .prcm = { .omap2 = { + .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC3_SHIFT, .idlest_reg_id = 1, diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index b668719b9b25..8e30772cfe32 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz; /* ... and its pointer from SRAM after copy */ extern void (*omap3_do_wfi_sram)(void); -/* save_secure_ram_context function pointer and size, for copy to SRAM */ -extern int save_secure_ram_context(u32 *addr); -extern unsigned int save_secure_ram_context_sz; - extern void omap3_save_scratchpad_contents(void); #define PM_RTA_ERRATUM_i608 (1 << 0) diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 841ba19d64a6..36c55547137c 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -48,6 +48,7 @@ #include "prm3xxx.h" #include "pm.h" #include "sdrc.h" +#include "omap-secure.h" #include "sram.h" #include "control.h" #include "vc.h" @@ -66,7 +67,6 @@ struct power_state { static LIST_HEAD(pwrst_list); -static int (*_omap_save_secure_sram)(u32 *addr); void (*omap3_do_wfi_sram)(void); static struct powerdomain *mpu_pwrdm, *neon_pwrdm; @@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void) * will hang the system. */ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); - ret = _omap_save_secure_sram((u32 *)(unsigned long) - __pa(omap3_secure_ram_storage)); + ret = omap3_save_secure_ram(omap3_secure_ram_storage, + OMAP3_SAVE_SECURE_RAM_SZ); pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { @@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) * * The minimum set of functions is pushed to SRAM for execution: * - omap3_do_wfi for erratum i581 WA, - * - save_secure_ram_context for security extensions. 
*/ void omap_push_sram_idle(void) { omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); - - if (omap_type() != OMAP2_DEVICE_TYPE_GP) - _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, - save_secure_ram_context_sz); } static void __init pm_errata_configure(void) @@ -553,7 +548,7 @@ int __init omap3_pm_init(void) clkdm_add_wkdep(neon_clkdm, mpu_clkdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { omap3_secure_ram_storage = - kmalloc(0x803F, GFP_KERNEL); + kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL); if (!omap3_secure_ram_storage) pr_err("Memory allocation failed when allocating for secure sram context\n"); diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c index d2c5bcabdbeb..ebaf80d72a10 100644 --- a/arch/arm/mach-omap2/prm33xx.c +++ b/arch/arm/mach-omap2/prm33xx.c @@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm) return v; } -static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) -{ - u32 v; - - v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs); - v &= AM33XX_LASTPOWERSTATEENTERED_MASK; - v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT; - - return v; -} - static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) { am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK, @@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = { .pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst, - .pwrdm_read_prev_pwrst = am33xx_pwrdm_read_prev_pwrst, .pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst, .pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst, diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index fa5fd24f524c..22daf4efed68 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore) ENDPROC(enable_omap3630_toggle_l2_on_restore) /* - * Function to call rom code to save secure ram context. This gets - * relocated to SRAM, so it can be all in .data section. Otherwise - * we need to initialize api_params separately. + * Function to call rom code to save secure ram context. + * + * r0 = physical address of the parameters */ - .data - .align 3 ENTRY(save_secure_ram_context) stmfd sp!, {r4 - r11, lr} @ save registers on stack - adr r3, api_params @ r3 points to parameters - str r0, [r3,#0x4] @ r0 has sdram address - ldr r12, high_mask - and r3, r3, r12 - ldr r12, sram_phy_addr_mask - orr r3, r3, r12 + mov r3, r0 @ physical address of parameters mov r0, #25 @ set service ID for PPA mov r12, r0 @ copy secure service ID in r12 mov r1, #0 @ set task id for ROM code in r1 @@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context) nop nop ldmfd sp!, {r4 - r11, pc} - .align -sram_phy_addr_mask: - .word SRAM_BASE_P -high_mask: - .word 0xffff -api_params: - .word 0x4, 0x0, 0x0, 0x1, 0x1 ENDPROC(save_secure_ram_context) -ENTRY(save_secure_ram_context_sz) - .word . 
- save_secure_ram_context - - .text /* * ====================== diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index 107f37210fb9..83606087edc7 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = { }, }; module_platform_driver(tosa_bt_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dmitry Baryshkov"); +MODULE_DESCRIPTION("Bluetooth built-in chip control"); diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 24659952c278..11da0f50a1fe 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -270,6 +270,11 @@ v6_dma_clean_range: * - end - virtual end address of region */ ENTRY(v6_dma_flush_range) +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT + sub r2, r1, r0 + cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT + bhi v6_dma_flush_dcache_all +#endif #ifdef CONFIG_DMA_CACHE_RWFO ldrb r2, [r0] @ read for ownership strb r2, [r0] @ write for ownership @@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT +v6_dma_flush_dcache_all: + mov r0, #0 +#ifdef HARVARD_CACHE + mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate +#else + mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate +#endif + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mov pc, lr +#endif + /* * dma_map_area(start, size, dir) * - start - kernel virtual start address diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 35ff45470dbf..fc3b44028cfb 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = { .val = PMD_SECT_USER, .set = "USR", }, { - .mask = L_PMD_SECT_RDONLY, - .val = L_PMD_SECT_RDONLY, + .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2, + .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2, .set = "ro", .clear = "RW", #elif __LINUX_ARM_ARCH__ >= 6 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 42f585379e19..6123d126e5ae 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -274,10 +274,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) local_irq_enable(); /* - * If we're in an interrupt or have no user + * If we're in an interrupt, or have no irqs, or have no user * context, we must not take the fault.. */ - if (faulthandler_disabled() || !mm) + if (faulthandler_disabled() || irqs_disabled() || !mm) goto no_context; if (user_mode(regs)) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ad80548325fe..0f6d1537f330 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE - .mask = ~L_PMD_SECT_RDONLY, - .prot = L_PMD_SECT_RDONLY, + .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2), + .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index c199990e12b6..323a4df59a6c 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -27,14 +27,58 @@ int bpf_jit_enable __read_mostly; +/* + * eBPF prog stack layout: + * + * high + * original ARM_SP => +-----+ + * | | callee saved registers + * +-----+ <= (BPF_FP + SCRATCH_SIZE) + * | ... | eBPF JIT scratch space + * eBPF fp register => +-----+ + * (BPF_FP) | ... 
| eBPF prog stack + * +-----+ + * |RSVD | JIT scratchpad + * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) + * | | + * | ... | Function call stack + * | | + * +-----+ + * low + * + * The callee saved registers depends on whether frame pointers are enabled. + * With frame pointers (to be compliant with the ABI): + * + * high + * original ARM_SP => +------------------+ \ + * | pc | | + * current ARM_FP => +------------------+ } callee saved registers + * |r4-r8,r10,fp,ip,lr| | + * +------------------+ / + * low + * + * Without frame pointers: + * + * high + * original ARM_SP => +------------------+ + * | r4-r8,r10,fp,lr | callee saved registers + * current ARM_FP => +------------------+ + * low + * + * When popping registers off the stack at the end of a BPF function, we + * reference them via the current ARM_FP register. + */ +#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \ + 1 << ARM_FP) +#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) +#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) + #define STACK_OFFSET(k) (k) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ -/* Flags used for JIT optimization */ -#define SEEN_CALL (1 << 0) - #define FLAG_IMM_OVERFLOW (1 << 0) /* @@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = { * idx : index of current last JITed instruction. * prologue_bytes : bytes used in prologue. * epilogue_offset : offset of epilogue starting. - * seen : bit mask used for JIT optimization. * offsets : array of eBPF instruction offsets in * JITed code. * target : final JITed code. @@ -110,7 +153,6 @@ struct jit_ctx { unsigned int idx; unsigned int prologue_bytes; unsigned int epilogue_offset; - u32 seen; u32 flags; u32 *offsets; u32 *target; @@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); } -/* Stack must be multiples of 16 Bytes */ -#define STACK_ALIGN(sz) (((sz) + 3) & ~3) +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +/* EABI requires the stack to be aligned to 64-bit boundaries */ +#define STACK_ALIGNMENT 8 +#else +/* Stack must be aligned to 32-bit boundaries */ +#define STACK_ALIGNMENT 4 +#endif /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, @@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size) + SCRATCH_SIZE + \ + 4 /* extra for skb_copy_bits buffer */) -#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) +#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) /* Get the offset of eBPF REGISTERs stored on scratch space. 
*/ #define STACK_VAR(off) (STACK_SIZE-off-4) @@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx) emit_mov_i_no8m(rd, val, ctx); } -static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx) { - ctx->seen |= SEEN_CALL; -#if __LINUX_ARM_ARCH__ < 5 - emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); - if (elf_hwcap & HWCAP_THUMB) emit(ARM_BX(tgt_reg), ctx); else emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); +} + +static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 5 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); + emit_bx_r(tgt_reg, ctx); #else emit(ARM_BLX_R(tgt_reg), ctx); #endif @@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) } /* Call appropriate function */ - ctx->seen |= SEEN_CALL; emit_mov_i(ARM_IP, op == BPF_DIV ? (u32)jit_udiv32 : (u32)jit_mod32, ctx); emit_blx_r(ARM_IP, ctx); @@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, /* Do LSH operation */ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); @@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, /* Do the ARSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); _emit(ARM_COND_MI, ARM_B(0), ctx); @@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, /* Do LSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); @@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, /* Do Multiplication */ emit(ARM_MUL(ARM_IP, rd, rn), ctx); emit(ARM_MUL(ARM_LR, rm, rt), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); @@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, } /* dst = *(size*)(src + off) */ -static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, - const s32 off, struct jit_ctx *ctx, const u8 sz){ +static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, + s32 off, struct jit_ctx *ctx, const u8 sz){ const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[1] : dst; + const u8 *rd = dstk ? 
tmp : dst; u8 rm = src; + s32 off_max; - if (off) { + if (sz == BPF_H) + off_max = 0xff; + else + off_max = 0xfff; + + if (off < 0 || off > off_max) { emit_a32_mov_i(tmp[0], off, false, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; + off = 0; + } else if (rd[1] == rm) { + emit(ARM_MOV_R(tmp[0], rm), ctx); + rm = tmp[0]; } switch (sz) { - case BPF_W: - /* Load a Word */ - emit(ARM_LDR_I(rd, rm, 0), ctx); + case BPF_B: + /* Load a Byte */ + emit(ARM_LDRB_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); break; case BPF_H: /* Load a HalfWord */ - emit(ARM_LDRH_I(rd, rm, 0), ctx); + emit(ARM_LDRH_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); break; - case BPF_B: - /* Load a Byte */ - emit(ARM_LDRB_I(rd, rm, 0), ctx); + case BPF_W: + /* Load a Word */ + emit(ARM_LDR_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); + break; + case BPF_DW: + /* Load a Double Word */ + emit(ARM_LDR_I(rd[1], rm, off), ctx); + emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } if (dstk) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx); + if (dstk && sz == BPF_DW) + emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx); } /* Arithmatic Operation */ @@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, const u8 rn, struct jit_ctx *ctx, u8 op) { switch (op) { case BPF_JSET: - ctx->seen |= SEEN_CALL; emit(ARM_AND_R(ARM_IP, rt, rn), ctx); emit(ARM_AND_R(ARM_LR, rd, rm), ctx); emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); @@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const u8 *tcc = bpf2a32[TCALL_CNT]; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) +#define jmp_offset (out_offset - (cur_offset) - 2) u32 off, lo, hi; /* if (index >= array->map.max_entries) @@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_a32_mov_i(tmp[1], off, false, ctx); emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); - /* index (64 bit) */ + /* index is 32-bit for arrays */ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); /* index >= array->map.max_entries */ emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); @@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_a32_mov_i(tmp2[1], off, false, ctx); emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); - emit(ARM_BX(tmp[1]), ctx); + emit_bx_r(tmp[1], ctx); /* out: */ if (out_offset == -1) @@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx) const u8 r2 = bpf2a32[BPF_REG_1][1]; const u8 r3 = bpf2a32[BPF_REG_1][0]; const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 r5 = bpf2a32[BPF_REG_6][0]; - const u8 r6 = bpf2a32[TMP_REG_1][1]; - const u8 r7 = bpf2a32[TMP_REG_1][0]; - const u8 r8 = bpf2a32[TMP_REG_2][1]; - const u8 r10 = bpf2a32[TMP_REG_2][0]; const u8 fplo = bpf2a32[BPF_REG_FP][1]; const u8 fphi = bpf2a32[BPF_REG_FP][0]; - const u8 sp = ARM_SP; const u8 *tcc = bpf2a32[TCALL_CNT]; - u16 reg_set = 0; - - /* - * eBPF prog stack layout - * - * high - * original ARM_SP => +-----+ eBPF prologue - * |FP/LR| - * current ARM_FP => +-----+ - * | ... | callee saved registers - * eBPF fp register => +-----+ <= (BPF_FP) - * | ... | eBPF JIT scratch space - * | | eBPF prog stack - * +-----+ - * |RSVD | JIT scratchpad - * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) - * | | - * | ... 
| Function call stack - * | | - * +-----+ - * low - */ - /* Save callee saved registers. */ - reg_set |= (1<seen & SEEN_CALL) - reg_set |= (1<stack_size = imm8m(STACK_SIZE); @@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx) /* end of prologue */ } +/* restore callee saved registers. */ static void build_epilogue(struct jit_ctx *ctx) { - const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 r5 = bpf2a32[BPF_REG_6][0]; - const u8 r6 = bpf2a32[TMP_REG_1][1]; - const u8 r7 = bpf2a32[TMP_REG_1][0]; - const u8 r8 = bpf2a32[TMP_REG_2][1]; - const u8 r10 = bpf2a32[TMP_REG_2][0]; - u16 reg_set = 0; - - /* unwind function call stack */ - emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); - - /* restore callee saved registers. */ - reg_set |= (1<seen & SEEN_CALL) - reg_set |= (1<seen & SEEN_CALL)) - emit(ARM_BX(ARM_LR), ctx); + emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx); + emit(ARM_POP(CALLEE_POP_MASK), ctx); #endif } @@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit_rev32(rt, rt, ctx); goto emit_bswap_uxt; case 64: - /* Because of the usage of ARM_LR */ - ctx->seen |= SEEN_CALL; emit_rev32(ARM_LR, rt, ctx); emit_rev32(rt, rd, ctx); emit(ARM_MOV_R(rd, ARM_LR), ctx); @@ -1448,22 +1460,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) rn = sstk ? tmp2[1] : src_lo; if (sstk) emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - switch (BPF_SIZE(code)) { - case BPF_W: - /* Load a Word */ - case BPF_H: - /* Load a Half-Word */ - case BPF_B: - /* Load a Byte */ - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); - break; - case BPF_DW: - /* Load a double word */ - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W); - emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W); - break; - } + emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code)); break; /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ case BPF_LD | BPF_ABS | BPF_W: diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h index fb061cf0d736..30a07730807a 100644 --- a/arch/arm/plat-omap/include/plat/sram.h +++ b/arch/arm/plat-omap/include/plat/sram.h @@ -5,13 +5,4 @@ void omap_map_sram(unsigned long start, unsigned long size, unsigned long skip, int cached); void omap_sram_reset(void); -extern void *omap_sram_push_address(unsigned long size); - -/* Macro to push a function to the internal SRAM, using the fncpy API */ -#define omap_sram_push(funcp, size) ({ \ - typeof(&(funcp)) _res = NULL; \ - void *_sram_address = omap_sram_push_address(size); \ - if (_sram_address) \ - _res = fncpy(_sram_address, &(funcp), size); \ - _res; \ -}) +extern void *omap_sram_push(void *funcp, unsigned long size); diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index a5bc92d7e476..921840acf65c 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -42,7 +43,7 @@ static void __iomem *omap_sram_ceil; * Note that fncpy requires the returned address to be aligned * to an 8-byte boundary. 
*/ -void *omap_sram_push_address(unsigned long size) +static void *omap_sram_push_address(unsigned long size) { unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; @@ -60,6 +61,30 @@ void *omap_sram_push_address(unsigned long size) return (void *)omap_sram_ceil; } +void *omap_sram_push(void *funcp, unsigned long size) +{ + void *sram; + unsigned long base; + int pages; + void *dst = NULL; + + sram = omap_sram_push_address(size); + if (!sram) + return NULL; + + base = (unsigned long)sram & PAGE_MASK; + pages = PAGE_ALIGN(size) / PAGE_SIZE; + + set_memory_rw(base, pages); + + dst = fncpy(sram, funcp, size); + + set_memory_ro(base, pages); + set_memory_x(base, pages); + + return dst; +} + /* * The SRAM context is lost during off-idle and stack * needs to be reset. @@ -75,6 +100,9 @@ void omap_sram_reset(void) void __init omap_map_sram(unsigned long start, unsigned long size, unsigned long skip, int cached) { + unsigned long base; + int pages; + if (size == 0) return; @@ -95,4 +123,10 @@ void __init omap_map_sram(unsigned long start, unsigned long size, */ memset_io(omap_sram_base + omap_sram_skip, 0, omap_sram_size - omap_sram_skip); + + base = (unsigned long)omap_sram_base; + pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE; + + set_memory_ro(base, pages); + set_memory_x(base, pages); } diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index aff6994950ba..a2399fd66e97 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, /***************************************************************************** * Ethernet switch ****************************************************************************/ -static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii"; -static __initdata struct mdio_board_info - orion_ge00_switch_board_info; +static __initdata struct mdio_board_info orion_ge00_switch_board_info = { + .bus_id = "orion-mii", + .modalias = "mv88e6085", +}; void __init orion_ge00_switch_init(struct dsa_chip_data *d) { - struct mdio_board_info *bd; unsigned int i; if (!IS_BUILTIN(CONFIG_PHYLIB)) return; - for (i = 0; i < ARRAY_SIZE(d->port_names); i++) - if (!strcmp(d->port_names[i], "cpu")) + for (i = 0; i < ARRAY_SIZE(d->port_names); i++) { + if (!strcmp(d->port_names[i], "cpu")) { + d->netdev[i] = &orion_ge00.dev; break; + } + } - bd = &orion_ge00_switch_board_info; - bd->bus_id = orion_ge00_mvmdio_bus_name; - bd->mdio_addr = d->sw_addr; - d->netdev[i] = &orion_ge00.dev; - strcpy(bd->modalias, "mv88e6085"); - bd->platform_data = d; + orion_ge00_switch_board_info.mdio_addr = d->sw_addr; + orion_ge00_switch_board_info.platform_data = d; mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); } diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index a71a48e71fff..aa7496be311d 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp, */ static int vfp_dying_cpu(unsigned int cpu) { - vfp_force_reload(cpu, current_thread_info()); + vfp_current_hw_state[cpu] = NULL; return 0; } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0df64a6a56d4..cc904e0a1a59 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -504,20 +504,13 @@ config CAVIUM_ERRATUM_30115 config QCOM_FALKOR_ERRATUM_1003 bool "Falkor E1003: Incorrect translation due to ASID change" default y - select ARM64_PAN if ARM64_SW_TTBR0_PAN help On Falkor v1, 
an incorrect ASID may be cached in the TLB when ASID - and BADDR are changed together in TTBRx_EL1. The workaround for this - issue is to use a reserved ASID in cpu_do_switch_mm() before - switching to the new ASID. Saying Y here selects ARM64_PAN if - ARM64_SW_TTBR0_PAN is selected. This is done because implementing and - maintaining the E1003 workaround in the software PAN emulation code - would be an unnecessary complication. The affected Falkor v1 CPU - implements ARMv8.1 hardware PAN support and using hardware PAN - support versus software PAN emulation is mutually exclusive at - runtime. - - If unsure, say Y. + and BADDR are changed together in TTBRx_EL1. Since we keep the ASID + in TTBR1_EL1, this situation only occurs in the entry trampoline and + then only for entries in the walk cache, since the leaf translation + is unchanged. Work around the erratum by invalidating the walk cache + entries for the trampoline before entering the kernel proper. config QCOM_FALKOR_ERRATUM_1009 bool "Falkor E1009: Prematurely complete a DSB after a TLBI" @@ -539,6 +532,16 @@ config QCOM_QDF2400_ERRATUM_0065 If unsure, say Y. +config QCOM_FALKOR_ERRATUM_E1041 + bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" + default y + help + Falkor CPU may speculatively fetch instructions from an improper + memory location when MMU translation is changed from SCTLR_ELn[M]=1 + to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem. + + If unsure, say Y. + endmenu @@ -803,6 +806,35 @@ config FORCE_MAX_ZONEORDER However for 4K, we choose a higher default value, 11 as opposed to 10, giving us 4M allocations matching the default size used by generic code. +config UNMAP_KERNEL_AT_EL0 + bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT + default y + help + Speculation attacks against some high-performance processors can + be used to bypass MMU permission checks and leak kernel data to + userspace. This can be defended against by unmapping the kernel + when running in userspace, mapping it back in on exception entry + via a trampoline page in the vector table. + + If unsure, say Y. + +config HARDEN_BRANCH_PREDICTOR + bool "Harden the branch predictor against aliasing attacks" if EXPERT + default y + help + Speculation attacks against some high-performance processors rely on + being able to manipulate the branch predictor for a victim context by + executing aliasing branches in the attacker context. Such attacks + can be partially mitigated against by clearing internal branch + predictor state and limiting the prediction logic in some situations. + + This config option will take CPU-specific actions to harden the + branch predictor against aliasing attacks and may rely on specific + instruction sequences or control bits being set by the system + firmware. + + If unsure, say Y. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT @@ -1049,6 +1081,23 @@ config CMDLINE entering them here. As a minimum, you should specify the the root device (e.g. root=/dev/nfs). +choice + prompt "Kernel command line type" if CMDLINE != "" + default CMDLINE_FROM_BOOTLOADER + +config CMDLINE_FROM_BOOTLOADER + bool "Use bootloader kernel arguments if available" + help + Uses the command-line options passed by the boot loader. If + the boot loader doesn't provide any, the default kernel command + string provided in CMDLINE will be used. 
+ +config CMDLINE_EXTEND + bool "Extend bootloader kernel arguments" + help + The command-line arguments provided by the boot loader will be + appended to the default kernel command string. + config CMDLINE_FORCE bool "Always use the default kernel command string" help @@ -1056,6 +1105,7 @@ config CMDLINE_FORCE loader passes other arguments to the kernel. This is useful if you cannot or don't want to change the command-line options your boot loader passes to the kernel. +endchoice config EFI_STUB bool @@ -1088,6 +1138,41 @@ config DMI However, even with this option, the resultant kernel should continue to boot on existing non-UEFI platforms. +config BUILD_ARM64_APPENDED_DTB_IMAGE + bool "Build a concatenated Image.gz/dtb by default" + depends on OF + help + Enabling this option will cause a concatenated Image.gz and list of + DTBs to be built by default (instead of a standalone Image.gz.) + The image will built in arch/arm64/boot/Image.gz-dtb + +choice + prompt "Appended DTB Kernel Image name" + depends on BUILD_ARM64_APPENDED_DTB_IMAGE + help + Enabling this option will cause a specific kernel image Image or + Image.gz to be used for final image creation. + The image will built in arch/arm64/boot/IMAGE-NAME-dtb + + config IMG_GZ_DTB + bool "Image.gz-dtb" + config IMG_DTB + bool "Image-dtb" +endchoice + +config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME + string + depends on BUILD_ARM64_APPENDED_DTB_IMAGE + default "Image.gz-dtb" if IMG_GZ_DTB + default "Image-dtb" if IMG_DTB + +config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES + string "Default dtb names" + depends on BUILD_ARM64_APPENDED_DTB_IMAGE + help + Space separated list of names of dtbs to append when + building a concatenated Image.gz-dtb. + endmenu menu "Userspace binary formats" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 939b310913cf..78df900d75b5 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 -ifneq ($(CONFIG_RELOCATABLE),) -LDFLAGS_vmlinux += -pie -shared -Bsymbolic +ifeq ($(CONFIG_RELOCATABLE), y) +# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour +# for relative relocs, since this leads to better Image compression +# with the relocation offsets always being zero. +LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ + $(call ld-option, --no-apply-dynamic-relocs) endif ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) @@ -45,9 +49,17 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable) endif endif -KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) +ifeq ($(cc-name),clang) +# This is a workaround for https://bugs.llvm.org/show_bug.cgi?id=30792. +# TODO: revert when this is fixed in LLVM. 
+KBUILD_CFLAGS += -mno-implicit-float +else +KBUILD_CFLAGS += -mgeneral-regs-only +endif +KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) +KBUILD_CFLAGS += -fno-pic KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) @@ -77,9 +89,6 @@ endif ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds -ifeq ($(CONFIG_DYNAMIC_FTRACE),y) -KBUILD_LDFLAGS_MODULE += $(objtree)/arch/arm64/kernel/ftrace-mod.o -endif endif # Default value @@ -94,6 +103,10 @@ else TEXT_OFFSET := 0x00080000 endif +ifeq ($(cc-name),clang) +KBUILD_CFLAGS += $(call cc-disable-warning, asm-operand-widths) +endif + # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61) # in 32-bit arithmetic KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ @@ -113,10 +126,15 @@ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a # Default target when executing plain make boot := arch/arm64/boot +ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y) +KBUILD_IMAGE := $(boot)/$(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) +else KBUILD_IMAGE := $(boot)/Image.gz +endif + KBUILD_DTBS := dtbs -all: Image.gz $(KBUILD_DTBS) +all: Image.gz $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) Image: vmlinux @@ -139,6 +157,12 @@ dtbs: prepare scripts dtbs_install: $(Q)$(MAKE) $(dtbinst)=$(boot)/dts +Image-dtb: vmlinux scripts dtbs + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +Image.gz-dtb: vmlinux scripts dtbs Image.gz + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore index 8dab0bb6ae66..34e35209fc2e 100644 --- a/arch/arm64/boot/.gitignore +++ b/arch/arm64/boot/.gitignore @@ -1,2 +1,4 @@ Image +Image-dtb Image.gz +Image.gz-dtb diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index 1f012c506434..2c8cb864315e 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -14,16 +14,29 @@ # Based on the ia64 boot/Makefile. 
# +include $(srctree)/arch/arm64/boot/dts/Makefile + OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.gz +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) + $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) $(obj)/Image.bz2: $(obj)/Image FORCE $(call if_changed,bzip2) +$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE + $(call if_changed,cat) + $(obj)/Image.gz: $(obj)/Image FORCE $(call if_changed,gzip) @@ -36,6 +49,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE $(obj)/Image.lzo: $(obj)/Image FORCE $(call if_changed,lzo) +$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE + $(call if_changed,cat) + install: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore deleted file mode 100644 index b60ed208c779..000000000000 --- a/arch/arm64/boot/dts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.dtb diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index c6684ab8e201..db5a70876487 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -32,3 +32,17 @@ dtstree := $(srctree)/$(src) dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts))) always := $(dtb-y) + +targets += dtbs + +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +targets += $(DTB_LIST) + +dtbs: $(addprefix $(obj)/, $(DTB_LIST)) + +clean-files := dts/*.dtb *.dtb diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 1ffa1c238a72..08b7bb7f5b74 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -301,6 +301,7 @@ &usb1_phy { status = "okay"; + phy-supply = <&usb_otg_pwr>; }; &usb0 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index d8dd3298b15c..fb8d76a17bc5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -49,6 +49,14 @@ / { compatible = "amlogic,meson-gxl"; + + reserved-memory { + /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ + secmon_reserved_alt: secmon@05000000 { + reg = <0x0 0x05000000 0x0 0x300000>; + no-map; + }; + }; }; ðmac { diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts index b39b6d6ec5aa..d2467e478ec3 100644 --- a/arch/arm64/boot/dts/arm/juno-r2.dts +++ b/arch/arm64/boot/dts/arm/juno-r2.dts @@ -98,6 +98,7 @@ next-level-cache = <&A72_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>; capacity-dmips-mhz = <1024>; }; @@ -115,6 +116,7 @@ next-level-cache = <&A72_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>; capacity-dmips-mhz = <1024>; }; @@ -132,6 +134,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; 
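The capacity-dmips-mhz values above (1024 for the A72 cores, 485 for the A53 cores) and the new sched-energy-costs phandles, which point at the per-capacity-state cost tables in juno-sched-energy.dtsi included further down, describe each CPU's relative performance and energy to the scheduler. As a rough sketch, assuming the usual arch_topology behaviour, the dmips-mhz ratings are scaled by each CPU's maximum frequency and renormalized so the fastest CPU ends up at SCHED_CAPACITY_SCALE (1024); normalize_capacities below is an illustrative name, not a kernel function:

	#define SCHED_CAPACITY_SCALE	1024

	/* Illustrative only: derive normalized CPU capacities from
	 * capacity-dmips-mhz ratings and per-CPU maximum frequencies (kHz). */
	static void normalize_capacities(const unsigned long *dmips_mhz,
					 const unsigned long *max_freq_khz,
					 unsigned long *capacity, int ncpus)
	{
		unsigned long max_raw = 0;
		int i;

		for (i = 0; i < ncpus; i++) {
			capacity[i] = dmips_mhz[i] * max_freq_khz[i];
			if (capacity[i] > max_raw)
				max_raw = capacity[i];
		}
		for (i = 0; i < ncpus; i++)
			capacity[i] = capacity[i] * SCHED_CAPACITY_SCALE / max_raw;
	}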
@@ -149,6 +152,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -166,6 +170,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -183,6 +188,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -199,6 +205,7 @@ cache-line-size = <64>; cache-sets = <1024>; }; + /include/ "juno-sched-energy.dtsi" }; pmu_a72 { diff --git a/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi b/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi new file mode 100644 index 000000000000..7ffb2554817b --- /dev/null +++ b/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi @@ -0,0 +1,123 @@ +/* + * ARM JUNO specific energy cost model data. There are no unit requirements for + * the data. Data can be normalized to any reference point, but the + * normalization must be consistent. That is, one bogo-joule/watt must be the + * same quantity for all data, but we don't care what it is. + */ + +energy-costs { + /* Juno r0 Energy */ + CPU_COST_A57: core-cost0 { + busy-cost-data = < + 417 168 + 579 251 + 744 359 + 883 479 + 1024 616 + >; + idle-cost-data = < + 15 + 15 + 0 + 0 + >; + }; + CPU_COST_A53: core-cost1 { + busy-cost-data = < + 235 33 + 302 46 + 368 61 + 406 76 + 447 93 + >; + idle-cost-data = < + 6 + 6 + 0 + 0 + >; + }; + CLUSTER_COST_A57: cluster-cost0 { + busy-cost-data = < + 417 24 + 579 32 + 744 43 + 883 49 + 1024 64 + >; + idle-cost-data = < + 65 + 65 + 65 + 24 + >; + }; + CLUSTER_COST_A53: cluster-cost1 { + busy-cost-data = < + 235 26 + 303 30 + 368 39 + 406 47 + 447 57 + >; + idle-cost-data = < + 56 + 56 + 56 + 17 + >; + }; + /* Juno r2 Energy */ + CPU_COST_A72: core-cost2 { + busy-cost-data = < + 501 174 + 849 344 + 1024 526 + >; + idle-cost-data = < + 48 + 48 + 0 + 0 + >; + }; + CPU_COST_A53R2: core-cost3 { + busy-cost-data = < + 276 37 + 501 59 + 593 117 + >; + idle-cost-data = < + 33 + 33 + 0 + 0 + >; + }; + CLUSTER_COST_A72: cluster-cost2 { + busy-cost-data = < + 501 48 + 849 73 + 1024 107 + >; + idle-cost-data = < + 48 + 48 + 48 + 18 + >; + }; + CLUSTER_COST_A53R2: cluster-cost3 { + busy-cost-data = < + 276 41 + 501 86 + 593 107 + >; + idle-cost-data = < + 41 + 41 + 41 + 14 + >; + }; +}; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index c9236c4b967d..ae5306a0ca26 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -97,6 +97,7 @@ next-level-cache = <&A57_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A57 &CLUSTER_COST_A57>; capacity-dmips-mhz = <1024>; }; @@ -114,6 +115,7 @@ next-level-cache = <&A57_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A57 &CLUSTER_COST_A57>; capacity-dmips-mhz = <1024>; }; @@ -131,6 +133,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -148,6 +151,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs 
= <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -165,6 +169,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -182,6 +187,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -198,6 +204,7 @@ cache-line-size = <64>; cache-sets = <1024>; }; + /include/ "juno-sched-energy.dtsi" }; pmu_a57 { diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi index ff1dc89f599e..66d48e35d66d 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi @@ -92,7 +92,9 @@ cooling-max-level = <0>; #cooling-cells = <2>; /* min followed by max */ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; dynamic-power-coefficient = <311>; + capacity-dmips-mhz = <1024>; }; cpu1: cpu@1 { @@ -103,6 +105,8 @@ next-level-cache = <&CLUSTER0_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu2: cpu@2 { @@ -113,6 +117,7 @@ next-level-cache = <&CLUSTER0_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + capacity-dmips-mhz = <1024>; }; cpu3: cpu@3 { @@ -123,6 +128,8 @@ next-level-cache = <&CLUSTER0_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu4: cpu@100 { @@ -133,6 +140,7 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + capacity-dmips-mhz = <1024>; }; cpu5: cpu@101 { @@ -143,6 +151,8 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu6: cpu@102 { @@ -153,6 +163,8 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu7: cpu@103 { @@ -163,6 +175,8 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; CLUSTER0_L2: l2-cache0 { @@ -172,6 +186,50 @@ CLUSTER1_L2: l2-cache1 { compatible = "cache"; }; + + energy-costs { + SYSTEM_COST: system-cost0 { + busy-cost-data = < + 1024 0 + >; + idle-cost-data = < + 0 + 0 + 0 + 0 + >; + }; + CLUSTER_COST: cluster-cost0 { + busy-cost-data = < + 178 16 + 369 29 + 622 47 + 819 75 + 1024 112 + >; + idle-cost-data = < + 107 + 107 + 47 + 0 + >; + }; + CPU_COST: core-cost0 { + busy-cost-data = < + 178 69 + 369 125 + 622 224 + 819 367 + 1024 670 + >; + idle-cost-data = < + 15 + 15 + 0 + 0 + >; + }; + }; }; cpu_opp_table: cpu_opp_table { diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts index 9c3bdf87e543..51327645b3fb 100644 --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts +++ 
b/arch/arm64/boot/dts/marvell/armada-7040-db.dts @@ -61,6 +61,12 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cpm_eth1; + ethernet2 = &cpm_eth2; + }; + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { compatible = "regulator-fixed"; regulator-name = "usb3h0-vbus"; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts index 0d7b2ae46610..a4f82f1efbbc 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts @@ -61,6 +61,13 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cpm_eth2; + ethernet2 = &cps_eth0; + ethernet3 = &cps_eth1; + }; + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { compatible = "regulator-fixed"; regulator-name = "cpm-usb3h0-vbus"; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts index acf5c7d16d79..e6ee7443b530 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts @@ -62,6 +62,12 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cps_eth0; + ethernet2 = &cps_eth1; + }; + /* Regulator labels correspond with schematics */ v_3_3: regulator-3-3v { compatible = "regulator-fixed"; @@ -222,8 +228,11 @@ &cpm_eth0 { status = "okay"; + /* Network PHY */ phy = <&phy0>; phy-mode = "10gbase-kr"; + /* Generic PHY, providing serdes lanes */ + phys = <&cpm_comphy4 0>; }; &cpm_sata0 { @@ -257,15 +266,21 @@ &cps_eth0 { status = "okay"; + /* Network PHY */ phy = <&phy8>; phy-mode = "10gbase-kr"; + /* Generic PHY, providing serdes lanes */ + phys = <&cps_comphy4 0>; }; &cps_eth1 { /* CPS Lane 0 - J5 (Gigabit RJ45) */ status = "okay"; + /* Network PHY */ phy = <&ge_phy>; phy-mode = "sgmii"; + /* Generic PHY, providing serdes lanes */ + phys = <&cps_comphy0 1>; }; &cps_pinctrl { diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index f2aa2a81de4d..9a7b63cd63a3 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi @@ -63,8 +63,10 @@ cpm_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; - clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>; - clock-names = "pp_clk", "gop_clk", "mg_clk"; + clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, + <&cpm_clk 1 5>, <&cpm_clk 1 18>; + clock-names = "pp_clk", "gop_clk", + "mg_clk","axi_clk"; marvell,system-controller = <&cpm_syscon0>; status = "disabled"; dma-coherent; @@ -109,12 +111,51 @@ }; }; + cpm_comphy: phy@120000 { + compatible = "marvell,comphy-cp110"; + reg = <0x120000 0x6000>; + marvell,system-controller = <&cpm_syscon0>; + #address-cells = <1>; + #size-cells = <0>; + + cpm_comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + cpm_comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + cpm_comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + + cpm_comphy3: phy@3 { + reg = <3>; + #phy-cells = <1>; + }; + + cpm_comphy4: phy@4 { + reg = <4>; + #phy-cells = <1>; + }; + + cpm_comphy5: phy@5 { + reg = <5>; + #phy-cells = <1>; + }; + }; + cpm_mdio: mdio@12a200 { #address-cells = <1>; #size-cells = <0>; compatible = "marvell,orion-mdio"; reg = <0x12a200 0x10>; - clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>; + clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>, + <&cpm_clk 1 6>, <&cpm_clk 1 18>; status = "disabled"; }; @@ -295,8 
+336,8 @@ compatible = "marvell,armada-cp110-sdhci"; reg = <0x780000 0x300>; interrupts = ; - clock-names = "core"; - clocks = <&cpm_clk 1 4>; + clock-names = "core","axi"; + clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>; dma-coherent; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 4fe70323abb3..faf28633a309 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi @@ -63,8 +63,10 @@ cps_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; - clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>; - clock-names = "pp_clk", "gop_clk", "mg_clk"; + clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, + <&cps_clk 1 5>, <&cps_clk 1 18>; + clock-names = "pp_clk", "gop_clk", + "mg_clk", "axi_clk"; marvell,system-controller = <&cps_syscon0>; status = "disabled"; dma-coherent; @@ -109,12 +111,51 @@ }; }; + cps_comphy: phy@120000 { + compatible = "marvell,comphy-cp110"; + reg = <0x120000 0x6000>; + marvell,system-controller = <&cps_syscon0>; + #address-cells = <1>; + #size-cells = <0>; + + cps_comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + cps_comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + cps_comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + + cps_comphy3: phy@3 { + reg = <3>; + #phy-cells = <1>; + }; + + cps_comphy4: phy@4 { + reg = <4>; + #phy-cells = <1>; + }; + + cps_comphy5: phy@5 { + reg = <5>; + #phy-cells = <1>; + }; + }; + cps_mdio: mdio@12a200 { #address-cells = <1>; #size-cells = <0>; compatible = "marvell,orion-mdio"; reg = <0x12a200 0x10>; - clocks = <&cps_clk 1 9>, <&cps_clk 1 5>; + clocks = <&cps_clk 1 9>, <&cps_clk 1 5>, + <&cps_clk 1 6>, <&cps_clk 1 18>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index b99a27372965..da64e1cab233 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -81,6 +81,7 @@ reg = <0x000>; enable-method = "psci"; cpu-idle-states = <&CPU_SLEEP_0>; + #cooling-cells = <2>; }; cpu1: cpu@1 { @@ -97,6 +98,7 @@ reg = <0x100>; enable-method = "psci"; cpu-idle-states = <&CPU_SLEEP_0>; + #cooling-cells = <2>; }; cpu3: cpu@101 { diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index dc3817593e14..61da6e65900b 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -901,6 +901,7 @@ "dsi_phy_regulator"; #clock-cells = <1>; + #phy-cells = <0>; clocks = <&gcc GCC_MDSS_AHB_CLK>; clock-names = "iface_clk"; @@ -1430,8 +1431,8 @@ #address-cells = <1>; #size-cells = <0>; - qcom,ipc-1 = <&apcs 0 13>; - qcom,ipc-6 = <&apcs 0 19>; + qcom,ipc-1 = <&apcs 8 13>; + qcom,ipc-3 = <&apcs 8 19>; apps_smsm: apps@0 { reg = <0>; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index d9d885006a8e..9eb11a8d9eda 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -264,6 +264,7 @@ reg = <0>; interrupt-parent = <&gpio2>; interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index 1b868df2393f..e95d99265af9 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -145,7 
+145,6 @@ &avb { pinctrl-0 = <&avb_pins>; pinctrl-names = "default"; - renesas,no-ether-link; phy-handle = <&phy0>; status = "okay"; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 34480e9af2e7..36e21112113e 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -4,6 +4,7 @@ CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -24,8 +25,9 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_JUMP_LABEL=y @@ -69,13 +71,13 @@ CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_PCI_LAYERSCAPE=y CONFIG_PCI_HISI=y CONFIG_PCIE_QCOM=y -CONFIG_PCIE_KIRIN=y CONFIG_PCIE_ARMADA_8K=y +CONFIG_PCIE_KIRIN=y CONFIG_PCI_AARDVARK=y CONFIG_PCIE_RCAR=y -CONFIG_PCIE_ROCKCHIP=m CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y +CONFIG_PCIE_ROCKCHIP=m CONFIG_ARM64_VA_BITS_48=y CONFIG_SCHED_MC=y CONFIG_NUMA=y @@ -93,6 +95,12 @@ CONFIG_HIBERNATION=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPUFREQ_DT=y CONFIG_ARM_BIG_LITTLE_CPUFREQ=y CONFIG_ARM_SCPI_CPUFREQ=y @@ -140,11 +148,10 @@ CONFIG_BT_HIDP=m CONFIG_BT_LEDS=y # CONFIG_BT_DEBUGFS is not set CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_LL=y -CONFIG_CFG80211=m -CONFIG_MAC80211=m +CONFIG_CFG80211=y +CONFIG_MAC80211=y CONFIG_MAC80211_LEDS=y -CONFIG_RFKILL=m +CONFIG_RFKILL=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -210,21 +217,16 @@ CONFIG_REALTEK_PHY=m CONFIG_ROCKCHIP_PHY=y CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SR9800=m -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m +CONFIG_USB_RTL8152=y CONFIG_BRCMFMAC=m +CONFIG_RTL_CARDS=m CONFIG_WL18XX=m CONFIG_WLCORE_SDIO=m +CONFIG_USB_NET_RNDIS_WLAN=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_ADC=m -CONFIG_KEYBOARD_CROS_EC=y CONFIG_KEYBOARD_GPIO=y +CONFIG_KEYBOARD_CROS_EC=y CONFIG_INPUT_MISC=y CONFIG_INPUT_PM8941_PWRKEY=y CONFIG_INPUT_HISI_POWERKEY=y @@ -275,20 +277,20 @@ CONFIG_I2C_UNIPHIER_F=y CONFIG_I2C_RCAR=y CONFIG_I2C_CROS_EC_TUNNEL=y CONFIG_SPI=y -CONFIG_SPI_MESON_SPICC=m -CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_BCM2835=m CONFIG_SPI_BCM2835AUX=m +CONFIG_SPI_MESON_SPICC=m +CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y -CONFIG_SPI_QUP=y CONFIG_SPI_ROCKCHIP=y +CONFIG_SPI_QUP=y CONFIG_SPI_S3C64XX=y CONFIG_SPI_SPIDEV=m CONFIG_SPMI=y -CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y +CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_MSM8994=y CONFIG_PINCTRL_MSM8996=y @@ -313,9 +315,8 @@ CONFIG_SENSORS_INA2XX=m CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y -CONFIG_BRCMSTB_THERMAL=m -CONFIG_EXYNOS_THERMAL=y CONFIG_ROCKCHIP_THERMAL=m +CONFIG_EXYNOS_THERMAL=y CONFIG_WATCHDOG=y CONFIG_S3C2410_WATCHDOG=y CONFIG_MESON_GXBB_WATCHDOG=m @@ -334,9 +335,9 @@ CONFIG_MFD_MAX77620=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y 
CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI6421V530=y CONFIG_REGULATOR_HI655X=y @@ -346,16 +347,13 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_S2MPS11=y +CONFIG_RC_DEVICES=y +CONFIG_IR_MESON=m CONFIG_MEDIA_SUPPORT=m CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_ANALOG_TV_SUPPORT=y CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y -CONFIG_MEDIA_RC_SUPPORT=y -CONFIG_RC_CORE=m -CONFIG_RC_DEVICES=y -CONFIG_RC_DECODERS=y -CONFIG_IR_MESON=m CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_DVB_NET is not set CONFIG_V4L_MEM2MEM_DRIVERS=y @@ -393,7 +391,6 @@ CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -492,7 +489,6 @@ CONFIG_XEN_GRANT_DEV_ALLOC=y CONFIG_COMMON_CLK_RK808=y CONFIG_COMMON_CLK_SCPI=y CONFIG_COMMON_CLK_CS2000_CP=y -CONFIG_COMMON_CLK_S2MPS11=y CONFIG_CLK_QORIQ=y CONFIG_COMMON_CLK_PWM=y CONFIG_COMMON_CLK_QCOM=y @@ -531,13 +527,13 @@ CONFIG_PWM_MESON=m CONFIG_PWM_ROCKCHIP=y CONFIG_PWM_SAMSUNG=y CONFIG_PWM_TEGRA=m -CONFIG_PHY_RCAR_GEN3_USB2=y -CONFIG_PHY_HI6220_USB=y +CONFIG_PHY_XGENE=y CONFIG_PHY_SUN4I_USB=y -CONFIG_PHY_ROCKCHIP_INNO_USB2=y +CONFIG_PHY_HI6220_USB=y +CONFIG_PHY_RCAR_GEN3_USB2=y CONFIG_PHY_ROCKCHIP_EMMC=y +CONFIG_PHY_ROCKCHIP_INNO_USB2=y CONFIG_PHY_ROCKCHIP_PCIE=m -CONFIG_PHY_XGENE=y CONFIG_PHY_TEGRA_XUSB=y CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y @@ -579,29 +575,27 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y -# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_PROVE_LOCKING=y +CONFIG_FUNCTION_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_SCHED_TRACER=y CONFIG_MEMTEST=y CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=m CONFIG_CRYPTO_SHA512_ARM64=m CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_CRC32_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64=m -CONFIG_CRYPTO_AES_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig new file mode 100644 index 000000000000..51c3bfc8658c --- /dev/null +++ b/arch/arm64/configs/ranchu64_defconfig @@ -0,0 +1,310 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_MMAP_RND_BITS=24 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set 
+CONFIG_ARCH_VEXPRESS=y +CONFIG_NR_CPUS=4 +CONFIG_PREEMPT=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y 
+CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMC91X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_BATTERY_GOLDFISH=y +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y +CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_FTRACE is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_DEBUG_RODATA=y 
+CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 70c517aa4501..f856628e53fc 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -95,4 +95,10 @@ config CRYPTO_AES_ARM64_BS select CRYPTO_AES_ARM64 select CRYPTO_SIMD +config CRYPTO_SPECK_NEON + tristate "NEON accelerated Speck cipher algorithms" + depends on KERNEL_MODE_NEON + select CRYPTO_BLKCIPHER + select CRYPTO_SPECK + endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index b5edc5918c28..e761c0a7a181 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o -CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto +aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o @@ -44,6 +44,9 @@ sha512-arm64-y := sha512-glue.o sha512-core.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o +speck-neon-y := speck-neon-core.o speck-neon-glue.o + obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o @@ -58,6 +61,7 @@ CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE $(call if_changed_rule,cc_o_c) +ifdef REGENERATE_ARM64_CRYPTO quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $(<) void $(@) @@ -66,5 +70,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl $(call cmd,perlasm) +endif .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S new file mode 100644 index 000000000000..8efdfdade393 --- /dev/null +++ b/arch/arm64/crypto/aes-ce-core.S @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2013 - 2017 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + + .arch armv8-a+crypto + +ENTRY(__aes_ce_encrypt) + sub w3, w3, #2 + ld1 {v0.16b}, [x2] + ld1 {v1.4s}, [x0], #16 + cmp w3, #10 + bmi 0f + bne 3f + mov v3.16b, v1.16b + b 2f +0: mov v2.16b, v1.16b + ld1 {v3.4s}, [x0], #16 +1: aese v0.16b, v2.16b + aesmc v0.16b, v0.16b +2: ld1 {v1.4s}, [x0], #16 + aese v0.16b, v3.16b + aesmc v0.16b, v0.16b +3: ld1 {v2.4s}, [x0], #16 + subs w3, w3, #3 + aese v0.16b, v1.16b + aesmc v0.16b, v0.16b + ld1 {v3.4s}, [x0], #16 + bpl 1b + aese v0.16b, v2.16b + eor v0.16b, v0.16b, v3.16b + st1 {v0.16b}, [x1] + ret +ENDPROC(__aes_ce_encrypt) + +ENTRY(__aes_ce_decrypt) + sub w3, w3, #2 + ld1 {v0.16b}, [x2] + ld1 {v1.4s}, [x0], #16 + cmp w3, #10 + bmi 0f + bne 3f + mov v3.16b, v1.16b + b 2f +0: mov v2.16b, v1.16b + ld1 {v3.4s}, [x0], #16 +1: aesd v0.16b, v2.16b + aesimc v0.16b, v0.16b +2: ld1 {v1.4s}, [x0], #16 + aesd v0.16b, v3.16b + aesimc v0.16b, v0.16b +3: ld1 {v2.4s}, [x0], #16 + subs w3, w3, #3 + aesd v0.16b, v1.16b + aesimc v0.16b, v0.16b + ld1 {v3.4s}, [x0], #16 + bpl 1b + aesd v0.16b, v2.16b + eor v0.16b, v0.16b, v3.16b + st1 {v0.16b}, [x1] + ret +ENDPROC(__aes_ce_decrypt) + +/* + * __aes_ce_sub() - use the aese instruction to perform the AES sbox + * substitution on each byte in 'input' + */ +ENTRY(__aes_ce_sub) + dup v1.4s, w0 + movi v0.16b, #0 + aese v0.16b, v1.16b + umov w0, v0.s[0] + ret +ENDPROC(__aes_ce_sub) + +ENTRY(__aes_ce_invert) + ld1 {v0.4s}, [x1] + aesimc v1.16b, v0.16b + st1 {v1.4s}, [x0] + ret +ENDPROC(__aes_ce_invert) diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-glue.c similarity index 62% rename from arch/arm64/crypto/aes-ce-cipher.c rename to arch/arm64/crypto/aes-ce-glue.c index 6a75cd75ed11..e6b3227bbf57 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-glue.c @@ -29,6 +29,13 @@ struct aes_block { u8 b[AES_BLOCK_SIZE]; }; +asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); +asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); + +asmlinkage u32 __aes_ce_sub(u32 l); +asmlinkage void __aes_ce_invert(struct aes_block *out, + const struct aes_block *in); + static int num_rounds(struct crypto_aes_ctx *ctx) { /* @@ -44,10 +51,6 @@ static int num_rounds(struct crypto_aes_ctx *ctx) static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - struct aes_block *out = (struct aes_block *)dst; - struct aes_block const *in = (struct aes_block *)src; - void *dummy0; - int dummy1; if (!may_use_simd()) { __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); @@ -55,49 +58,13 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) } kernel_neon_begin(); - - __asm__(" ld1 {v0.16b}, %[in] ;" - " ld1 {v1.4s}, [%[key]], #16 ;" - " cmp %w[rounds], #10 ;" - " bmi 0f ;" - " bne 3f ;" - " mov v3.16b, v1.16b ;" - " b 2f ;" - "0: mov v2.16b, v1.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - "1: aese v0.16b, v2.16b ;" - " aesmc v0.16b, v0.16b ;" - "2: ld1 {v1.4s}, [%[key]], #16 ;" - " aese v0.16b, v3.16b ;" - " aesmc v0.16b, v0.16b ;" - "3: ld1 {v2.4s}, [%[key]], #16 ;" - " subs %w[rounds], %w[rounds], #3 ;" - " aese v0.16b, v1.16b ;" - " aesmc v0.16b, v0.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - " bpl 1b ;" - " aese v0.16b, v2.16b ;" - " eor v0.16b, v0.16b, v3.16b ;" - " st1 {v0.16b}, %[out] ;" - - : [out] "=Q"(*out), - [key] "=r"(dummy0), - [rounds] "=r"(dummy1) - : [in] "Q"(*in), - "1"(ctx->key_enc), - "2"(num_rounds(ctx) - 2) 
- : "cc"); - + __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); kernel_neon_end(); } static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - struct aes_block *out = (struct aes_block *)dst; - struct aes_block const *in = (struct aes_block *)src; - void *dummy0; - int dummy1; if (!may_use_simd()) { __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); @@ -105,62 +72,10 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) } kernel_neon_begin(); - - __asm__(" ld1 {v0.16b}, %[in] ;" - " ld1 {v1.4s}, [%[key]], #16 ;" - " cmp %w[rounds], #10 ;" - " bmi 0f ;" - " bne 3f ;" - " mov v3.16b, v1.16b ;" - " b 2f ;" - "0: mov v2.16b, v1.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - "1: aesd v0.16b, v2.16b ;" - " aesimc v0.16b, v0.16b ;" - "2: ld1 {v1.4s}, [%[key]], #16 ;" - " aesd v0.16b, v3.16b ;" - " aesimc v0.16b, v0.16b ;" - "3: ld1 {v2.4s}, [%[key]], #16 ;" - " subs %w[rounds], %w[rounds], #3 ;" - " aesd v0.16b, v1.16b ;" - " aesimc v0.16b, v0.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - " bpl 1b ;" - " aesd v0.16b, v2.16b ;" - " eor v0.16b, v0.16b, v3.16b ;" - " st1 {v0.16b}, %[out] ;" - - : [out] "=Q"(*out), - [key] "=r"(dummy0), - [rounds] "=r"(dummy1) - : [in] "Q"(*in), - "1"(ctx->key_dec), - "2"(num_rounds(ctx) - 2) - : "cc"); - + __aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); kernel_neon_end(); } -/* - * aes_sub() - use the aese instruction to perform the AES sbox substitution - * on each byte in 'input' - */ -static u32 aes_sub(u32 input) -{ - u32 ret; - - __asm__("dup v1.4s, %w[in] ;" - "movi v0.16b, #0 ;" - "aese v0.16b, v1.16b ;" - "umov %w[out], v0.4s[0] ;" - - : [out] "=r"(ret) - : [in] "r"(input) - : "v0","v1"); - - return ret; -} - int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) { @@ -189,7 +104,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, u32 *rki = ctx->key_enc + (i * kwords); u32 *rko = rki + kwords; - rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; + rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; rko[1] = rko[0] ^ rki[1]; rko[2] = rko[1] ^ rki[2]; rko[3] = rko[2] ^ rki[3]; @@ -202,7 +117,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, } else if (key_len == AES_KEYSIZE_256) { if (i >= 6) break; - rko[4] = aes_sub(rko[3]) ^ rki[4]; + rko[4] = __aes_ce_sub(rko[3]) ^ rki[4]; rko[5] = rko[4] ^ rki[5]; rko[6] = rko[5] ^ rki[6]; rko[7] = rko[6] ^ rki[7]; @@ -221,13 +136,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, key_dec[0] = key_enc[j]; for (i = 1, j--; j > 0; i++, j--) - __asm__("ld1 {v0.4s}, %[in] ;" - "aesimc v1.16b, v0.16b ;" - "st1 {v1.4s}, %[out] ;" - - : [out] "=Q"(key_dec[i]) - : [in] "Q"(key_enc[j]) - : "v0","v1"); + __aes_ce_invert(key_dec + i, key_enc + j); key_dec[i] = key_enc[0]; kernel_neon_end(); diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c index 624f4137918c..34b4e3d46aab 100644 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ b/arch/arm64/crypto/crc32-ce-glue.c @@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm64-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { @@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32c", .base.cra_driver_name = 
"crc32c-arm64-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S new file mode 100644 index 000000000000..b14463438b09 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-core.S @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Author: Eric Biggers + */ + +#include + + .text + + // arguments + ROUND_KEYS .req x0 // const {u64,u32} *round_keys + NROUNDS .req w1 // int nrounds + NROUNDS_X .req x1 + DST .req x2 // void *dst + SRC .req x3 // const void *src + NBYTES .req w4 // unsigned int nbytes + TWEAK .req x5 // void *tweak + + // registers which hold the data being encrypted/decrypted + // (underscores avoid a naming collision with ARM64 registers x0-x3) + X_0 .req v0 + Y_0 .req v1 + X_1 .req v2 + Y_1 .req v3 + X_2 .req v4 + Y_2 .req v5 + X_3 .req v6 + Y_3 .req v7 + + // the round key, duplicated in all lanes + ROUND_KEY .req v8 + + // index vector for tbl-based 8-bit rotates + ROTATE_TABLE .req v9 + ROTATE_TABLE_Q .req q9 + + // temporary registers + TMP0 .req v10 + TMP1 .req v11 + TMP2 .req v12 + TMP3 .req v13 + + // multiplication table for updating XTS tweaks + GFMUL_TABLE .req v14 + GFMUL_TABLE_Q .req q14 + + // next XTS tweak value(s) + TWEAKV_NEXT .req v15 + + // XTS tweaks for the blocks currently being encrypted/decrypted + TWEAKV0 .req v16 + TWEAKV1 .req v17 + TWEAKV2 .req v18 + TWEAKV3 .req v19 + TWEAKV4 .req v20 + TWEAKV5 .req v21 + TWEAKV6 .req v22 + TWEAKV7 .req v23 + + .align 4 +.Lror64_8_table: + .octa 0x080f0e0d0c0b0a090007060504030201 +.Lror32_8_table: + .octa 0x0c0f0e0d080b0a090407060500030201 +.Lrol64_8_table: + .octa 0x0e0d0c0b0a09080f0605040302010007 +.Lrol32_8_table: + .octa 0x0e0d0c0f0a09080b0605040702010003 +.Lgf128mul_table: + .octa 0x00000000000000870000000000000001 +.Lgf64mul_table: + .octa 0x0000000000000000000000002d361b00 + +/* + * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time + * + * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for + * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes + * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. + * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. 
+ */ +.macro _speck_round_128bytes n, lanes + + // x = ror(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b + + // x += y + add X_0.\lanes, X_0.\lanes, Y_0.\lanes + add X_1.\lanes, X_1.\lanes, Y_1.\lanes + add X_2.\lanes, X_2.\lanes, Y_2.\lanes + add X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // y = rol(y, 3) + shl TMP0.\lanes, Y_0.\lanes, #3 + shl TMP1.\lanes, Y_1.\lanes, #3 + shl TMP2.\lanes, Y_2.\lanes, #3 + shl TMP3.\lanes, Y_3.\lanes, #3 + sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) + sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) + sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) + sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) + + // y ^= x + eor Y_0.16b, TMP0.16b, X_0.16b + eor Y_1.16b, TMP1.16b, X_1.16b + eor Y_2.16b, TMP2.16b, X_2.16b + eor Y_3.16b, TMP3.16b, X_3.16b +.endm + +/* + * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time + * + * This is the inverse of _speck_round_128bytes(). + */ +.macro _speck_unround_128bytes n, lanes + + // y ^= x + eor TMP0.16b, Y_0.16b, X_0.16b + eor TMP1.16b, Y_1.16b, X_1.16b + eor TMP2.16b, Y_2.16b, X_2.16b + eor TMP3.16b, Y_3.16b, X_3.16b + + // y = ror(y, 3) + ushr Y_0.\lanes, TMP0.\lanes, #3 + ushr Y_1.\lanes, TMP1.\lanes, #3 + ushr Y_2.\lanes, TMP2.\lanes, #3 + ushr Y_3.\lanes, TMP3.\lanes, #3 + sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) + sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) + sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) + sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // x -= y + sub X_0.\lanes, X_0.\lanes, Y_0.\lanes + sub X_1.\lanes, X_1.\lanes, Y_1.\lanes + sub X_2.\lanes, X_2.\lanes, Y_2.\lanes + sub X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x = rol(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b +.endm + +.macro _next_xts_tweak next, cur, tmp, n +.if \n == 64 + /* + * Calculate the next tweak by multiplying the current one by x, + * modulo p(x) = x^128 + x^7 + x^2 + x + 1. + */ + sshr \tmp\().2d, \cur\().2d, #63 + and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b + shl \next\().2d, \cur\().2d, #1 + ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 + eor \next\().16b, \next\().16b, \tmp\().16b +.else + /* + * Calculate the next two tweaks by multiplying the current ones by x^2, + * modulo p(x) = x^64 + x^4 + x^3 + x + 1. + */ + ushr \tmp\().2d, \cur\().2d, #62 + shl \next\().2d, \cur\().2d, #2 + tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b + eor \next\().16b, \next\().16b, \tmp\().16b +.endif +.endm + +/* + * _speck_xts_crypt() - Speck-XTS encryption/decryption + * + * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer + * using Speck-XTS, specifically the variant with a block size of '2n' and round + * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and + * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a + * nonzero multiple of 128. 
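+ *
+ * Between blocks the XTS tweak is advanced by multiplying it by x in
+ * GF(2^128) for Speck128 (or in GF(2^64) for Speck64), as _next_xts_tweak
+ * does per vector above.  A scalar C sketch of the Speck128 case, with the
+ * tweak held as two little-endian 64-bit halves t[0] (low) and t[1] (high):
+ *
+ *	carry = t[1] >> 63;
+ *	t[1] = (t[1] << 1) | (t[0] >> 63);
+ *	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);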
+ */ +.macro _speck_xts_crypt n, lanes, decrypting + + /* + * If decrypting, modify the ROUND_KEYS parameter to point to the last + * round key rather than the first, since for decryption the round keys + * are used in reverse order. + */ +.if \decrypting + mov NROUNDS, NROUNDS /* zero the high 32 bits */ +.if \n == 64 + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 + sub ROUND_KEYS, ROUND_KEYS, #8 +.else + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 + sub ROUND_KEYS, ROUND_KEYS, #4 +.endif +.endif + + // Load the index vector for tbl-based 8-bit rotates +.if \decrypting + ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table +.else + ldr ROTATE_TABLE_Q, .Lror\n\()_8_table +.endif + + // One-time XTS preparation +.if \n == 64 + // Load first tweak + ld1 {TWEAKV0.16b}, [TWEAK] + + // Load GF(2^128) multiplication table + ldr GFMUL_TABLE_Q, .Lgf128mul_table +.else + // Load first tweak + ld1 {TWEAKV0.8b}, [TWEAK] + + // Load GF(2^64) multiplication table + ldr GFMUL_TABLE_Q, .Lgf64mul_table + + // Calculate second tweak, packing it together with the first + ushr TMP0.2d, TWEAKV0.2d, #63 + shl TMP1.2d, TWEAKV0.2d, #1 + tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b + eor TMP0.8b, TMP0.8b, TMP1.8b + mov TWEAKV0.d[1], TMP0.d[0] +.endif + +.Lnext_128bytes_\@: + + // Calculate XTS tweaks for next 128 bytes + _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n + _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n + _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n + _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n + _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n + _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n + _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n + _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n + + // Load the next source blocks into {X,Y}[0-3] + ld1 {X_0.16b-Y_1.16b}, [SRC], #64 + ld1 {X_2.16b-Y_3.16b}, [SRC], #64 + + // XOR the source blocks with their XTS tweaks + eor TMP0.16b, X_0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor TMP1.16b, X_1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor TMP2.16b, X_2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor TMP3.16b, X_3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + + /* + * De-interleave the 'x' and 'y' elements of each block, i.e. make it so + * that the X[0-3] registers contain only the second halves of blocks, + * and the Y[0-3] registers contain only the first halves of blocks. + * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
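+ *
+ * With "2d" lanes, for instance, uzp1 gathers element 0 of each source
+ * register, i.e. the y words, while uzp2 gathers element 1 of each, i.e.
+ * the x words.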
+ */ + uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes + uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes + + // Do the cipher rounds + mov x6, ROUND_KEYS + mov w7, NROUNDS +.Lnext_round_\@: +.if \decrypting + ld1r {ROUND_KEY.\lanes}, [x6] + sub x6, x6, #( \n / 8 ) + _speck_unround_128bytes \n, \lanes +.else + ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) + _speck_round_128bytes \n, \lanes +.endif + subs w7, w7, #1 + bne .Lnext_round_\@ + + // Re-interleave the 'x' and 'y' elements of each block + zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes + zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes + zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes + zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes + zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes + zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes + zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes + zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes + + // XOR the encrypted/decrypted blocks with the tweaks calculated earlier + eor X_0.16b, TMP0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor X_1.16b, TMP1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor X_2.16b, TMP2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor X_3.16b, TMP3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + mov TWEAKV0.16b, TWEAKV_NEXT.16b + + // Store the ciphertext in the destination buffer + st1 {X_0.16b-Y_1.16b}, [DST], #64 + st1 {X_2.16b-Y_3.16b}, [DST], #64 + + // Continue if there are more 128-byte chunks remaining + subs NBYTES, NBYTES, #128 + bne .Lnext_128bytes_\@ + + // Store the next tweak and return +.if \n == 64 + st1 {TWEAKV_NEXT.16b}, [TWEAK] +.else + st1 {TWEAKV_NEXT.8b}, [TWEAK] +.endif + ret +.endm + +ENTRY(speck128_xts_encrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=0 +ENDPROC(speck128_xts_encrypt_neon) + +ENTRY(speck128_xts_decrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=1 +ENDPROC(speck128_xts_decrypt_neon) + +ENTRY(speck64_xts_encrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=0 +ENDPROC(speck64_xts_encrypt_neon) + +ENTRY(speck64_xts_decrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=1 +ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c new file mode 100644 index 000000000000..6e233aeb4ff4 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-glue.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * (64-bit version; based on the 32-bit version) + * + * Copyright (c) 2018 Google, Inc + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The assembly functions only handle multiples of 128 bytes */ +#define SPECK_NEON_CHUNK_SIZE 128 + +/* Speck128 */ + +struct speck128_xts_tfm_ctx { + struct speck128_tfm_ctx main_key; + struct speck128_tfm_ctx tweak_key; +}; + +asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, 
+ const void *, unsigned int, void *); + +static __always_inline int +__speck128_xts_crypt(struct skcipher_request *req, + speck128_crypt_one_t crypt_one, + speck128_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + le128 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + le128_xor((le128 *)dst, (const le128 *)src, &tweak); + (*crypt_one)(&ctx->main_key, dst, dst); + le128_xor((le128 *)dst, (const le128 *)dst, &tweak); + gf128mul_x_ble(&tweak, &tweak); + + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck128_xts_encrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_encrypt, + speck128_xts_encrypt_neon); +} + +static int speck128_xts_decrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_decrypt, + speck128_xts_decrypt_neon); +} + +static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck128_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +/* Speck64 */ + +struct speck64_xts_tfm_ctx { + struct speck64_tfm_ctx main_key; + struct speck64_tfm_ctx tweak_key; +}; + +asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, + speck64_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + __le64 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + 
ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + *(__le64 *)dst = *(__le64 *)src ^ tweak; + (*crypt_one)(&ctx->main_key, dst, dst); + *(__le64 *)dst ^= tweak; + tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ + ((tweak & cpu_to_le64(1ULL << 63)) ? + 0x1B : 0)); + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck64_xts_encrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_encrypt, + speck64_xts_encrypt_neon); +} + +static int speck64_xts_decrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_decrypt, + speck64_xts_decrypt_neon); +} + +static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck64_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +static struct skcipher_alg speck_algs[] = { + { + .base.cra_name = "xts(speck128)", + .base.cra_driver_name = "xts-speck128-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK128_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK128_128_KEY_SIZE, + .max_keysize = 2 * SPECK128_256_KEY_SIZE, + .ivsize = SPECK128_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck128_xts_setkey, + .encrypt = speck128_xts_encrypt, + .decrypt = speck128_xts_decrypt, + }, { + .base.cra_name = "xts(speck64)", + .base.cra_driver_name = "xts-speck64-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK64_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK64_96_KEY_SIZE, + .max_keysize = 2 * SPECK64_128_KEY_SIZE, + .ivsize = SPECK64_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck64_xts_setkey, + .encrypt = speck64_xts_encrypt, + .decrypt = speck64_xts_decrypt, + } +}; + +static int __init speck_neon_module_init(void) +{ + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_neon_module_exit(void) +{ + crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_neon_module_init); +module_exit(speck_neon_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("xts(speck128)"); +MODULE_ALIAS_CRYPTO("xts-speck128-neon"); +MODULE_ALIAS_CRYPTO("xts(speck64)"); +MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index b3da6c886835..dd49c3567f20 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -13,51 +14,62 @@ #ifdef CONFIG_ARM64_SW_TTBR0_PAN .macro __uaccess_ttbr0_disable, tmp1 mrs \tmp1, ttbr1_el1 // swapper_pg_dir + bic \tmp1, \tmp1, #TTBR_ASID_MASK add 
\tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb + sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE + msr ttbr1_el1, \tmp1 // set reserved ASID + isb .endm - .macro __uaccess_ttbr0_enable, tmp1 + .macro __uaccess_ttbr0_enable, tmp1, tmp2 get_thread_info \tmp1 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 + mrs \tmp2, ttbr1_el1 + extr \tmp2, \tmp2, \tmp1, #48 + ror \tmp2, \tmp2, #16 + msr ttbr1_el1, \tmp2 // set the active ASID + isb msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 isb .endm - .macro uaccess_ttbr0_disable, tmp1 + .macro uaccess_ttbr0_disable, tmp1, tmp2 alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp2 // avoid preemption __uaccess_ttbr0_disable \tmp1 + restore_irq \tmp2 alternative_else_nop_endif .endm - .macro uaccess_ttbr0_enable, tmp1, tmp2 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 alternative_if_not ARM64_HAS_PAN - save_and_disable_irq \tmp2 // avoid preemption - __uaccess_ttbr0_enable \tmp1 - restore_irq \tmp2 + save_and_disable_irq \tmp3 // avoid preemption + __uaccess_ttbr0_enable \tmp1, \tmp2 + restore_irq \tmp3 alternative_else_nop_endif .endm #else - .macro uaccess_ttbr0_disable, tmp1 + .macro uaccess_ttbr0_disable, tmp1, tmp2 .endm - .macro uaccess_ttbr0_enable, tmp1, tmp2 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 .endm #endif /* * These macros are no-ops when UAO is present. */ - .macro uaccess_disable_not_uao, tmp1 - uaccess_ttbr0_disable \tmp1 + .macro uaccess_disable_not_uao, tmp1, tmp2 + uaccess_ttbr0_disable \tmp1, \tmp2 alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(1) alternative_else_nop_endif .endm - .macro uaccess_enable_not_uao, tmp1, tmp2 - uaccess_ttbr0_enable \tmp1, \tmp2 + .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 + uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(0) alternative_else_nop_endif diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index d58a6253c6ab..463619dcadd4 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -25,7 +25,6 @@ #include #include -#include #include #include #include @@ -96,6 +95,24 @@ dmb \opt .endm +/* + * Value prediction barrier + */ + .macro csdb + hint #20 + .endm + +/* + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out + * of bounds. + */ + .macro mask_nospec64, idx, limit, tmp + sub \tmp, \idx, \limit + bic \tmp, \tmp, \idx + and \idx, \idx, \tmp, asr #63 + csdb + .endm + /* * NOP sequence */ @@ -464,39 +481,18 @@ alternative_endif mrs \rd, sp_el0 .endm -/* - * Errata workaround prior to TTBR0_EL1 update - * - * val: TTBR value with new BADDR, preserved - * tmp0: temporary register, clobbered - * tmp1: other temporary register, clobbered +/** + * Errata workaround prior to disable MMU. Insert an ISB immediately prior + * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. */ - .macro pre_ttbr0_update_workaround, val, tmp0, tmp1 -#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 -alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 - mrs \tmp0, ttbr0_el1 - mov \tmp1, #FALKOR_RESERVED_ASID - bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR - msr ttbr0_el1, \tmp0 + .macro pre_disable_mmu_workaround +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041 isb - bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR - msr ttbr0_el1, \tmp0 - isb -alternative_else_nop_endif #endif .endm -/* - * Errata workaround post TTBR0_EL1 update. 
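A note on the mask_nospec64 macro introduced in assembler.h above: it clamps a bounded index to zero when it is out of range, without a conditional branch that could be predicted past. A plain-C model of the same arithmetic (a sketch, not the kernel helper itself) is:

    #include <stdint.h>

    /*
     * Returns idx when 0 <= idx < limit and 0 otherwise, assuming both values
     * have bit 63 clear (as the asm macro does): (idx - limit) is negative
     * exactly when idx < limit, and masking with ~idx forces the result to
     * zero for top-bit-set indices.
     */
    static inline uint64_t example_mask_nospec64(uint64_t idx, uint64_t limit)
    {
            uint64_t mask = (uint64_t)((int64_t)((idx - limit) & ~idx) >> 63);

            return idx & mask;
    }

The real macro additionally ends with csdb so the masked value cannot be bypassed under speculation.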
- */ - .macro post_ttbr0_update_workaround -#ifdef CONFIG_CAVIUM_ERRATUM_27456 -alternative_if ARM64_WORKAROUND_CAVIUM_27456 - ic iallu - dsb nsh - isb -alternative_else_nop_endif -#endif + .macro pte_to_phys, phys, pte + and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT) .endm #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 0fe7e43b7fbc..0b0755c961ac 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -31,6 +31,8 @@ #define dmb(opt) asm volatile("dmb " #opt : : : "memory") #define dsb(opt) asm volatile("dsb " #opt : : : "memory") +#define csdb() asm volatile("hint #20" : : : "memory") + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) @@ -38,6 +40,27 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) +/* + * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz + * and 0 otherwise. + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + " cmp %1, %2\n" + " sbc %0, xzr, xzr\n" + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + csdb(); + return mask; +} + #define __smp_mb() dmb(ish) #define __smp_rmb() dmb(ishld) #define __smp_wmb() dmb(ishst) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index e39d487bf724..a3c7f271ad4c 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -215,7 +215,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8da621627d7c..2e7b236bc596 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -40,7 +40,10 @@ #define ARM64_WORKAROUND_858921 19 #define ARM64_WORKAROUND_CAVIUM_30115 20 #define ARM64_HAS_DCPOP 21 +#define ARM64_UNMAP_KERNEL_AT_EL0 23 +#define ARM64_HARDEN_BRANCH_PREDICTOR 24 +#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 -#define ARM64_NCAPS 22 +#define ARM64_NCAPS 26 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 235e77d98261..be7bd19c87ec 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -79,26 +79,37 @@ #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 #define ARM_CPU_PART_CORTEX_A57 0xD07 +#define ARM_CPU_PART_CORTEX_A72 0xD08 #define ARM_CPU_PART_CORTEX_A53 0xD03 #define ARM_CPU_PART_CORTEX_A73 0xD09 +#define ARM_CPU_PART_CORTEX_A75 0xD0A #define APM_CPU_PART_POTENZA 0x000 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3 +#define CAVIUM_CPU_PART_THUNDERX2 0x0AF #define BRCM_CPU_PART_VULCAN 0x516 #define QCOM_CPU_PART_FALKOR_V1 0x800 +#define QCOM_CPU_PART_FALKOR 0xC00 +#define QCOM_CPU_PART_KRYO 0x200 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) +#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) +#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define 
MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) +#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2) +#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) +#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) +#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 650344d01124..8389050328bb 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -121,22 +121,22 @@ static inline void efi_set_pgd(struct mm_struct *mm) if (mm != current->active_mm) { /* * Update the current thread's saved ttbr0 since it is - * restored as part of a return from exception. Set - * the hardware TTBR0_EL1 using cpu_switch_mm() - * directly to enable potential errata workarounds. + * restored as part of a return from exception. Enable + * access to the valid TTBR0_EL1 and invoke the errata + * workaround directly since there is no return from + * exception when invoking the EFI run-time services. */ update_saved_ttbr0(current, mm); - cpu_switch_mm(mm->pgd, mm); + uaccess_ttbr0_enable(); + post_ttbr_update_workaround(); } else { /* * Defer the switch to the current thread's TTBR0_EL1 * until uaccess_enable(). Restore the current * thread's saved ttbr0 corresponding to its active_mm - * (if different from init_mm). */ - cpu_set_reserved_ttbr0(); - if (current->active_mm != &init_mm) - update_saved_ttbr0(current, current->active_mm); + uaccess_ttbr0_disable(); + update_saved_ttbr0(current, current->active_mm); } } } diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 33be513ef24c..36d9863cb3cb 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -173,7 +173,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ -#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL +#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. 
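(For scale, the COMPAT_ELF_ET_DYN_BASE change above works out to 2 * 0x100000000 / 3 = 0xaaaaaaaa with the usual 4 GiB TASK_SIZE_32, so the compat PIE load base now scales with the 32-bit address-space size, matching the arch/arm formula, rather than sitting at the old fixed 0x000400000.)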
*/ #define COMPAT_ELF_NGREG 18 diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index caf86be815ba..ec1e6d6fa14c 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -51,6 +51,18 @@ enum fixed_addresses { FIX_EARLYCON_MEM_BASE, FIX_TEXT_POKE0, + +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, +#endif /* CONFIG_ACPI_APEI_GHES */ + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_DATA, + FIX_ENTRY_TRAMP_TEXT, +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, /* diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 5bb2fd4674e7..07fe2479d310 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -48,9 +48,10 @@ do { \ } while (0) static inline int -arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { int oldval = 0, ret, tmp; + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); pagefault_disable(); @@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, u32 oldval, u32 newval) { int ret = 0; u32 val, tmp; + u32 __user *uaddr; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) return -EFAULT; + uaddr = __uaccess_mask_ptr(_uaddr); uaccess_enable(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" " prfm pstl1strm, %2\n" diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 61d694c2eae5..555d463c0eaa 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -170,8 +170,7 @@ #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) #define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT (UL(48)) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 26a64d0f9ab9..a7ef5a051911 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -66,6 +66,8 @@ extern u32 __kvm_get_mdcr_el2(void); extern u32 __init_stage2_translation(void); +extern void __qcom_hyp_sanitize_btac_predictors(void); + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e923b58606e2..8ad208cb866c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -384,4 +384,9 @@ static inline void __cpu_init_stage2(void) "PARange is %d bits, unsupported configuration!", parange); } +static inline bool kvm_arm_harden_branch_predictor(void) +{ + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 672c8684d5c2..2d6d4bd9de52 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -309,5 +309,43 @@ static 
inline unsigned int kvm_get_vmid_bits(void) return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; } +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +#include + +static inline void *kvm_get_hyp_vector(void) +{ + struct bp_hardening_data *data = arm64_get_bp_hardening_data(); + void *vect = kvm_ksym_ref(__kvm_hyp_vector); + + if (data->fn) { + vect = __bp_harden_hyp_vecs_start + + data->hyp_vectors_slot * SZ_2K; + + if (!has_vhe()) + vect = lm_alias(vect); + } + + return vect; +} + +static inline int kvm_map_vectors(void) +{ + return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start), + kvm_ksym_ref(__bp_harden_hyp_vecs_end), + PAGE_HYP_EXEC); +} + +#else +static inline void *kvm_get_hyp_vector(void) +{ + return kvm_ksym_ref(__kvm_hyp_vector); +} + +static inline int kvm_map_vectors(void) +{ + return 0; +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h deleted file mode 100644 index bc39e557c56c..000000000000 --- a/arch/arm64/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM64_KVM_PSCI_H__ -#define __ARM64_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM64_KVM_PSCI_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f7c4d2146aed..d4bae7d6e0d8 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -61,8 +61,6 @@ * KIMAGE_VADDR - the virtual address of the start of the kernel image * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. - * TASK_SIZE - the maximum size of a user space task. - * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) #define VA_START (UL(0xffffffffffffffff) - \ @@ -77,19 +75,6 @@ #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) -#define TASK_SIZE_64 (UL(1) << VA_BITS) - -#ifdef CONFIG_COMPAT -#define TASK_SIZE_32 UL(0x100000000) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? 
\ - TASK_SIZE_32 : TASK_SIZE_64) -#else -#define TASK_SIZE TASK_SIZE_64 -#endif /* CONFIG_COMPAT */ - -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) #define KERNEL_START _text #define KERNEL_END _end diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 0d34bf0a89c7..6dd83d75b82a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -17,6 +17,10 @@ #define __ASM_MMU_H #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ +#define USER_ASID_FLAG (UL(1) << 48) +#define TTBR_ASID_MASK (UL(0xffff) << 48) + +#ifndef __ASSEMBLY__ typedef struct { atomic64_t id; @@ -31,6 +35,49 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) +static inline bool arm64_kernel_unmapped_at_el0(void) +{ + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); +} + +typedef void (*bp_hardening_cb_t)(void); + +struct bp_hardening_data { + int hyp_vectors_slot; + bp_hardening_cb_t fn; +}; + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[]; + +DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) +{ + return this_cpu_ptr(&bp_hardening_data); +} + +static inline void arm64_apply_bp_hardening(void) +{ + struct bp_hardening_data *d; + + if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) + return; + + d = arm64_get_bp_hardening_data(); + if (d->fn) + d->fn(); +} +#else +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) +{ + return NULL; +} + +static inline void arm64_apply_bp_hardening(void) { } +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ + extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); @@ -41,4 +88,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys); extern void mark_linear_text_alias_ro(void); +#endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 3257895a9b5e..779d7a2ec5ec 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -19,8 +19,6 @@ #ifndef __ASM_MMU_CONTEXT_H #define __ASM_MMU_CONTEXT_H -#define FALKOR_RESERVED_ASID 1 - #ifndef __ASSEMBLY__ #include @@ -57,6 +55,13 @@ static inline void cpu_set_reserved_ttbr0(void) isb(); } +static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) +{ + BUG_ON(pgd == swapper_pg_dir); + cpu_set_reserved_ttbr0(); + cpu_do_switch_mm(virt_to_phys(pgd),mm); +} + /* * TCR.T0SZ value to use when the ID map is active. Usually equals * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in @@ -156,29 +161,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) -/* - * This is called when "tsk" is about to enter lazy TLB mode. 
- * - * mm: describes the currently active mm context - * tsk: task which is entering lazy tlb - * cpu: cpu number which is entering lazy tlb - * - * tsk->mm will be NULL - */ -static inline void -enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void update_saved_ttbr0(struct task_struct *tsk, struct mm_struct *mm) { - if (system_uses_ttbr0_pan()) { - BUG_ON(mm->pgd == swapper_pg_dir); - task_thread_info(tsk)->ttbr0 = - virt_to_phys(mm->pgd) | ASID(mm) << 48; - } + u64 ttbr; + + if (!system_uses_ttbr0_pan()) + return; + + if (mm == &init_mm) + ttbr = __pa_symbol(empty_zero_page); + else + ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; + + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); } #else static inline void update_saved_ttbr0(struct task_struct *tsk, @@ -187,6 +184,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, } #endif +static inline void +enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ + /* + * We don't actually care about the ttbr0 mapping, so point it at the + * zero page. + */ + update_saved_ttbr0(tsk, &init_mm); +} + static inline void __switch_mm(struct mm_struct *next) { unsigned int cpu = smp_processor_id(); @@ -214,17 +221,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, * Update the saved TTBR0_EL1 of the scheduled-in task as the previous * value may have not been initialised yet (activate_mm caller) or the * ASID has changed since the last run (following the context switch - * of another thread of the same process). Avoid setting the reserved - * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit). + * of another thread of the same process). */ - if (next != &init_mm) - update_saved_ttbr0(tsk, next); + update_saved_ttbr0(tsk, next); } #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, current) void verify_cpu_asid_bits(void); +void post_ttbr_update_workaround(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 19bd97671bb8..4f766178fa6f 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -32,7 +32,7 @@ struct mod_arch_specific { struct mod_plt_sec init; /* for CONFIG_DYNAMIC_FTRACE */ - void *ftrace_trampoline; + struct plt_entry *ftrace_trampoline; }; #endif @@ -45,4 +45,48 @@ extern u64 module_alloc_base; #define module_alloc_base ((u64)_etext - MODULES_VSIZE) #endif +struct plt_entry { + /* + * A program that conforms to the AArch64 Procedure Call Standard + * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or + * IP1 (x17) may be inserted at any branch instruction that is + * exposed to a relocation that supports long branches. Since that + * is exactly what we are dealing with here, we are free to use x16 + * as a scratch register in the PLT veneers. + */ + __le32 mov0; /* movn x16, #0x.... 
*/ + __le32 mov1; /* movk x16, #0x...., lsl #16 */ + __le32 mov2; /* movk x16, #0x...., lsl #32 */ + __le32 br; /* br x16 */ +}; + +static inline struct plt_entry get_plt_entry(u64 val) +{ + /* + * MOVK/MOVN/MOVZ opcode: + * +--------+------------+--------+-----------+-------------+---------+ + * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | + * +--------+------------+--------+-----------+-------------+---------+ + * + * Rd := 0x10 (x16) + * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) + * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) + * sf := 1 (64-bit variant) + */ + return (struct plt_entry){ + cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), + cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), + cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), + cpu_to_le32(0xd61f0200) + }; +} + +static inline bool plt_entries_equal(const struct plt_entry *a, + const struct plt_entry *b) +{ + return a->mov0 == b->mov0 && + a->mov1 == b->mov1 && + a->mov2 == b->mov2; +} + #endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index d25f4f137c2a..5ca6a573a701 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -26,7 +26,7 @@ #define check_pgt_cache() do { } while (0) -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) #if CONFIG_PGTABLE_LEVELS > 2 diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index eb0c2bd90de9..8df4cb6ac6f7 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -272,6 +272,7 @@ #define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT) #define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT) +#define TCR_A1 (UL(1) << 22) #define TCR_ASID16 (UL(1) << 36) #define TCR_TBI0 (UL(1) << 37) #define TCR_HA (UL(1) << 39) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 0a5635fb0ef9..2db84df5eb42 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -34,8 +34,14 @@ #include -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) + +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? 
PMD_SECT_NG : 0) + +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) @@ -47,23 +53,24 @@ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT -#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) +#define PAGE_KERNEL __pgprot(PROT_NORMAL) +#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) +#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) +#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) -#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) -#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) -#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) +#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) +#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) +#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) -#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b46e54c2399b..aafea648a30f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) /* * Could the pte be present in 
the TLB? We must check mm_tlb_flush_pending @@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_accessible(mm, pte) \ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) +/* + * p??_access_permitted() is true for valid user mappings (subject to the + * write permission check) other than user execute-only which do not have the + * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + */ +#define pte_access_permitted(pte, write) \ + (pte_valid_user(pte) && (!(write) || pte_write(pte))) +#define pmd_access_permitted(pmd, write) \ + (pte_access_permitted(pmd_pte(pmd), (write))) +#define pud_access_permitted(pud, write) \ + (pte_access_permitted(pud_pte(pud), (write))) + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) { pte_val(pte) &= ~pgprot_val(prot); @@ -135,12 +149,20 @@ static inline pte_t pte_mkwrite(pte_t pte) static inline pte_t pte_mkclean(pte_t pte) { - return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); + + return pte; } static inline pte_t pte_mkdirty(pte_t pte) { - return set_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); + + if (pte_write(pte)) + pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); + + return pte; } static inline pte_t pte_mkold(pte_t pte) @@ -628,28 +650,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * ptep_set_wrprotect - mark read-only while preserving the hardware update of - * the Access Flag. + * ptep_set_wrprotect - mark read-only while trasferring potential hardware + * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. */ #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte, pte; - /* - * ptep_set_wrprotect() is only called on CoW mappings which are - * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE && - * PTE_RDONLY) or writable and software-dirty (PTE_WRITE && - * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and - * protection_map[]. There is no race with the hardware update of the - * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM) - * is set. - */ - VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep), - "%s: potential race with hardware DBM", __func__); pte = READ_ONCE(*ptep); do { old_pte = pte; + /* + * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY + * clear), set the PTE_DIRTY bit. 
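"Hardware-dirty" here means that the DBM mechanism has already cleared PTE_RDONLY on a writable pte. A simplified model of that test and of the transfer done just below (a sketch; the bit positions are the usual arm64 ones, not definitions taken from this patch):

    #include <stdint.h>
    #include <stdbool.h>

    #define EX_PTE_RDONLY  (1ULL << 7)     /* AP[2] */
    #define EX_PTE_WRITE   (1ULL << 51)    /* the DBM bit */
    #define EX_PTE_DIRTY   (1ULL << 55)    /* software dirty */

    static inline bool ex_pte_hw_dirty(uint64_t pte)
    {
            return (pte & (EX_PTE_WRITE | EX_PTE_RDONLY)) == EX_PTE_WRITE;
    }

    /* ptep_set_wrprotect() preserves hardware dirty as software dirty: */
    static inline uint64_t ex_transfer_dirty(uint64_t pte)
    {
            if (ex_pte_hw_dirty(pte))
                    pte |= EX_PTE_DIRTY;
            return pte;
    }

Without this transfer, write-protecting the pte would also make it appear clean and a dirty state recorded by hardware could be lost.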
+ */ + if (pte_hw_dirty(pte)) + pte = pte_mkdirty(pte); pte = pte_wrprotect(pte); pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), pte_val(old_pte), pte_val(pte)); @@ -667,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; +extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 14ad6e4e87d1..16cef2e8449e 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include -#define cpu_switch_mm(pgd,mm) \ -do { \ - BUG_ON(pgd == swapper_pg_dir); \ - cpu_do_switch_mm(virt_to_phys(pgd),mm); \ -} while (0) - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 29adab8138c3..fda6f5812281 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -19,6 +19,13 @@ #ifndef __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H +#define TASK_SIZE_64 (UL(1) << VA_BITS) + +#define KERNEL_DS UL(-1) +#define USER_DS (TASK_SIZE_64 - 1) + +#ifndef __ASSEMBLY__ + /* * Default implementation of macro that returns current * instruction pointer ("program counter"). @@ -37,6 +44,22 @@ #include #include +/* + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. + */ +#ifdef CONFIG_COMPAT +#define TASK_SIZE_32 UL(0x100000000) +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#else +#define TASK_SIZE TASK_SIZE_64 +#endif /* CONFIG_COMPAT */ + +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) + #define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 @@ -194,4 +217,5 @@ static inline void spin_lock_prefetch(const void *ptr) int cpu_enable_pan(void *__unused); int cpu_enable_cache_maint_trap(void *__unused); +#endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f707fed5886f..ede80d47d0ef 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -332,6 +332,8 @@ #define ID_AA64ISAR1_DPB_SHIFT 0 /* id_aa64pfr0 */ +#define ID_AA64PFR0_CSV3_SHIFT 60 +#define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_ASIMD_SHIFT 20 #define ID_AA64PFR0_FP_SHIFT 16 diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index af1c76981911..9e82dd79c7db 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -23,6 +23,7 @@ #include #include +#include /* * Raw TLBI operations. @@ -54,6 +55,11 @@ #define __tlbi(op, ...) 
__TLBI_N(op, ##__VA_ARGS__, 1, 0) +#define __tlbi_user(op, arg) do { \ + if (arm64_kernel_unmapped_at_el0()) \ + __tlbi(op, (arg) | USER_ASID_FLAG); \ +} while (0) + /* * TLB Management * ============== @@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) dsb(ishst); __tlbi(aside1is, asid); + __tlbi_user(aside1is, asid); dsb(ish); } @@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, dsb(ishst); __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); dsb(ish); } @@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { - if (last_level) + if (last_level) { __tlbi(vale1is, addr); - else + __tlbi_user(vale1is, addr); + } else { __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); + } } dsb(ish); } @@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); dsb(ish); } diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index b3202284568b..13a00ece862b 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -33,6 +33,17 @@ int pcibus_to_node(struct pci_bus *bus); #endif /* CONFIG_NUMA */ +#include + +/* Replace task scheduler's default frequency-invariant accounting */ +#define arch_scale_freq_capacity topology_get_freq_scale + +/* Replace task scheduler's default cpu-invariant accounting */ +#define arch_scale_cpu_capacity topology_get_cpu_scale + +/* Enable topology flag updates */ +#define arch_update_cpu_topology topology_update_cpu_topology + #include #endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index fc0f9eb66039..fad8c1b2ca3e 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -35,16 +35,20 @@ #include #include -#define KERNEL_DS (-1UL) #define get_ds() (KERNEL_DS) - -#define USER_DS TASK_SIZE_64 #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; + /* + * Prevent a mispredicted conditional call to set_fs from forwarding + * the wrong address limit to access_ok under speculation. + */ + dsb(nsh); + isb(); + /* On user-mode return, check fs is correct */ set_thread_flag(TIF_FSCHECK); @@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs) * Returns 1 if the range is valid, 0 otherwise. * * This is equivalent to the following test: - * (u65)addr + (u65)size <= current->addr_limit - * - * This needs 65-bit arithmetic. + * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 */ -#define __range_ok(addr, size) \ -({ \ - unsigned long __addr = (unsigned long)(addr); \ - unsigned long flag, roksum; \ - __chk_user_ptr(addr); \ - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ - : "=&r" (flag), "=&r" (roksum) \ - : "1" (__addr), "Ir" (size), \ - "r" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; \ -}) +static inline unsigned long __range_ok(unsigned long addr, unsigned long size) +{ + unsigned long limit = current_thread_info()->addr_limit; + + __chk_user_ptr(addr); + asm volatile( + // A + B <= C + 1 for all A,B,C, in four easy steps: + // 1: X = A + B; X' = X % 2^64 + " adds %0, %0, %2\n" + // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 + " csel %1, xzr, %1, hi\n" + // 3: Set X' = ~0 if X >= 2^64. 
For X == 2^64, this decrements X' + // to compensate for the carry flag being set in step 4. For + // X > 2^64, X' merely has to remain nonzero, which it does. + " csinv %0, %0, xzr, cc\n" + // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 + // comes from the carry in being clear. Otherwise, we are + // testing X' - C == 0, subject to the previous adjustments. + " sbcs xzr, %0, %1\n" + " cset %0, ls\n" + : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); + + return addr; +} /* * When dealing with data aborts, watchpoints, or instruction traps we may end @@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs) */ #define untagged_addr(addr) sign_extend64(addr, 55) -#define access_ok(type, addr, size) __range_ok(addr, size) +#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) #define user_addr_max get_fs #define _ASM_EXTABLE(from, to) \ @@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs) #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void __uaccess_ttbr0_disable(void) { - unsigned long ttbr; + unsigned long flags, ttbr; + local_irq_save(flags); + ttbr = read_sysreg(ttbr1_el1); + ttbr &= ~TTBR_ASID_MASK; /* reserved_ttbr0 placed at the end of swapper_pg_dir */ - ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; - write_sysreg(ttbr, ttbr0_el1); + write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); isb(); + /* Set reserved ASID */ + write_sysreg(ttbr, ttbr1_el1); + isb(); + local_irq_restore(flags); } static inline void __uaccess_ttbr0_enable(void) { - unsigned long flags; + unsigned long flags, ttbr0, ttbr1; /* * Disable interrupts to avoid preemption between reading the 'ttbr0' @@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void) * roll-over and an update of 'ttbr0'. */ local_irq_save(flags); - write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); + ttbr0 = READ_ONCE(current_thread_info()->ttbr0); + + /* Restore active ASID */ + ttbr1 = read_sysreg(ttbr1_el1); + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ + ttbr1 |= ttbr0 & TTBR_ASID_MASK; + write_sysreg(ttbr1, ttbr1_el1); + isb(); + + /* Restore user page table */ + write_sysreg(ttbr0, ttbr0_el1); isb(); local_irq_restore(flags); } @@ -192,6 +222,26 @@ static inline void uaccess_enable_not_uao(void) __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); } +/* + * Sanitise a uaccess pointer such that it becomes NULL if above the + * current addr_limit. 
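In plain C terms, both the __range_ok() check above and the __uaccess_mask_ptr() helper defined just below reduce to simple unsigned comparisons. A sketch of the logic only (the real versions are branchless asm, and __uaccess_mask_ptr() additionally issues csdb() so the result cannot be consumed speculatively):

    #include <stdint.h>
    #include <stddef.h>

    /* __range_ok(): addr + size <= limit + 1, evaluated without overflow */
    static inline int ex_range_ok(uint64_t addr, uint64_t size, uint64_t limit)
    {
            return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
    }

    /*
     * __uaccess_mask_ptr(): pass the pointer through only if it has no bits
     * set above addr_limit, otherwise force it to NULL, so a mispredicted
     * access_ok() cannot feed a kernel address to the uaccess routines.
     */
    static inline const void *ex_mask_ptr(const void *ptr, uint64_t limit)
    {
            return ((uint64_t)(uintptr_t)ptr & ~limit) ? NULL : ptr;
    }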
+ */ +#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) +static inline void __user *__uaccess_mask_ptr(const void __user *ptr) +{ + void __user *safe_ptr; + + asm volatile( + " bics xzr, %1, %2\n" + " csel %0, %1, xzr, eq\n" + : "=&r" (safe_ptr) + : "r" (ptr), "r" (current_thread_info()->addr_limit) + : "cc"); + + csdb(); + return safe_ptr; +} + /* * The "__xxx" versions of the user access functions do not verify the address * space - it must have been done previously with a separate "access_ok()" @@ -244,28 +294,33 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user(x, ptr) \ +#define __get_user_check(x, ptr, err) \ ({ \ - int __gu_err = 0; \ - __get_user_err((x), (ptr), __gu_err); \ - __gu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __get_user_err((x), __p, (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ }) #define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x), (ptr), (err)); \ + __get_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define get_user(x, ptr) \ +#define __get_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ - __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + int __gu_err = 0; \ + __get_user_check((x), (ptr), __gu_err); \ + __gu_err; \ }) +#define get_user __get_user + #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ @@ -308,43 +363,63 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user(x, ptr) \ +#define __put_user_check(x, ptr, err) \ ({ \ - int __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ - __pu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __put_user_err((x), __p, (err)); \ + } else { \ + (err) = -EFAULT; \ + } \ }) #define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x), (ptr), (err)); \ + __put_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define put_user(x, ptr) \ +#define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? 
\ - __put_user((x), __p) : \ - -EFAULT; \ + int __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ }) +#define put_user __put_user + extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); -#define raw_copy_from_user __arch_copy_from_user +#define raw_copy_from_user(to, from, n) \ +({ \ + __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ +}) + extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); -#define raw_copy_to_user __arch_copy_to_user -extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); +#define raw_copy_to_user(to, from, n) \ +({ \ + __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ +}) + +extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); +#define raw_copy_in_user(to, from, n) \ +({ \ + __arch_copy_in_user(__uaccess_mask_ptr(to), \ + __uaccess_mask_ptr(from), (n)); \ +}) + #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); +static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user(to, n); + n = __arch_clear_user(__uaccess_mask_ptr(to), n); return n; } +#define clear_user __clear_user extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -358,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) { kasan_check_write(dst, size); - return __copy_user_flushcache(dst, src, size); + return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); } #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 0029e13adb59..def8d5623fd1 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -55,6 +55,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +ifeq ($(CONFIG_KVM),y) +arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o +endif + obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) head-y := head.o @@ -63,6 +67,3 @@ extra-y += $(head-y) vmlinux.lds ifeq ($(CONFIG_DEBUG_EFI),y) AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\"" endif - -# will be included by each individual module but not by the core kernel itself -extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 67368c7329c0..66be504edb6c 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page); /* user mem (segment) */ EXPORT_SYMBOL(__arch_copy_from_user); EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(raw_copy_in_user); +EXPORT_SYMBOL(__arch_clear_user); +EXPORT_SYMBOL(__arch_copy_in_user); /* physical memory */ EXPORT_SYMBOL(memstart_addr); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 71bf088f1e4b..af247d10252f 100644 --- 
a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -148,11 +149,14 @@ int main(void) DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); - BLANK(); DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val)); + BLANK(); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); +#endif return 0; } diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S new file mode 100644 index 000000000000..e5de33513b5d --- /dev/null +++ b/arch/arm64/kernel/bpi.S @@ -0,0 +1,83 @@ +/* + * Contains CPU specific branch predictor invalidation sequences + * + * Copyright (C) 2018 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +.macro ventry target + .rept 31 + nop + .endr + b \target +.endm + +.macro vectors target + ventry \target + 0x000 + ventry \target + 0x080 + ventry \target + 0x100 + ventry \target + 0x180 + + ventry \target + 0x200 + ventry \target + 0x280 + ventry \target + 0x300 + ventry \target + 0x380 + + ventry \target + 0x400 + ventry \target + 0x480 + ventry \target + 0x500 + ventry \target + 0x580 + + ventry \target + 0x600 + ventry \target + 0x680 + ventry \target + 0x700 + ventry \target + 0x780 +.endm + + .align 11 +ENTRY(__bp_harden_hyp_vecs_start) + .rept 4 + vectors __kvm_hyp_vector + .endr +ENTRY(__bp_harden_hyp_vecs_end) + +ENTRY(__qcom_hyp_sanitize_link_stack_start) + stp x29, x30, [sp, #-16]! + .rept 16 + bl . 
+ 4 + .endr + ldp x29, x30, [sp], #16 +ENTRY(__qcom_hyp_sanitize_link_stack_end) + +.macro smccc_workaround_1 inst + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 + \inst #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +.endm + +ENTRY(__smccc_workaround_1_smc_start) + smccc_workaround_1 smc +ENTRY(__smccc_workaround_1_smc_end) + +ENTRY(__smccc_workaround_1_hvc_start) + smccc_workaround_1 hvc +ENTRY(__smccc_workaround_1_hvc_end) diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 65f42d257414..8021b46c9743 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -16,7 +16,7 @@ #include .text -.pushsection .idmap.text, "ax" +.pushsection .idmap.text, "awx" /* * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for @@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart) mrs x12, sctlr_el1 ldr x13, =SCTLR_ELx_FLAGS bic x12, x12, x13 + pre_disable_mmu_workaround msr sctlr_el1, x12 isb diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0e27f86ee709..52f15cd896e1 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) entry->midr_range_max); } +static bool __maybe_unused +is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) +{ + u32 model; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + model = read_cpuid_id(); + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | + MIDR_ARCHITECTURE_MASK; + + return model == entry->midr_model; +} + static bool has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, int scope) @@ -46,6 +60,174 @@ static int cpu_enable_trap_ctr_access(void *__unused) return 0; } +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +#include +#include + +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +#ifdef CONFIG_KVM +extern char __qcom_hyp_sanitize_link_stack_start[]; +extern char __qcom_hyp_sanitize_link_stack_end[]; +extern char __smccc_workaround_1_smc_start[]; +extern char __smccc_workaround_1_smc_end[]; +extern char __smccc_workaround_1_hvc_start[]; +extern char __smccc_workaround_1_hvc_end[]; + +static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K); + int i; + + for (i = 0; i < SZ_2K; i += 0x80) + memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); + + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); +} + +static void __install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + static int last_slot = -1; + static DEFINE_SPINLOCK(bp_lock); + int cpu, slot = -1; + + spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.fn, cpu) == fn) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + break; + } + } + + if (slot == -1) { + last_slot++; + BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) + / SZ_2K) <= last_slot); + slot = last_slot; + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.fn, fn); + spin_unlock(&bp_lock); +} +#else +#define __qcom_hyp_sanitize_link_stack_start NULL +#define __qcom_hyp_sanitize_link_stack_end NULL +#define 
__smccc_workaround_1_smc_start NULL +#define __smccc_workaround_1_smc_end NULL +#define __smccc_workaround_1_hvc_start NULL +#define __smccc_workaround_1_hvc_end NULL + +static void __install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + __this_cpu_write(bp_hardening_data.fn, fn); +} +#endif /* CONFIG_KVM */ + +static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, + bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + u64 pfr0; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return; + + pfr0 = read_cpuid(ID_AA64PFR0_EL1); + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) + return; + + __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); +} + +#include +#include +#include + +static void call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static int enable_smccc_arch_workaround_1(void *data) +{ + const struct arm64_cpu_capabilities *entry = data; + bp_hardening_cb_t cb; + void *smccc_start, *smccc_end; + struct arm_smccc_res res; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return 0; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + return 0; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if (res.a0) + return 0; + cb = call_hvc_arch_workaround_1; + smccc_start = __smccc_workaround_1_hvc_start; + smccc_end = __smccc_workaround_1_hvc_end; + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if (res.a0) + return 0; + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; + smccc_end = __smccc_workaround_1_smc_end; + break; + + default: + return 0; + } + + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + + return 0; +} + +static void qcom_link_stack_sanitization(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . 
+ 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + +static int qcom_enable_link_stack_sanitization(void *data) +{ + const struct arm64_cpu_capabilities *entry = data; + + install_bp_hardening_cb(entry, qcom_link_stack_sanitization, + __qcom_hyp_sanitize_link_stack_start, + __qcom_hyp_sanitize_link_stack_end); + + return 0; +} +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ + #define MIDR_RANGE(model, min, max) \ .def_scope = SCOPE_LOCAL_CPU, \ .matches = is_affected_midr_range, \ @@ -169,6 +351,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(0, 0)), }, + { + .desc = "Qualcomm Technologies Kryo erratum 1003", + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, + .def_scope = SCOPE_LOCAL_CPU, + .midr_model = MIDR_QCOM_KRYO, + .matches = is_kryo_midr, + }, #endif #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 { @@ -186,6 +375,56 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_858921, MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), }, +#endif +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), + .enable = qcom_enable_link_stack_sanitization, + }, + { + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + .enable = qcom_enable_link_stack_sanitization, + }, + { + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + .enable = enable_smccc_arch_workaround_1, + }, #endif { } @@ -200,15 +439,18 @@ void verify_local_cpu_errata_workarounds(void) { const struct arm64_cpu_capabilities *caps = arm64_errata; - for (; caps->matches; caps++) - if (!cpus_have_cap(caps->capability) && - caps->matches(caps, SCOPE_LOCAL_CPU)) { + for (; caps->matches; caps++) { + if (cpus_have_cap(caps->capability)) { + if (caps->enable) + caps->enable((void *)caps); + } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) { pr_crit("CPU%d: Requires work around for %s, not detected" " at boot time\n", smp_processor_id(), caps->desc ? 
: "an erratum"); cpu_die_early(); } + } } void update_cpu_errata_workarounds(void) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 21e2c95d24e7..345d4e521191 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -125,6 +125,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), @@ -173,9 +175,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { }; static const struct arm64_ftr_bits ftr_ctr[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ /* * Linux can handle differing I-cache policies. Userspace JITs will @@ -796,6 +800,86 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus ID_AA64PFR0_FP_SHIFT) < 0; } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ + +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + char const *str = "command line option"; + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * For reasons that aren't entirely clear, enabling KPTI on Cavium + * ThunderX leads to apparent I-cache corruption of kernel text, which + * ends as well as you might imagine. Don't even try. + */ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { + str = "ARM64_WORKAROUND_CAVIUM_27456"; + __kpti_forced = -1; + } + + /* Forced? */ + if (__kpti_forced) { + pr_info_once("kernel page table isolation forced %s by %s\n", + __kpti_forced > 0 ? 
"ON" : "OFF", str); + return __kpti_forced > 0; + } + + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return true; + + /* Don't force KPTI for CPUs that are not vulnerable */ + switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { + case MIDR_CAVIUM_THUNDERX2: + case MIDR_BRCM_VULCAN: + return false; + } + + /* Defer to CPU feature registers */ + return !cpuid_feature_extract_unsigned_field(pfr0, + ID_AA64PFR0_CSV3_SHIFT); +} + +static int kpti_install_ng_mappings(void *__unused) +{ + typedef void (kpti_remap_fn)(int, int, phys_addr_t); + extern kpti_remap_fn idmap_kpti_install_ng_mappings; + kpti_remap_fn *remap_fn; + + static bool kpti_applied = false; + int cpu = smp_processor_id(); + + if (kpti_applied) + return 0; + + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); + + cpu_install_idmap(); + remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); + cpu_uninstall_idmap(); + + if (!cpu) + kpti_applied = true; + + return 0; +} + +static int __init parse_kpti(char *str) +{ + bool enabled; + int ret = strtobool(str, &enabled); + + if (ret) + return ret; + + __kpti_forced = enabled ? 1 : -1; + return 0; +} +__setup("kpti=", parse_kpti); +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -882,6 +966,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .def_scope = SCOPE_SYSTEM, .matches = hyp_offset_low, }, +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + { + .desc = "Kernel page table isolation (KPTI)", + .capability = ARM64_UNMAP_KERNEL_AT_EL0, + .def_scope = SCOPE_SYSTEM, + .matches = unmap_kernel_at_el0, + .enable = kpti_install_ng_mappings, + }, +#endif { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, @@ -1000,6 +1093,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) cap_set_elf_hwcap(hwcaps); } +/* + * Check if the current CPU has a given feature capability. + * Should be called from non-preemptible context. + */ +static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, + unsigned int cap) +{ + const struct arm64_cpu_capabilities *caps; + + if (WARN_ON(preemptible())) + return false; + + for (caps = cap_array; caps->matches; caps++) + if (caps->capability == cap && + caps->matches(caps, SCOPE_LOCAL_CPU)) + return true; + return false; +} + void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, const char *info) { @@ -1035,7 +1147,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) * uses an IPI, giving us a PSTATE that disappears when * we return. */ - stop_machine(caps->enable, NULL, cpu_online_mask); + stop_machine(caps->enable, (void *)caps, cpu_online_mask); } } } @@ -1078,8 +1190,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) } static void -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) +verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) { + const struct arm64_cpu_capabilities *caps = caps_list; for (; caps->matches; caps++) { if (!cpus_have_cap(caps->capability)) continue; @@ -1087,13 +1200,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) * If the new CPU misses an advertised feature, we cannot proceed * further, park the cpu. 
*/ - if (!caps->matches(caps, SCOPE_LOCAL_CPU)) { + if (!__this_cpu_has_cap(caps_list, caps->capability)) { pr_crit("CPU%d: missing feature: %s\n", smp_processor_id(), caps->desc); cpu_die_early(); } if (caps->enable) - caps->enable(NULL); + caps->enable((void *)caps); } } @@ -1148,25 +1261,6 @@ static void __init mark_const_caps_ready(void) static_branch_enable(&arm64_const_caps_ready); } -/* - * Check if the current CPU has a given feature capability. - * Should be called from non-preemptible context. - */ -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, - unsigned int cap) -{ - const struct arm64_cpu_capabilities *caps; - - if (WARN_ON(preemptible())) - return false; - - for (caps = cap_array; caps->desc; caps++) - if (caps->capability == cap && caps->matches) - return caps->matches(caps, SCOPE_LOCAL_CPU); - - return false; -} - extern const struct arm64_cpu_capabilities arm64_errata[]; bool this_cpu_has_cap(unsigned int cap) diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 4e6ad355bd05..6b9736c3fb56 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -96,6 +96,7 @@ ENTRY(entry) mrs x0, sctlr_el2 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el2, x0 isb b 2f @@ -103,6 +104,7 @@ ENTRY(entry) mrs x0, sctlr_el1 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el1, x0 isb 2: diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e1c59d4008a8..93958d1341bb 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include #include @@ -69,8 +71,21 @@ #define BAD_FIQ 2 #define BAD_ERROR 3 - .macro kernel_ventry label + .macro kernel_ventry, el, label, regsize = 64 .align 7 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +alternative_if ARM64_UNMAP_KERNEL_AT_EL0 + .if \el == 0 + .if \regsize == 64 + mrs x30, tpidrro_el0 + msr tpidrro_el0, xzr + .else + mov x30, xzr + .endif + .endif +alternative_else_nop_endif +#endif + sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK /* @@ -82,7 +97,7 @@ tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp - b \label + b el\()\el\()_\label 0: /* @@ -114,7 +129,12 @@ sub sp, sp, x0 mrs x0, tpidrro_el0 #endif - b \label + b el\()\el\()_\label + .endm + + .macro tramp_alias, dst, sym + mov_q \dst, TRAMP_VALIAS + add \dst, \dst, #(\sym - .entry.tramp.text) .endm .macro kernel_entry, el, regsize = 64 @@ -147,10 +167,10 @@ .else add x21, sp, #S_FRAME_SIZE get_thread_info tsk - /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ + /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] - mov x20, #TASK_SIZE_64 + mov x20, #USER_DS str x20, [tsk, #TSK_TI_ADDR_LIMIT] /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ @@ -185,7 +205,7 @@ alternative_else_nop_endif .if \el != 0 mrs x21, ttbr0_el1 - tst x21, #0xffff << 48 // Check for the reserved ASID + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR @@ -246,7 +266,7 @@ alternative_else_nop_endif tbnz 
x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set .endif - __uaccess_ttbr0_enable x0 + __uaccess_ttbr0_enable x0, x1 .if \el == 0 /* @@ -255,7 +275,7 @@ alternative_else_nop_endif * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache * corruption). */ - post_ttbr0_update_workaround + bl post_ttbr_update_workaround .endif 1: .if \el != 0 @@ -267,18 +287,20 @@ alternative_else_nop_endif .if \el == 0 ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + tst x22, #PSR_MODE32_BIT // native task? + b.eq 3f + #ifdef CONFIG_ARM64_ERRATUM_845719 alternative_if ARM64_WORKAROUND_845719 - tbz x22, #4, 1f #ifdef CONFIG_PID_IN_CONTEXTIDR mrs x29, contextidr_el1 msr contextidr_el1, x29 #else msr contextidr_el1, xzr #endif -1: alternative_else_nop_endif #endif +3: .endif msr elr_el1, x21 // set up the return data @@ -300,7 +322,21 @@ alternative_else_nop_endif ldp x28, x29, [sp, #16 * 14] ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp - eret // return to kernel + + .if \el == 0 +alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + bne 4f + msr far_el1, x30 + tramp_alias x30, tramp_exit_native + br x30 +4: + tramp_alias x30, tramp_exit_compat + br x30 +#endif + .else + eret + .endif .endm .macro irq_stack_entry @@ -340,6 +376,7 @@ alternative_else_nop_endif * x7 is reserved for the system call number in 32-bit mode. */ wsc_nr .req w25 // number of system calls +xsc_nr .req x25 // number of system calls (zero-extended) wscno .req w26 // syscall number xscno .req x26 // syscall number (zero-extended) stbl .req x27 // syscall table pointer @@ -365,31 +402,31 @@ tsk .req x28 // current thread_info .align 11 ENTRY(vectors) - kernel_ventry el1_sync_invalid // Synchronous EL1t - kernel_ventry el1_irq_invalid // IRQ EL1t - kernel_ventry el1_fiq_invalid // FIQ EL1t - kernel_ventry el1_error_invalid // Error EL1t + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t - kernel_ventry el1_sync // Synchronous EL1h - kernel_ventry el1_irq // IRQ EL1h - kernel_ventry el1_fiq_invalid // FIQ EL1h - kernel_ventry el1_error_invalid // Error EL1h + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error_invalid // Error EL1h - kernel_ventry el0_sync // Synchronous 64-bit EL0 - kernel_ventry el0_irq // IRQ 64-bit EL0 - kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0 - kernel_ventry el0_error_invalid // Error 64-bit EL0 + kernel_ventry 0, sync // Synchronous 64-bit EL0 + kernel_ventry 0, irq // IRQ 64-bit EL0 + kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 + kernel_ventry 0, error_invalid // Error 64-bit EL0 #ifdef CONFIG_COMPAT - kernel_ventry el0_sync_compat // Synchronous 32-bit EL0 - kernel_ventry el0_irq_compat // IRQ 32-bit EL0 - kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid_compat // Error 32-bit EL0 + kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0 #else - kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0 - kernel_ventry el0_irq_invalid // IRQ 32-bit EL0 - kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid // Error 32-bit EL0 + 
kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 #endif END(vectors) @@ -687,13 +724,15 @@ el0_ia: * Instruction abort handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp - bl do_mem_abort + bl do_el0_ia_bp_hardening b ret_to_user el0_fpsimd_acc: /* @@ -720,8 +759,10 @@ el0_sp_pc: * Stack or PC alignment exception handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 @@ -780,6 +821,11 @@ el0_irq_naked: #endif ct_user_exit +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + tbz x22, #55, 1f + bl do_el0_irq_bp_hardening +1: +#endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS @@ -848,6 +894,7 @@ el0_svc_naked: // compat entry point b.ne __sys_trace cmp wscno, wsc_nr // check upper syscall limit b.hs ni_sys + mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number ldr x16, [stbl, xscno, lsl #3] // address in the syscall table blr x16 // call sys_* routine b ret_fast_syscall @@ -895,6 +942,117 @@ __ni_sys_trace: .popsection // .entry.text +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + */ + .pushsection ".entry.tramp.text", "ax" + + .macro tramp_map_kernel, tmp + mrs \tmp, ttbr1_el1 + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + bic \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 +alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 + /* ASID already in \tmp[63:48] */ + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) + /* 2MB boundary containing the vectors, so we nobble the walk cache */ + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) + isb + tlbi vae1, \tmp + dsb nsh +alternative_else_nop_endif +#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ + .endm + + .macro tramp_unmap_kernel, tmp + mrs \tmp, ttbr1_el1 + add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + orr \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp + /* + * We avoid running the post_ttbr_update_workaround here because + * it's only needed by Cavium ThunderX, which requires KPTI to be + * disabled. + */ + .endm + + .macro tramp_ventry, regsize = 64 + .align 7 +1: + .if \regsize == 64 + msr tpidrro_el0, x30 // Restored in kernel_ventry + .endif + /* + * Defend against branch aliasing attacks by pushing a dummy + * entry onto the return stack and using a RET instruction to + * enter the full-fat kernel vectors. + */ + bl 2f + b . 
+2: + tramp_map_kernel x30 +#ifdef CONFIG_RANDOMIZE_BASE + adr x30, tramp_vectors + PAGE_SIZE +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 + ldr x30, [x30] +#else + ldr x30, =vectors +#endif + prfm plil1strm, [x30, #(1b - tramp_vectors)] + msr vbar_el1, x30 + add x30, x30, #(1b - tramp_vectors) + isb + ret + .endm + + .macro tramp_exit, regsize = 64 + adr x30, tramp_vectors + msr vbar_el1, x30 + tramp_unmap_kernel x30 + .if \regsize == 64 + mrs x30, far_el1 + .endif + eret + .endm + + .align 11 +ENTRY(tramp_vectors) + .space 0x400 + + tramp_ventry + tramp_ventry + tramp_ventry + tramp_ventry + + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 +END(tramp_vectors) + +ENTRY(tramp_exit_native) + tramp_exit +END(tramp_exit_native) + +ENTRY(tramp_exit_compat) + tramp_exit 32 +END(tramp_exit_compat) + + .ltorg + .popsection // .entry.tramp.text +#ifdef CONFIG_RANDOMIZE_BASE + .pushsection ".rodata", "a" + .align PAGE_SHIFT + .globl __entry_tramp_data_start +__entry_tramp_data_start: + .quad vectors + .popsection // .rodata +#endif /* CONFIG_RANDOMIZE_BASE */ +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + /* * Special system call wrappers. */ diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S deleted file mode 100644 index 00c4025be4ff..000000000000 --- a/arch/arm64/kernel/ftrace-mod.S +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (C) 2017 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include - - .section ".text.ftrace_trampoline", "ax" - .align 3 -0: .quad 0 -__ftrace_trampoline: - ldr x16, 0b - br x16 -ENDPROC(__ftrace_trampoline) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index c13b1fca0e5b..50986e388d2b 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (offset < -SZ_128M || offset >= SZ_128M) { #ifdef CONFIG_ARM64_MODULE_PLTS - unsigned long *trampoline; + struct plt_entry trampoline; struct module *mod; /* @@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * is added in the future, but for now, the pr_err() below * deals with a theoretical issue only. 
*/ - trampoline = (unsigned long *)mod->arch.ftrace_trampoline; - if (trampoline[0] != addr) { - if (trampoline[0] != 0) { + trampoline = get_plt_entry(addr); + if (!plt_entries_equal(mod->arch.ftrace_trampoline, + &trampoline)) { + if (!plt_entries_equal(mod->arch.ftrace_trampoline, + &(struct plt_entry){})) { pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); return -EINVAL; } /* point the trampoline to our ftrace entry point */ module_disable_ro(mod); - trampoline[0] = addr; + *mod->arch.ftrace_trampoline = trampoline; module_enable_ro(mod, true); /* update trampoline before patching in the branch */ smp_wmb(); } - addr = (unsigned long)&trampoline[1]; + addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; #else /* CONFIG_ARM64_MODULE_PLTS */ return -EINVAL; #endif /* CONFIG_ARM64_MODULE_PLTS */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0b243ecaf7ac..261f3f88364c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -371,7 +371,7 @@ ENDPROC(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".idmap.text","ax" + .section ".idmap.text","awx" ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET @@ -732,6 +732,7 @@ __primary_switch: * to take into account by discarding the current kernel mapping and * creating a new one. */ + pre_disable_mmu_workaround msr sctlr_el1, x20 // disable the MMU isb bl __create_page_tables // recreate kernel mapping diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index 354be2a872ae..79b17384effa 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -25,8 +25,7 @@ */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)from, 8) || - !IS_ALIGNED((unsigned long)to, 8))) { + while (count && !IS_ALIGNED((unsigned long)from, 8)) { *(u8 *)to = __raw_readb(from); from++; to++; @@ -54,23 +53,22 @@ EXPORT_SYMBOL(__memcpy_fromio); */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)to, 8) || - !IS_ALIGNED((unsigned long)from, 8))) { - __raw_writeb(*(volatile u8 *)from, to); + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb(*(u8 *)from, to); from++; to++; count--; } while (count >= 8) { - __raw_writeq(*(volatile u64 *)from, to); + __raw_writeq(*(u64 *)from, to); from += 8; to += 8; count -= 8; } while (count) { - __raw_writeb(*(volatile u8 *)from, to); + __raw_writeb(*(u8 *)from, to); from++; to++; count--; diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index d05dbe658409..ea640f92fe5a 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -11,21 +11,6 @@ #include #include -struct plt_entry { - /* - * A program that conforms to the AArch64 Procedure Call Standard - * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or - * IP1 (x17) may be inserted at any branch instruction that is - * exposed to a relocation that supports long branches. Since that - * is exactly what we are dealing with here, we are free to use x16 - * as a scratch register in the PLT veneers. - */ - __le32 mov0; /* movn x16, #0x.... 
*/ - __le32 mov1; /* movk x16, #0x...., lsl #16 */ - __le32 mov2; /* movk x16, #0x...., lsl #32 */ - __le32 br; /* br x16 */ -}; - static bool in_init(const struct module *mod, void *loc) { return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size; @@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, int i = pltsec->plt_num_entries; u64 val = sym->st_value + rela->r_addend; - /* - * MOVK/MOVN/MOVZ opcode: - * +--------+------------+--------+-----------+-------------+---------+ - * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | - * +--------+------------+--------+-----------+-------------+---------+ - * - * Rd := 0x10 (x16) - * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) - * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) - * sf := 1 (64-bit variant) - */ - plt[i] = (struct plt_entry){ - cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), - cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), - cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), - cpu_to_le32(0xd61f0200) - }; + plt[i] = get_plt_entry(val); /* * Check if the entry we just created is a duplicate. Given that the * relocations are sorted, this will be the last entry we allocated. * (if one exists). */ - if (i > 0 && - plt[i].mov0 == plt[i - 1].mov0 && - plt[i].mov1 == plt[i - 1].mov1 && - plt[i].mov2 == plt[i - 1].mov2) + if (i > 0 && plt_entries_equal(plt + i, plt + i - 1)) return (u64)&plt[i - 1]; pltsec->plt_num_entries++; @@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned long core_plts = 0; unsigned long init_plts = 0; Elf64_Sym *syms = NULL; + Elf_Shdr *tramp = NULL; int i; /* @@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt = sechdrs + i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) mod->arch.init.plt = sechdrs + i; + else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && + !strcmp(secstrings + sechdrs[i].sh_name, + ".text.ftrace_trampoline")) + tramp = sechdrs + i; else if (sechdrs[i].sh_type == SHT_SYMTAB) syms = (Elf64_Sym *)sechdrs[i].sh_addr; } @@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.init.plt_num_entries = 0; mod->arch.init.plt_max_entries = init_plts; + if (tramp) { + tramp->sh_type = SHT_NOBITS; + tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + tramp->sh_addralign = __alignof__(struct plt_entry); + tramp->sh_size = sizeof(struct plt_entry); + } + return 0; } diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds index f7c9781a9d48..22e36a21c113 100644 --- a/arch/arm64/kernel/module.lds +++ b/arch/arm64/kernel/module.lds @@ -1,4 +1,5 @@ SECTIONS { .plt (NOLOAD) : { BYTE(0) } .init.plt (NOLOAD) : { BYTE(0) } + .text.ftrace_trampoline (NOLOAD) : { BYTE(0) } } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2dc0f8482210..07de304210e3 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -170,6 +170,70 @@ void machine_restart(char *cmd) while (1); } +/* + * dump a block of kernel memory from around the given address + */ +static void show_data(unsigned long addr, int nbytes, const char *name) +{ + int i, j; + int nlines; + u32 *p; + + /* + * don't attempt to dump non-kernel addresses or + * values that are probably just small negative numbers + */ + if (addr < PAGE_OFFSET || addr > -256UL) + return; + + printk("\n%s: %#lx:\n", name, addr); + + /* + * round address down to a 32 bit boundary 
+ * and always dump a multiple of 32 bytes + */ + p = (u32 *)(addr & ~(sizeof(u32) - 1)); + nbytes += (addr & (sizeof(u32) - 1)); + nlines = (nbytes + 31) / 32; + + + for (i = 0; i < nlines; i++) { + /* + * just display low 16 bits of address to keep + * each line of the dump < 80 characters + */ + printk("%04lx ", (unsigned long)p & 0xffff); + for (j = 0; j < 8; j++) { + u32 data; + if (probe_kernel_address(p, data)) { + printk(" ********"); + } else { + printk(" %08x", data); + } + ++p; + } + printk("\n"); + } +} + +static void show_extra_register_data(struct pt_regs *regs, int nbytes) +{ + mm_segment_t fs; + unsigned int i; + + fs = get_fs(); + set_fs(KERNEL_DS); + show_data(regs->pc - nbytes, nbytes * 2, "PC"); + show_data(regs->regs[30] - nbytes, nbytes * 2, "LR"); + show_data(regs->sp - nbytes, nbytes * 2, "SP"); + for (i = 0; i < 30; i++) { + char name[4]; + snprintf(name, sizeof(name), "X%u", i); + show_data(regs->regs[i] - nbytes, nbytes * 2, name); + } + set_fs(fs); +} + void __show_regs(struct pt_regs *regs) { int i, top_reg; @@ -205,6 +269,9 @@ void __show_regs(struct pt_regs *regs) pr_cont("\n"); } + if (!user_mode(regs)) + show_extra_register_data(regs, 128); + printk("\n"); } void show_regs(struct pt_regs * regs) @@ -258,6 +325,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); + /* + * In case p was allocated the same task_struct pointer as some + * other recently-exited task, make sure p is disassociated from + * any cpu that may have run that now-exited task recently. + * Otherwise we could erroneously skip reloading the FPSIMD + * registers for p. + */ + fpsimd_flush_task_state(p); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; @@ -305,16 +381,14 @@ void tls_preserve_current_state(void) static void tls_thread_switch(struct task_struct *next) { - unsigned long tpidr, tpidrro; - tls_preserve_current_state(); - tpidr = *task_user_tls(next); - tpidrro = is_compat_thread(task_thread_info(next)) ? 
- next->thread.tp_value : 0; + if (is_compat_thread(task_thread_info(next))) + write_sysreg(next->thread.tp_value, tpidrro_el0); + else if (!arm64_kernel_unmapped_at_el0()) + write_sysreg(0, tpidrro_el0); - write_sysreg(tpidr, tpidr_el0); - write_sysreg(tpidrro, tpidrro_el0); + write_sysreg(*task_user_tls(next), tpidr_el0); } /* Restore the UAO state depending on next's addr_limit */ diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index ce704a4aeadd..f407e422a720 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel) mrs x0, sctlr_el2 ldr x1, =SCTLR_ELx_FLAGS bic x0, x0, x1 + pre_disable_mmu_workaround msr sctlr_el2, x0 isb 1: diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 10dd16d7902d..bebec8ef9372 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter) ret ENDPROC(__cpu_suspend_enter) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly bl __cpu_setup diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 8d48b233e6ce..35dbdfaa6129 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -187,6 +188,8 @@ static int __init parse_dt_topology(void) if (!map) goto out; + init_sched_energy_costs(); + ret = parse_cluster(map, 0); if (ret != 0) goto out_map; @@ -280,8 +283,89 @@ void store_cpu_topology(unsigned int cpuid) topology_populated: update_siblings_masks(cpuid); + topology_detect_flags(); +} + +#ifdef CONFIG_SCHED_SMT +static int smt_flags(void) +{ + return cpu_smt_flags() | topology_smt_flags(); +} +#endif + +#ifdef CONFIG_SCHED_MC +static int core_flags(void) +{ + return cpu_core_flags() | topology_core_flags(); +} +#endif + +static int cpu_flags(void) +{ + return topology_cpu_flags(); +} + +static inline +const struct sched_group_energy * const cpu_core_energy(int cpu) +{ + struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0]; + unsigned long capacity; + int max_cap_idx; + + if (!sge) { + pr_warn("Invalid sched_group_energy for CPU%d\n", cpu); + return NULL; + } + + max_cap_idx = sge->nr_cap_states - 1; + capacity = sge->cap_states[max_cap_idx].cap; + + printk_deferred("cpu=%d set cpu scale %lu from energy model\n", + cpu, capacity); + + topology_set_cpu_scale(cpu, capacity); + + return sge; +} + +static inline +const struct sched_group_energy * const cpu_cluster_energy(int cpu) +{ + struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1]; + + if (!sge) { + pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu); + return NULL; + } + + return sge; +} + +static inline +const struct sched_group_energy * const cpu_system_energy(int cpu) +{ + struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL2]; + + if (!sge) { + pr_warn("Invalid sched_group_energy for System%d\n", cpu); + return NULL; + } + + return sge; } +static struct sched_domain_topology_level arm64_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, smt_flags, SD_INIT_NAME(SMT) }, +#endif +#ifdef CONFIG_SCHED_MC + { cpu_coregroup_mask, core_flags, cpu_core_energy, SD_INIT_NAME(MC) }, +#endif + { cpu_cpu_mask, cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) }, + { cpu_cpu_mask, NULL, cpu_system_energy, SD_INIT_NAME(SYS) }, + { NULL, } +}; + static void __init reset_cpu_topology(void) { unsigned int 
cpu; @@ -310,4 +394,6 @@ void __init init_cpu_topology(void) */ if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); + else + set_sched_topology(arm64_topology); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8383af15a759..4fc0e958770b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -56,7 +56,7 @@ static const char *handler[]= { "Error" }; -int show_unhandled_signals = 1; +int show_unhandled_signals = 0; /* * Dump out the contents of some kernel memory nicely... @@ -573,14 +573,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) } #endif - if (show_unhandled_signals_ratelimited()) { - pr_info("%s[%d]: syscall %d\n", current->comm, - task_pid_nr(current), regs->syscallno); - dump_instr("", regs); - if (user_mode(regs)) - __show_regs(regs); - } - return sys_ni_syscall(); } diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index 76320e920965..c39872a7b03c 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -309,7 +309,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: - cbz w1, 3f + cbz x1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7da3e5c366a0..ddfd3c0942f7 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -57,6 +57,17 @@ jiffies = jiffies_64; #define HIBERNATE_TEXT #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_TEXT \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ + *(.entry.tramp.text) \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_end) = .; +#else +#define TRAMP_TEXT +#endif + /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -113,6 +124,7 @@ SECTIONS HYPERVISOR_TEXT IDMAP_TEXT HIBERNATE_TEXT + TRAMP_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -214,6 +226,11 @@ SECTIONS . += RESERVED_TTBR0_SIZE; #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_pg_dir = .; + . += PAGE_SIZE; +#endif + __pecoff_data_size = ABSOLUTE(. - __initdata_begin); _end = .; @@ -234,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif - +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, + "Entry trampoline text too big") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. 
*/ diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 7debb74843a0..ab48c5ed3943 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -22,12 +22,13 @@ #include #include +#include + #include #include #include #include #include -#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -42,9 +43,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -53,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... + */ + vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 3f9615582377..870828c364c5 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -151,6 +151,7 @@ reset: mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc + pre_disable_mmu_workaround msr sctlr_el2, x5 isb diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index f5154ed3da6c..2add22699764 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -84,6 +84,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1) { u64 reg; + /* Clear pmscr in case of early return */ + *pmscr_el1 = 0; + /* SPE present on this CPU? */ if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), ID_AA64DFR0_PMSVER_SHIFT)) diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 12ee62d6d410..9c45c6af1f58 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -196,3 +196,15 @@ alternative_endif eret ENDPROC(__fpsimd_guest_restore) + +ENTRY(__qcom_hyp_sanitize_btac_predictors) + /** + * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700) + * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls + * b15-b0: contains SiP functionID + */ + movz x0, #0x1700 + movk x0, #0xc200, lsl #16 + smc #0 + ret +ENDPROC(__qcom_hyp_sanitize_btac_predictors) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 5170ce1021da..f49b53331d28 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -64,10 +65,11 @@ alternative_endif lsr x0, x1, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 + ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap - mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest - cbnz x1, el1_trap // called HVC + mrs x1, vttbr_el2 // If vttbr is valid, the guest + cbnz x1, el1_hvc_guest // called HVC /* Here, we're pretty sure the host called HVC. */ ldp x0, x1, [sp], #16 @@ -100,6 +102,20 @@ alternative_endif eret +el1_hvc_guest: + /* + * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. + * The workaround has already been applied on the host, + * so let's quickly get back to the guest. 
We don't bother + * restoring x1, as it can be clobbered anyway. + */ + ldr x1, [sp] // Guest's x0 + eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 + cbnz w1, el1_trap + mov x0, x1 + add sp, sp, #16 + eret + el1_trap: /* * x0: ESR_EC diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 945e79c641c4..e08ae6b6b63e 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -17,6 +17,9 @@ #include #include +#include + +#include #include #include @@ -51,7 +54,7 @@ static void __hyp_text __activate_traps_vhe(void) val &= ~CPACR_EL1_FPEN; write_sysreg(val, cpacr_el1); - write_sysreg(__kvm_hyp_vector, vbar_el1); + write_sysreg(kvm_get_hyp_vector(), vbar_el1); } static void __hyp_text __activate_traps_nvhe(void) @@ -364,6 +367,16 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* 0 falls through to be handled out of EL2 */ } + if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { + u32 midr = read_cpuid_id(); + + /* Apply BTAC predictors mitigation to all Falkor chips */ + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { + __qcom_hyp_sanitize_btac_predictors(); + } + } + fp_enabled = __fpsimd_enabled(); __sysreg_save_guest_state(guest_ctxt); diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index e88fb99c1561..21ba0b29621b 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -21,7 +21,7 @@ .text -/* Prototype: int __clear_user(void *addr, size_t sz) +/* Prototype: int __arch_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear @@ -29,8 +29,8 @@ * * Alignment fixed up by hardware. */ -ENTRY(__clear_user) - uaccess_enable_not_uao x2, x3 +ENTRY(__arch_clear_user) + uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -50,9 +50,9 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 - uaccess_disable_not_uao x2 + uaccess_disable_not_uao x2, x3 ret -ENDPROC(__clear_user) +ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 4b5d826895ff..20305d485046 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -64,10 +64,10 @@ end .req x5 ENTRY(__arch_copy_from_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index b24a830419ad..54b75deb1d16 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -64,14 +64,15 @@ .endm end .req x5 -ENTRY(raw_copy_in_user) - uaccess_enable_not_uao x3, x4 + +ENTRY(__arch_copy_in_user) + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret -ENDPROC(raw_copy_in_user) +ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 351f0766f7a6..fda6172d6b88 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -63,10 +63,10 @@ end .req x5 ENTRY(__arch_copy_to_user) - uaccess_enable_not_uao x3, 
x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 7f1dbe962cf5..91464e7f77cc 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -49,7 +49,7 @@ ENTRY(flush_icache_range) * - end - virtual end address of region */ ENTRY(__flush_cache_user_range) - uaccess_ttbr0_enable x2, x3 + uaccess_ttbr0_enable x2, x3, x4 dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x0, x3 @@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU isb mov x0, #0 1: - uaccess_ttbr0_disable x1 + uaccess_ttbr0_disable x1, x2 ret 9: mov x0, #-EFAULT diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index ab9f5f0fb2c7..9284788733d6 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) -#define NUM_USER_ASIDS ASID_FIRST_VERSION + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) +#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) +#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) +#else +#define NUM_USER_ASIDS (ASID_FIRST_VERSION) +#define asid2idx(asid) ((asid) & ~ASID_MASK) +#define idx2asid(idx) asid2idx(idx) +#endif /* Get the ASIDBits supported by the current CPU */ static u32 get_cpu_asid_bits(void) @@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void) } } -static void set_reserved_asid_bits(void) -{ - if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) && - cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003)) - __set_bit(FALKOR_RESERVED_ASID, asid_map); -} - static void flush_context(unsigned int cpu) { int i; @@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu) /* Update the list of reserved ASIDs and the ASID bitmap. */ bitmap_clear(asid_map, 0, NUM_USER_ASIDS); - set_reserved_asid_bits(); - /* * Ensure the generation bump is observed before we xchg the * active_asids. @@ -113,7 +113,7 @@ static void flush_context(unsigned int cpu) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid & ~ASID_MASK, asid_map); + __set_bit(asid2idx(asid), asid_map); per_cpu(reserved_asids, i) = asid; } @@ -165,16 +165,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) * We had a valid ASID in a previous life, so try to re-use * it if possible. */ - asid &= ~ASID_MASK; - if (!__test_and_set_bit(asid, asid_map)) + if (!__test_and_set_bit(asid2idx(asid), asid_map)) return newasid; } /* * Allocate a free ASID. If we can't find one, take a note of the - * currently active ASIDs and mark the TLBs as requiring flushes. - * We always count from ASID #1, as we use ASID #0 when setting a - * reserved TTBR0 for the init_mm. + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. 
*/ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); if (asid != NUM_USER_ASIDS) @@ -191,7 +191,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - return asid | generation; + return idx2asid(asid) | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) @@ -227,6 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: + + arm64_apply_bp_hardening(); + /* * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when * emulating PAN. @@ -235,6 +238,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) cpu_switch_mm(mm->pgd, mm); } +/* Errata workaround post TTBRx_EL1 update. */ +asmlinkage void post_ttbr_update_workaround(void) +{ + asm(ALTERNATIVE("nop; nop; nop", + "ic iallu; dsb nsh; isb", + ARM64_WORKAROUND_CAVIUM_27456, + CONFIG_CAVIUM_ERRATUM_27456)); +} + static int asids_init(void) { asid_bits = get_cpu_asid_bits(); @@ -250,8 +262,6 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - set_reserved_asid_bits(); - pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 614af886b7ef..115b32639a3c 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -166,7 +166,7 @@ static void *__dma_alloc(struct device *dev, size_t size, /* create a coherent mapping */ page = virt_to_page(ptr); coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, - prot, NULL); + prot, __builtin_return_address(0)); if (!coherent_ptr) goto no_map; diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index ca74a2aace42..7b60d62ac593 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -389,7 +389,7 @@ void ptdump_check_wx(void) .check_wx = true, }; - walk_pgd(&st, &init_mm, 0); + walk_pgd(&st, &init_mm, VA_START); note_page(&st, 0, 0, 0); if (st.wx_pages || st.uxn_pages) pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b64958b23a7f..5edb706aacb0 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -242,7 +242,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, if (fsc_type == ESR_ELx_FSC_PERM) return true; - if (addr < USER_DS && system_uses_ttbr0_pan()) + if (addr < TASK_SIZE && system_uses_ttbr0_pan()) return fsc_type == ESR_ELx_FSC_FAULT && (regs->pstate & PSR_PAN_BIT); @@ -426,7 +426,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, mm_flags |= FAULT_FLAG_WRITE; } - if (addr < USER_DS && is_permission_fault(esr, regs, addr)) { + if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { /* regs->orig_addr_limit may be 0 if we entered from EL0 */ if (regs->orig_addr_limit == KERNEL_DS) die("Accessing user space memory with fs=KERNEL_DS", regs, esr); @@ -751,6 +751,29 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die("", regs, &info, esr); } +asmlinkage void __exception do_el0_irq_bp_hardening(void) +{ + /* PC has already been checked in entry.S */ + arm64_apply_bp_hardening(); +} + +asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + /* + * We've taken an instruction abort from userspace 
and not yet + * re-enabled IRQs. If the address is a kernel address, apply + * BP hardening prior to enabling IRQs and pre-emption. + */ + if (addr > TASK_SIZE) + arm64_apply_bp_hardening(); + + local_irq_enable(); + do_mem_abort(addr, esr, regs); +} + + /* * Handle stack alignment exceptions. */ @@ -761,6 +784,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, struct siginfo info; struct task_struct *tsk = current; + if (user_mode(regs)) { + if (instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + local_irq_enable(); + } + if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", tsk->comm, task_pid_nr(tsk), @@ -820,6 +849,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); + if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + if (!inf->fn(addr, esr, regs)) { rv = 1; } else { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5960bef0170d..885402869696 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -285,9 +285,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) #endif /* CONFIG_NUMA */ #ifdef CONFIG_HAVE_ARCH_PFN_VALID +#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1) + int pfn_valid(unsigned long pfn) { - return memblock_is_map_memory(pfn << PAGE_SHIFT); + return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT); } EXPORT_SYMBOL(pfn_valid); #endif @@ -476,6 +478,8 @@ void __init arm64_memblock_init(void) reserve_elfcorehdr(); + high_memory = __va(memblock_end_of_DRAM() - 1) + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); memblock_allow_resize(); @@ -502,7 +506,6 @@ void __init bootmem_init(void) sparse_init(); zone_sizes_init(min, max); - high_memory = __va((max << PAGE_SHIFT) - 1) + 1; memblock_dump_all(); } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f1eb15e0e864..f6b877d2726d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -107,7 +107,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new) * The following mapping attributes may be updated in live * kernel mappings without the need for break-before-make. */ - static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE; + static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; /* creating or taking down mappings is always safe */ if (old == 0 || new == 0) @@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new) if ((old | new) & PTE_CONT) return false; + /* Transitioning from Non-Global to Global is unsafe */ + if (old & ~new & PTE_NG) + return false; + return ((old ^ new) & ~mask) == 0; } @@ -525,6 +529,37 @@ static int __init parse_rodata(char *arg) } early_param("rodata", parse_rodata); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __init map_entry_trampoline(void) +{ + extern char __entry_tramp_text_start[]; + + pgprot_t prot = rodata_enabled ? 
PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); + + /* The trampoline is always mapped and can therefore be global */ + pgprot_val(prot) &= ~PTE_NG; + + /* Map only the text into the trampoline page table */ + memset(tramp_pg_dir, 0, PGD_SIZE); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, + prot, pgd_pgtable_alloc, 0); + + /* Map both the text and data into the kernel page table */ + __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + extern char __entry_tramp_data_start[]; + + __set_fixmap(FIX_ENTRY_TRAMP_DATA, + __pa_symbol(__entry_tramp_data_start), + PAGE_KERNEL_RO); + } + + return 0; +} +core_initcall(map_entry_trampoline); +#endif + /* * Create fine-grained mappings for the kernel. */ @@ -902,3 +937,13 @@ int pmd_clear_huge(pmd_t *pmd) pmd_clear(pmd); return 1; } + +int pud_free_pmd_page(pud_t *pud) +{ + return pud_none(*pud); +} + +int pmd_free_pte_page(pmd_t *pmd) +{ + return pmd_none(*pmd); +} diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 877d42fb0df6..139320a7f7a2 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend) * * x0: Address of context pointer */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] @@ -138,16 +138,30 @@ ENDPROC(cpu_do_resume) * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) - pre_ttbr0_update_workaround x0, x2, x3 + mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id - bfi x0, x1, #48, #16 // set the ASID - msr ttbr0_el1, x0 // set TTBR0 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + bfi x0, x1, #48, #16 // set the ASID field in TTBR0 +#endif + bfi x2, x1, #48, #16 // set the ASID + msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) isb - post_ttbr0_update_workaround - ret + msr ttbr0_el1, x0 // now update TTBR0 + isb + b post_ttbr_update_workaround // Back to C code... ENDPROC(cpu_do_switch_mm) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" + +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 + adrp \tmp1, empty_zero_page + msr ttbr1_el1, \tmp1 + isb + tlbi vmalle1 + dsb nsh + isb +.endm + /* * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) * @@ -158,13 +172,7 @@ ENTRY(idmap_cpu_replace_ttbr1) mrs x2, daif msr daifset, #0xf - adrp x1, empty_zero_page - msr ttbr1_el1, x1 - isb - - tlbi vmalle1 - dsb nsh - isb + __idmap_cpu_set_reserved_ttbr1 x1, x3 msr ttbr1_el1, x0 isb @@ -175,13 +183,200 @@ ENTRY(idmap_cpu_replace_ttbr1) ENDPROC(idmap_cpu_replace_ttbr1) .popsection +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + .pushsection ".idmap.text", "awx" + + .macro __idmap_kpti_get_pgtable_ent, type + dc cvac, cur_\()\type\()p // Ensure any existing dirty + dmb sy // lines are written back before + ldr \type, [cur_\()\type\()p] // loading the entry + tbz \type, #0, skip_\()\type // Skip invalid and + tbnz \type, #11, skip_\()\type // non-global entries + .endm + + .macro __idmap_kpti_put_pgtable_ent_ng, type + orr \type, \type, #PTE_NG // Same bit for blocks and pages + str \type, [cur_\()\type\()p] // Update the entry and ensure it + dc civac, cur_\()\type\()p // is visible to all CPUs. + .endm + +/* + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) + * + * Called exactly once from stop_machine context by each CPU found during boot. 
+ */ +__idmap_kpti_flag: + .long 1 +ENTRY(idmap_kpti_install_ng_mappings) + cpu .req w0 + num_cpus .req w1 + swapper_pa .req x2 + swapper_ttb .req x3 + flag_ptr .req x4 + cur_pgdp .req x5 + end_pgdp .req x6 + pgd .req x7 + cur_pudp .req x8 + end_pudp .req x9 + pud .req x10 + cur_pmdp .req x11 + end_pmdp .req x12 + pmd .req x13 + cur_ptep .req x14 + end_ptep .req x15 + pte .req x16 + + mrs swapper_ttb, ttbr1_el1 + adr flag_ptr, __idmap_kpti_flag + + cbnz cpu, __idmap_kpti_secondary + + /* We're the boot CPU. Wait for the others to catch up */ + sevl +1: wfe + ldaxr w18, [flag_ptr] + eor w18, w18, num_cpus + cbnz w18, 1b + + /* We need to walk swapper, so turn off the MMU. */ + mrs x18, sctlr_el1 + bic x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Everybody is enjoying the idmap, so we can rewrite swapper. */ + /* PGD */ + mov cur_pgdp, swapper_pa + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) +do_pgd: __idmap_kpti_get_pgtable_ent pgd + tbnz pgd, #1, walk_puds +next_pgd: + __idmap_kpti_put_pgtable_ent_ng pgd +skip_pgd: + add cur_pgdp, cur_pgdp, #8 + cmp cur_pgdp, end_pgdp + b.ne do_pgd + + /* Publish the updated tables and nuke all the TLBs */ + dsb sy + tlbi vmalle1is + dsb ish + isb + + /* We're done: fire up the MMU again */ + mrs x18, sctlr_el1 + orr x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Set the flag to zero to indicate that we're all done */ + str wzr, [flag_ptr] + ret + + /* PUD */ +walk_puds: + .if CONFIG_PGTABLE_LEVELS > 3 + pte_to_phys cur_pudp, pgd + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) +do_pud: __idmap_kpti_get_pgtable_ent pud + tbnz pud, #1, walk_pmds +next_pud: + __idmap_kpti_put_pgtable_ent_ng pud +skip_pud: + add cur_pudp, cur_pudp, 8 + cmp cur_pudp, end_pudp + b.ne do_pud + b next_pgd + .else /* CONFIG_PGTABLE_LEVELS <= 3 */ + mov pud, pgd + b walk_pmds +next_pud: + b next_pgd + .endif + + /* PMD */ +walk_pmds: + .if CONFIG_PGTABLE_LEVELS > 2 + pte_to_phys cur_pmdp, pud + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) +do_pmd: __idmap_kpti_get_pgtable_ent pmd + tbnz pmd, #1, walk_ptes +next_pmd: + __idmap_kpti_put_pgtable_ent_ng pmd +skip_pmd: + add cur_pmdp, cur_pmdp, #8 + cmp cur_pmdp, end_pmdp + b.ne do_pmd + b next_pud + .else /* CONFIG_PGTABLE_LEVELS <= 2 */ + mov pmd, pud + b walk_ptes +next_pmd: + b next_pud + .endif + + /* PTE */ +walk_ptes: + pte_to_phys cur_ptep, pmd + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) +do_pte: __idmap_kpti_get_pgtable_ent pte + __idmap_kpti_put_pgtable_ent_ng pte +skip_pte: + add cur_ptep, cur_ptep, #8 + cmp cur_ptep, end_ptep + b.ne do_pte + b next_pmd + + /* Secondary CPUs end up here */ +__idmap_kpti_secondary: + /* Uninstall swapper before surgery begins */ + __idmap_cpu_set_reserved_ttbr1 x18, x17 + + /* Increment the flag to let the boot CPU we're ready */ +1: ldxr w18, [flag_ptr] + add w18, w18, #1 + stxr w17, w18, [flag_ptr] + cbnz w17, 1b + + /* Wait for the boot CPU to finish messing around with swapper */ + sevl +1: wfe + ldxr w18, [flag_ptr] + cbnz w18, 1b + + /* All done, act like nothing happened */ + msr ttbr1_el1, swapper_ttb + isb + ret + + .unreq cpu + .unreq num_cpus + .unreq swapper_pa + .unreq swapper_ttb + .unreq flag_ptr + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq cur_ptep + .unreq end_ptep + .unreq pte +ENDPROC(idmap_kpti_install_ng_mappings) + .popsection +#endif + /* * __cpu_setup * * Initialise the processor for turning the MMU on. 
Return in x0 the * value of the SCTLR_EL1 register. */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh @@ -225,7 +420,7 @@ ENTRY(__cpu_setup) * both user and kernel. */ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 tcr_set_idmap_t0sz x10, x9 /* diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ba38d403abb2..be155f70f108 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) /* Stack must be multiples of 16B */ #define STACK_ALIGN(sz) (((sz) + 15) & ~15) -#define PROLOGUE_OFFSET 8 +/* Tail call offset to jump into */ +#define PROLOGUE_OFFSET 7 static int build_prologue(struct jit_ctx *ctx) { @@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx) /* Initialize tail_call_cnt */ emit(A64_MOVZ(1, tcc, 0, 0), ctx); - /* 4 byte extra for skb_copy_bits buffer */ - ctx->stack_size = prog->aux->stack_depth + 4; - ctx->stack_size = STACK_ALIGN(ctx->stack_size); - - /* Set up function call stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - cur_offset = ctx->idx - idx0; if (cur_offset != PROLOGUE_OFFSET) { pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", cur_offset, PROLOGUE_OFFSET); return -1; } + + /* 4 byte extra for skb_copy_bits buffer */ + ctx->stack_size = prog->aux->stack_depth + 4; + ctx->stack_size = STACK_ALIGN(ctx->stack_size); + + /* Set up function call stack */ + emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); return 0; } @@ -237,8 +238,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) off = offsetof(struct bpf_array, map.max_entries); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR32(tmp, r2, tmp), ctx); + emit(A64_MOV(0, r3, r3), ctx); emit(A64_CMP(0, r3, tmp), ctx); - emit(A64_B_(A64_COND_GE, jmp_offset), ctx); + emit(A64_B_(A64_COND_CS, jmp_offset), ctx); /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; @@ -246,7 +248,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); emit(A64_CMP(1, tcc, tmp), ctx); - emit(A64_B_(A64_COND_GT, jmp_offset), ctx); + emit(A64_B_(A64_COND_HI, jmp_offset), ctx); emit(A64_ADD_I(1, tcc, tcc, 1), ctx); /* prog = array->ptrs[index]; @@ -260,11 +262,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_LDR64(prg, tmp, prg), ctx); emit(A64_CBZ(1, prg, jmp_offset), ctx); - /* goto *(prog->bpf_func + prologue_size); */ + /* goto *(prog->bpf_func + prologue_offset); */ off = offsetof(struct bpf_prog, bpf_func); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR64(tmp, prg, tmp), ctx); emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); emit(A64_BR(tmp), ctx); /* out: */ diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 401ceb71540c..c5f05c4a4d00 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -101,12 +101,12 @@ ENTRY(privcmd_call) * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation * is enabled (it implies that hardware UAO and PAN disabled). */ - uaccess_ttbr0_enable x6, x7 + uaccess_ttbr0_enable x6, x7, x8 hvc XEN_IMM /* * Disable userspace access from kernel once the hyp call completed. 
*/ - uaccess_ttbr0_disable x6 + uaccess_ttbr0_disable x6, x7 ret ENDPROC(privcmd_call); diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index af5369422032..d9c2866ba618 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -321,11 +321,14 @@ config BF53x config GPIO_ADI def_bool y + depends on !PINCTRL depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561) -config PINCTRL +config PINCTRL_BLACKFIN_ADI2 def_bool y - depends on BF54x || BF60x + depends on (BF54x || BF60x) + select PINCTRL + select PINCTRL_ADI2 config MEM_MT48LC64M4A2FB_7E bool diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index 4ddd1b73ee3e..c8d957274cc2 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -18,6 +18,7 @@ config DEBUG_VERBOSE config DEBUG_MMRS tristate "Generate Blackfin MMR tree" + depends on !PINCTRL select DEBUG_FS help Create a tree of Blackfin MMRs via the debugfs tree. If diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h index ecff2d1ca5a3..6eaa7ad5fc2c 100644 --- a/arch/h8300/include/asm/byteorder.h +++ b/arch/h8300/include/asm/byteorder.h @@ -2,7 +2,6 @@ #ifndef __H8300_BYTEORDER_H__ #define __H8300_BYTEORDER_H__ -#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__ #include #endif diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index aa7be020a904..c954523d00fe 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk) } if (ti->softirq_time) { - delta = cycle_to_nsec(ti->softirq_time)); + delta = cycle_to_nsec(ti->softirq_time); account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); } diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 0d9446c37ae8..498398d915c1 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -196,8 +196,8 @@ config TIMER_DIVIDE default "128" config CPU_BIG_ENDIAN - bool "Generate big endian code" - default n + bool + default !CPU_LITTLE_ENDIAN config CPU_LITTLE_ENDIAN bool "Generate little endian code" diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index cb79fba79d43..b88a8dd14933 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -122,7 +122,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index 3aa571a513b5..cf6edda38971 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -45,6 +45,8 @@ SECTIONS { .text : { HEAD_TEXT TEXT_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 89172b8974b9..625a5785804f 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -16,6 +16,8 @@ SECTIONS .text : { HEAD_TEXT TEXT_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 293990efc917..9868270b0984 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -16,6 +16,8 @@ SECTIONS .text : { HEAD_TEXT TEXT_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 8d1408583cf4..b523a604cb87 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -170,7 +170,7 @@ void 
__init cf_bootmem_alloc(void) max_pfn = max_low_pfn = PFN_DOWN(_ramend); high_memory = (void *)_ramend; - m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; + m68k_virt_to_node_shift = fls(_ramend - 1) - 6; module_fixup(NULL, __start_fixup, __stop_fixup); /* setup bootmem data */ diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore index 2d6c0c160884..6c662ddb909a 100644 --- a/arch/metag/boot/.gitignore +++ b/arch/metag/boot/.gitignore @@ -1,4 +1,3 @@ vmlinux* uImage* ramdisk.* -*.dtb* diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 830ee7d42fa0..d269dd4b8279 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -36,16 +36,21 @@ endif CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_DIV) += -mno-xl-soft-div CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_BARREL) += -mxl-barrel-shift CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare -CPUFLAGS-$(CONFIG_BIG_ENDIAN) += -mbig-endian -CPUFLAGS-$(CONFIG_LITTLE_ENDIAN) += -mlittle-endian + +ifdef CONFIG_CPU_BIG_ENDIAN +KBUILD_CFLAGS += -mbig-endian +KBUILD_AFLAGS += -mbig-endian +LD += -EB +else +KBUILD_CFLAGS += -mlittle-endian +KBUILD_AFLAGS += -mlittle-endian +LD += -EL +endif CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) # r31 holds current when in kernel mode -KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) - -LDFLAGS := -LDFLAGS_vmlinux := +KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-y) $(CPUFLAGS-1) $(CPUFLAGS-2) head-y := arch/microblaze/kernel/head.o libs-y += arch/microblaze/lib/ diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore index bf0459186027..679502d64a97 100644 --- a/arch/microblaze/boot/.gitignore +++ b/arch/microblaze/boot/.gitignore @@ -1,3 +1,2 @@ -*.dtb linux.bin* simpleImage.* diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5d3284d20678..c82457b0e733 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -65,7 +65,7 @@ config MIPS select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS - select HAVE_VIRT_CPU_ACCOUNTING_GEN + select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA if MODULES && 64BIT select MODULES_USE_ELF_REL if MODULES @@ -119,12 +119,12 @@ config MIPS_GENERIC select SYS_SUPPORTS_MULTITHREADING select SYS_SUPPORTS_RELOCATABLE select SYS_SUPPORTS_SMARTMIPS - select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN - select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN - select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN + select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN + select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN + select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN select USE_OF help Select this to build a kernel which aims to support multiple boards, diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 4674f1efbe7a..e1675c25d5d4 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void) uart_port.type = PORT_AR7; uart_port.uartclk = clk_get_rate(bus_clk) / 2; uart_port.iotype = UPIO_MEM32; - uart_port.flags = UPF_FIXED_TYPE; + uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF; uart_port.regshift = 2; uart_port.line = 0; 
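Editor's illustration (not part of the patch): the ar7/platform.c hunk above adds UPF_BOOT_AUTOCONF alongside UPF_FIXED_TYPE, so the 8250 core both trusts the declared port type and still performs its boot-time setup of the port. The sketch below shows, under stated assumptions, how a fixed-type memory-mapped 8250 port is typically registered; the port type, clock rate, physical base and IRQ number are placeholders chosen for illustration, not values taken from this diff.

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/serial_core.h>
    #include <linux/serial_8250.h>

    static int __init example_register_fixed_uart(void)
    {
            struct uart_port port = {};

            port.type     = PORT_16550A;        /* placeholder; UPF_FIXED_TYPE means this is trusted, not probed */
            port.iotype   = UPIO_MEM32;         /* 32-bit MMIO register accesses */
            port.regshift = 2;                  /* registers spaced 4 bytes apart */
            port.uartclk  = 3686400;            /* placeholder input clock */
            port.mapbase  = 0x10000000;         /* placeholder physical base */
            port.membase  = ioremap(port.mapbase, 256);
            port.irq      = 42;                 /* placeholder IRQ */
            port.flags    = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
            port.line     = 0;

            if (!port.membase)
                    return -ENOMEM;

            /* UPF_BOOT_AUTOCONF asks the serial core to finish configuring the port at boot */
            return early_serial_setup(&port);
    }

Without UPF_BOOT_AUTOCONF the core may skip parts of its boot-time setup for a fixed-type port, which appears to be what the ar7 change is guarding against.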
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c index 9ab48ff80c1c..6d11ae581ea7 100644 --- a/arch/mips/ath25/board.c +++ b/arch/mips/ath25/board.c @@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size) } board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); + if (!board_data) + goto error; ath25_board.config = (struct ath25_boarddata *)board_data; memcpy_fromio(board_data, bcfg, 0x100); if (broken_boarddata) { diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d4f2407a42c6..8307a8a02667 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = { /* Verified on: WRT54GS V1.0 */ static const struct gpio_led bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = { - BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), }; diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore index d3962cd5ce0c..a73d6e2c4f64 100644 --- a/arch/mips/boot/.gitignore +++ b/arch/mips/boot/.gitignore @@ -5,4 +5,3 @@ zImage zImage.tmp calc_vmlinuz_load_addr uImage -*.dtb diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 1bd5c4f00d19..c22da16d67b8 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS quiet_cmd_cpp_its_S = ITS $@ cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ + -D__ASSEMBLY__ \ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ -DVMLINUX_BINARY="\"$(3)\"" \ -DVMLINUX_COMPRESSION="\"$(2)\"" \ diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile index 9e09cc4556b3..398994312361 100644 --- a/arch/mips/boot/dts/brcm/Makefile +++ b/arch/mips/boot/dts/brcm/Makefile @@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \ bcm63268-comtrend-vr-3032u.dtb \ bcm93384wvg.dtb \ bcm93384wvg_viper.dtb \ - bcm96358nb4ser.dtb \ bcm96368mvwg.dtb \ bcm9ejtagprb.dtb \ bcm97125cbmb.dtb \ diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 5b3a3f6a9ad3..d99f5242169e 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, } host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); + if (!host_data) + return -ENOMEM; raw_spin_lock_init(&host_data->lock); addr = of_get_address(ciu_node, 0, NULL, NULL); diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 7c8aab23bce8..b1f66699677d 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += qrwlock.h generic-y += qspinlock.h generic-y += sections.h generic-y += segment.h -generic-y += serial.h generic-y += trace_clock.h generic-y += unaligned.h generic-y += user.h diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 83054f79f72a..feb069cbf44e 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -19,6 +19,9 @@ #include #endif +/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ +#undef fp + /* * Helper macros for generating raw instruction encodings. 
*/ @@ -105,6 +108,7 @@ .macro fpu_save_16odd thread .set push .set mips64r2 + .set fp=64 SET_HARDFLOAT sdc1 $f1, THREAD_FPR1(\thread) sdc1 $f3, THREAD_FPR3(\thread) @@ -126,8 +130,8 @@ .endm .macro fpu_save_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) sll \tmp, \status, 5 bgez \tmp, 10f fpu_save_16odd \thread @@ -163,6 +167,7 @@ .macro fpu_restore_16odd thread .set push .set mips64r2 + .set fp=64 SET_HARDFLOAT ldc1 $f1, THREAD_FPR1(\thread) ldc1 $f3, THREAD_FPR3(\thread) @@ -184,8 +189,8 @@ .endm .macro fpu_restore_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) sll \tmp, \status, 5 bgez \tmp, 10f # 16 register mode? @@ -234,9 +239,6 @@ .endm #ifdef TOOLCHAIN_SUPPORTS_MSA -/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ -#undef fp - .macro _cfcmsa rd, cs .set push .set mips32r2 diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 7e25c5cc353a..89e9fb7976fe 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #else #include #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#ifndef CONFIG_SMP #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #endif +#endif #undef __scbeqz diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 8e2b5b556488..08ec0762ca50 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -86,7 +86,6 @@ struct compat_flock { compat_off_t l_len; s32 l_sysid; compat_pid_t l_pid; - short __unused; s32 pad[4]; }; @@ -200,7 +199,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h new file mode 100644 index 000000000000..1d830c6666c2 --- /dev/null +++ b/arch/mips/include/asm/serial.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2017 MIPS Tech, LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __ASM__SERIAL_H +#define __ASM__SERIAL_H + +#ifdef CONFIG_MIPS_GENERIC +/* + * Generic kernels cannot know a correct value for all platforms at + * compile time. 
Set it to 0 to prevent 8250_early using it + */ +#define BASE_BAUD 0 +#else +#include +#endif + +#endif /* __ASM__SERIAL_H */ diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index dd5567b1e305..8f5bd04f320a 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, *this_cpu_ptr(&cm_core_lock_flags)); } else { WARN_ON(cluster != 0); - WARN_ON(vp != 0); WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index 19c88d770054..fcf9af492d60 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -10,6 +10,8 @@ #include #include +#include +#include #include #include @@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); phys_addr_t __weak mips_cpc_default_phys_base(void) { + struct device_node *cpc_node; + struct resource res; + int err; + + cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc"); + if (cpc_node) { + err = of_address_to_resource(cpc_node, 0, &res); + if (!err) + return res.start; + } + return 0; } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c5ff6bfe2825..2f2d176396aa 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) struct task_struct *t; int max_users; + /* If nothing to change, return right away, successfully. */ + if (value == mips_get_process_fp_mode(task)) + return 0; + + /* Only accept a mode change if 64-bit FP enabled for o32. */ + if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) + return -EOPNOTSUPP; + + /* And only for o32 tasks. */ + if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS)) + return -EOPNOTSUPP; + /* Check the value is valid */ if (value & ~known_bits) return -EOPNOTSUPP; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 1395654cfc8d..c552c20237d4 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -410,63 +410,160 @@ static int gpr64_set(struct task_struct *target, #endif /* CONFIG_64BIT */ +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer, + * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots + * correspond 1:1 to buffer slots. Only general registers are copied. + */ +static int fpr_get_fpa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf) +{ + return user_regset_copyout(pos, count, kbuf, ubuf, + &target->thread.fpu, + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); +} + +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer, + * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's + * general register slots are copied to buffer slots. Only general + * registers are copied. + */ +static int fpr_get_msa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf) +{ + unsigned int i; + u64 fpr_val; + int err; + + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS; i++) { + fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); + err = user_regset_copyout(pos, count, kbuf, ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); + if (err) + return err; + } + + return 0; +} + +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer. 
+ * Choose the appropriate helper for general registers, and then copy + * the FCSR register separately. + */ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - unsigned i; + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); int err; - u64 fpr_val; - /* XXX fcr31 */ + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf); + else + err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; - if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); - for (i = 0; i < NUM_FPU_REGS; i++) { - fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); - err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &fpr_val, i * sizeof(elf_fpreg_t), - (i + 1) * sizeof(elf_fpreg_t)); + return err; +} + +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context, + * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP + * context's general register slots. Only general registers are copied. + */ +static int fpr_set_fpa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + return user_regset_copyin(pos, count, kbuf, ubuf, + &target->thread.fpu, + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); +} + +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context, + * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 + * bits only of FP context's general register slots. Only general + * registers are copied. + */ +static int fpr_set_msa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + unsigned int i; + u64 fpr_val; + int err; + + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { + err = user_regset_copyin(pos, count, kbuf, ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); if (err) return err; + set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); } return 0; } +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context. + * Choose the appropriate helper for general registers, and then copy + * the FCSR register separately. + * + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', + * which is supposed to have been guaranteed by the kernel before + * calling us, e.g. in `ptrace_regset'. We enforce that requirement, + * so that we can safely avoid preinitializing temporaries for + * partial register writes. 
+ */ static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - unsigned i; + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); + u32 fcr31; int err; - u64 fpr_val; - /* XXX fcr31 */ + BUG_ON(count % sizeof(elf_fpreg_t)); + + if (pos + count > sizeof(elf_fpregset_t)) + return -EIO; init_fp_ctx(target); - if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); + else + err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; - BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); - for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { + if (count > 0) { err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &fpr_val, i * sizeof(elf_fpreg_t), - (i + 1) * sizeof(elf_fpreg_t)); + &fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); if (err) return err; - set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); + + ptrace_setfcr31(target, fcr31); } - return 0; + return err; } enum mips_regset { @@ -618,6 +715,19 @@ static const struct user_regset_view user_mips64_view = { .n = ARRAY_SIZE(mips64_regsets), }; +#ifdef CONFIG_MIPS32_N32 + +static const struct user_regset_view user_mipsn32_view = { + .name = "mipsn32", + .e_flags = EF_MIPS_ABI2, + .e_machine = ELF_ARCH, + .ei_osabi = ELF_OSABI, + .regsets = mips64_regsets, + .n = ARRAY_SIZE(mips64_regsets), +}; + +#endif /* CONFIG_MIPS32_N32 */ + #endif /* CONFIG_64BIT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) @@ -628,6 +738,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) #ifdef CONFIG_MIPS32_O32 if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return &user_mips_view; +#endif +#ifdef CONFIG_MIPS32_N32 + if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) + return &user_mipsn32_view; #endif return &user_mips64_view; #endif diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 0a83b1708b3c..8e3a6020c613 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -40,8 +40,8 @@ */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp) * Restore a thread's fp context. 
*/ LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -246,11 +246,11 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 .set pop -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT -#ifdef CONFIG_CPU_MIPS32_R2 +#ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS @@ -314,11 +314,11 @@ LEAF(_save_fp_context) LEAF(_restore_fp_context) EX lw t1, 0(a1) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT -#ifdef CONFIG_CPU_MIPS32_R2 +#ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index fe3939726765..795caa763da3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -374,6 +374,7 @@ static void __init bootmem_init(void) unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; + phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; bool bootmap_valid = false; int i; @@ -394,7 +395,8 @@ static void __init bootmem_init(void) max_low_pfn = 0; /* - * Find the highest page frame number we have available. + * Find the highest page frame number we have available + * and the lowest used RAM address */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -406,6 +408,8 @@ static void __init bootmem_init(void) end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); + ramstart = min(ramstart, boot_mem_map.map[i].addr); + #ifndef CONFIG_HIGHMEM /* * Skip highmem here so we get an accurate max_low_pfn if low @@ -435,6 +439,13 @@ static void __init bootmem_init(void) mapstart = max(reserved_end, start); } + /* + * Reserve any memory between the start of RAM and PHYS_OFFSET + */ + if (ramstart > PHYS_OFFSET) + add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, + BOOT_MEM_RESERVED); + if (min_low_pfn >= max_low_pfn) panic("Incorrect memory mapping !!!"); if (min_low_pfn > ARCH_PFN_OFFSET) { @@ -663,9 +674,6 @@ static int __init early_parse_mem(char *p) add_memory_region(start, size, BOOT_MEM_RAM); - if (start && start > PHYS_OFFSET) - add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET, - BOOT_MEM_RESERVED); return 0; } early_param("mem", early_parse_mem); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 87dcac2447c8..382d12eb88f0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus) return; } - if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, - "smp_ipi0", NULL)) + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL)) panic("Can't request IPI0 interrupt"); - if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, - "smp_ipi1", NULL)) + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL)) panic("Can't request IPI1 interrupt"); } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index d535edc01434..75fdeaa8c62f 100644 --- a/arch/mips/kvm/mips.c +++ 
b/arch/mips/kvm/mips.c @@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = -EINTR; - sigset_t sigsaved; - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) @@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) local_irq_enable(); out: - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index 692ae85a3e3d..8e3a1fc2bc39 100644 --- a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -13,6 +13,8 @@ choice config SOC_AMAZON_SE bool "Amazon SE" select SOC_TYPE_XWAY + select MFD_SYSCON + select MFD_CORE config SOC_XWAY bool "XWAY" diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 7611c3013793..c05bed624075 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -551,9 +551,9 @@ void __init ltq_soc_init(void) clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), ltq_ar9_fpi_hz(), CLOCK_250M); clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); - clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); - clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM); clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); @@ -562,7 +562,7 @@ void __init ltq_soc_init(void) } else { clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(), ltq_danube_fpi_hz(), ltq_danube_pp32_hz()); - clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 78c2affeabf8..e84e12655fa8 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o # libgcc-style stuff needed in the kernel -obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o +obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \ + ucmpdi2.o diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h index 28002ed90c2c..199a7f96282f 100644 --- a/arch/mips/lib/libgcc.h +++ b/arch/mips/lib/libgcc.h @@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__))); struct DWstruct { int high, low; }; + +struct TWstruct { + long long high, low; +}; #elif defined(__LITTLE_ENDIAN) struct DWstruct { int low, high; }; + +struct TWstruct { + long long low, high; +}; #else #error I feel sick. 
#endif @@ -23,4 +31,13 @@ typedef union { long long ll; } DWunion; +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) +typedef int ti_type __attribute__((mode(TI))); + +typedef union { + struct TWstruct s; + ti_type ti; +} TWunion; +#endif + #endif /* __ASM_LIBGCC_H */ diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c new file mode 100644 index 000000000000..111ad475aa0c --- /dev/null +++ b/arch/mips/lib/multi3.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include "libgcc.h" + +/* + * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that + * specific case only we'll implement it here. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 + */ +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) + +/* multiply 64-bit values, low 64-bits returned */ +static inline long long notrace dmulu(long long a, long long b) +{ + long long res; + + asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); + return res; +} + +/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */ +static inline long long notrace dmuhu(long long a, long long b) +{ + long long res; + + asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); + return res; +} + +/* multiply 128-bit values, low 128-bits returned */ +ti_type notrace __multi3(ti_type a, ti_type b) +{ + TWunion res, aa, bb; + + aa.ti = a; + bb.ti = b; + + /* + * a * b = (a.lo * b.lo) + * + 2^64 * (a.hi * b.lo + a.lo * b.hi) + * [+ 2^128 * (a.hi * b.hi)] + */ + res.s.low = dmulu(aa.s.low, bb.s.low); + res.s.high = dmuhu(aa.s.low, bb.s.low); + res.s.high += dmulu(aa.s.high, bb.s.low); + res.s.high += dmulu(aa.s.low, bb.s.high); + + return res.ti; +} +EXPORT_SYMBOL(__multi3); + +#endif /* 64BIT && CPU_MIPSR6 && GCC7 */ diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 16d9ef5a78c5..6f57212f5659 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); rv.s = ieee754sp_maddf(fd, fs, ft); - break; + goto copcsr; } case fmsubf_op: { @@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); rv.s = ieee754sp_msubf(fd, fs, ft); - break; + goto copcsr; } case frint_op: { @@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_2008class(fs); rfmt = w_fmt; - break; + goto copcsr; } case fmin_op: { @@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmin(fs, ft); - break; + goto copcsr; } case fmina_op: { @@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmina(fs, ft); - break; + goto copcsr; } case fmax_op: { @@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmax(fs, ft); - break; + goto copcsr; } case fmaxa_op: { @@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmaxa(fs, ft); - break; + goto copcsr; } case 
fabs_op: @@ -2165,7 +2165,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); rv.d = ieee754dp_maddf(fd, fs, ft); - break; + goto copcsr; } case fmsubf_op: { @@ -2179,7 +2179,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); rv.d = ieee754dp_msubf(fd, fs, ft); - break; + goto copcsr; } case frint_op: { @@ -2204,7 +2204,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754dp_2008class(fs); rfmt = l_fmt; - break; + goto copcsr; } case fmin_op: { @@ -2217,7 +2217,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmin(fs, ft); - break; + goto copcsr; } case fmina_op: { @@ -2230,7 +2230,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmina(fs, ft); - break; + goto copcsr; } case fmax_op: { @@ -2243,7 +2243,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmax(fs, ft); - break; + goto copcsr; } case fmaxa_op: { @@ -2256,7 +2256,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmaxa(fs, ft); - break; + goto copcsr; } case fabs_op: diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index 90fba9bf98da..27ac00c36bc0 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -121,7 +121,7 @@ static int wait_pciephy_busy(void) else break; if (retry++ > WAITRETRY_MAX) { - printk(KERN_WARN "PCIE-PHY retry failed.\n"); + pr_warn("PCIE-PHY retry failed.\n"); return -1; } } diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 9be8b08ae46b..41b71c4352c2 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 1b274742077d..d2718de60b9b 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info) u32 n1; u32 rev; + /* Early detection of CMP support */ + mips_cm_probe(); + mips_cpc_probe(); + + if (mips_cps_numiocu(0)) { + /* + * mips_cm_probe() wipes out bootloader + * config for CM regions and we have to configure them + * again. This SoC cannot talk to pamlbus devices + * witout proper iocu region set up. + * + * FIXME: it would be better to do this with values + * from DT, but we need this very early because + * without this we cannot talk to pretty much anything + * including serial. 
+ */ + write_gcr_reg0_base(MT7621_PALMBUS_BASE); + write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE | + CM_GCR_REGn_MASK_CMTGT_IOCU0); + __sync(); + } + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); @@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info) rt2880_pinmux_data = mt7621_pinmux_data; - /* Early detection of CMP support */ - mips_cm_probe(); - mips_cpc_probe(); - - if (mips_cps_numiocu(0)) { - /* - * mips_cm_probe() wipes out bootloader - * config for CM regions and we have to configure them - * again. This SoC cannot talk to pamlbus devices - * witout proper iocu region set up. - * - * FIXME: it would be better to do this with values - * from DT, but we need this very early because - * without this we cannot talk to pretty much anything - * including serial. - */ - write_gcr_reg0_base(MT7621_PALMBUS_BASE); - write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE | - CM_GCR_REGn_MASK_CMTGT_IOCU0); - } if (!register_cps_smp_ops()) return; diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c index 64543d66e76b..e9531fea23a2 100644 --- a/arch/mips/ralink/reset.c +++ b/arch/mips/ralink/reset.c @@ -96,16 +96,9 @@ static void ralink_restart(char *command) unreachable(); } -static void ralink_halt(void) -{ - local_irq_disable(); - unreachable(); -} - static int __init mips_reboot_setup(void) { _machine_restart = ralink_restart; - _machine_halt = ralink_halt; return 0; } diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index b39a388825ae..8ace89617c1c 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) info.si_signo = SIGSEGV; info.si_errno = 0; - info.si_code = 0; + info.si_code = SEGV_MAPERR; info.si_addr = (void *) regs->pc; force_sig_info(SIGSEGV, &info, current); return; diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore index 109279ca5a4d..64386a8dedd8 100644 --- a/arch/nios2/boot/.gitignore +++ b/arch/nios2/boot/.gitignore @@ -1,2 +1 @@ -*.dtb vmImage diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index f41bd3cb76d9..e212a1f0b6d2 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -23,7 +23,6 @@ */ #include -#include #include extern const struct dma_map_ops or1k_dma_map_ops; diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 803e9e756f77..8d8437169b5e 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -306,12 +306,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) siginfo_t info; if (user_mode(regs)) { - /* Send a SIGSEGV */ - info.si_signo = SIGSEGV; + /* Send a SIGBUS */ + info.si_signo = SIGBUS; info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void *)address; - force_sig_info(SIGSEGV, &info, current); + info.si_code = BUS_ADRALN; + info.si_addr = (void __user *)address; + force_sig_info(SIGBUS, &info, current); } else { printk("KERNEL: Unaligned Access 0x%.8lx\n", address); show_registers(regs); diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 9345b44b86f0..f57118e1f6b4 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -123,8 +123,8 @@ int puts(const char *s) while ((nuline = strchr(s, '\n')) != NULL) { if (nuline != s) pdc_iodc_print(s, nuline - s); - 
pdc_iodc_print("\r\n", 2); - s = nuline + 1; + pdc_iodc_print("\r\n", 2); + s = nuline + 1; } if (*s != '\0') pdc_iodc_print(s, strlen(s)); diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 3742508cc534..bd5ce31936f5 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long); void flush_kernel_icache_range_asm(unsigned long, unsigned long); void flush_user_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_range_asm(unsigned long, unsigned long); +void purge_kernel_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_page_asm(void *); void flush_kernel_icache_page(void *); diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 07f48827afda..acf8aa07cbe0 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -195,7 +195,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h index dd5a08aaa4da..3eb4bfc1fb36 100644 --- a/arch/parisc/include/asm/ldcw.h +++ b/arch/parisc/include/asm/ldcw.h @@ -12,6 +12,7 @@ for the semaphore. */ #define __PA_LDCW_ALIGNMENT 16 +#define __PA_LDCW_ALIGN_ORDER 4 #define __ldcw_align(a) ({ \ unsigned long __ret = (unsigned long) &(a)->lock[0]; \ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ @@ -29,6 +30,7 @@ ldcd). */ #define __PA_LDCW_ALIGNMENT 4 +#define __PA_LDCW_ALIGN_ORDER 2 #define __ldcw_align(a) (&(a)->slock) #define __LDCW "ldcw,co" diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 0e6ab6e4a4e9..2dbe5580a1a4 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -316,6 +316,8 @@ extern int _parisc_requires_coherency; #define parisc_requires_coherency() (0) #endif +extern int running_on_qemu; + #endif /* __ASSEMBLY__ */ #endif /* __ASM_PARISC_PROCESSOR_H */ diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index c980a02a52bc..598c8d60fa5e 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -35,7 +35,12 @@ struct thread_info { /* thread information allocation */ +#ifdef CONFIG_IRQSTACKS +#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */ +#else #define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */ +#endif + /* Be sure to hunt all references to this down when you change the size of * the kernel stack */ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 19c0c141bc3f..e3b45546d589 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page); int __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end) { - unsigned long flags, size; + unsigned long flags; - size = (end - start); - if (size >= parisc_tlb_flush_threshold) { + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + end - start >= parisc_tlb_flush_threshold) { flush_tlb_all(); return 1; } @@ -539,13 +539,12 @@ void flush_cache_mm(struct mm_struct *mm) struct vm_area_struct *vma; pgd_t *pgd; - /* Flush the TLB to avoid speculation if coherency is required. 
*/ - if (parisc_requires_coherency()) - flush_tlb_all(); - /* Flushing the whole cache on each cpu takes forever on rp3440, etc. So, avoid it if the mm isn't too big. */ - if (mm_total_size(mm) >= parisc_cache_flush_threshold) { + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + mm_total_size(mm) >= parisc_cache_flush_threshold) { + if (mm->context) + flush_tlb_all(); flush_cache_all(); return; } @@ -553,9 +552,9 @@ void flush_cache_mm(struct mm_struct *mm) if (mm->context == mfsp(3)) { for (vma = mm->mmap; vma; vma = vma->vm_next) { flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); - if ((vma->vm_flags & VM_EXEC) == 0) - continue; - flush_user_icache_range_asm(vma->vm_start, vma->vm_end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(vma->vm_start, vma->vm_end); + flush_tlb_range(vma, vma->vm_start, vma->vm_end); } return; } @@ -573,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm) pfn = pte_pfn(*ptep); if (!pfn_valid(pfn)) continue; + if (unlikely(mm->context)) + flush_tlb_page(vma, addr); __flush_cache_page(vma, addr, PFN_PHYS(pfn)); } } @@ -581,30 +582,45 @@ void flush_cache_mm(struct mm_struct *mm) void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - BUG_ON(!vma->vm_mm->context); - - /* Flush the TLB to avoid speculation if coherency is required. */ - if (parisc_requires_coherency()) - flush_tlb_range(vma, start, end); + pgd_t *pgd; + unsigned long addr; - if ((end - start) >= parisc_cache_flush_threshold - || vma->vm_mm->context != mfsp(3)) { + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + end - start >= parisc_cache_flush_threshold) { + if (vma->vm_mm->context) + flush_tlb_range(vma, start, end); flush_cache_all(); return; } - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); + if (vma->vm_mm->context == mfsp(3)) { + flush_user_dcache_range_asm(start, end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(start, end); + flush_tlb_range(vma, start, end); + return; + } + + pgd = vma->vm_mm->pgd; + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { + unsigned long pfn; + pte_t *ptep = get_ptep(pgd, addr); + if (!ptep) + continue; + pfn = pte_pfn(*ptep); + if (pfn_valid(pfn)) { + if (unlikely(vma->vm_mm->context)) + flush_tlb_page(vma, addr); + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + } + } } void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { - BUG_ON(!vma->vm_mm->context); - if (pfn_valid(pfn)) { - if (parisc_requires_coherency()) + if (likely(vma->vm_mm->context)) flush_tlb_page(vma, vmaddr); __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } @@ -613,21 +629,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; - if ((unsigned long)size > parisc_cache_flush_threshold) + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + (unsigned long)size >= parisc_cache_flush_threshold) { + flush_tlb_kernel_range(start, end); flush_data_cache(); - else - flush_kernel_dcache_range_asm(start, start + size); + return; + } + + flush_kernel_dcache_range_asm(start, end); + flush_tlb_kernel_range(start, end); } EXPORT_SYMBOL(flush_kernel_vmap_range); void invalidate_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; - if ((unsigned 
long)size > parisc_cache_flush_threshold) + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + (unsigned long)size >= parisc_cache_flush_threshold) { + flush_tlb_kernel_range(start, end); flush_data_cache(); - else - flush_kernel_dcache_range_asm(start, start + size); + return; + } + + purge_kernel_dcache_range_asm(start, end); + flush_tlb_kernel_range(start, end); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index d8f77358e2ba..513826a43efd 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -651,6 +651,10 @@ static int match_pci_device(struct device *dev, int index, (modpath->mod == PCI_FUNC(devfn))); } + /* index might be out of bounds for bc[] */ + if (index >= 6) + return 0; + id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5); return (modpath->bc[index] == id); } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index a4fd296c958e..e95207c0565e 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,14 @@ #endif .import pa_tlb_lock,data + .macro load_pa_tlb_lock reg +#if __PA_LDCW_ALIGNMENT > 4 + load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg + depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg +#else + load32 PA(pa_tlb_lock), \reg +#endif + .endm /* space_to_prot macro creates a prot id from a space id */ @@ -457,7 +466,7 @@ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault #ifdef CONFIG_SMP cmpib,COND(=),n 0,\spc,2f - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -480,7 +489,7 @@ /* Release pa_tlb_lock lock. */ .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp tlb_unlock0 \spc,\tmp #endif .endm @@ -878,9 +887,6 @@ ENTRY_CFI(syscall_exit_rfi) STREG %r19,PT_SR7(%r16) intr_return: - /* NOTE: Need to enable interrupts incase we schedule. */ - ssm PSW_SM_I, %r0 - /* check for reschedule */ mfctl %cr30,%r1 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ @@ -907,6 +913,11 @@ intr_check_sig: LDREG PT_IASQ1(%r16), %r20 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ + /* NOTE: We need to enable interrupts if we have to deliver + * signals. We used to do this earlier but it caused kernel + * stack overflows. */ + ssm PSW_SM_I, %r0 + copy %r0, %r25 /* long in_syscall = 0 */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ @@ -958,6 +969,10 @@ intr_do_resched: cmpib,COND(=) 0, %r20, intr_do_preempt nop + /* NOTE: We need to enable interrupts if we schedule. We used + * to do this earlier but it caused kernel stack overflows. */ + ssm PSW_SM_I, %r0 + #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index e3a8e5e4d5de..781c3b9a3e46 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -84,6 +84,7 @@ END(hpmc_pim_data) .text .import intr_save, code + .align 16 ENTRY_CFI(os_hpmc) .os_hpmc: @@ -300,11 +301,15 @@ os_hpmc_6: b . 
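A note on the parisc cache changes above: the reworked flush_cache_mm()/flush_cache_range()/flush_kernel_vmap_range() all share one guard — fall back to a whole-cache flush only when the mapped size reaches parisc_cache_flush_threshold, and never on SMP with interrupts disabled, since a global flush may need to interrupt other CPUs. The user-space C sketch below restates just that decision; the example_* names are hypothetical stand-ins, not kernel code.

#include <stdbool.h>

/* Illustrative stand-ins for the vma list walked by flush_cache_mm(). */
struct example_vma {
        unsigned long vm_start, vm_end;
        struct example_vma *vm_next;
};

static unsigned long example_mm_total_size(const struct example_vma *vma)
{
        unsigned long usize = 0;

        for (; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;   /* sum of mapped bytes */
        return usize;
}

/*
 * The new guard: only fall back to flush_cache_all() for large regions,
 * and never from SMP context with interrupts disabled, because the global
 * flush may have to IPI other CPUs.
 */
static bool example_use_whole_cache_flush(bool smp, bool irqs_disabled,
                                          unsigned long size,
                                          unsigned long flush_threshold)
{
        return (!smp || !irqs_disabled) && size >= flush_threshold;
}

Smaller regions instead walk the page tables and flush (and, when the mm has a context, TLB-purge) one page at a time, as in the hunks above.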
nop + .align 16 /* make function length multiple of 16 bytes */ ENDPROC_CFI(os_hpmc) .os_hpmc_end: __INITRODATA - .export os_hpmc_size +.globl os_hpmc_size + .align 4 + .type os_hpmc_size, @object + .size os_hpmc_size, 4 os_hpmc_size: .word .os_hpmc_end-.os_hpmc diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index adf7187f8951..67b0f7532e83 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -36,6 +36,7 @@ #include #include #include +#include #include .text @@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_lock la,flags,tmp #ifdef CONFIG_SMP - ldil L%pa_tlb_lock,%r1 - ldo R%pa_tlb_lock(%r1),\la +#if __PA_LDCW_ALIGNMENT > 4 + load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la + depi 0,31,__PA_LDCW_ALIGN_ORDER, \la +#else + load32 pa_tlb_lock, \la +#endif rsm PSW_SM_I,\flags 1: LDCW 0(\la),\tmp cmpib,<>,n 0,\tmp,3f @@ -1105,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) .procend ENDPROC_CFI(flush_kernel_dcache_range_asm) +ENTRY_CFI(purge_kernel_dcache_range_asm) + .proc + .callinfo NO_CALLS + .entry + + ldil L%dcache_stride, %r1 + ldw R%dcache_stride(%r1), %r23 + ldo -1(%r23), %r21 + ANDCM %r26, %r21, %r26 + +1: cmpb,COND(<<),n %r26, %r25,1b + pdc,m %r23(%r26) + + sync + syncdma + bv %r0(%r2) + nop + .exit + + .procend +ENDPROC_CFI(purge_kernel_dcache_range_asm) + ENTRY_CFI(flush_user_icache_range_asm) .proc .callinfo NO_CALLS diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 30f92391a93e..cad3e8661cd6 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) return 1; } +/* + * Idle thread support + * + * Detect when running on QEMU with SeaBIOS PDC Firmware and let + * QEMU idle the host too. + */ + +int running_on_qemu __read_mostly; + +void __cpuidle arch_cpu_idle_dead(void) +{ + /* nop on real hardware, qemu will offline CPU. */ + asm volatile("or %%r31,%%r31,%%r31\n":::); +} + +void __cpuidle arch_cpu_idle(void) +{ + local_irq_enable(); + + /* nop on real hardware, qemu will idle sleep. */ + asm volatile("or %%r10,%%r10,%%r10\n":::); +} + +static int __init parisc_idle_init(void) +{ + const char *marker; + + /* check QEMU/SeaBIOS marker in PAGE0 */ + marker = (char *) &PAGE0->pad0; + running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); + + if (!running_on_qemu) + cpu_idle_poll_ctrl(1); + + return 0; +} +arch_initcall(parisc_idle_init); + /* * Copy architecture-specific thread state */ diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 41e60a9c7db2..e775f80ae28c 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -690,15 +690,15 @@ cas_action: /* ELF32 Process entry path */ lws_compare_and_swap_2: #ifdef CONFIG_64BIT - /* Clip the input registers */ + /* Clip the input registers. 
We don't need to clip %r23 as we + only use it for word operations */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 - depdi 0, 31, 32, %r23 #endif /* Check the validity of the size pointer */ - subi,>>= 4, %r23, %r0 + subi,>>= 3, %r23, %r0 b,n lws_exit_nosys /* Jump to the functions which will load the old and new values into diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 4b8fd6dc22da..f7e684560186 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) next_tick = cpuinfo->it_value; /* Calculate how many ticks have elapsed. */ + now = mfctl(16); do { ++ticks_elapsed; next_tick += cpt; - now = mfctl(16); } while (next_tick - now > cpt); /* Store (in CR16 cycles) up to when we are accounting right now. */ @@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) * if one or the other wrapped. If "now" is "bigger" we'll end up * with a very large unsigned number. */ - while (next_tick - mfctl(16) > cpt) + now = mfctl(16); + while (next_tick - now > cpt) next_tick += cpt; /* Program the IT when to deliver the next interrupt. * Only bottom 32-bits of next_tick are writable in CR16! * Timer interrupt will be delivered at least a few hundred cycles - * after the IT fires, so if we are too close (<= 500 cycles) to the + * after the IT fires, so if we are too close (<= 8000 cycles) to the * next cycle, simply skip it. */ - if (next_tick - mfctl(16) <= 500) + if (next_tick - now <= 8000) next_tick += cpt; mtctl(next_tick, 16); @@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void) * different sockets, so mark them unstable and lower rating on * multi-socket SMP systems. */ - if (num_online_cpus() > 1) { + if (num_online_cpus() > 1 && !running_on_qemu) { int cpu; unsigned long cpu0_loc; cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index cb782ac1c35d..fe418226df7f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -164,6 +164,7 @@ config PPC select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_SMP_IDLE_THREAD diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index 84774ccba1c2..f92d0530ceb1 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -18,7 +18,6 @@ otheros.bld uImage cuImage.* dtbImage.* -*.dtb treeImage.* vmlinux.strip zImage diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index f058e0c3e4d4..fd1d6c83f0c0 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -141,6 +141,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-vpmsum", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index abef812de7f8..2c895e8d07f7 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -33,6 +33,7 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags); int patch_instruction(unsigned int *addr, unsigned int instr); int instr_is_relative_branch(unsigned int 
instr); +int instr_is_relative_link_branch(unsigned int instr); int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); unsigned long branch_target(const unsigned int *instr); unsigned int translate_branch(const unsigned int *dest, diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index a035b1e5dfa7..8a2aecfe9b02 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -185,7 +185,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a703452d67b6..555e22d5e07f 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -209,5 +209,11 @@ exc_##label##_book3e: ori r3,r3,vector_offset@l; \ mtspr SPRN_IVOR##vector_number,r3; +#define RFI_TO_KERNEL \ + rfi + +#define RFI_TO_USER \ + rfi + #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 9a318973af05..ccf10c2f8899 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -69,6 +69,59 @@ */ #define EX_R3 EX_DAR +/* + * Macros for annotating the expected destination of (h)rfid + * + * The nop instructions allow us to insert one or more instructions to flush the + * L1-D cache when returning to userspace or a guest. + */ +#define RFI_FLUSH_SLOT \ + RFI_FLUSH_FIXUP_SECTION; \ + nop; \ + nop; \ + nop + +#define RFI_TO_KERNEL \ + rfid + +#define RFI_TO_USER \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + +#define RFI_TO_USER_OR_KERNEL \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + +#define RFI_TO_GUEST \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + +#define HRFI_TO_KERNEL \ + hrfid + +#define HRFI_TO_USER \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + +#define HRFI_TO_USER_OR_KERNEL \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + +#define HRFI_TO_GUEST \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + +#define HRFI_TO_UNKNOWN \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + #ifdef CONFIG_RELOCATABLE #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ @@ -213,7 +266,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtspr SPRN_##h##SRR0,r12; \ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ mtspr SPRN_##h##SRR1,r10; \ - h##rfid; \ + h##RFI_TO_KERNEL; \ b . /* prevent speculative execution */ #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ __EXCEPTION_PROLOG_PSERIES_1(label, h) @@ -227,7 +280,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtspr SPRN_##h##SRR0,r12; \ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ mtspr SPRN_##h##SRR1,r10; \ - h##rfid; \ + h##RFI_TO_KERNEL; \ b . 
/* prevent speculative execution */ #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 8f88f771cc55..1e82eb3caabd 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -187,7 +187,20 @@ label##3: \ FTR_ENTRY_OFFSET label##1b-label##3b; \ .popsection; +#define RFI_FLUSH_FIXUP_SECTION \ +951: \ + .pushsection __rfi_flush_fixup,"a"; \ + .align 2; \ +952: \ + FTR_ENTRY_OFFSET 951b-952b; \ + .popsection; + + #ifndef __ASSEMBLY__ +#include + +extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + void apply_feature_fixups(void); void setup_feature_keys(void); #endif diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a409177be8bd..eca3f9c68907 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -241,6 +241,7 @@ #define H_GET_HCA_INFO 0x1B8 #define H_GET_PERF_COUNT 0x1BC #define H_MANAGE_TRACE 0x1C0 +#define H_GET_CPU_CHARACTERISTICS 0x1C8 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 #define H_QUERY_INT_STATE 0x1E4 #define H_POLL_PENDING 0x1D8 @@ -330,6 +331,17 @@ #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 /* >= 0 values are CPU number */ +/* H_GET_CPU_CHARACTERISTICS return values */ +#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0 +#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1 +#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 +#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 +#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 + +#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 +#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 +#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 + /* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 #define PROC_TABLE_DEREG 0x10 @@ -341,6 +353,7 @@ #define PROC_TABLE_GTSE 0x01 #ifndef __ASSEMBLY__ +#include /** * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments @@ -436,6 +449,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc) } } +struct h_cpu_char_result { + u64 character; + u64 behaviour; +}; + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 73b92017b6d7..cd2fc1cc1cc7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -76,6 +76,7 @@ struct machdep_calls { void __noreturn (*restart)(char *cmd); void __noreturn (*halt)(void); + void (*panic)(char *str); void (*cpu_die)(void); long (*time_init)(void); /* Optional, may be NULL */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 492d8140a395..44fdf4786638 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -114,9 +114,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 04b60af027ae..b8366df50d19 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -231,6 +231,16 @@ struct 
paca_struct { struct sibling_subcore_state *sibling_subcore_state; #endif #endif +#ifdef CONFIG_PPC_BOOK3S_64 + /* + * rfi fallback flush must be in its own cacheline to prevent + * other paca data leaking into the L1d + */ + u64 exrfi[EX_SIZE] __aligned(0x80); + void *rfi_flush_fallback_area; + u64 l1d_flush_congruence; + u64 l1d_flush_sets; +#endif }; extern void copy_mm_to_paca(struct mm_struct *mm); diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index a14203c005f1..e11f03007b57 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) } #endif /* MODULE */ -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) #ifdef CONFIG_PPC_BOOK3S #include diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 7f01b22fa6cb..55eddf50d149 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu) return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); } +static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf); + if (rc == H_SUCCESS) { + p->character = retbuf[0]; + p->behaviour = retbuf[1]; + } + + return rc; +} + #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 257d23dbf55d..469b7fdc9be4 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void initmem_init(void); +void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 #ifdef CONFIG_PPC_PSERIES @@ -38,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {} static inline void pseries_little_endian_exceptions(void) {} #endif /* CONFIG_PPC_PSERIES */ +void rfi_flush_enable(bool enable); + +/* These are bit flags */ +enum l1d_flush_type { + L1D_FLUSH_NONE = 0x1, + L1D_FLUSH_FALLBACK = 0x2, + L1D_FLUSH_ORI = 0x4, + L1D_FLUSH_MTTRIG = 0x8, +}; + +void __init setup_rfi_flush(enum l1d_flush_type, bool enable); +void do_rfi_flush_fixups(enum l1d_flush_type types); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 023ff9f17501..d5f2ee882f74 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid); extern void sysfs_remove_device_from_node(struct device *dev, int nid); extern int numa_update_cpu_topology(bool cpus_locked); +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) +{ + numa_cpu_lookup_table[cpu] = node; +} + static inline int early_cpu_to_node(int cpu) { int nid; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8cfb20e38cfe..748cdc4bb89a 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -237,6 +237,11 @@ int main(void) OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); OFFSET(PACA_IN_MCE, paca_struct, in_mce); OFFSET(PACA_IN_NMI, paca_struct, in_nmi); + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, 
rfi_flush_fallback_area); + OFFSET(PACA_EXRFI, paca_struct, exrfi); + OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); + OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets); + #endif OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 610955fe8b81..679bbe714e85 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9) li r0,0 mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 @@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9) li r0,0 mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 4a0fd4f40245..6f07c687fc05 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -37,6 +37,11 @@ #include #include #include +#ifdef CONFIG_PPC_BOOK3S +#include +#else +#include +#endif /* * System calls. @@ -262,13 +267,23 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ + ld r2,GPR2(r1) + ld r1,GPR1(r1) + mtlr r4 + mtcr r5 + mtspr SPRN_SRR0,r7 + mtspr SPRN_SRR1,r8 + RFI_TO_USER + b . /* prevent speculative execution */ + + /* exit to kernel */ 1: ld r2,GPR2(r1) ld r1,GPR1(r1) mtlr r4 mtcr r5 mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 - RFI + RFI_TO_KERNEL b . /* prevent speculative execution */ .Lsyscall_error: @@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) mtmsrd r10, 1 mtspr SPRN_SRR0, r11 mtspr SPRN_SRR1, r12 - - rfid + RFI_TO_USER b . /* prevent speculative execution */ #endif _ASM_NOKPROBE_SYMBOL(system_call_common); @@ -878,7 +892,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ACCOUNT_CPU_USER_EXIT(r13, r2, r4) REST_GPR(13, r1) -1: + mtspr SPRN_SRR1,r3 ld r2,_CCR(r1) @@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r3,GPR3(r1) ld r4,GPR4(r1) ld r1,GPR1(r1) + RFI_TO_USER + b . /* prevent speculative execution */ + +1: mtspr SPRN_SRR1,r3 - rfid + ld r2,_CCR(r1) + mtcrf 0xFF,r2 + ld r2,_NIP(r1) + mtspr SPRN_SRR0,r2 + + ld r0,GPR0(r1) + ld r2,GPR2(r1) + ld r3,GPR3(r1) + ld r4,GPR4(r1) + ld r1,GPR1(r1) + RFI_TO_KERNEL b . /* prevent speculative execution */ #endif /* CONFIG_PPC_BOOK3E */ @@ -911,9 +939,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) beq 1f rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS stb r7,PACAIRQHAPPENED(r13) -1: li r0,0 - stb r0,PACASOFTIRQEN(r13); - TRACE_DISABLE_INTS +1: +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) + /* The interrupt should not have soft enabled. */ + lbz r7,PACASOFTIRQEN(r13) +1: tdnei r7,0 + EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING +#endif b .Ldo_restore /* @@ -1073,7 +1105,7 @@ __enter_rtas: mtspr SPRN_SRR0,r5 mtspr SPRN_SRR1,r6 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ rtas_return_loc: @@ -1098,7 +1130,7 @@ rtas_return_loc: mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - rfid + RFI_TO_KERNEL b . 
/* prevent speculative execution */ _ASM_NOKPROBE_SYMBOL(__enter_rtas) _ASM_NOKPROBE_SYMBOL(rtas_return_loc) @@ -1171,7 +1203,7 @@ _GLOBAL(enter_prom) LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 - rfid + RFI_TO_KERNEL #endif /* CONFIG_PPC_BOOK3E */ 1: /* Return from OF */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1c80bd292e48..f9ca4bb3d48e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -254,7 +254,7 @@ BEGIN_FTR_SECTION LOAD_HANDLER(r12, machine_check_handle_early) 1: mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r11 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ 2: /* Stack overflow. Stay on emergency stack and panic. @@ -443,7 +443,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) li r3,MSR_ME andc r10,r10,r3 /* Turn off MSR_ME */ mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 2: /* @@ -461,7 +461,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) */ bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP - rfid + RFI_TO_USER_OR_KERNEL 9: /* Deliver the machine check to host kernel in V mode. */ MACHINE_CHECK_HANDLER_WINDUP @@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common) RECONCILE_IRQ_STATE(r10, r11) ld r12,_MSR(r1) ld r3,_NIP(r1) - andis. r4,r12,DSISR_BAD_FAULT_64S@h + andis. r4,r12,DSISR_SRR1_MATCH_64S@h li r5,0x400 std r3,_DAR(r1) std r4,_DSISR(r1) @@ -596,6 +596,9 @@ EXC_COMMON_BEGIN(slb_miss_common) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + andi. r9,r11,MSR_PR // Check for exception from userspace + cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later + /* * Test MSR_RI before calling slb_allocate_realmode, because the * MSR in r11 gets clobbered. However we still want to allocate @@ -622,9 +625,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) /* All done -- return from exception. */ + bne cr4,1f /* returning to kernel */ + .machine push .machine "power4" mtcrf 0x80,r9 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ mtcrf 0x02,r9 /* I/D indication is in cr6 */ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ @@ -638,9 +644,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) ld r11,PACA_EXSLB+EX_R11(r13) ld r12,PACA_EXSLB+EX_R12(r13) ld r13,PACA_EXSLB+EX_R13(r13) - rfid + RFI_TO_USER + b . /* prevent speculative execution */ +1: +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ + mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ + mtcrf 0x02,r9 /* I/D indication is in cr6 */ + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + + RESTORE_CTR(r9, PACA_EXSLB) + RESTORE_PPR_PACA(PACA_EXSLB, r9) + mr r3,r12 + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + RFI_TO_KERNEL b . /* prevent speculative execution */ + 2: std r3,PACA_EXSLB+EX_DAR(r13) mr r3,r12 mfspr r11,SPRN_SRR0 @@ -649,7 +676,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 8: std r3,PACA_EXSLB+EX_DAR(r13) @@ -660,7 +687,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 
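The H_GET_CPU_CHARACTERISTICS hcall, the H_CPU_CHAR_*/H_CPU_BEHAV_* bits and plpar_get_cpu_characteristics() introduced above exist so platform code can discover which L1D flush mechanism the hypervisor exposes before calling setup_rfi_flush(). The pseries probing code itself is not part of the hunks shown here, so the sketch below is only one plausible mapping from those bits to the enum l1d_flush_type values from asm/setup.h; the function name and the exact policy are assumptions, not the patch's code.

/*
 * Sketch: one plausible way a pseries platform could turn the hcall
 * result into setup_rfi_flush() arguments.
 */
static void example_pseries_setup_rfi_flush(void)
{
        struct h_cpu_char_result result;
        enum l1d_flush_type types = L1D_FLUSH_FALLBACK; /* always-available default */
        bool enable = true;
        long rc;

        rc = plpar_get_cpu_characteristics(&result);
        if (rc == H_SUCCESS) {
                if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
                        types |= L1D_FLUSH_MTTRIG;      /* flush via mtspr TRIG2 */
                if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
                        types |= L1D_FLUSH_ORI;         /* flush via ori 30,30,0 */

                /* Only arm the exit flush if the hypervisor recommends it. */
                enable = (result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY) &&
                         (result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR);
        }

        setup_rfi_flush(types, enable);
}

Whatever flush types end up enabled are later patched into the RFI_FLUSH_SLOT nops by do_rfi_flush_fixups(), shown further down in lib/feature-fixups.c.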
EXC_COMMON_BEGIN(unrecov_slb) @@ -677,7 +704,7 @@ EXC_COMMON_BEGIN(bad_addr_slb) ld r3, PACA_EXSLB+EX_DAR(r13) std r3, _DAR(r1) beq cr6, 2f - li r10, 0x480 /* fix trap number for I-SLB miss */ + li r10, 0x481 /* fix trap number for I-SLB miss */ std r10, _TRAP(r1) 2: bl save_nvgprs addi r3, r1, STACK_FRAME_OVERHEAD @@ -905,7 +932,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ mtspr SPRN_SRR0,r10 ; \ ld r10,PACAKMSR(r13) ; \ mtspr SPRN_SRR1,r10 ; \ - rfid ; \ + RFI_TO_KERNEL ; \ b . ; /* prevent speculative execution */ #define SYSCALL_FASTENDIAN \ @@ -914,7 +941,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ xori r12,r12,MSR_LE ; \ mtspr SPRN_SRR1,r12 ; \ mr r13,r9 ; \ - rfid ; /* return to userspace */ \ + RFI_TO_USER ; /* return to userspace */ \ b . ; /* prevent speculative execution */ #if defined(CONFIG_RELOCATABLE) @@ -1299,7 +1326,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r11,PACA_EXGEN+EX_R11(r13) ld r12,PACA_EXGEN+EX_R12(r13) ld r13,PACA_EXGEN+EX_R13(r13) - HRFID + HRFI_TO_UNKNOWN b . #endif @@ -1403,10 +1430,94 @@ masked_##_H##interrupt: \ ld r10,PACA_EXGEN+EX_R10(r13); \ ld r11,PACA_EXGEN+EX_R11(r13); \ /* returns to kernel where r13 must be set up, so don't restore it */ \ - ##_H##rfid; \ + ##_H##RFI_TO_KERNEL; \ b .; \ MASKED_DEC_HANDLER(_H) +TRAMP_REAL_BEGIN(rfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + std r12,PACA_EXRFI+EX_R12(r13) + std r8,PACA_EXRFI+EX_R13(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) + ld r11,PACA_L1D_FLUSH_SETS(r13) + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). + */ + addi r12,r12,8 + mtctr r11 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync +1: li r8,0 + .rept 8 /* 8-way set associative */ + ldx r11,r10,r8 + add r8,r8,r12 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not + add r8,r8,r11 // Add 0, this creates a dependency on the ldx + .endr + addi r10,r10,128 /* 128 byte cache line */ + bdnz 1b + + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) + ld r12,PACA_EXRFI+EX_R12(r13) + ld r8,PACA_EXRFI+EX_R13(r13) + GET_SCRATCH0(r13); + rfid + +TRAMP_REAL_BEGIN(hrfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + std r12,PACA_EXRFI+EX_R12(r13) + std r8,PACA_EXRFI+EX_R13(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) + ld r11,PACA_L1D_FLUSH_SETS(r13) + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). 
+ */ + addi r12,r12,8 + mtctr r11 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync +1: li r8,0 + .rept 8 /* 8-way set associative */ + ldx r11,r10,r8 + add r8,r8,r12 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not + add r8,r8,r11 // Add 0, this creates a dependency on the ldx + .endr + addi r10,r10,128 /* 128 byte cache line */ + bdnz 1b + + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) + ld r12,PACA_EXRFI+EX_R12(r13) + ld r8,PACA_EXRFI+EX_R13(r13) + GET_SCRATCH0(r13); + hrfid + /* * Real mode exceptions actually use this too, but alternate * instruction code patches (which end up in the common .text area) @@ -1426,7 +1537,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt) addi r13, r13, 4 mtspr SPRN_SRR0, r13 GET_SCRATCH0(r13) - rfid + RFI_TO_KERNEL b . TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) @@ -1438,7 +1549,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) addi r13, r13, 4 mtspr SPRN_HSRR0, r13 GET_SCRATCH0(r13) - hrfid + HRFI_TO_KERNEL b . #endif @@ -1506,7 +1617,7 @@ USE_TEXT_SECTION() .balign IFETCH_ALIGN_BYTES do_hash_page: #ifdef CONFIG_PPC_STD_MMU_64 - lis r0,DSISR_BAD_FAULT_64S@h + lis r0,(DSISR_BAD_FAULT_64S|DSISR_DABRMATCH)@h ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r4,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index e1431800bfb9..29d2b6050140 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1453,25 +1453,6 @@ static void fadump_init_files(void) return; } -static int fadump_panic_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - /* - * If firmware-assisted dump has been registered then trigger - * firmware-assisted dump and let firmware handle everything - * else. If this returns, then fadump was not registered, so - * go through the rest of the panic path. - */ - crash_fadump(NULL, ptr); - - return NOTIFY_DONE; -} - -static struct notifier_block fadump_panic_block = { - .notifier_call = fadump_panic_event, - .priority = INT_MIN /* may not return; must be done last */ -}; - /* * Prepare for firmware-assisted dump. */ @@ -1504,9 +1485,6 @@ int __init setup_fadump(void) init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); fadump_init_files(); - atomic_notifier_chain_register(&panic_notifier_list, - &fadump_panic_block); - return 1; } subsys_initcall(setup_fadump); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 8c54166491e7..29b2fed93289 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -388,7 +388,7 @@ DataAccess: EXCEPTION_PROLOG mfspr r10,SPRN_DSISR stw r10,_DSISR(r11) - andis. r0,r10,DSISR_BAD_FAULT_32S@h + andis. r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h bne 1f /* if not, try to put a PTE */ mfspr r4,SPRN_DAR /* into the hash table */ rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4e65bf82f5e0..0ce8b0e5d7ba 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -430,6 +430,14 @@ void force_external_irq_replay(void) */ WARN_ON(!arch_irqs_disabled()); + /* + * Interrupts must always be hard disabled before irq_happened is + * modified (to prevent lost update in case of interrupt between + * load and store). 
+ */ + __hard_irq_disable(); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + /* Indicate in the PACA that we have an interrupt to replay */ local_paca->irq_happened |= PACA_IRQ_EE; } diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 6c089d9757c9..2d81404f818c 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -65,6 +65,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, /* Disable irq for emulating a breakpoint and avoiding preempt */ local_irq_save(flags); hard_irq_disable(); + preempt_disable(); p = get_kprobe((kprobe_opcode_t *)nip); if (unlikely(!p) || kprobe_disabled(p)) @@ -86,12 +87,18 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) __skip_singlestep(p, regs, kcb, orig_nip); - /* - * If pre_handler returns !0, it sets regs->nip and - * resets current kprobe. - */ + else { + /* + * If pre_handler returns !0, it sets regs->nip and + * resets current kprobe. In this case, we still need + * to restore irq, but not preemption. + */ + local_irq_restore(flags); + return; + } } end: + preempt_enable_no_resched(); local_irq_restore(flags); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 8ac0bd2bddb0..3280953a82cf 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -623,7 +623,9 @@ BEGIN_FTR_SECTION * NOTE, we rely on r0 being 0 from above. */ mtspr SPRN_IAMR,r0 +BEGIN_FTR_SECTION_NESTED(42) mtspr SPRN_AMOR,r0 +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* save regs for local vars on new stack. diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 0b0f89685b67..2a1b1273a312 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -486,7 +486,17 @@ static bool is_early_mcount_callsite(u32 *instruction) restore r2. */ static int restore_r2(u32 *instruction, struct module *me) { - if (is_early_mcount_callsite(instruction - 1)) + u32 *prev_insn = instruction - 1; + + if (is_early_mcount_callsite(prev_insn)) + return 1; + + /* + * Make sure the branch isn't a sibling call. Sibling calls aren't + * "link" branches and they don't return, so they don't need the r2 + * restore afterwards. 
+ */ + if (!instr_is_relative_link_branch(*prev_insn)) return 1; if (*instruction != PPC_INST_NOP) { diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 91e037ab20a1..60ba7f1370a8 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p) static void optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long flags; /* This is possible if op is under delayed unoptimizing */ @@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op, local_irq_save(flags); hard_irq_disable(); + preempt_disable(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { __this_cpu_write(current_kprobe, &op->kp); regs->nip = (unsigned long)op->kp.addr; - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } @@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op, * local_irq_restore() will re-enable interrupts, * if they were hard disabled. */ + preempt_enable_no_resched(); local_irq_restore(flags); } NOKPROBE_SYMBOL(optimized_callback); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 2e3bc16d02b2..90bc20efb4c7 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -704,6 +704,30 @@ int check_legacy_ioport(unsigned long base_port) } EXPORT_SYMBOL(check_legacy_ioport); +static int ppc_panic_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + /* + * If firmware-assisted dump has been registered then trigger + * firmware-assisted dump and let firmware handle everything else. + */ + crash_fadump(NULL, ptr); + ppc_md.panic(ptr); /* May not return */ + return NOTIFY_DONE; +} + +static struct notifier_block ppc_panic_block = { + .notifier_call = ppc_panic_event, + .priority = INT_MIN /* may not return; must be done last */ +}; + +void __init setup_panic(void) +{ + if (!ppc_md.panic) + return; + atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); +} + #ifdef CONFIG_CHECK_CACHE_COHERENCY /* * For platforms that have configurable cache-coherency. This function @@ -848,6 +872,9 @@ void __init setup_arch(char **cmdline_p) /* Probe the machine type, establish ppc_md. */ probe_machine(); + /* Setup panic notifier if requested by the platform. */ + setup_panic(); + /* * Configure ppc_md.power_save (ppc32 only, 64-bit machines do * it from their respective probe() function. 
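On the module_64.c change above: restore_r2() now also bails out when the instruction before the call site is not a branch-and-link, because a sibling (tail) call never returns through that site and therefore never needs its TOC pointer restored. The helper it relies on, instr_is_relative_link_branch(), is added to lib/code-patching.c further down in this patch and simply tests the LK bit of a relative branch. A small user-space model of that test (primary opcode 18 for I-form b/bl, 16 for B-form bc/bcl, LK bit 0x1):

#include <stdint.h>
#include <stdbool.h>

#define BRANCH_SET_LINK 0x1u    /* LK bit: the branch writes the link register */

static bool is_branch_iform(uint32_t insn) { return (insn >> 26) == 18; } /* b, bl   */
static bool is_branch_bform(uint32_t insn) { return (insn >> 26) == 16; } /* bc, bcl */

static bool is_relative_link_branch(uint32_t insn)
{
        return (is_branch_iform(insn) || is_branch_bform(insn)) &&
               (insn & BRANCH_SET_LINK);
}

/*
 * Example: 0x48000001 ("bl .") has LK set  -> true, r2 restore is wanted;
 *          0x48000000 ("b  .") has LK clear -> false, sibling call, skip it.
 */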
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index b89c6aac48c9..9527a4c6cbc2 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -784,3 +785,141 @@ static int __init disable_hardlockup_detector(void) return 0; } early_initcall(disable_hardlockup_detector); + +#ifdef CONFIG_PPC_BOOK3S_64 +static enum l1d_flush_type enabled_flush_types; +static void *l1d_flush_fallback_area; +static bool no_rfi_flush; +bool rfi_flush; + +static int __init handle_no_rfi_flush(char *p) +{ + pr_info("rfi-flush: disabled on command line."); + no_rfi_flush = true; + return 0; +} +early_param("no_rfi_flush", handle_no_rfi_flush); + +/* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. + */ +static int __init handle_no_pti(char *p) +{ + pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); + handle_no_rfi_flush(NULL); + return 0; +} +early_param("nopti", handle_no_pti); + +static void do_nothing(void *unused) +{ + /* + * We don't need to do the flush explicitly, just enter+exit kernel is + * sufficient, the RFI exit handlers will do the right thing. + */ +} + +void rfi_flush_enable(bool enable) +{ + if (rfi_flush == enable) + return; + + if (enable) { + do_rfi_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else + do_rfi_flush_fixups(L1D_FLUSH_NONE); + + rfi_flush = enable; +} + +static void init_fallback_flush(void) +{ + u64 l1d_size, limit; + int cpu; + + l1d_size = ppc64_caches.l1d.size; + limit = min(safe_stack_limit(), ppc64_rma_size); + + /* + * Align to L1d size, and size it at 2x L1d size, to catch possible + * hardware prefetch runoff. We don't have a recipe for load patterns to + * reliably avoid the prefetcher. + */ + l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); + memset(l1d_flush_fallback_area, 0, l1d_size * 2); + + for_each_possible_cpu(cpu) { + /* + * The fallback flush is currently coded for 8-way + * associativity. Different associativity is possible, but it + * will be treated as 8-way and may not evict the lines as + * effectively. + * + * 128 byte lines are mandatory. + */ + u64 c = l1d_size / 8; + + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; + paca[cpu].l1d_flush_congruence = c; + paca[cpu].l1d_flush_sets = c / 128; + } +} + +void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) +{ + if (types & L1D_FLUSH_FALLBACK) { + pr_info("rfi-flush: Using fallback displacement flush\n"); + init_fallback_flush(); + } + + if (types & L1D_FLUSH_ORI) + pr_info("rfi-flush: Using ori type flush\n"); + + if (types & L1D_FLUSH_MTTRIG) + pr_info("rfi-flush: Using mttrig type flush\n"); + + enabled_flush_types = types; + + if (!no_rfi_flush) + rfi_flush_enable(enable); +} + +#ifdef CONFIG_DEBUG_FS +static int rfi_flush_set(void *data, u64 val) +{ + if (val == 1) + rfi_flush_enable(true); + else if (val == 0) + rfi_flush_enable(false); + else + return -EINVAL; + + return 0; +} + +static int rfi_flush_get(void *data, u64 *val) +{ + *val = rfi_flush ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); + +static __init int rfi_flush_debugfs_init(void) +{ + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); + return 0; +} +device_initcall(rfi_flush_debugfs_init); +#endif + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (rfi_flush) + return sprintf(buf, "Mitigation: RFI Flush\n"); + + return sprintf(buf, "Vulnerable\n"); +} +#endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index e9436c5e1e09..3d7539b90010 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, static void do_signal(struct task_struct *tsk) { sigset_t *oldset = sigmask_to_save(); - struct ksignal ksig; + struct ksignal ksig = { .sig = 0 }; int ret; int is32 = is_32bit_task(); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0494e1566ee2..307843d23682 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -132,6 +132,15 @@ SECTIONS /* Read-only data */ RO_DATA(PAGE_SIZE) +#ifdef CONFIG_PPC64 + . = ALIGN(8); + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { + __start___rfi_flush_fixup = .; + *(__rfi_flush_fixup) + __stop___rfi_flush_fixup = .; + } +#endif + EXCEPTION_TABLE(0) NOTES :kernel :notes diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 57190f384f63..ce848ff84edd 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -276,9 +276,12 @@ void arch_touch_nmi_watchdog(void) { unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; int cpu = smp_processor_id(); + u64 tb = get_tb(); - if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks) - watchdog_timer_interrupt(cpu); + if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { + per_cpu(wd_timer_tb, cpu) = tb; + wd_smp_clear_cpu_pending(cpu, tb); + } } EXPORT_SYMBOL(arch_touch_nmi_watchdog); diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index b12b8eb39c29..648160334abf 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -68,7 +68,7 @@ config KVM_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV) ---help--- Support running unmodified book3s_64 and book3s_32 guest kernels in virtual machines on book3s_64 host processors. 
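The rfi_flush_fallback/hrfi_flush_fallback trampolines above, together with init_fallback_flush() in setup_64.c, implement a displacement flush: on exit to user space the CPU loads from a dedicated, zeroed area sized at 2× the L1D (assumed 8-way associative with mandatory 128-byte lines) so every set and way of the L1 data cache is refilled with harmless data. The user-space C sketch below mirrors only the address walk of the assembler loop — l1d_flush_sets iterations, 8 ways each, loads staggered by 8 bytes per way — with barriers and prefetch-stream control omitted; the 32 KB size is just an example value, the kernel reads the real size from ppc64_caches.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define L1D_SIZE   (32 * 1024)  /* example; the kernel uses ppc64_caches.l1d.size */
#define WAYS       8            /* the fallback flush assumes 8-way associativity */
#define LINE_SIZE  128          /* 128-byte cache lines are mandatory */

int main(void)
{
        /* 2 x L1D, as in init_fallback_flush(), to absorb prefetch runoff. */
        uint8_t *area = aligned_alloc(L1D_SIZE, 2 * L1D_SIZE);
        uint64_t congruence = L1D_SIZE / WAYS;  /* bytes covered by one way   */
        uint64_t sets = congruence / LINE_SIZE; /* cache lines per way        */
        volatile uint64_t sink = 0;

        if (!area)
                return 1;
        memset(area, 0, 2 * L1D_SIZE);

        for (uint64_t set = 0; set < sets; set++) {
                uint64_t offset = 0;

                for (int way = 0; way < WAYS; way++) {
                        /* Loads staggered by 8 bytes per way, as in the asm loop. */
                        sink += *(volatile uint64_t *)(area + set * LINE_SIZE + offset);
                        offset += congruence + 8;
                }
        }

        free(area);
        return 0;
}

In the kernel the per-cpu paca carries the area pointer and the precomputed l1d_flush_congruence/l1d_flush_sets values, so the real-mode trampolines need nothing but loads to do their work.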
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 29ebe2fd5867..a93d719edc90 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_read = true; gpte->may_write = true; gpte->page_size = MMU_PAGE_4K; + gpte->wimg = HPTE_R_M; return 0; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 59247af5fd45..2645d484e945 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -65,11 +65,17 @@ struct kvm_resize_hpt { u32 order; /* These fields protected by kvm->lock */ + + /* Possible values and their usage: + * <0 an error occurred during allocation, + * -EBUSY allocation is in the progress, + * 0 allocation made successfuly. + */ int error; - bool prepare_done; - /* Private to the work thread, until prepare_done is true, - * then protected by kvm->resize_hpt_sem */ + /* Private to the work thread, until error != -EBUSY, + * then protected by kvm->lock. + */ struct kvm_hpt_info hpt; }; @@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) * Reset all the reverse-mapping chains for all memslots */ kvmppc_rmap_reset(kvm); - /* Ensure that each vcpu will flush its TLB on next entry. */ - cpumask_setall(&kvm->arch.need_tlb_flush); err = 0; goto out; } @@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) kvmppc_set_hpt(kvm, &info); out: + if (err == 0) + /* Ensure that each vcpu will flush its TLB on next entry. */ + cpumask_setall(&kvm->arch.need_tlb_flush); + mutex_unlock(&kvm->lock); return err; } @@ -1424,16 +1432,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) { - BUG_ON(kvm->arch.resize_hpt != resize); + if (WARN_ON(!mutex_is_locked(&kvm->lock))) + return; if (!resize) return; - if (resize->hpt.virt) - kvmppc_free_hpt(&resize->hpt); + if (resize->error != -EBUSY) { + if (resize->hpt.virt) + kvmppc_free_hpt(&resize->hpt); + kfree(resize); + } - kvm->arch.resize_hpt = NULL; - kfree(resize); + if (kvm->arch.resize_hpt == resize) + kvm->arch.resize_hpt = NULL; } static void resize_hpt_prepare_work(struct work_struct *work) @@ -1442,17 +1454,41 @@ static void resize_hpt_prepare_work(struct work_struct *work) struct kvm_resize_hpt, work); struct kvm *kvm = resize->kvm; - int err; + int err = 0; - resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", - resize->order); - - err = resize_hpt_allocate(resize); + if (WARN_ON(resize->error != -EBUSY)) + return; mutex_lock(&kvm->lock); + /* Request is still current? */ + if (kvm->arch.resize_hpt == resize) { + /* We may request large allocations here: + * do not sleep with kvm->lock held for a while. + */ + mutex_unlock(&kvm->lock); + + resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", + resize->order); + + err = resize_hpt_allocate(resize); + + /* We have strict assumption about -EBUSY + * when preparing for HPT resize. + */ + if (WARN_ON(err == -EBUSY)) + err = -EINPROGRESS; + + mutex_lock(&kvm->lock); + /* It is possible that kvm->arch.resize_hpt != resize + * after we grab kvm->lock again. 
+ */ + } + resize->error = err; - resize->prepare_done = true; + + if (kvm->arch.resize_hpt != resize) + resize_hpt_release(kvm, resize); mutex_unlock(&kvm->lock); } @@ -1477,14 +1513,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, if (resize) { if (resize->order == shift) { - /* Suitable resize in progress */ - if (resize->prepare_done) { - ret = resize->error; - if (ret != 0) - resize_hpt_release(kvm, resize); - } else { + /* Suitable resize in progress? */ + ret = resize->error; + if (ret == -EBUSY) ret = 100; /* estimated time in ms */ - } + else if (ret) + resize_hpt_release(kvm, resize); goto out; } @@ -1504,6 +1538,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, ret = -ENOMEM; goto out; } + + resize->error = -EBUSY; resize->order = shift; resize->kvm = kvm; INIT_WORK(&resize->work, resize_hpt_prepare_work); @@ -1558,16 +1594,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, if (!resize || (resize->order != shift)) goto out; - ret = -EBUSY; - if (!resize->prepare_done) - goto out; - ret = resize->error; - if (ret != 0) + if (ret) goto out; ret = resize_hpt_rehash(resize); - if (ret != 0) + if (ret) goto out; resize_hpt_pivot(resize); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8d43cf205d34..f48e3379a18a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -999,8 +999,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tvcpu; - if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return EMULATE_FAIL; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) return RESUME_GUEST; if (get_op(inst) != 31) @@ -1050,6 +1048,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* Called with vcpu->arch.vcore->lock held */ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { @@ -1169,7 +1168,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_debug_inst(run, vcpu); + spin_lock(&vcpu->arch.vcore->lock); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1184,8 +1186,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, */ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: r = EMULATE_FAIL; - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) + if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && + cpu_has_feature(CPU_FTR_ARCH_300)) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_doorbell_instr(vcpu); + spin_lock(&vcpu->arch.vcore->lock); + } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -2889,13 +2896,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); + preempt_enable(); + for (sub = 0; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; post_guest_process(pvc, pvc == vc); } spin_lock(&vc->lock); - preempt_enable(); out: vc->vcore_state = VCORE_INACTIVE; diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 90644db9d38e..8e0cf8f186df 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ 
b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -529,6 +529,8 @@ static inline bool is_rm(void) unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_xirr(vcpu); @@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; vcpu->arch.gpr[5] = get_tb(); if (xive_enabled()) { if (is_rm()) @@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_ipoll(vcpu, server); @@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_ipi(vcpu, server, mfrr); @@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_cppr(vcpu, cppr); @@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_eoi(vcpu, xirr); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 4efe364f1188..4962d537c186 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -447,8 +447,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (kvm->arch.lpid)); - trace_tlbie(kvm->arch.lpid, 0, rbvalues[i], - kvm->arch.lpid, 0, 0, 0); } asm volatile("eieio; tlbsync; ptesync" : : : "memory"); kvm->arch.tlbie_lock = 0; @@ -458,8 +456,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (0)); - trace_tlbie(kvm->arch.lpid, 1, rbvalues[i], - 0, 0, 0, 0); } asm volatile("ptesync" : : : "memory"); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 42639fba89e8..2b3194b9608f 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -78,7 +78,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) mtmsrd r0,1 /* clear RI in MSR */ mtsrr0 r5 mtsrr1 r6 - RFI + RFI_TO_KERNEL kvmppc_call_hv_entry: ld r4, HSTATE_KVM_VCPU(r13) @@ -187,7 +187,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtmsrd r6, 1 /* Clear RI in MSR */ mtsrr0 r8 mtsrr1 r7 - RFI + RFI_TO_KERNEL /* Virtual-mode return */ .Lvirt_return: @@ -1131,8 +1131,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) - - hrfid + HRFI_TO_GUEST b . 
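Stepping back to the HPT resize rework in book3s_64_mmu_hv.c above: the old prepare_done flag is folded into resize->error, which now acts as the request state — -EBUSY while the worker is still allocating, another negative errno on failure, 0 once the new HPT is ready. The sketch below merely restates how the PREPARE ioctl reads that state for a matching in-flight request; the 100 ms estimate and the release-on-error path come from the hunk, the wrapper function itself is hypothetical.

/*
 * Condensed restatement (sketch, not new code) of the PREPARE-side
 * status handling for an existing resize request of the right order.
 */
static long example_resize_prepare_status(struct kvm *kvm,
                                          struct kvm_resize_hpt *resize)
{
        long ret = resize->error;

        if (ret == -EBUSY)      /* worker thread still allocating the new HPT */
                return 100;     /* estimated time to completion, in ms        */

        if (ret)                /* allocation failed: drop the request        */
                resize_hpt_release(kvm, resize);

        return ret;             /* 0: prepared, COMMIT may proceed            */
}

The COMMIT path above makes the matching check: anything other than 0 in resize->error aborts the commit, so a request can never be pivoted in while the worker still owns it.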
secondary_too_late: @@ -1388,6 +1387,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) blt deliver_guest_interrupt guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ + /* Save more register state */ + mfdar r6 + mfdsisr r7 + std r6, VCPU_DAR(r9) + stw r7, VCPU_DSISR(r9) + /* don't overwrite fault_dar/fault_dsisr if HDSI */ + cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE + beq mc_cont + std r6, VCPU_FAULT_DAR(r9) + stw r7, VCPU_FAULT_DSISR(r9) + + /* See if it is a machine check */ + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK + beq machine_check_realmode +mc_cont: +#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING + addi r3, r9, VCPU_TB_RMEXIT + mr r4, r9 + bl kvmhv_accumulate_time +#endif #ifdef CONFIG_KVM_XICS /* We are exiting, pull the VP from the XIVE */ lwz r0, VCPU_XIVE_PUSHED(r9) @@ -1425,26 +1444,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ eieio 1: #endif /* CONFIG_KVM_XICS */ - /* Save more register state */ - mfdar r6 - mfdsisr r7 - std r6, VCPU_DAR(r9) - stw r7, VCPU_DSISR(r9) - /* don't overwrite fault_dar/fault_dsisr if HDSI */ - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE - beq mc_cont - std r6, VCPU_FAULT_DAR(r9) - stw r7, VCPU_FAULT_DSISR(r9) - - /* See if it is a machine check */ - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK - beq machine_check_realmode -mc_cont: -#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - addi r3, r9, VCPU_TB_RMEXIT - mr r4, r9 - bl kvmhv_accumulate_time -#endif mr r3, r12 /* Increment exit count, poke other threads to exit */ diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 69a09444d46e..e2ef16198456 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE +#define HPTE_R_M _PAGE_COHERENT #endif static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) @@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.eaddr = eaddr; pte.vpage = eaddr >> 12; pte.page_size = MMU_PAGE_64K; + pte.wimg = HPTE_R_M; } switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 42a4b237df5f..34a5adeff084 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -46,6 +46,9 @@ #define FUNC(name) name +#define RFI_TO_KERNEL RFI +#define RFI_TO_GUEST RFI + .macro INTERRUPT_TRAMPOLINE intno .global kvmppc_trampoline_\intno @@ -141,7 +144,7 @@ kvmppc_handler_skip_ins: GET_SCRATCH0(r13) /* And get back into the code */ - RFI + RFI_TO_KERNEL #endif /* @@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline) ori r5, r5, MSR_EE mtsrr0 r7 mtsrr1 r6 - RFI + RFI_TO_KERNEL #include "book3s_segment.S" diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 2a2b96d53999..93a180ceefad 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -156,7 +156,7 @@ no_dcbz32_on: PPC_LL r9, SVCPU_R9(r3) PPC_LL r3, (SVCPU_R3)(r3) - RFI + RFI_TO_GUEST kvmppc_handler_trampoline_enter_end: @@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) cmpwi r12, BOOK3S_INTERRUPT_DOORBELL beqa BOOK3S_INTERRUPT_DOORBELL - RFI + RFI_TO_KERNEL kvmppc_handler_trampoline_exit_end: diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index bf457843e032..0d750d274c4e 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ 
-725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) /* Return the per-cpu state for state saving/migration */ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | - (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | + (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; } int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) @@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) /* * Restore P and Q. If the interrupt was pending, we - * force both P and Q, which will trigger a resend. + * force Q and !P, which will trigger a resend. * * That means that a guest that had both an interrupt * pending (queued) and Q set will restore with only @@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) * is perfectly fine as coalescing interrupts that haven't * been presented yet is always allowed. */ - if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) + if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) state->old_p = true; if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) state->old_q = true; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index ee279c7f4802..2b02d51d14d8 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r; - sigset_t sigsaved; if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; @@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) #endif } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (run->immediate_exit) r = -EINTR; else r = kvmppc_vcpu_run(run, vcpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index c9de03e0c1f1..096d4e4d31e6 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -21,6 +21,7 @@ #include #include #include +#include static int __patch_instruction(unsigned int *addr, unsigned int instr) { @@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr) * During early early boot patch_instruction is called * when text_poke_area is not ready, but we still need * to allow patching. We just do the plain old patching - * We use slab_is_available and per cpu read * via this_cpu_read - * of text_poke_area. 
Per-CPU areas might not be up early - * this can create problems with just using this_cpu_read() */ - if (!slab_is_available() || !this_cpu_read(text_poke_area)) + if (!this_cpu_read(*PTRRELOC(&text_poke_area))) return __patch_instruction(addr, instr); local_irq_save(flags); @@ -304,6 +302,11 @@ int instr_is_relative_branch(unsigned int instr) return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); } +int instr_is_relative_link_branch(unsigned int instr) +{ + return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK); +} + static unsigned long branch_iform_target(const unsigned int *instr) { signed long imm; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 41cf5ae273cf..a95ea007d654 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } +#ifdef CONFIG_PPC_BOOK3S_64 +void do_rfi_flush_fixups(enum l1d_flush_type types) +{ + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___rfi_flush_fixup), + end = PTRRELOC(&__stop___rfi_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + if (types & L1D_FLUSH_FALLBACK) + /* b .+16 to fallback flush */ + instrs[0] = 0x48000010; + + i = 0; + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 2, instrs[2]); + } + + printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); +} +#endif /* CONFIG_PPC_BOOK3S_64 */ + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 4797d08581ce..6e1e39035380 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address) return __bad_area(regs, address, SEGV_MAPERR); } +static noinline int bad_access(struct pt_regs *regs, unsigned long address) +{ + return __bad_area(regs, address, SEGV_ACCERR); +} + static int do_sigbus(struct pt_regs *regs, unsigned long address, unsigned int fault) { @@ -490,7 +495,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, good_area: if (unlikely(access_error(is_write, is_exec, vma))) - return bad_area(regs, address); + return bad_access(regs, address); /* * If for any reason at all we couldn't handle the fault, diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 3848af167df9..640cf566e986 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -47,7 +47,8 @@ DEFINE_RAW_SPINLOCK(native_tlbie_lock); -static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) +static inline unsigned long ___tlbie(unsigned long vpn, int psize, + int apsize, int ssize) { unsigned long va; unsigned int penc; @@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) : "memory"); break; } - trace_tlbie(0, 0, va, 
0, 0, 0, 0); + return va; +} + +static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) +{ + unsigned long rb; + + rb = ___tlbie(vpn, psize, apsize, ssize); + trace_tlbie(0, 0, rb, 0, 0, 0, 0); } static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) @@ -652,7 +661,7 @@ static void native_hpte_clear(void) if (hpte_v & HPTE_V_VALID) { hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); hptep->v = 0; - __tlbie(vpn, psize, apsize, ssize); + ___tlbie(vpn, psize, apsize, ssize); } } diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 558e9d3891bf..bd022d16745c 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct hstate *h = hstate_file(file); + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; - if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE)) - mm->context.addr_limit = TASK_SIZE; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > mm->task_size) + if (len > high_limit) return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } - if (flags & MAP_FIXED) { + if (unlikely(addr > mm->context.addr_limit && + mm->context.addr_limit != TASK_SIZE)) + mm->context.addr_limit = TASK_SIZE; + + if (fixed) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; @@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && + if (high_limit - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; - info.high_limit = current->mm->mmap_base; + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; - if (addr > DEFAULT_MAP_WINDOW) - info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW; - return vm_unmapped_area(&info); } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 5d78b193fec4..6d476a7b5611 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } + if (unlikely(addr > mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) mm->context.addr_limit = TASK_SIZE; - if (len > mm->task_size - mmap_min_addr) - return -ENOMEM; - - if (flags & MAP_FIXED) + if (fixed) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && addr >= mmap_min_addr && + if (high_limit - len >= addr && addr >= 
mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; + info.high_limit = high_limit; info.align_mask = 0; - if (unlikely(addr > DEFAULT_MAP_WINDOW)) - info.high_limit = mm->context.addr_limit; - else - info.high_limit = DEFAULT_MAP_WINDOW; - return vm_unmapped_area(&info); } @@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } + if (unlikely(addr > mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) mm->context.addr_limit = TASK_SIZE; - /* requested length too big for entire address space */ - if (len > mm->task_size - mmap_min_addr) - return -ENOMEM; - - if (flags & MAP_FIXED) + if (fixed) return addr; - /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma))) + if (high_limit - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = mm->mmap_base; + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = 0; - if (addr > DEFAULT_MAP_WINDOW) - info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW; - addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) return addr; diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 05e15386d4cb..b94fb62e60fd 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm) return index; /* - * We do switch_slb() early in fork, even before we setup the - * mm->context.addr_limit. Default to max task size so that we copy the - * default values to paca which will help us to handle slb miss early. + * In the case of exec, use the default limit, + * otherwise inherit it from the mm we are duplicating. 
*/ - mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; + if (!mm->context.addr_limit) + mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; /* * The old code would re-promote on fork, we don't do that when using diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index a51df9ef529d..a81279249bfb 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void) numa_cpu_lookup_table[cpu] = -1; } -static void update_numa_cpu_lookup_table(unsigned int cpu, int node) -{ - numa_cpu_lookup_table[cpu] = node; -} - static void map_cpu_to_node(int cpu, int node) { update_numa_cpu_lookup_table(cpu, node); diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 39c252b54d16..17ae5c15a9e0 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -169,6 +170,16 @@ void radix__mark_rodata_ro(void) { unsigned long start, end; + /* + * mark_rodata_ro() will mark itself as !writable at some point. + * Due to DD1 workaround in radix__pte_update(), we'll end up with + * an invalid pte and the system will crash quite severly. + */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n"); + return; + } + start = (unsigned long)_stext; end = (unsigned long)__init_begin; @@ -661,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud) pud_clear(pud); } +struct change_mapping_params { + pte_t *pte; + unsigned long start; + unsigned long end; + unsigned long aligned_start; + unsigned long aligned_end; +}; + +static int stop_machine_change_mapping(void *data) +{ + struct change_mapping_params *params = + (struct change_mapping_params *)data; + + if (!data) + return -1; + + spin_unlock(&init_mm.page_table_lock); + pte_clear(&init_mm, params->aligned_start, params->pte); + create_physical_mapping(params->aligned_start, params->start); + create_physical_mapping(params->end, params->aligned_end); + spin_lock(&init_mm.page_table_lock); + return 0; +} + static void remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end) { @@ -689,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, } } +/* + * clear the pte and potentially split the mapping helper + */ +static void split_kernel_mapping(unsigned long addr, unsigned long end, + unsigned long size, pte_t *pte) +{ + unsigned long mask = ~(size - 1); + unsigned long aligned_start = addr & mask; + unsigned long aligned_end = addr + size; + struct change_mapping_params params; + bool split_region = false; + + if ((end - addr) < size) { + /* + * We're going to clear the PTE, but not flushed + * the mapping, time to remap and flush. The + * effects if visible outside the processor or + * if we are running in code close to the + * mapping we cleared, we are in trouble. 
+ */ + if (overlaps_kernel_text(aligned_start, addr) || + overlaps_kernel_text(end, aligned_end)) { + /* + * Hack, just return, don't pte_clear + */ + WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel " + "text, not splitting\n", addr, end); + return; + } + split_region = true; + } + + if (split_region) { + params.pte = pte; + params.start = addr; + params.end = end; + params.aligned_start = addr & ~(size - 1); + params.aligned_end = min_t(unsigned long, aligned_end, + (unsigned long)__va(memblock_end_of_DRAM())); + stop_machine(stop_machine_change_mapping, ¶ms, NULL); + return; + } + + pte_clear(&init_mm, addr, pte); +} + static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end) { @@ -704,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, continue; if (pmd_huge(*pmd)) { - if (!IS_ALIGNED(addr, PMD_SIZE) || - !IS_ALIGNED(next, PMD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pmd); + split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); continue; } @@ -735,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, continue; if (pud_huge(*pud)) { - if (!IS_ALIGNED(addr, PUD_SIZE) || - !IS_ALIGNED(next, PUD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pud); + split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud); continue; } @@ -767,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end) continue; if (pgd_huge(*pgd)) { - if (!IS_ALIGNED(addr, PGDIR_SIZE) || - !IS_ALIGNED(next, PGDIR_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pgd); + split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd); continue; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index ac0717a90ca6..12f95b1f7d07 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, if (old & PATB_HR) { asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); } else { asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 45f6740dd407..a4f93699194b 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, { struct vm_area_struct *vma; - if ((mm->task_size - len) < addr) + if ((mm->context.addr_limit - len) < addr) return 0; vma = find_vma(mm, addr); return (!vma || (addr + len) <= vm_start_gap(vma)); @@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; - if (mm->task_size <= SLICE_LOW_TOP) + if (mm->context.addr_limit <= SLICE_LOW_TOP) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) @@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, struct slice_mask compat_mask; int fixed = (flags & MAP_FIXED); int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); + unsigned long page_size = 1UL << pshift; struct mm_struct *mm = current->mm; unsigned long newaddr; unsigned long 
high_limit; - /* - * Check if we need to expland slice area. - */ - if (unlikely(addr > mm->context.addr_limit && - mm->context.addr_limit != TASK_SIZE)) { - mm->context.addr_limit = TASK_SIZE; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (len & (page_size - 1)) + return -EINVAL; + if (fixed) { + if (addr & (page_size - 1)) + return -EINVAL; + if (addr > high_limit - len) + return -ENOMEM; + } + + if (high_limit > mm->context.addr_limit) { + mm->context.addr_limit = high_limit; on_each_cpu(slice_flush_segments, mm, 1); } - /* - * This mmap request can allocate upt to 512TB - */ - if (addr > DEFAULT_MAP_WINDOW) - high_limit = mm->context.addr_limit; - else - high_limit = DEFAULT_MAP_WINDOW; + /* * init different masks */ @@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* Sanity checks */ BUG_ON(mm->task_size == 0); + BUG_ON(mm->context.addr_limit == 0); VM_BUG_ON(radix_enabled()); slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n", addr, len, flags, topdown); - if (len > mm->task_size) - return -ENOMEM; - if (len & ((1ul << pshift) - 1)) - return -EINVAL; - if (fixed && (addr & ((1ul << pshift) - 1))) - return -EINVAL; - if (fixed && addr > (mm->task_size - len)) - return -ENOMEM; - /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { - addr = _ALIGN_UP(addr, 1ul << pshift); + addr = _ALIGN_UP(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ - if (addr > mm->task_size - len || + if (addr > high_limit - len || !slice_area_is_free(mm, addr, len)) addr = 0; } diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index d304028641a2..4b295cfd5f7e 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -453,14 +453,12 @@ void radix__flush_tlb_all(void) */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory"); - trace_tlbie(0, 0, rb, rs, ric, prs, r); /* * now flush host entires by passing PRS = 0 and LPID == 0 */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); asm volatile("eieio; tlbsync; ptesync": : :"memory"); - trace_tlbie(0, 0, rb, 0, ric, prs, r); } void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index a66e64b0b251..bd0786c23109 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -241,6 +241,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * goto out; */ PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)); + PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31); PPC_CMPLW(b2p_index, b2p[TMP_REG_1]); PPC_BCC(COND_GE, out); @@ -762,7 +763,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, func = (u8 *) __bpf_call_base + imm; /* Save skb pointer if we need to re-cache skb data */ - if (bpf_helper_changes_pkt_data(func)) + if ((ctx->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data(func)) PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx)); bpf_jit_emit_func_call(image, ctx, (u64)func); @@ -771,7 +773,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_MR(b2p[BPF_REG_0], 
3); /* refresh skb cache */ - if (bpf_helper_changes_pkt_data(func)) { + if ((ctx->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data(func)) { /* reload skb pointer to r3 */ PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx)); bpf_jit_emit_skb_loads(image, ctx); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 9e3da168d54c..fce545774d50 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr) int ret; __u64 target; - if (is_kernel_addr(addr)) - return branch_target((unsigned int *)addr); + if (is_kernel_addr(addr)) { + if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) + return 0; + + return branch_target(&instr); + } /* Userspace: need copy instruction here then translate it */ pagefault_disable(); @@ -1415,7 +1419,7 @@ static int collect_events(struct perf_event *group, int max_count, int n = 0; struct perf_event *event; - if (!is_software_event(group)) { + if (group->pmu->task_ctx_nr == perf_hw_context) { if (n >= max_count) return -1; ctrs[n] = group; @@ -1423,7 +1427,7 @@ static int collect_events(struct perf_event *group, int max_count, events[n++] = group->hw.config; } list_for_each_entry(event, &group->sibling_list, group_entry) { - if (!is_software_event(event) && + if (event->pmu->task_ctx_nr == perf_hw_context && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 9c88b82f6229..72238eedc360 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -540,7 +540,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2) { if (s1 < s2) return 1; - if (s2 > s1) + if (s1 > s2) return -1; return memcmp(d1, d2, s1); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 36344117c680..da6ba9ba73ed 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -308,6 +308,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu) if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask)) return 0; + /* + * Check whether nest_imc is registered. We could end up here if the + * cpuhotplug callback registration fails. i.e, callback invokes the + * offline path for all successfully registered nodes. At this stage, + * nest_imc pmu will not be registered and we should return here. + * + * We return with a zero since this is not an offline failure. And + * cpuhp_setup_state() returns the actual failure reason to the caller, + * which in turn will call the cleanup routine. + */ + if (!nest_pmus) + return 0; + /* * Now that this cpu is one of the designated, * find a next cpu a) which is online and b) in same chip. @@ -467,7 +480,7 @@ static int nest_imc_event_init(struct perf_event *event) * Nest HW counter memory resides in a per-chip reserve-memory (HOMER). * Get the base memory addresss for this cpu. */ - chip_id = topology_physical_package_id(event->cpu); + chip_id = cpu_to_chip_id(event->cpu); pcni = pmu->mem_info; do { if (pcni->id == chip_id) { @@ -524,19 +537,19 @@ static int nest_imc_event_init(struct perf_event *event) */ static int core_imc_mem_init(int cpu, int size) { - int phys_id, rc = 0, core_id = (cpu / threads_per_core); + int nid, rc = 0, core_id = (cpu / threads_per_core); struct imc_mem_info *mem_info; /* * alloc_pages_node() will allocate memory for core in the * local node only. 
*/ - phys_id = topology_physical_package_id(cpu); + nid = cpu_to_node(cpu); mem_info = &core_imc_pmu->mem_info[core_id]; mem_info->id = core_id; /* We need only vbase for core counters */ - mem_info->vbase = page_address(alloc_pages_node(phys_id, + mem_info->vbase = page_address(alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | __GFP_NOWARN, get_order(size))); if (!mem_info->vbase) @@ -797,14 +810,14 @@ static int core_imc_event_init(struct perf_event *event) static int thread_imc_mem_alloc(int cpu_id, int size) { u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id); - int phys_id = topology_physical_package_id(cpu_id); + int nid = cpu_to_node(cpu_id); if (!local_mem) { /* * This case could happen only once at start, since we dont * free the memory in cpu offline path. */ - local_mem = page_address(alloc_pages_node(phys_id, + local_mem = page_address(alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | __GFP_NOWARN, get_order(size))); if (!local_mem) diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c index cf33769a7b72..45b3feb8aa2f 100644 --- a/arch/powerpc/platforms/powernv/opal-async.c +++ b/arch/powerpc/platforms/powernv/opal-async.c @@ -39,18 +39,18 @@ int __opal_async_get_token(void) int token; spin_lock_irqsave(&opal_async_comp_lock, flags); - token = find_first_bit(opal_async_complete_map, opal_max_async_tokens); + token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens); if (token >= opal_max_async_tokens) { token = -EBUSY; goto out; } - if (__test_and_set_bit(token, opal_async_token_map)) { + if (!__test_and_clear_bit(token, opal_async_complete_map)) { token = -EBUSY; goto out; } - __clear_bit(token, opal_async_complete_map); + __set_bit(token, opal_async_token_map); out: spin_unlock_irqrestore(&opal_async_comp_lock, flags); diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 21f6531fae20..b150f4deaccf 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -191,8 +191,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev) break; } - if (!imc_pmu_create(imc_dev, pmu_count, domain)) - pmu_count++; + if (!imc_pmu_create(imc_dev, pmu_count, domain)) { + if (domain == IMC_DOMAIN_NEST) + pmu_count++; + } } return 0; diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index bbb73aa0eb8f..7966a314d93a 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -36,13 +36,62 @@ #include #include #include +#include #include "powernv.h" +static void pnv_setup_rfi_flush(void) +{ + struct device_node *np, *fw_features; + enum l1d_flush_type type; + int enable; + + /* Default to fallback in case fw-features are not available */ + type = L1D_FLUSH_FALLBACK; + enable = 1; + + np = of_find_node_by_name(NULL, "ibm,opal"); + fw_features = of_get_child_by_name(np, "fw-features"); + of_node_put(np); + + if (fw_features) { + np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); + if (np && of_property_read_bool(np, "enabled")) + type = L1D_FLUSH_MTTRIG; + + of_node_put(np); + + np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); + if (np && of_property_read_bool(np, "enabled")) + type = L1D_FLUSH_ORI; + + of_node_put(np); + + /* Enable unless firmware says NOT to */ + enable = 2; + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); + if (np && 
of_property_read_bool(np, "disabled")) + enable--; + + of_node_put(np); + + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); + if (np && of_property_read_bool(np, "disabled")) + enable--; + + of_node_put(np); + of_node_put(fw_features); + } + + setup_rfi_flush(type, enable > 0); +} + static void __init pnv_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); + pnv_setup_rfi_flush(); + /* Initialize SMP */ pnv_smp_init(); @@ -319,7 +368,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu) { unsigned long ret_freq; - ret_freq = cpufreq_quick_get(cpu) * 1000ul; + ret_freq = cpufreq_get(cpu) * 1000ul; /* * If the backend cpufreq driver does not exist, diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 9dabea6e1443..6244bc849469 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void) ps3_sys_manager_halt(); /* never returns */ } +static void ps3_panic(char *str) +{ + DBG("%s:%d %s\n", __func__, __LINE__, str); + + smp_send_stop(); + printk("\n"); + printk(" System does not reboot automatically.\n"); + printk(" Please press POWER button.\n"); + printk("\n"); + + while(1) + lv1_pause(1); +} + #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \ defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE) static void __init prealloc(struct ps3_prealloc *p) @@ -255,6 +269,7 @@ define_machine(ps3) { .probe = ps3_probe, .setup_arch = ps3_setup_arch, .init_IRQ = ps3_init_IRQ, + .panic = ps3_panic, .get_boot_time = ps3_get_boot_time, .set_dabr = ps3_set_dabr, .calibrate_decr = ps3_calibrate_decr, diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index e45b5f10645a..e9149d05d30b 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -586,11 +586,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr, static CLASS_ATTR_RW(dlpar); -static int __init pseries_dlpar_init(void) +int __init dlpar_workqueue_init(void) { + if (pseries_hp_wq) + return 0; + pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", - WQ_UNBOUND, 1); + WQ_UNBOUND, 1); + + return pseries_hp_wq ? 
0 : -ENOMEM; +} + +static int __init dlpar_sysfs_init(void) +{ + int rc; + + rc = dlpar_workqueue_init(); + if (rc) + return rc; + return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); } -machine_device_initcall(pseries, pseries_dlpar_init); +machine_device_initcall(pseries, dlpar_sysfs_init); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fadb95efbb9e..b1ac8ac38434 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "pseries.h" #include "offline_states.h" @@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np) BUG_ON(cpu_online(cpu)); set_cpu_present(cpu, false); set_hard_smp_processor_id(cpu, -1); + update_numa_cpu_lookup_table(cpu, -1); break; } if (cpu >= nr_cpu_ids) diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 4470a3194311..1ae1d9f4dbe9 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void) return CMO_PageSize; } +int dlpar_workqueue_init(void); + #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 4923ffe230cf..5e1ef9150182 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); static irqreturn_t ras_error_interrupt(int irq, void *dev_id); +/* + * Enable the hotplug interrupt late because processing them may touch other + * devices or systems (e.g. hugepages) that have not been initialized at the + * subsys stage. + */ +int __init init_ras_hotplug_IRQ(void) +{ + struct device_node *np; + + /* Hotplug Events */ + np = of_find_node_by_path("/event-sources/hot-plug-events"); + if (np != NULL) { + if (dlpar_workqueue_init() == 0) + request_event_sources_irqs(np, ras_hotplug_interrupt, + "RAS_HOTPLUG"); + of_node_put(np); + } + + return 0; +} +machine_late_initcall(pseries, init_ras_hotplug_IRQ); + /* * Initialize handlers for the set of interrupts caused by hardware errors * and power system events. 
@@ -66,14 +88,6 @@ static int __init init_ras_IRQ(void) of_node_put(np); } - /* Hotplug Events */ - np = of_find_node_by_path("/event-sources/hot-plug-events"); - if (np != NULL) { - request_event_sources_irqs(np, ras_hotplug_interrupt, - "RAS_HOTPLUG"); - of_node_put(np); - } - /* EPOW Events */ np = of_find_node_by_path("/event-sources/epow-events"); if (np != NULL) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 5f1beb8367ac..ae4f596273b5 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void) of_pci_check_probe_only(); } +static void pseries_setup_rfi_flush(void) +{ + struct h_cpu_char_result result; + enum l1d_flush_type types; + bool enable; + long rc; + + /* Enable by default */ + enable = true; + + rc = plpar_get_cpu_characteristics(&result); + if (rc == H_SUCCESS) { + types = L1D_FLUSH_NONE; + + if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) + types |= L1D_FLUSH_MTTRIG; + if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) + types |= L1D_FLUSH_ORI; + + /* Use fallback if nothing set in hcall */ + if (types == L1D_FLUSH_NONE) + types = L1D_FLUSH_FALLBACK; + + if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) + enable = false; + } else { + /* Default to fallback if case hcall is not available */ + types = L1D_FLUSH_FALLBACK; + } + + setup_rfi_flush(types, enable); +} + static void __init pSeries_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); @@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void) fwnmi_init(); + pseries_setup_rfi_flush(); + /* By default, only probe PCI (can be overridden by rtas_pci) */ pci_add_flags(PCI_PROBE_ONLY); @@ -726,6 +761,7 @@ define_machine(pseries) { .pcibios_fixup = pSeries_final_fixup, .restart = rtas_restart, .halt = rtas_halt, + .panic = rtas_os_term, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 12277bc9fd9e..d86938260a86 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1592,6 +1592,8 @@ ATTRIBUTE_GROUPS(vio_dev); void vio_unregister_device(struct vio_dev *viodev) { device_unregister(&viodev->dev); + if (viodev->family == VDEVICE) + irq_dispose_mapping(viodev->irq); } EXPORT_SYMBOL(vio_unregister_device); diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 16f1edd78c40..535cf1f6941c 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -846,12 +846,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) u32 ipic_get_mcp_status(void) { - return ipic_read(primary_ipic->regs, IPIC_SERMR); + return ipic_read(primary_ipic->regs, IPIC_SERSR); } void ipic_clear_mcp_status(u32 mask) { - ipic_write(primary_ipic->regs, IPIC_SERMR, mask); + ipic_write(primary_ipic->regs, IPIC_SERSR, mask); } /* Return an interrupt vector or 0 if no interrupt is pending. 
*/ diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index d9c4c9366049..091f1d0d0af1 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size); if (rc) { - pr_err("Error %lld getting queue info prio %d\n", rc, prio); + pr_err("Error %lld getting queue info CPU %d prio %d\n", rc, + target, prio); rc = -EIO; goto fail; } @@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, /* Configure and enable the queue in HW */ rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order); if (rc) { - pr_err("Error %lld setting queue for prio %d\n", rc, prio); + pr_err("Error %lld setting queue for CPU %d prio %d\n", rc, + target, prio); rc = -EIO; } else { q->qpage = qpage; @@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, if (IS_ERR(qpage)) return PTR_ERR(qpage); - return xive_spapr_configure_queue(cpu, q, prio, qpage, - xive_queue_shift); + return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu), + q, prio, qpage, xive_queue_shift); } static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, @@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, struct xive_q *q = &xc->queue[prio]; unsigned int alloc_order; long rc; + int hw_cpu = get_hard_smp_processor_id(cpu); - rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0); + rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0); if (rc) - pr_err("Error %ld setting queue for prio %d\n", rc, prio); + pr_err("Error %ld setting queue for CPU %d prio %d\n", rc, + hw_cpu, prio); alloc_order = xive_alloc_order(xive_queue_shift); free_pages((unsigned long)q->qpage, alloc_order); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 33351c6704b1..2c8b325591cc 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -530,14 +530,19 @@ static int xmon_core(struct pt_regs *regs, int fromipi) waiting: secondary = 1; + spin_begin(); while (secondary && !xmon_gate) { if (in_xmon == 0) { - if (fromipi) + if (fromipi) { + spin_end(); goto leave; + } secondary = test_and_set_bit(0, &in_xmon); } - barrier(); + spin_cpu_relax(); + touch_nmi_watchdog(); } + spin_end(); if (!secondary && !xmon_gate) { /* we are the first cpu to come in */ @@ -568,21 +573,25 @@ static int xmon_core(struct pt_regs *regs, int fromipi) mb(); xmon_gate = 1; barrier(); + touch_nmi_watchdog(); } cmdloop: while (in_xmon) { if (secondary) { + spin_begin(); if (cpu == xmon_owner) { if (!test_and_set_bit(0, &xmon_taken)) { secondary = 0; + spin_end(); continue; } /* missed it */ while (cpu == xmon_owner) - barrier(); + spin_cpu_relax(); } - barrier(); + spin_cpu_relax(); + touch_nmi_watchdog(); } else { cmd = cmds(regs); if (cmd != 0) { @@ -2475,6 +2484,11 @@ static void dump_xives(void) unsigned long num; int c; + if (!xive_enabled()) { + printf("Xive disabled on this system\n"); + return; + } + c = inchar(); if (c == 'a') { dump_all_xives(); diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 992e630c227b..6f4985f357c6 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c @@ -238,6 +238,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32", .cra_driver_name = "crc32-vx", .cra_priority = 200, + .cra_flags = 
CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, @@ -258,6 +259,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32be", .cra_driver_name = "crc32be-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, @@ -278,6 +280,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32c", .cra_driver_name = "crc32c-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 1b60eb3676d5..5e6a63641a5f 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -263,7 +263,6 @@ typedef struct compat_siginfo { #define si_overrun _sifields._timer._overrun #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 9a3cb3983c01..1a61b1b997f2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -194,13 +194,14 @@ struct arch_elf_state { #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* - * This is the base location for PIE (ET_DYN with INTERP) loads. On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address - * space open for things that want to use the area for 32-bit pointers. - */ -#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ - 0x100000000UL) +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 64-bit + tasks are aligned to 4GB. */ +#define ELF_ET_DYN_BASE (is_compat_task() ? \ + (STACK_TOP / 3 * 2) : \ + (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
*/ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 43607bb12cc2..a6cc744ff5fb 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste || test_thread_flag(TIF_PGSTE) || - current->mm->context.alloc_pgste; + (current->mm && current->mm->context.alloc_pgste); mm->context.has_pgste = 0; mm->context.use_skey = 0; mm->context.use_cmma = 0; diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 419e83fa4721..ba22a6ea51a1 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range); int zpci_load(u64 *data, u64 req, u64 offset); int zpci_store(u64 data, u64 req, u64 offset); int zpci_store_block(const u64 *data, u64 req, u64 offset); -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index ea8896ba5afc..2502d05403ef 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h @@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, load_runtime_instr_cb(&runtime_instr_empty_cb); } -void exit_thread_runtime_instr(void); +struct task_struct; + +void runtime_instr_release(struct task_struct *tsk); #endif /* _RUNTIME_INSTR_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index c21fe1d57c00..c61b2cc1a8a8 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs) asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); } -#define switch_to(prev,next,last) do { \ - if (prev->mm) { \ - save_fpu_regs(); \ - save_access_regs(&prev->thread.acrs[0]); \ - save_ri_cb(prev->thread.ri_cb); \ - save_gs_cb(prev->thread.gs_cb); \ - } \ - if (next->mm) { \ - update_cr_regs(next); \ - set_cpu_flag(CIF_FPU); \ - restore_access_regs(&next->thread.acrs[0]); \ - restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ - restore_gs_cb(next->thread.gs_cb); \ - } \ - prev = __switch_to(prev,next); \ +#define switch_to(prev, next, last) do { \ + /* save_fpu_regs() sets the CIF_FPU flag, which enforces \ + * a restore of the floating point / vector registers as \ + * soon as the next task returns to user space \ + */ \ + save_fpu_regs(); \ + save_access_regs(&prev->thread.acrs[0]); \ + save_ri_cb(prev->thread.ri_cb); \ + save_gs_cb(prev->thread.gs_cb); \ + update_cr_regs(next); \ + restore_access_regs(&next->thread.acrs[0]); \ + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ + restore_gs_cb(next->thread.gs_cb); \ + prev = __switch_to(prev, next); \ } while (0) #endif /* __ASM_SWITCH_TO_H */ diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 55de4eb73604..de0a8b17bcaa 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -51,6 +51,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu); static inline void topology_init_early(void) { } static inline void topology_schedule_update(void) { } static inline int topology_cpu_init(struct cpu *cpu) { return 0; } +static inline int topology_cpu_dedicated(int cpu_nr) { return 0; } static inline void 
topology_expect_change(void) { } #endif /* CONFIG_SCHED_TOPOLOGY */ diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h index 967aad390105..9e62587d9472 100644 --- a/arch/s390/include/uapi/asm/virtio-ccw.h +++ b/arch/s390/include/uapi/asm/virtio-ccw.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* * Definitions for virtio-ccw devices. * diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index f04db3779b34..79b7a3438d54 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) { - return sys_setgid((gid_t)gid); + return sys_setgid(low2highgid(gid)); } COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) @@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) { - return sys_setuid((uid_t)uid); + return sys_setuid(low2highuid(uid)); } COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) @@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) { - return sys_setfsuid((uid_t)uid); + return sys_setfsuid(low2highuid(uid)); } COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) { - return sys_setfsgid((gid_t)gid); + return sys_setfsgid(low2highgid(gid)); } static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) @@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis return retval; } + groups_sort(group_info); retval = set_current_groups(group_info); put_group_info(group_info); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f7e82302a71e..2394557653d5 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = { { "vfsq", 0xce, INSTR_VRR_VV000MM }, { "vfs", 0xe2, INSTR_VRR_VVV00MM }, { "vftci", 0x4a, INSTR_VRI_VVIMM }, + { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_eb[] = { @@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs) { char *mode = user_mode(regs) ? 
"User" : "Krnl"; unsigned char code[64]; - char buffer[64], *ptr; + char buffer[128], *ptr; mm_segment_t old_fs; unsigned long addr; int start, end, opsize, hops, i; @@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs) start += opsize; pr_cont("%s", buffer); ptr = buffer; - ptr += sprintf(ptr, "\n "); + ptr += sprintf(ptr, "\n\t "); hops++; } pr_cont("\n"); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b945448b9eae..f7b280f0ab16 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -375,8 +375,10 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; - if (test_facility(50) && test_facility(73)) + if (test_facility(50) && test_facility(73)) { S390_lowcore.machine_flags |= MACHINE_FLAG_TE; + __ctl_set_bit(0, 55); + } if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) { diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c index bff39b66c9ff..9ee794e14f33 100644 --- a/arch/s390/kernel/guarded_storage.c +++ b/arch/s390/kernel/guarded_storage.c @@ -14,9 +14,11 @@ void exit_thread_gs(void) { + preempt_disable(); kfree(current->thread.gs_cb); kfree(current->thread.gs_bc_cb); current->thread.gs_cb = current->thread.gs_bc_cb = NULL; + preempt_enable(); } static int gs_enable(void) diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 8e622bb52f7a..d1a0e2c521d7 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -799,6 +799,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, /* copy and convert to ebcdic */ memcpy(ipb->hdr.loadparm, buf, lp_len); ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); + ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID; return len; } diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b0ba2c26b45e..d6f7782e75c9 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data) s390_reset_system(); data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); + __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */ /* Call the moving routine */ (*data_mover)(&image->head, image->start); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index a4a84fb08046..7d4c5500c6c2 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -49,10 +49,8 @@ extern void kernel_thread_starter(void); */ void exit_thread(struct task_struct *tsk) { - if (tsk == current) { - exit_thread_runtime_instr(); + if (tsk == current) exit_thread_gs(); - } } void flush_thread(void) @@ -65,6 +63,7 @@ void release_thread(struct task_struct *dead_task) void arch_release_task_struct(struct task_struct *tsk) { + runtime_instr_release(tsk); } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) @@ -100,6 +99,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); clear_tsk_thread_flag(p, TIF_SINGLE_STEP); + p->thread.per_flags = 0; /* Initialize per thread user and system timer values */ p->thread.user_timer = 0; p->thread.guest_timer = 0; diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 1427d60ce628..56e0190d6e65 100644 --- a/arch/s390/kernel/ptrace.c +++ 
b/arch/s390/kernel/ptrace.c @@ -1172,26 +1172,37 @@ static int s390_gs_cb_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct gs_cb *data = target->thread.gs_cb; + struct gs_cb gs_cb = { }, *data = NULL; int rc; if (!MACHINE_HAS_GS) return -ENODEV; - if (!data) { + if (!target->thread.gs_cb) { data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - data->gsd = 25; - target->thread.gs_cb = data; - if (target == current) - __ctl_set_bit(2, 4); - } else if (target == current) { - save_gs_cb(data); } + if (!target->thread.gs_cb) + gs_cb.gsd = 25; + else if (target == current) + save_gs_cb(&gs_cb); + else + gs_cb = *target->thread.gs_cb; rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - data, 0, sizeof(struct gs_cb)); - if (target == current) - restore_gs_cb(data); + &gs_cb, 0, sizeof(gs_cb)); + if (rc) { + kfree(data); + return -EFAULT; + } + preempt_disable(); + if (!target->thread.gs_cb) + target->thread.gs_cb = data; + *target->thread.gs_cb = gs_cb; + if (target == current) { + __ctl_set_bit(2, 4); + restore_gs_cb(target->thread.gs_cb); + } + preempt_enable(); return rc; } diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index ca37e5d5b40c..9c2c96da23d0 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -29,7 +29,6 @@ ENTRY(relocate_kernel) basr %r13,0 # base address .base: - stnsm sys_msk-.base(%r13),0xfb # disable DAT stctg %c0,%c15,ctlregs-.base(%r13) stmg %r0,%r15,gprregs-.base(%r13) lghi %r0,3 @@ -103,8 +102,6 @@ ENTRY(relocate_kernel) .align 8 load_psw: .long 0x00080000,0x80000000 - sys_msk: - .quad 0 ctlregs: .rept 16 .quad 0 diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 32aefb215e59..94c9ba72cf83 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -21,11 +21,24 @@ /* empty control block to disable RI by loading it */ struct runtime_instr_cb runtime_instr_empty_cb; +void runtime_instr_release(struct task_struct *tsk) +{ + kfree(tsk->thread.ri_cb); +} + static void disable_runtime_instr(void) { - struct pt_regs *regs = task_pt_regs(current); + struct task_struct *task = current; + struct pt_regs *regs; + if (!task->thread.ri_cb) + return; + regs = task_pt_regs(task); + preempt_disable(); load_runtime_instr_cb(&runtime_instr_empty_cb); + kfree(task->thread.ri_cb); + task->thread.ri_cb = NULL; + preempt_enable(); /* * Make sure the RI bit is deleted from the PSW. 
If the user did not @@ -46,17 +59,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->valid = 1; } -void exit_thread_runtime_instr(void) -{ - struct task_struct *task = current; - - if (!task->thread.ri_cb) - return; - disable_runtime_instr(); - kfree(task->thread.ri_cb); - task->thread.ri_cb = NULL; -} - SYSCALL_DEFINE1(s390_runtime_instr, int, command) { struct runtime_instr_cb *cb; @@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { - preempt_disable(); - exit_thread_runtime_instr(); - preempt_enable(); + disable_runtime_instr(); return 0; } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 092c4154abd7..7ffaf9fd6d19 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -54,6 +54,7 @@ #include #include #include +#include #include "entry.h" enum { diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index d39f121e67a9..bc905ae1d5c8 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg) SYSCALL(sys_sendmmsg,compat_sys_sendmmsg) SYSCALL(sys_socket,sys_socket) SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */ -SYSCALL(sys_bind,sys_bind) -SYSCALL(sys_connect,sys_connect) +SYSCALL(sys_bind,compat_sys_bind) +SYSCALL(sys_connect,compat_sys_connect) SYSCALL(sys_listen,sys_listen) -SYSCALL(sys_accept4,sys_accept4) +SYSCALL(sys_accept4,compat_sys_accept4) SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */ SYSCALL(sys_setsockopt,compat_sys_setsockopt) SYSCALL(sys_getsockname,compat_sys_getsockname) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index a832ad031cee..5185be314661 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -173,8 +173,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) static int ckc_irq_pending(struct kvm_vcpu *vcpu) { - if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) + const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); + const u64 ckc = vcpu->arch.sie_block->ckc; + + if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if ((s64)ckc >= (s64)now) + return 0; + } else if (ckc >= now) { return 0; + } return ckc_interrupts_enabled(vcpu); } @@ -1004,13 +1011,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) static u64 __calculate_sltime(struct kvm_vcpu *vcpu) { - u64 now, cputm, sltime = 0; + const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); + const u64 ckc = vcpu->arch.sie_block->ckc; + u64 cputm, sltime = 0; if (ckc_interrupts_enabled(vcpu)) { - now = kvm_s390_get_tod_clock_fast(vcpu->kvm); - sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); - /* already expired or overflow? 
*/ - if (!sltime || vcpu->arch.sie_block->ckc <= now) + if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if ((s64)now < (s64)ckc) + sltime = tod_to_ns((s64)ckc - (s64)now); + } else if (now < ckc) { + sltime = tod_to_ns(ckc - now); + } + /* already expired */ + if (!sltime) return 0; if (cpu_timer_interrupts_enabled(vcpu)) { cputm = kvm_s390_get_cpu_timer(vcpu); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 40d0a1a97889..0fa3a788dd20 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -169,6 +169,28 @@ int kvm_arch_hardware_enable(void) static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, unsigned long end); +static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta) +{ + u8 delta_idx = 0; + + /* + * The TOD jumps by delta, we have to compensate this by adding + * -delta to the epoch. + */ + delta = -delta; + + /* sign-extension - we're adding to signed values below */ + if ((s64)delta < 0) + delta_idx = -1; + + scb->epoch += delta; + if (scb->ecd & ECD_MEF) { + scb->epdx += delta_idx; + if (scb->epoch < delta) + scb->epdx += 1; + } +} + /* * This callback is executed during stop_machine(). All CPUs are therefore * temporarily stopped. In order not to change guest behavior, we have to @@ -184,13 +206,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, unsigned long long *delta = v; list_for_each_entry(kvm, &vm_list, vm_list) { - kvm->arch.epoch -= *delta; kvm_for_each_vcpu(i, vcpu, kvm) { - vcpu->arch.sie_block->epoch -= *delta; + kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); + if (i == 0) { + kvm->arch.epoch = vcpu->arch.sie_block->epoch; + kvm->arch.epdx = vcpu->arch.sie_block->epdx; + } if (vcpu->arch.cputm_enabled) vcpu->arch.cputm_start += *delta; if (vcpu->arch.vsie_block) - vcpu->arch.vsie_block->epoch -= *delta; + kvm_clock_sync_scb(vcpu->arch.vsie_block, + *delta); } } return NOTIFY_OK; @@ -768,7 +794,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) /* * Must be called with kvm->srcu held to avoid races on memslots, and with - * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration. + * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration. */ static int kvm_s390_vm_start_migration(struct kvm *kvm) { @@ -794,11 +820,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) if (kvm->arch.use_cmma) { /* - * Get the last slot. They should be sorted by base_gfn, so the - * last slot is also the one at the end of the address space. - * We have verified above that at least one slot is present. + * Get the first slot. They are reverse sorted by base_gfn, so + * the first slot is also the one at the end of the address + * space. We have verified above that at least one slot is + * present. */ - ms = slots->memslots + slots->used_slots - 1; + ms = slots->memslots; /* round up so we only use full longs */ ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); /* allocate enough bytes to store all the bits */ @@ -823,7 +850,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) } /* - * Must be called with kvm->lock to avoid races with ourselves and + * Must be called with kvm->slots_lock to avoid races with ourselves and * kvm_s390_vm_start_migration. 
*/ static int kvm_s390_vm_stop_migration(struct kvm *kvm) @@ -838,6 +865,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm) if (kvm->arch.use_cmma) { kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); + /* We have to wait for the essa emulation to finish */ + synchronize_srcu(&kvm->srcu); vfree(mgs->pgste_bitmap); } kfree(mgs); @@ -847,14 +876,12 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm, struct kvm_device_attr *attr) { - int idx, res = -ENXIO; + int res = -ENXIO; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->slots_lock); switch (attr->attr) { case KVM_S390_VM_MIGRATION_START: - idx = srcu_read_lock(&kvm->srcu); res = kvm_s390_vm_start_migration(kvm); - srcu_read_unlock(&kvm->srcu, idx); break; case KVM_S390_VM_MIGRATION_STOP: res = kvm_s390_vm_stop_migration(kvm); @@ -862,7 +889,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm, default: break; } - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->slots_lock); return res; } @@ -887,12 +914,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) return -EFAULT; - if (test_kvm_facility(kvm, 139)) - kvm_s390_set_tod_clock_ext(kvm, &gtod); - else if (gtod.epoch_idx == 0) - kvm_s390_set_tod_clock(kvm, gtod.tod); - else + if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) return -EINVAL; + kvm_s390_set_tod_clock(kvm, &gtod); VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", gtod.epoch_idx, gtod.tod); @@ -917,13 +941,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { - u64 gtod; + struct kvm_s390_vm_tod_clock gtod = { 0 }; - if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) + if (copy_from_user(&gtod.tod, (void __user *)attr->addr, + sizeof(gtod.tod))) return -EFAULT; - kvm_s390_set_tod_clock(kvm, gtod); - VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); + kvm_s390_set_tod_clock(kvm, &gtod); + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); return 0; } @@ -1752,7 +1777,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; + mutex_lock(&kvm->slots_lock); r = kvm_s390_get_cmma_bits(kvm, &args); + mutex_unlock(&kvm->slots_lock); if (!r) { r = copy_to_user(argp, &args, sizeof(args)); if (r) @@ -1766,7 +1793,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; + mutex_lock(&kvm->slots_lock); r = kvm_s390_set_cmma_bits(kvm, &args); + mutex_unlock(&kvm->slots_lock); break; } default: @@ -2090,6 +2119,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu) /* we still need the basic sca for the ipte control */ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; + return; } read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { @@ -2354,6 +2384,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) mutex_lock(&vcpu->kvm->lock); preempt_disable(); vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; + vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; preempt_enable(); mutex_unlock(&vcpu->kvm->lock); if (!kvm_is_ucontrol(vcpu->kvm)) { @@ -2940,8 +2971,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) return 0; } -void kvm_s390_set_tod_clock_ext(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod) +void 
kvm_s390_set_tod_clock(struct kvm *kvm, + const struct kvm_s390_vm_tod_clock *gtod) { struct kvm_vcpu *vcpu; struct kvm_s390_tod_clock_ext htod; @@ -2953,10 +2984,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, get_tod_clock_ext((char *)&htod); kvm->arch.epoch = gtod->tod - htod.tod; - kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; - - if (kvm->arch.epoch > gtod->tod) - kvm->arch.epdx -= 1; + kvm->arch.epdx = 0; + if (test_kvm_facility(kvm, 139)) { + kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; + if (kvm->arch.epoch > gtod->tod) + kvm->arch.epdx -= 1; + } kvm_s390_vcpu_block_all(kvm); kvm_for_each_vcpu(i, vcpu, kvm) { @@ -2969,22 +3002,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, mutex_unlock(&kvm->lock); } -void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) -{ - struct kvm_vcpu *vcpu; - int i; - - mutex_lock(&kvm->lock); - preempt_disable(); - kvm->arch.epoch = tod - get_tod_clock(); - kvm_s390_vcpu_block_all(kvm); - kvm_for_each_vcpu(i, vcpu, kvm) - vcpu->arch.sie_block->epoch = kvm->arch.epoch; - kvm_s390_vcpu_unblock_all(kvm); - preempt_enable(); - mutex_unlock(&kvm->lock); -} - /** * kvm_arch_fault_in_page - fault-in guest page if necessary * @vcpu: The corresponding virtual cpu @@ -3373,7 +3390,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int rc; - sigset_t sigsaved; if (kvm_run->immediate_exit) return -EINTR; @@ -3383,8 +3399,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { kvm_s390_vcpu_start(vcpu); @@ -3418,8 +3433,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) disable_cpu_timer_accounting(vcpu); store_regs(vcpu, kvm_run); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); vcpu->stat.exit_userspace++; return rc; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 9f8fdd7b2311..e22d94f494a7 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -272,9 +272,8 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); int handle_sthyi(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ -void kvm_s390_set_tod_clock_ext(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod); -void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); +void kvm_s390_set_tod_clock(struct kvm *kvm, + const struct kvm_s390_vm_tod_clock *gtod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c954ac49eee4..734283a21677 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -84,9 +84,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { + struct kvm_s390_vm_tod_clock gtod = { 0 }; int rc; u8 ar; - u64 op2, val; + u64 op2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -94,12 +95,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) op2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (op2 & 7) /* Operand must be on a doubleword boundary */ 
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); + rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); - kvm_s390_set_tod_clock(vcpu->kvm, val); + VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); + kvm_s390_set_tod_clock(vcpu->kvm, &gtod); kvm_s390_set_psw_cc(vcpu, 0); return 0; @@ -235,8 +236,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return -EAGAIN; } - if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) - return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); return 0; } @@ -247,6 +246,9 @@ static int handle_iske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -276,6 +278,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -311,6 +316,9 @@ static int handle_sske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -1002,7 +1010,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) cbrlo[entries] = gfn << PAGE_SHIFT; } - if (orc) { + if (orc && gfn < ms->bitmap_size) { /* increment only if we are really flipping the bit to 1 */ if (!test_and_set_bit(gfn, ms->pgste_bitmap)) atomic64_inc(&ms->dirty_pages); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index cc2faffa7d6e..334b6d103cbd 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -85,8 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); - if (end >= TASK_SIZE_MAX) - return -ENOMEM; rc = 0; notify = 0; while (mm->context.asce_limit < end) { diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index b15cd2f0320f..33e2785f6842 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -55,8 +55,7 @@ struct bpf_jit { #define SEEN_LITERAL 8 /* code uses literals */ #define SEEN_FUNC 16 /* calls C functions */ #define SEEN_TAIL_CALL 32 /* code uses tail calls */ -#define SEEN_SKB_CHANGE 64 /* code changes skb data */ -#define SEEN_REG_AX 128 /* code uses constant blinding */ +#define SEEN_REG_AX 64 /* code uses constant blinding */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) /* @@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit) EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); } - if (jit->seen & SEEN_SKB) + if (jit->seen & SEEN_SKB) { emit_load_skb_data_hlen(jit); - if (jit->seen & SEEN_SKB_CHANGE) /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); + } } /* @@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT2(0x0d00, REG_14, REG_W1); /* lgr %b0,%r2: load return value into %b0 */ EMIT4(0xb9040000, 
BPF_REG_0, REG_2); - if (bpf_helper_changes_pkt_data((void *)func)) { - jit->seen |= SEEN_SKB_CHANGE; + if ((jit->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data((void *)func)) { /* lg %b1,ST_OFF_SKBP(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a25d95a6612d..0fe649c0d542 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -368,7 +368,8 @@ static void zpci_irq_handler(struct airq_struct *airq) /* End of second scan with interrupts on. */ break; /* First scan complete, reenable interrupts. */ - zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC)) + break; si = 0; continue; } @@ -956,7 +957,7 @@ static int __init pci_base_init(void) if (!s390_pci_probe) return 0; - if (!test_facility(69) || !test_facility(71) || !test_facility(72)) + if (!test_facility(69) || !test_facility(71)) return 0; rc = zpci_debug_init(); diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index ea34086c8674..81b840bc6e4e 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } /* Set Interruption Controls */ -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) { + if (!test_facility(72)) + return -EIO; asm volatile ( " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); + return 0; } /* PCI Load */ diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c index 77c35350ee77..b7fa7a87e946 100644 --- a/arch/sh/boards/mach-se/770x/setup.c +++ b/arch/sh/boards/mach-se/770x/setup.c @@ -9,6 +9,7 @@ */ #include #include +#include #include #include #include @@ -115,6 +116,11 @@ static struct platform_device heartbeat_device = { #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ defined(CONFIG_CPU_SUBTYPE_SH7712) /* SH771X Ethernet driver */ +static struct sh_eth_plat_data sh_eth_plat = { + .phy = PHY_ID, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + static struct resource sh_eth0_resources[] = { [0] = { .start = SH_ETH0_BASE, @@ -132,7 +138,7 @@ static struct platform_device sh_eth0_device = { .name = "sh771x-ether", .id = 0, .dev = { - .platform_data = PHY_ID, + .platform_data = &sh_eth_plat, }, .num_resources = ARRAY_SIZE(sh_eth0_resources), .resource = sh_eth0_resources, @@ -155,7 +161,7 @@ static struct platform_device sh_eth1_device = { .name = "sh771x-ether", .id = 1, .dev = { - .platform_data = PHY_ID, + .platform_data = &sh_eth_plat, }, .num_resources = ARRAY_SIZE(sh_eth1_resources), .resource = sh_eth1_resources, diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index e1d751ae2498..1a2526676a87 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void) dwarf_frame_cachep = kmem_cache_create("dwarf_frames", sizeof(struct dwarf_frame), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); dwarf_reg_cachep = kmem_cache_create("dwarf_regs", sizeof(struct dwarf_reg), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ, dwarf_frame_cachep); diff --git a/arch/sh/kernel/process.c 
b/arch/sh/kernel/process.c index b2d9963d5978..68b1a67533ce 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -59,7 +59,7 @@ void arch_task_cache_init(void) task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC | SLAB_NOTRACK, NULL); + SLAB_PANIC, NULL); } #ifdef CONFIG_SH_FPU_EMU diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 57cff00cad17..b3770bb26211 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4) break; } - force_sig_info(SIGFPE, &info, current); + info.si_signo = SIGFPE; + force_sig_info(info.si_signo, &info, current); } #endif diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index d1064e46efe8..8aa664638c3c 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_alignmask = 7, diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h index 3e3823db303e..c73b5a3ab7b9 100644 --- a/arch/sparc/include/asm/cmpxchg_32.h +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) (unsigned long)_n_, sizeof(*(ptr))); \ }) +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new); +#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new) + #include /* diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 977c3f280ba1..fa38c78de0f0 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -209,7 +209,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. 
This should not diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index e25d25b0a34b..b361702ef52a 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -8,9 +8,11 @@ #include #include +#include #include #include +#include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index 6a339a78f4f4..71dd82b43cc5 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -7,6 +7,7 @@ #if defined(__sparc__) && defined(__arch64__) #ifndef __ASSEMBLY__ +#include #include #include diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 25b6abdb3908..522a677e050d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; sllx REG2, 32, REG2; \ andcc REG1, REG2, %g0; \ be,pt %xcc, 700f; \ - sethi %hi(0x1ffc0000), REG2; \ + sethi %hi(0xffe00000), REG2; \ sllx REG2, 1, REG2; \ brgez,pn REG1, FAIL_LABEL; \ andn REG1, REG2, REG1; \ diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index 5010df497387..465a901a0ada 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -173,6 +173,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) } EXPORT_SYMBOL(__cmpxchg_u32); +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new) +{ + unsigned long flags; + u64 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + if ((prev = *ptr) == old) + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return prev; +} +EXPORT_SYMBOL(__cmpxchg_u64); + unsigned long __xchg_u32(volatile u32 *ptr, u32 new) { unsigned long flags; diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S index e5547b22cd18..0ddbbb031822 100644 --- a/arch/sparc/lib/hweight.S +++ b/arch/sparc/lib/hweight.S @@ -44,8 +44,8 @@ EXPORT_SYMBOL(__arch_hweight32) .previous ENTRY(__arch_hweight64) - sethi %hi(__sw_hweight16), %g1 - jmpl %g1 + %lo(__sw_hweight16), %g0 + sethi %hi(__sw_hweight64), %g1 + jmpl %g1 + %lo(__sw_hweight64), %g0 nop ENDPROC(__arch_hweight64) EXPORT_SYMBOL(__arch_hweight64) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 61bdc1270d19..984e9d65ea0d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2540,9 +2540,16 @@ void __init mem_init(void) { high_memory = __va(last_valid_pfn << PAGE_SHIFT); - register_page_bootmem_info(); free_all_bootmem(); + /* + * Must be done after boot memory is put on freelist, because here we + * might set fields in deferred struct pages that have not yet been + * initialized, and free_all_bootmem() initializes all the reserved + * deferred pages for us. + */ + register_page_bootmem_info(); + /* * Set up the zero page, mark it reserved, so that page count * is not manipulated when freeing the page from user ptes. 
@@ -2927,7 +2934,7 @@ void __flush_tlb_all(void) pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); pte_t *pte = NULL; if (page) @@ -2939,7 +2946,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) return NULL; if (!pgtable_page_ctor(page)) { diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 5765e7e711f7..ff5f9cb3039a 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) u8 *func = ((u8 *)__bpf_call_base) + imm; ctx->saw_call = true; + if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) + emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx); emit_call((u32 *)func, ctx); emit_nop(ctx); emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); - if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind) - load_skb_regs(ctx, bpf2sparc[BPF_REG_6]); + if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) + load_skb_regs(ctx, L7); break; } diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index c14e36f008c8..62a7b83025dd 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -173,7 +173,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 50a32c33d729..73c57f614c9e 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += barrier.h +generic-y += bpf_perf_event.h generic-y += bug.h generic-y += clkdev.h generic-y += current.h diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index b668e351fd6c..fca34b2177e2 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm); /* * Needed since we do not use the asm-generic/mm_hooks.h: */ -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { uml_setup_stubs(mm); + return 0; } extern void arch_exit_mmap(struct mm_struct *mm); static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index 390572daa40d..b3f5865a92c9 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h @@ -41,7 +41,7 @@ typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); -#include +#include /* These are for everybody (although not all archs will actually discard it in modules) */ diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 59b06b48f27d..5c205a9cb5a6 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -81,9 +81,10 @@ do { \ } \ } while (0) -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + 
return 0; } static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h index 26775793c204..f0fdb268f8f2 100644 --- a/arch/unicore32/include/asm/pgalloc.h +++ b/arch/unicore32/include/asm/pgalloc.h @@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); #define pgd_alloc(mm) get_pgd_slow(mm) #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) /* * Allocate one PTE table. diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 5f25b39f04d4..c4ac6043ebb0 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -298,7 +298,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 0038a2d10a7a..466219296cd6 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/ # Xen paravirtualization support obj-$(CONFIG_XEN) += xen/ +obj-$(CONFIG_ACRN) += acrn/ + # Hyper-V paravirtualization support obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2fdb23313dd5..b4d015ee3fee 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -56,7 +56,7 @@ config X86 select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_PMEM_API if X86_64 # Causing hangs/crashes, see the commit that added this change for details. - select ARCH_HAS_REFCOUNT if BROKEN + select ARCH_HAS_REFCOUNT select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN @@ -89,6 +89,7 @@ config X86 select GENERIC_CLOCKEVENTS_MIN_ADJUST select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_FIND_FIRST_BIT select GENERIC_IOMAP @@ -108,9 +109,8 @@ config X86 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP + select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KGDB - select HAVE_ARCH_KMEMCHECK select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT @@ -171,7 +171,7 @@ config X86 select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION + select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION select HAVE_STACK_VALIDATION if X86_64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK @@ -303,7 +303,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xdff8000000000000 if X86_5LEVEL default 0xdffffc0000000000 config HAVE_INTEL_TXT @@ -429,6 +428,20 @@ config GOLDFISH def_bool y depends on X86_GOLDFISH +config RETPOLINE + bool "Avoid speculative indirect branches in kernel" + default y + select STACK_VALIDATION if HAVE_STACK_VALIDATION + help + Compile kernel with the retpoline compiler options to guard against + kernel-to-user data leaks by avoiding speculative indirect + branches. Requires a compiler with -mindirect-branch=thunk-extern + support for full protection. The kernel may run slower. 
+ + Without compiler support, at least indirect branches in assembler + code are eliminated. Since this includes the syscall entry path, + it is not entirely pointless. + config INTEL_RDT bool "Intel Resource Director Technology support" default n @@ -759,6 +772,8 @@ config QUEUED_LOCK_STAT behavior of paravirtualized queued spinlocks and report them on debugfs. +source "arch/x86/acrn/Kconfig" + source "arch/x86/xen/Kconfig" config KVM_GUEST @@ -926,7 +941,8 @@ config MAXSMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK + range 2 64 if SMP && X86_32 && X86_BIGSMP + range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 default "1" if !SMP default "8192" if MAXSMP @@ -1429,7 +1445,7 @@ config ARCH_DMA_ADDR_T_64BIT config X86_DIRECT_GBPAGES def_bool y - depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK + depends on X86_64 && !DEBUG_PAGEALLOC ---help--- Certain kernel features effectively disable kernel linear 1 GB mappings (even if the CPU otherwise @@ -1803,6 +1819,16 @@ config X86_SMAP If unsure, say Y. +config X86_INTEL_UMIP + def_bool n + depends on CPU_SUP_INTEL + prompt "Intel User Mode Instruction Prevention" if EXPERT + ---help--- + The User Mode Instruction Prevention (UMIP) is a security + feature in newer Intel processors. If enabled, a general + protection fault is issued if the instructions SGDT, SLDT, + SIDT, SMSW and STR are executed in user mode. + config X86_INTEL_MPX prompt "Intel MPX (Memory Protection Extensions)" def_bool n diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 90b123056f4b..6293a8768a91 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -359,28 +359,14 @@ config PUNIT_ATOM_DEBUG choice prompt "Choose kernel unwinder" - default FRAME_POINTER_UNWINDER + default UNWINDER_ORC if X86_64 + default UNWINDER_FRAME_POINTER if X86_32 ---help--- This determines which method will be used for unwinding kernel stack traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack, livepatch, lockdep, and more. -config FRAME_POINTER_UNWINDER - bool "Frame pointer unwinder" - select FRAME_POINTER - ---help--- - This option enables the frame pointer unwinder for unwinding kernel - stack traces. - - The unwinder itself is fast and it uses less RAM than the ORC - unwinder, but the kernel text size will grow by ~3% and the kernel's - overall performance will degrade by roughly 5-10%. - - This option is recommended if you want to use the livepatch - consistency model, as this is currently the only way to get a - reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). - -config ORC_UNWINDER +config UNWINDER_ORC bool "ORC unwinder" depends on X86_64 select STACK_VALIDATION @@ -396,7 +382,22 @@ config ORC_UNWINDER Enabling this option will increase the kernel's runtime memory usage by roughly 2-4MB, depending on your kernel config. -config GUESS_UNWINDER +config UNWINDER_FRAME_POINTER + bool "Frame pointer unwinder" + select FRAME_POINTER + ---help--- + This option enables the frame pointer unwinder for unwinding kernel + stack traces. + + The unwinder itself is fast and it uses less RAM than the ORC + unwinder, but the kernel text size will grow by ~3% and the kernel's + overall performance will degrade by roughly 5-10%. 
+ + This option is recommended if you want to use the livepatch + consistency model, as this is currently the only way to get a + reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). + +config UNWINDER_GUESS bool "Guess unwinder" depends on EXPERT ---help--- @@ -411,7 +412,7 @@ config GUESS_UNWINDER endchoice config FRAME_POINTER - depends on !ORC_UNWINDER && !GUESS_UNWINDER + depends on !UNWINDER_ORC && !UNWINDER_GUESS bool endmenu diff --git a/arch/x86/Makefile b/arch/x86/Makefile index a20eacd9c7e9..9451ea021131 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -111,6 +111,8 @@ else KBUILD_CFLAGS += $(call cc-option,-mno-80387) KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387) + KBUILD_CFLAGS += -fno-pic + # By default gcc and clang use a stack alignment of 16 bytes for x86. # However the standard kernel entry on x86-64 leaves the stack on an # 8-byte boundary. If the compiler isn't informed about the actual @@ -158,11 +160,6 @@ ifdef CONFIG_X86_X32 endif export CONFIG_X86_X32_ABI -# Don't unroll struct assignments with kmemcheck enabled -ifeq ($(CONFIG_KMEMCHECK),y) - KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) -endif - # # If the function graph tracer is used with mcount instead of fentry, # '-maccumulate-outgoing-args' is needed to prevent a GCC bug @@ -228,6 +225,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) LDFLAGS := -m elf_$(UTS_MACHINE) +# +# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to +# the linker to force 2MB page size regardless of the default page size used +# by the linker. +# +ifdef CONFIG_X86_64 +LDFLAGS += $(call ld-option, -z max-page-size=0x200000) +endif + # Speed up the build KBUILD_CFLAGS += -pipe # Workaround for a gcc prelease that unfortunately was shipped in a suse release @@ -235,6 +241,13 @@ KBUILD_CFLAGS += -Wno-sign-compare # KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +# Avoid indirect branches in kernel to deal with Spectre +ifdef CONFIG_RETPOLINE +ifneq ($(RETPOLINE_CFLAGS),) + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE +endif +endif + archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs diff --git a/arch/x86/acrn/Kconfig b/arch/x86/acrn/Kconfig new file mode 100644 index 000000000000..b7cf03d7cacf --- /dev/null +++ b/arch/x86/acrn/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# This Kconfig describes ACRN options +# + +config ACRN + bool "ACRN guest support" + depends on X86 + depends on PARAVIRT + help + This is the Linux ACRN port. Enabling this will allow the + kernel to boot in a paravirtualized environment under the + ACRN hypervisor. + +config ACRN_HV + bool "Enable services run on ACRN hypervisor" + depends on ACRN + help + This option is needed if you want to run ACRN services Linux on top of + the ACRN hypervisor. diff --git a/arch/x86/acrn/Makefile b/arch/x86/acrn/Makefile new file mode 100644 index 000000000000..3f42025a326a --- /dev/null +++ b/arch/x86/acrn/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ACRN_HV) += acrn_hv.o diff --git a/arch/x86/acrn/acrn_hv.c b/arch/x86/acrn/acrn_hv.c new file mode 100644 index 000000000000..10fcc9a963ff --- /dev/null +++ b/arch/x86/acrn/acrn_hv.c @@ -0,0 +1,89 @@ +/* + * ACRN hypervisor support + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Jason Chen CJ + * + */ +#include +#include + +static unsigned long cpu_khz_from_acrn(void) +{ + unsigned int eax, ebx, ecx, edx; + + /* Get TSC frequency from cpuid 0x40000010 */ + eax = 0x40000010; + ebx = ecx = edx = 0; + __cpuid(&eax, &ebx, &ecx, &edx); + + return (unsigned long)eax; +} + +static uint32_t __init acrn_detect(void) +{ + return hypervisor_cpuid_base("ACRNACRNACRN", 0); +} + +static void __init acrn_init_platform(void) +{ + pv_cpu_ops.cpu_khz = cpu_khz_from_acrn; + +#ifdef CONFIG_ACRN_VHM + pv_irq_ops.write_msi = acrn_write_msi_msg; +#endif /* CONFIG_ACRN_VHM */ +} + +static void acrn_pin_vcpu(int cpu) +{ + /* do nothing here now */ +} + +static bool acrn_x2apic_available(void) +{ + /* do not support x2apic */ + return false; +} + +static void __init acrn_init_mem_mapping(void) +{ + /* do nothing here now */ +} + +const struct hypervisor_x86 x86_hyper_acrn = { + .name = "ACRN", + .detect = acrn_detect, + .type = X86_HYPER_ACRN, + .init.init_platform = acrn_init_platform, + .runtime.pin_vcpu = acrn_pin_vcpu, + .init.x2apic_available = acrn_x2apic_available, + .init.init_mem_mapping = acrn_init_mem_mapping, +}; +EXPORT_SYMBOL(x86_hyper_acrn); diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 4b7575b00563..98018a621f6b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -78,6 +78,7 @@ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o + vmlinux-objs-y += $(obj)/pgtable_64.o endif $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index beb255b66447..4b3d92a37c80 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -289,10 +289,18 @@ ENTRY(startup_64) leaq boot_stack_end(%rbx), %rsp #ifdef CONFIG_X86_5LEVEL - /* Check if 
5-level paging has already enabled */ - movq %cr4, %rax - testl $X86_CR4_LA57, %eax - jnz lvl5 + /* + * Check if we need to enable 5-level paging. + * RSI holds real mode data and need to be preserved across + * a function call. + */ + pushq %rsi + call l5_paging_required + popq %rsi + + /* If l5_paging_required() returned zero, we're done here. */ + cmpq $0, %rax + je lvl5 /* * At this point we are in long mode with 4-level paging enabled, diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b50c42455e25..252fee320816 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -169,6 +169,16 @@ void __puthex(unsigned long value) } } +static bool l5_supported(void) +{ + /* Check if leaf 7 is supported. */ + if (native_cpuid_eax(0) < 7) + return 0; + + /* Check if la57 is supported. */ + return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)); +} + #if CONFIG_X86_NEED_RELOCS static void handle_relocations(void *output, unsigned long output_len, unsigned long virt_addr) @@ -299,6 +309,10 @@ static void parse_elf(void *output) switch (phdr->p_type) { case PT_LOAD: +#ifdef CONFIG_X86_64 + if ((phdr->p_align % 0x200000) != 0) + error("Alignment of LOAD segment isn't multiple of 2MB"); +#endif #ifdef CONFIG_RELOCATABLE dest = output; dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); @@ -362,6 +376,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, console_init(); debug_putstr("early console in extract_kernel\n"); + if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) { + error("This linux kernel as configured requires 5-level paging\n" + "This CPU does not support the required 'cr4.la57' feature\n" + "Unable to boot - please use a kernel appropriate for your CPU\n"); + } + free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c index 972319ff5b01..e691ff734cb5 100644 --- a/arch/x86/boot/compressed/pagetable.c +++ b/arch/x86/boot/compressed/pagetable.c @@ -23,6 +23,9 @@ */ #undef CONFIG_AMD_MEM_ENCRYPT +/* No PAGE_TABLE_ISOLATION support needed either: */ +#undef CONFIG_PAGE_TABLE_ISOLATION + #include "misc.h" /* These actually do the work of building the kernel identity maps. */ diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c new file mode 100644 index 000000000000..b4469a37e9a1 --- /dev/null +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -0,0 +1,28 @@ +#include + +/* + * __force_order is used by special_insns.h asm code to force instruction + * serialization. + * + * It is not referenced from the code, but GCC < 5 with -fPIE would fail + * due to an undefined symbol. Define it to make these ancient GCCs work. + */ +unsigned long __force_order; + +int l5_paging_required(void) +{ + /* Check if leaf 7 is supported. */ + + if (native_cpuid_eax(0) < 7) + return 0; + + /* Check if la57 is supported. */ + if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + return 0; + + /* Check if 5-level paging has already been enabled. 
*/ + if (native_read_cr4() & X86_CR4_LA57) + return 0; + + return 1; +} diff --git a/arch/x86/configs/abl_diffconfig b/arch/x86/configs/abl_diffconfig new file mode 100644 index 000000000000..899be931801b --- /dev/null +++ b/arch/x86/configs/abl_diffconfig @@ -0,0 +1 @@ +CONFIG_ABL_BOOTLOADER_CONTROL=y diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig new file mode 100644 index 000000000000..18d3675d28f0 --- /dev/null +++ b/arch/x86/configs/i386_ranchu_defconfig @@ -0,0 +1,422 @@ +# CONFIG_64BIT is not set +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_X86_BIGSMP=y +CONFIG_MCORE2=y +CONFIG_X86_GENERIC=y +CONFIG_HPET_TIMER=y +CONFIG_NR_CPUS=512 +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_REBOOTFIXUPS=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y 
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# 
CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SYNC_FILE=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y 
+CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_AES_586=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config index 550cd5012b73..66c9e2aab16c 100644 --- a/arch/x86/configs/tiny.config +++ b/arch/x86/configs/tiny.config @@ -1,5 +1,5 @@ CONFIG_NOHIGHMEM=y # CONFIG_HIGHMEM4G is not set # CONFIG_HIGHMEM64G is not set -CONFIG_GUESS_UNWINDER=y -# CONFIG_FRAME_POINTER_UNWINDER is not set +CONFIG_UNWINDER_GUESS=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 4a4b16e56d35..e32fc1f274d8 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -299,6 +299,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_UNWINDER_ORC=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig new file mode 100644 index 000000000000..7eff3002db18 --- /dev/null +++ b/arch/x86/configs/x86_64_ranchu_defconfig @@ -0,0 +1,417 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_MCORE2=y +CONFIG_MAXSMP=y +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET 
is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y 
+CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SYNC_FILE=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# 
CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 16627fec80b2..12e8484a8ee7 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -32,6 +32,7 @@ #include #include #include +#include /* * The following macros are used to move an (un)aligned 16 byte value to/from @@ -89,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 -.section .rodata -.align 16 -.type aad_shift_arr, @object -.size aad_shift_arr, 272 -aad_shift_arr: - .octa 0xffffffffffffffffffffffffffffffff - .octa 0xffffffffffffffffffffffffffffff0C - .octa 0xffffffffffffffffffffffffffff0D0C - .octa 0xffffffffffffffffffffffffff0E0D0C - .octa 0xffffffffffffffffffffffff0F0E0D0C - .octa 0xffffffffffffffffffffff0C0B0A0908 - .octa 0xffffffffffffffffffff0D0C0B0A0908 - .octa 0xffffffffffffffffff0E0D0C0B0A0908 - .octa 0xffffffffffffffff0F0E0D0C0B0A0908 - .octa 0xffffffffffffff0C0B0A090807060504 - .octa 0xffffffffffff0D0C0B0A090807060504 - .octa 0xffffffffff0E0D0C0B0A090807060504 - .octa 0xffffffff0F0E0D0C0B0A090807060504 - .octa 0xffffff0C0B0A09080706050403020100 - .octa 0xffff0D0C0B0A09080706050403020100 - .octa 0xff0E0D0C0B0A09080706050403020100 - .octa 0x0F0E0D0C0B0A09080706050403020100 - - .text @@ -256,6 +233,37 @@ aad_shift_arr: pxor \TMP1, \GH # result is in TMP1 .endm +# Reads DLEN bytes starting at DPTR and stores in XMMDst +# where 0 < DLEN < 16 +# Clobbers %rax, DLEN and XMM1 +.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst + cmp $8, \DLEN + jl _read_lt8_\@ + mov (\DPTR), %rax + MOVQ_R64_XMM %rax, \XMMDst + sub $8, \DLEN + jz _done_read_partial_block_\@ + xor %eax, %eax +_read_next_byte_\@: + shl $8, %rax + mov 7(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_\@ + MOVQ_R64_XMM %rax, \XMM1 + pslldq $8, \XMM1 + por \XMM1, \XMMDst + jmp _done_read_partial_block_\@ +_read_lt8_\@: + xor %eax, %eax +_read_next_byte_lt8_\@: + shl $8, %rax + mov -1(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_lt8_\@ + MOVQ_R64_XMM %rax, \XMMDst +_done_read_partial_block_\@: +.endm + /* * if a = number of total plaintext bytes * b = floor(a/16) @@ -272,62 +280,30 @@ aad_shift_arr: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + 
mov arg8, %r11 # %r11 = aadLen pxor %xmm\i, %xmm\i pxor \XMM2, \XMM2 cmp $16, %r11 - jl _get_AAD_rest8\num_initial_blocks\operation + jl _get_AAD_rest\num_initial_blocks\operation _get_AAD_blocks\num_initial_blocks\operation: movdqu (%r10), %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor %xmm\i, \XMM2 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 add $16, %r10 - sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\num_initial_blocks\operation movdqu \XMM2, %xmm\i + + /* read the last <16B of AAD */ +_get_AAD_rest\num_initial_blocks\operation: cmp $0, %r11 je _get_AAD_done\num_initial_blocks\operation - pxor %xmm\i,%xmm\i - - /* read the last <16B of AAD. since we have at least 4B of - data right after the AAD (the ICV, and maybe some CT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\num_initial_blocks\operation: - cmp $4, %r11 - jle _get_AAD_rest4\num_initial_blocks\operation - movq (%r10), \TMP1 - add $8, %r10 - sub $8, %r11 - pslldq $8, \TMP1 - psrldq $8, %xmm\i - pxor \TMP1, %xmm\i - jmp _get_AAD_rest8\num_initial_blocks\operation -_get_AAD_rest4\num_initial_blocks\operation: - cmp $0, %r11 - jle _get_AAD_rest0\num_initial_blocks\operation - mov (%r10), %eax - movq %rax, \TMP1 - add $4, %r10 - sub $4, %r10 - pslldq $12, \TMP1 - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i -_get_AAD_rest0\num_initial_blocks\operation: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \TMP1 - PSHUFB_XMM \TMP1, %xmm\i -_get_AAD_rest_final\num_initial_blocks\operation: + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor \XMM2, %xmm\i GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 @@ -531,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r11 # %r11 = aadLen pxor %xmm\i, %xmm\i pxor \XMM2, \XMM2 cmp $16, %r11 - jl _get_AAD_rest8\num_initial_blocks\operation + jl _get_AAD_rest\num_initial_blocks\operation _get_AAD_blocks\num_initial_blocks\operation: movdqu (%r10), %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor %xmm\i, \XMM2 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 add $16, %r10 - sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\num_initial_blocks\operation movdqu \XMM2, %xmm\i + + /* read the last <16B of AAD */ +_get_AAD_rest\num_initial_blocks\operation: cmp $0, %r11 je _get_AAD_done\num_initial_blocks\operation - pxor %xmm\i,%xmm\i - - /* read the last <16B of AAD. 
since we have at least 4B of - data right after the AAD (the ICV, and maybe some PT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\num_initial_blocks\operation: - cmp $4, %r11 - jle _get_AAD_rest4\num_initial_blocks\operation - movq (%r10), \TMP1 - add $8, %r10 - sub $8, %r11 - pslldq $8, \TMP1 - psrldq $8, %xmm\i - pxor \TMP1, %xmm\i - jmp _get_AAD_rest8\num_initial_blocks\operation -_get_AAD_rest4\num_initial_blocks\operation: - cmp $0, %r11 - jle _get_AAD_rest0\num_initial_blocks\operation - mov (%r10), %eax - movq %rax, \TMP1 - add $4, %r10 - sub $4, %r10 - pslldq $12, \TMP1 - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i -_get_AAD_rest0\num_initial_blocks\operation: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \TMP1 - PSHUFB_XMM \TMP1, %xmm\i -_get_AAD_rest_final\num_initial_blocks\operation: + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor \XMM2, %xmm\i GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 @@ -1385,14 +1329,6 @@ _esb_loop_\@: * * AAD Format with 64-bit Extended Sequence Number * -* aadLen: -* from the definition of the spec, aadLen can only be 8 or 12 bytes. -* The code supports 16 too but for other sizes, the code will fail. -* -* TLen: -* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. -* For other sizes, the code will fail. -* * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ @@ -1486,19 +1422,16 @@ _zero_cipher_left_decrypt: PSHUFB_XMM %xmm10, %xmm0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) - sub $16, %r11 - add %r13, %r11 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 -# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes -# (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes + lea (%arg3,%r11,1), %r10 + mov %r13, %r12 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 + + lea ALL_F+16(%rip), %r12 + sub %r13, %r12 movdqa %xmm1, %xmm2 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm2 @@ -1507,9 +1440,6 @@ _zero_cipher_left_decrypt: pxor %xmm2, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 - # GHASH computation for the last <16 byte block - sub %r13, %r11 - add $16, %r11 # output %r13 bytes MOVQ_R64_XMM %xmm0, %rax @@ -1663,14 +1593,6 @@ ENDPROC(aesni_gcm_dec) * * AAD Format with 64-bit Extended Sequence Number * -* aadLen: -* from the definition of the spec, aadLen can only be 8 or 12 bytes. -* The code supports 16 too but for other sizes, the code will fail. -* -* TLen: -* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. -* For other sizes, the code will fail. 
-* * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ ENTRY(aesni_gcm_enc) @@ -1763,19 +1685,16 @@ _zero_cipher_left_encrypt: movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 - ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) - sub $16, %r11 - add %r13, %r11 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks - lea SHIFT_MASK+16(%rip), %r12 + + lea (%arg3,%r11,1), %r10 + mov %r13, %r12 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 + + lea ALL_F+16(%rip), %r12 sub %r13, %r12 - # adjust the shuffle mask pointer to be able to shift 16-r13 bytes - # (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm0 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 movdqa SHUF_MASK(%rip), %xmm10 @@ -1784,9 +1703,6 @@ _zero_cipher_left_encrypt: pxor %xmm0, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 # GHASH computation for the last <16 byte block - sub %r13, %r11 - add $16, %r11 - movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 @@ -2884,7 +2800,7 @@ ENTRY(aesni_xts_crypt8) pxor INC, STATE4 movdqu IV, 0x30(OUTP) - call *%r11 + CALL_NOSPEC %r11 movdqu 0x00(OUTP), INC pxor INC, STATE1 @@ -2929,7 +2845,7 @@ ENTRY(aesni_xts_crypt8) _aesni_gf128mul_x_ble() movups IV, (IVP) - call *%r11 + CALL_NOSPEC %r11 movdqu 0x40(OUTP), INC pxor INC, STATE1 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 5c15d6b57329..c690ddc78c03 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -689,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); } -static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, - unsigned int key_len) +static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, + unsigned int key_len) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; @@ -715,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, /* This is the Integrity Check Value (aka the authentication tag length and can * be 8, 12 or 16 bytes long. 
*/ -static int rfc4106_set_authsize(struct crypto_aead *parent, - unsigned int authsize) +static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent, + unsigned int authsize) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; @@ -823,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, if (sg_is_last(req->src) && (!PageHighMem(sg_page(req->src)) || req->src->offset + req->src->length <= PAGE_SIZE) && - sg_is_last(req->dst) && + sg_is_last(req->dst) && req->dst->length && (!PageHighMem(sg_page(req->dst)) || req->dst->offset + req->dst->length <= PAGE_SIZE)) { one_entry_in_sg = 1; @@ -928,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) aes_ctx); } -static int rfc4106_encrypt(struct aead_request *req) +static int gcmaes_wrapper_encrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); @@ -944,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req) return crypto_aead_encrypt(req); } -static int rfc4106_decrypt(struct aead_request *req) +static int gcmaes_wrapper_decrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); @@ -1115,7 +1116,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req) { __be32 counter = cpu_to_be32(1); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); @@ -1126,12 +1127,36 @@ static int generic_gcmaes_decrypt(struct aead_request *req) aes_ctx); } +static int generic_gcmaes_init(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni", + CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + + return 0; +} + +static void generic_gcmaes_exit(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} + static struct aead_alg aesni_aead_algs[] = { { .setkey = common_rfc4106_set_key, .setauthsize = common_rfc4106_set_authsize, .encrypt = helper_rfc4106_encrypt, .decrypt = helper_rfc4106_decrypt, - .ivsize = 8, + .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "__gcm-aes-aesni", @@ -1145,11 +1170,11 @@ static struct aead_alg aesni_aead_algs[] = { { }, { .init = rfc4106_init, .exit = rfc4106_exit, - .setkey = rfc4106_set_key, - .setauthsize = rfc4106_set_authsize, - .encrypt = rfc4106_encrypt, - .decrypt = rfc4106_decrypt, - .ivsize = 8, + .setkey = gcmaes_wrapper_set_key, + .setauthsize = gcmaes_wrapper_set_authsize, + .encrypt = gcmaes_wrapper_encrypt, + .decrypt = gcmaes_wrapper_decrypt, + .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "rfc4106(gcm(aes))", @@ -1165,7 +1190,26 @@ static struct aead_alg aesni_aead_algs[] = { { .setauthsize = generic_gcmaes_set_authsize, .encrypt = generic_gcmaes_encrypt, .decrypt = generic_gcmaes_decrypt, - .ivsize = 12, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = 16, + .base = { + .cra_name = "__generic-gcm-aes-aesni", + .cra_driver_name = "__driver-generic-gcm-aes-aesni", + 
.cra_priority = 0, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), + .cra_alignmask = AESNI_ALIGN - 1, + .cra_module = THIS_MODULE, + }, +}, { + .init = generic_gcmaes_init, + .exit = generic_gcmaes_exit, + .setkey = gcmaes_wrapper_set_key, + .setauthsize = gcmaes_wrapper_set_authsize, + .encrypt = gcmaes_wrapper_encrypt, + .decrypt = gcmaes_wrapper_decrypt, + .ivsize = GCM_AES_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "gcm(aes)", @@ -1173,8 +1217,7 @@ static struct aead_alg aesni_aead_algs[] = { { .cra_priority = 400, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), - .cra_alignmask = AESNI_ALIGN - 1, + .cra_ctxsize = sizeof(struct cryptd_aead *), .cra_module = THIS_MODULE, }, } }; diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index f7c495e2863c..a14af6eb09cb 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -17,6 +17,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way: vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; - call *%r9; + CALL_NOSPEC %r9; addq $(16 * 16), %rsp; diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index eee5b3982cfd..b66bbfa62f50 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -12,6 +12,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way: vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; - call *%r9; + CALL_NOSPEC %r9; addq $(16 * 32), %rsp; diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index dbea6020ffe7..575292a33bdf 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -66,8 +66,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src); int err; - fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way; - err = blkcipher_walk_virt(desc, walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ -79,6 +77,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, /* Process multi-block batch */ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { + fn = (enc) ? 
cast5_ecb_enc_16way : cast5_ecb_dec_16way; do { fn(ctx, wdst, wsrc); diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 27226df3f7d8..c8d9cdacbf10 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c @@ -162,6 +162,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-pclmul", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index c194d5717ae5..5773e1161072 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -226,6 +226,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-intel", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 7a7de27c6f41..d9b734d0c8cc 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -45,6 +45,7 @@ #include #include +#include ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction @@ -172,7 +173,7 @@ continue_block: movzxw (bufp, %rax, 2), len lea crc_array(%rip), bufp lea (bufp, len, 1), bufp - jmp *bufp + JMP_NOSPEC bufp ################################################################ ## 2a) PROCESS FULL BLOCKS: diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index e32142bc071d..28c372003e44 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -164,7 +164,6 @@ static struct shash_alg alg = { .init = poly1305_simd_init, .update = poly1305_simd_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_simd_desc_ctx), .base = { .cra_name = "poly1305", diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c index 399a29d067d6..cb91a64a99e7 100644 --- a/arch/x86/crypto/salsa20_glue.c +++ b/arch/x86/crypto/salsa20_glue.c @@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc, salsa20_ivsetup(ctx, walk.iv); - if (likely(walk.nbytes == nbytes)) - { - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, - walk.dst.virt.addr, nbytes); - return blkcipher_walk_done(desc, &walk, 0); - } - while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c index 36870b26067a..d08805032f01 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c @@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state) { unsigned int j; - state->lens[0] = 0; - state->lens[1] = 1; - state->lens[2] = 2; - state->lens[3] = 3; + /* initially all lanes are unused */ + state->lens[0] = 0xFFFFFFFF00000000; + state->lens[1] = 0xFFFFFFFF00000001; + state->lens[2] = 0xFFFFFFFF00000002; + state->lens[3] = 0xFFFFFFFF00000003; + state->unused_lanes = 0xFF03020100; for (j = 0; j < 4; j++) state->ldata[j].job_in_lane = NULL; diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index 1c3b7ceb36d2..e7273a606a07 100644 --- 
a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -55,29 +55,31 @@ #define RAB1bl %bl #define RAB2bl %cl +#define CD0 0x0(%rsp) +#define CD1 0x8(%rsp) +#define CD2 0x10(%rsp) + +# used only before/after all rounds #define RCD0 %r8 #define RCD1 %r9 #define RCD2 %r10 -#define RCD0d %r8d -#define RCD1d %r9d -#define RCD2d %r10d - -#define RX0 %rbp -#define RX1 %r11 -#define RX2 %r12 +# used only during rounds +#define RX0 %r8 +#define RX1 %r9 +#define RX2 %r10 -#define RX0d %ebp -#define RX1d %r11d -#define RX2d %r12d +#define RX0d %r8d +#define RX1d %r9d +#define RX2d %r10d -#define RY0 %r13 -#define RY1 %r14 -#define RY2 %r15 +#define RY0 %r11 +#define RY1 %r12 +#define RY2 %r13 -#define RY0d %r13d -#define RY1d %r14d -#define RY2d %r15d +#define RY0d %r11d +#define RY1d %r12d +#define RY2d %r13d #define RT0 %rdx #define RT1 %rsi @@ -85,6 +87,8 @@ #define RT0d %edx #define RT1d %esi +#define RT1bl %sil + #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ @@ -92,6 +96,11 @@ op1##l T0(CTX, tmp2, 4), dst ## d; \ op2##l T1(CTX, tmp1, 4), dst ## d; +#define swap_ab_with_cd(ab, cd, tmp) \ + movq cd, tmp; \ + movq ab, cd; \ + movq tmp, ab; + /* * Combined G1 & G2 function. Reordered with help of rotates to have moves * at begining. @@ -110,15 +119,15 @@ /* G1,2 && G2,2 */ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ - xchgq cd ## 0, ab ## 0; \ + swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ - xchgq cd ## 1, ab ## 1; \ + swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ - xchgq cd ## 2, ab ## 2; + swap_ab_with_cd(ab ## 2, cd ## 2, RT0); #define enc_round_end(ab, x, y, n) \ addl y ## d, x ## d; \ @@ -168,6 +177,16 @@ decrypt_round3(ba, dc, (n*2)+1); \ decrypt_round3(ba, dc, (n*2)); +#define push_cd() \ + pushq RCD2; \ + pushq RCD1; \ + pushq RCD0; + +#define pop_cd() \ + popq RCD0; \ + popq RCD1; \ + popq RCD2; + #define inpack3(in, n, xy, m) \ movq 4*(n)(in), xy ## 0; \ xorq w+4*m(CTX), xy ## 0; \ @@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way) * %rdx: src, RIO * %rcx: bool, if true: xor output */ - pushq %r15; - pushq %r14; pushq %r13; pushq %r12; - pushq %rbp; pushq %rbx; pushq %rcx; /* bool xor */ @@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way) inpack_enc3(); - encrypt_cycle3(RAB, RCD, 0); - encrypt_cycle3(RAB, RCD, 1); - encrypt_cycle3(RAB, RCD, 2); - encrypt_cycle3(RAB, RCD, 3); - encrypt_cycle3(RAB, RCD, 4); - encrypt_cycle3(RAB, RCD, 5); - encrypt_cycle3(RAB, RCD, 6); - encrypt_cycle3(RAB, RCD, 7); + push_cd(); + encrypt_cycle3(RAB, CD, 0); + encrypt_cycle3(RAB, CD, 1); + encrypt_cycle3(RAB, CD, 2); + encrypt_cycle3(RAB, CD, 3); + encrypt_cycle3(RAB, CD, 4); + encrypt_cycle3(RAB, CD, 5); + encrypt_cycle3(RAB, CD, 6); + encrypt_cycle3(RAB, CD, 7); + pop_cd(); popq RIO; /* dst */ - popq %rbp; /* bool xor */ + popq RT1; /* bool xor */ - testb %bpl, %bpl; + testb RT1bl, RT1bl; jnz .L__enc_xor3; outunpack_enc3(mov); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; .L__enc_xor3: outunpack_enc3(xor); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; 
ENDPROC(__twofish_enc_blk_3way) @@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way) * %rsi: dst * %rdx: src, RIO */ - pushq %r15; - pushq %r14; pushq %r13; pushq %r12; - pushq %rbp; pushq %rbx; pushq %rsi; /* dst */ inpack_dec3(); - decrypt_cycle3(RAB, RCD, 7); - decrypt_cycle3(RAB, RCD, 6); - decrypt_cycle3(RAB, RCD, 5); - decrypt_cycle3(RAB, RCD, 4); - decrypt_cycle3(RAB, RCD, 3); - decrypt_cycle3(RAB, RCD, 2); - decrypt_cycle3(RAB, RCD, 1); - decrypt_cycle3(RAB, RCD, 0); + push_cd(); + decrypt_cycle3(RAB, CD, 7); + decrypt_cycle3(RAB, CD, 6); + decrypt_cycle3(RAB, CD, 5); + decrypt_cycle3(RAB, CD, 4); + decrypt_cycle3(RAB, CD, 3); + decrypt_cycle3(RAB, CD, 2); + decrypt_cycle3(RAB, CD, 1); + decrypt_cycle3(RAB, CD, 0); + pop_cd(); popq RIO; /* dst */ outunpack_dec3(); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; ENDPROC(twofish_dec_blk_3way) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 6e160031cfea..5d10b7a85cad 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -1,6 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include +#include +#include +#include +#include +#include /* @@ -92,111 +97,78 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 - .macro ALLOC_PT_GPREGS_ON_STACK - addq $-(15*8), %rsp - .endm - - .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 - .if \r11 - movq %r11, 6*8+\offset(%rsp) - .endif - .if \r8910 - movq %r10, 7*8+\offset(%rsp) - movq %r9, 8*8+\offset(%rsp) - movq %r8, 9*8+\offset(%rsp) - .endif - .if \rax - movq %rax, 10*8+\offset(%rsp) - .endif - .if \rcx - movq %rcx, 11*8+\offset(%rsp) - .endif - movq %rdx, 12*8+\offset(%rsp) - movq %rsi, 13*8+\offset(%rsp) - movq %rdi, 14*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - .macro SAVE_C_REGS offset=0 - SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0 - SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_R891011 - SAVE_C_REGS_HELPER 0, 1, 1, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RCX_R891011 - SAVE_C_REGS_HELPER 0, 1, 0, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11 - SAVE_C_REGS_HELPER 0, 0, 0, 1, 0 - .endm - - .macro SAVE_EXTRA_REGS offset=0 - movq %r15, 0*8+\offset(%rsp) - movq %r14, 1*8+\offset(%rsp) - movq %r13, 2*8+\offset(%rsp) - movq %r12, 3*8+\offset(%rsp) - movq %rbp, 4*8+\offset(%rsp) - movq %rbx, 5*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset - .endm - - .macro RESTORE_EXTRA_REGS offset=0 - movq 0*8+\offset(%rsp), %r15 - movq 1*8+\offset(%rsp), %r14 - movq 2*8+\offset(%rsp), %r13 - movq 3*8+\offset(%rsp), %r12 - movq 4*8+\offset(%rsp), %rbp - movq 5*8+\offset(%rsp), %rbx - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - - .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 - .if \rstor_r11 - movq 6*8(%rsp), %r11 +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 + /* + * Push registers and sanitize registers of values that a + * speculation attack might otherwise want to exploit. The + * lower registers are likely clobbered well before they + * could be put to use in a speculative execution gadget. 
+ * Interleave XOR with PUSH for better uop scheduling: + */ + .if \save_ret + pushq %rsi /* pt_regs->si */ + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ + movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ + .else + pushq %rdi /* pt_regs->di */ + pushq %rsi /* pt_regs->si */ .endif - .if \rstor_r8910 - movq 7*8(%rsp), %r10 - movq 8*8(%rsp), %r9 - movq 9*8(%rsp), %r8 + pushq \rdx /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq \rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ + xorl %r8d, %r8d /* nospec r8 */ + pushq %r9 /* pt_regs->r9 */ + xorl %r9d, %r9d /* nospec r9 */ + pushq %r10 /* pt_regs->r10 */ + xorl %r10d, %r10d /* nospec r10 */ + pushq %r11 /* pt_regs->r11 */ + xorl %r11d, %r11d /* nospec r11*/ + pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx*/ + pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp*/ + pushq %r12 /* pt_regs->r12 */ + xorl %r12d, %r12d /* nospec r12*/ + pushq %r13 /* pt_regs->r13 */ + xorl %r13d, %r13d /* nospec r13*/ + pushq %r14 /* pt_regs->r14 */ + xorl %r14d, %r14d /* nospec r14*/ + pushq %r15 /* pt_regs->r15 */ + xorl %r15d, %r15d /* nospec r15*/ + UNWIND_HINT_REGS + .if \save_ret + pushq %rsi /* return address on top of stack */ .endif - .if \rstor_rax - movq 10*8(%rsp), %rax +.endm + +.macro POP_REGS pop_rdi=1 skip_r11rcx=0 + popq %r15 + popq %r14 + popq %r13 + popq %r12 + popq %rbp + popq %rbx + .if \skip_r11rcx + popq %rsi + .else + popq %r11 .endif - .if \rstor_rcx - movq 11*8(%rsp), %rcx + popq %r10 + popq %r9 + popq %r8 + popq %rax + .if \skip_r11rcx + popq %rsi + .else + popq %rcx .endif - .if \rstor_rdx - movq 12*8(%rsp), %rdx + popq %rdx + popq %rsi + .if \pop_rdi + popq %rdi .endif - movq 13*8(%rsp), %rsi - movq 14*8(%rsp), %rdi - UNWIND_HINT_IRET_REGS offset=16*8 - .endm - .macro RESTORE_C_REGS - RESTORE_C_REGS_HELPER 1,1,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RAX - RESTORE_C_REGS_HELPER 0,1,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RCX - RESTORE_C_REGS_HELPER 1,0,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_R11 - RESTORE_C_REGS_HELPER 1,1,0,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RCX_R11 - RESTORE_C_REGS_HELPER 1,0,0,1,1 - .endm - - .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 - subq $-(15*8+\addskip), %rsp - .endm - - .macro icebp - .byte 0xf1 - .endm +.endm /* * This is a sneaky trick to help the unwinder find pt_regs on the stack. The @@ -204,7 +176,7 @@ For 32-bit we have the following conventions - kernel is built with * is just setting the LSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * - * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts + * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts * the original rbp. */ .macro ENCODE_FRAME_POINTER ptregs_offset=0 @@ -218,6 +190,148 @@ For 32-bit we have the following conventions - kernel is built with #endif .endm +#ifdef CONFIG_PAGE_TABLE_ISOLATION + +/* + * PAGE_TABLE_ISOLATION PGDs are 8k. 
Flip bit 12 to switch between the two + * halves: + */ +#define PTI_USER_PGTABLE_BIT PAGE_SHIFT +#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) +#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT +#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT) +#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK) + +.macro SET_NOFLUSH_BIT reg:req + bts $X86_CR3_PCID_NOFLUSH_BIT, \reg +.endm + +.macro ADJUST_KERNEL_CR3 reg:req + ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID + /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ + andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg +.endm + +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + mov %cr3, \scratch_reg + ADJUST_KERNEL_CR3 \scratch_reg + mov \scratch_reg, %cr3 +.Lend_\@: +.endm + +#define THIS_CPU_user_pcid_flush_mask \ + PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask + +.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + mov %cr3, \scratch_reg + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * Test if the ASID needs a flush. + */ + movq \scratch_reg, \scratch_reg2 + andq $(0x7FF), \scratch_reg /* mask ASID */ + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + /* Flush needed, clear the bit */ + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + movq \scratch_reg2, \scratch_reg + jmp .Lwrcr3_pcid_\@ + +.Lnoflush_\@: + movq \scratch_reg2, \scratch_reg + SET_NOFLUSH_BIT \scratch_reg + +.Lwrcr3_pcid_\@: + /* Flip the ASID to the user version */ + orq $(PTI_USER_PCID_MASK), \scratch_reg + +.Lwrcr3_\@: + /* Flip the PGD to the user version */ + orq $(PTI_USER_PGTABLE_MASK), \scratch_reg + mov \scratch_reg, %cr3 +.Lend_\@: +.endm + +.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req + pushq %rax + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax + popq %rax +.endm + +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI + movq %cr3, \scratch_reg + movq \scratch_reg, \save_reg + /* + * Test the user pagetable bit. If set, then the user page tables + * are active. If clear CR3 already has the kernel page table + * active. + */ + bt $PTI_USER_PGTABLE_BIT, \scratch_reg + jnc .Ldone_\@ + + ADJUST_KERNEL_CR3 \scratch_reg + movq \scratch_reg, %cr3 + +.Ldone_\@: +.endm + +.macro RESTORE_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * KERNEL pages can always resume with NOFLUSH as we do + * explicit flushes. + */ + bt $PTI_USER_PGTABLE_BIT, \save_reg + jnc .Lnoflush_\@ + + /* + * Check if there's a pending flush for the user ASID we're + * about to set. + */ + movq \save_reg, \scratch_reg + andq $(0x7FF), \scratch_reg + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + jmp .Lwrcr3_\@ + +.Lnoflush_\@: + SET_NOFLUSH_BIT \save_reg + +.Lwrcr3_\@: + /* + * The CR3 write could be avoided when not changing its value, + * but would require a CR3 read *and* a scratch register. 
+ */ + movq \save_reg, %cr3 +.Lend_\@: +.endm + +#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */ + +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req +.endm +.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req +.endm +.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req +.endm +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req +.endm +.macro RESTORE_CR3 scratch_reg:req save_reg:req +.endm + +#endif + #endif /* CONFIG_X86_64 */ /* diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 03505ffbe1b6..60e21ccfb6d6 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -208,7 +209,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) * special case only applies after poking regs and before the * very next return to user mode. */ - current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED); + ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED); #endif user_enter_irqoff(); @@ -284,7 +285,8 @@ __visible void do_syscall_64(struct pt_regs *regs) * regs->orig_ax, which changes the behavior of some syscalls. */ if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) { - regs->ax = sys_call_table[nr & __SYSCALL_MASK]( + nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls); + regs->ax = sys_call_table[nr]( regs->di, regs->si, regs->dx, regs->r10, regs->r8, regs->r9); } @@ -306,7 +308,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) unsigned int nr = (unsigned int)regs->orig_ax; #ifdef CONFIG_IA32_EMULATION - current->thread.status |= TS_COMPAT; + ti->status |= TS_COMPAT; #endif if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) { @@ -320,6 +322,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) } if (likely(nr < IA32_NR_syscalls)) { + nr = array_index_nospec(nr, IA32_NR_syscalls); /* * It's possible that a 32-bit syscall implementation * takes a 64-bit parameter but nonetheless assumes that diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 4838037f97f6..60c4c342316c 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -44,6 +44,7 @@ #include #include #include +#include .section .entry.text, "ax" @@ -243,6 +244,17 @@ ENTRY(__switch_to_asm) movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif +#ifdef CONFIG_RETPOLINE + /* + * When switching from a shallower to a deeper call stack + * the RSB may either underflow or use entries populated + * with userspace addresses. On CPUs where those concerns + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW +#endif + /* restore callee-saved registers */ popl %esi popl %edi @@ -290,7 +302,7 @@ ENTRY(ret_from_fork) /* kernel thread */ 1: movl %edi, %eax - call *%ebx + CALL_NOSPEC %ebx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() @@ -919,7 +931,7 @@ common_exception: movl %ecx, %es TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer - call *%edi + CALL_NOSPEC %edi jmp ret_from_exception END(common_exception) @@ -941,9 +953,10 @@ ENTRY(debug) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? 
*/ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + movl PER_CPU_VAR(cpu_entry_area), %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Ldebug_from_sysenter_stack TRACE_IRQS_OFF @@ -984,9 +997,10 @@ ENTRY(nmi) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + movl PER_CPU_VAR(cpu_entry_area), %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index bcfc5668dcb2..f7bfa701219b 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -23,7 +23,6 @@ #include #include #include -#include "calling.h" #include #include #include @@ -38,8 +37,11 @@ #include #include #include +#include #include +#include "calling.h" + .code64 .section .entry.text, "ax" @@ -136,6 +138,67 @@ END(native_usergs_sysret64) * with them due to bugs in both AMD and Intel CPUs. */ + .pushsection .entry_trampoline, "ax" + +/* + * The code in here gets remapped into cpu_entry_area's trampoline. This means + * that the assembler and linker have the wrong idea as to where this code + * lives (and, in fact, it's mapped more than once, so it's not even at a + * fixed address). So we can't reference any symbols outside the entry + * trampoline and expect it to work. + * + * Instead, we carefully abuse %rip-relative addressing. + * _entry_trampoline(%rip) refers to the start of the remapped) entry + * trampoline. We can thus find cpu_entry_area with this macro: + */ + +#define CPU_ENTRY_AREA \ + _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) + +/* The top word of the SYSENTER stack is hot and is usable as scratch space. */ +#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \ + SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA + +ENTRY(entry_SYSCALL_64_trampoline) + UNWIND_HINT_EMPTY + swapgs + + /* Stash the user RSP. */ + movq %rsp, RSP_SCRATCH + + /* Note: using %rsp as a scratch reg. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + /* Load the top of the task stack into RSP */ + movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp + + /* Start building the simulated IRET frame. */ + pushq $__USER_DS /* pt_regs->ss */ + pushq RSP_SCRATCH /* pt_regs->sp */ + pushq %r11 /* pt_regs->flags */ + pushq $__USER_CS /* pt_regs->cs */ + pushq %rcx /* pt_regs->ip */ + + /* + * x86 lacks a near absolute jump, and we can't jump to the real + * entry text with a relative jump. We could push the target + * address and then use retq, but this destroys the pipeline on + * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead, + * spill RDI and restore it in a second-stage trampoline. 
+ */ + pushq %rdi + movq $entry_SYSCALL_64_stage2, %rdi + JMP_NOSPEC %rdi +END(entry_SYSCALL_64_trampoline) + + .popsection + +ENTRY(entry_SYSCALL_64_stage2) + UNWIND_HINT_EMPTY + popq %rdi + jmp entry_SYSCALL_64_after_hwframe +END(entry_SYSCALL_64_stage2) + ENTRY(entry_SYSCALL_64) UNWIND_HINT_EMPTY /* @@ -145,11 +208,13 @@ ENTRY(entry_SYSCALL_64) */ swapgs + /* + * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it + * is not required to switch CR3. + */ movq %rsp, PER_CPU_VAR(rsp_scratch) movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp - TRACE_IRQS_OFF - /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ @@ -158,105 +223,27 @@ ENTRY(entry_SYSCALL_64) pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %rax /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq %rdx /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq $-ENOSYS /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - pushq %r11 /* pt_regs->r11 */ - sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ - UNWIND_HINT_REGS extra=0 - - /* - * If we need to do entry work or if we guess we'll need to do - * exit work, go straight to the slow path. - */ - movq PER_CPU_VAR(current_task), %r11 - testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) - jnz entry_SYSCALL64_slow_path -entry_SYSCALL_64_fastpath: - /* - * Easy case: enable interrupts and issue the syscall. If the syscall - * needs pt_regs, we'll call a stub that disables interrupts again - * and jumps to the slow path. - */ - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) -#if __SYSCALL_MASK == ~0 - cmpq $__NR_syscall_max, %rax -#else - andl $__SYSCALL_MASK, %eax - cmpl $__NR_syscall_max, %eax -#endif - ja 1f /* return -ENOSYS (already in pt_regs->ax) */ - movq %r10, %rcx - - /* - * This call instruction is handled specially in stub_ptregs_64. - * It might end up jumping to the slow path. If it jumps, RAX - * and all argument registers are clobbered. - */ - call *sys_call_table(, %rax, 8) -.Lentry_SYSCALL_64_after_fastpath_call: - - movq %rax, RAX(%rsp) -1: + PUSH_AND_CLEAR_REGS rax=$-ENOSYS - /* - * If we get here, then we know that pt_regs is clean for SYSRET64. - * If we see that no exit work is required (which we are required - * to check with IRQs off), then we can go straight to SYSRET64. - */ - DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - movq PER_CPU_VAR(current_task), %r11 - testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) - jnz 1f - - LOCKDEP_SYS_EXIT - TRACE_IRQS_ON /* user mode is traced as IRQs on */ - movq RIP(%rsp), %rcx - movq EFLAGS(%rsp), %r11 - RESTORE_C_REGS_EXCEPT_RCX_R11 - movq RSP(%rsp), %rsp - UNWIND_HINT_EMPTY - USERGS_SYSRET64 - -1: - /* - * The fast path looked good when we started, but something changed - * along the way and we need to switch to the slow path. Calling - * raise(3) will trigger this, for example. IRQs are off. - */ - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) - SAVE_EXTRA_REGS - movq %rsp, %rdi - call syscall_return_slowpath /* returns with IRQs disabled */ - jmp return_from_SYSCALL_64 -entry_SYSCALL64_slow_path: /* IRQs are off. 
*/ - SAVE_EXTRA_REGS movq %rsp, %rdi call do_syscall_64 /* returns with IRQs disabled */ -return_from_SYSCALL_64: - RESTORE_EXTRA_REGS TRACE_IRQS_IRETQ /* we're about to change IF */ /* * Try to use SYSRET instead of IRET if we're returning to - * a completely clean 64-bit userspace context. + * a completely clean 64-bit userspace context. If we're not, + * go to the slow exit path. */ movq RCX(%rsp), %rcx movq RIP(%rsp), %r11 - cmpq %rcx, %r11 /* RCX == RIP */ - jne opportunistic_sysret_failed + + cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ + jne swapgs_restore_regs_and_return_to_usermode /* * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP @@ -274,14 +261,14 @@ return_from_SYSCALL_64: /* If this changed %rcx, it was not canonical */ cmpq %rcx, %r11 - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode movq R11(%rsp), %r11 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode /* * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot @@ -302,12 +289,12 @@ return_from_SYSCALL_64: * would never get past 'stuck_here'. */ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 - jnz opportunistic_sysret_failed + jnz swapgs_restore_regs_and_return_to_usermode /* nothing to check for RSP */ cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode /* * We win! This label is here just for ease of understanding @@ -315,56 +302,29 @@ return_from_SYSCALL_64: */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ - RESTORE_C_REGS_EXCEPT_RCX_R11 - movq RSP(%rsp), %rsp UNWIND_HINT_EMPTY - USERGS_SYSRET64 + POP_REGS pop_rdi=0 skip_r11rcx=1 -opportunistic_sysret_failed: - SWAPGS - jmp restore_c_regs_and_iret -END(entry_SYSCALL_64) - -ENTRY(stub_ptregs_64) /* - * Syscalls marked as needing ptregs land here. - * If we are on the fast path, we need to save the extra regs, - * which we achieve by trying again on the slow path. If we are on - * the slow path, the extra regs are already saved. - * - * RAX stores a pointer to the C function implementing the syscall. - * IRQs are on. + * Now all regs are restored except RSP and RDI. + * Save old stack pointer and switch to trampoline stack. */ - cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp) - jne 1f + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + + pushq RSP-RDI(%rdi) /* RSP */ + pushq (%rdi) /* RDI */ /* - * Called from fast path -- disable IRQs again, pop return address - * and jump to slow path + * We are on the trampoline stack. All regs except RDI are live. + * We can do future final exit work right here. 
*/ - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - popq %rax - UNWIND_HINT_REGS extra=0 - jmp entry_SYSCALL64_slow_path + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi -1: - jmp *%rax /* Called from C */ -END(stub_ptregs_64) - -.macro ptregs_stub func -ENTRY(ptregs_\func) - UNWIND_HINT_FUNC - leaq \func(%rip), %rax - jmp stub_ptregs_64 -END(ptregs_\func) -.endm - -/* Instantiate ptregs_stub for each ptregs-using syscall */ -#define __SYSCALL_64_QUAL_(sym) -#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym -#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym) -#include + popq %rdi + popq %rsp + USERGS_SYSRET64 +END(entry_SYSCALL_64) /* * %rdi: prev task @@ -392,6 +352,17 @@ ENTRY(__switch_to_asm) movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset #endif +#ifdef CONFIG_RETPOLINE + /* + * When switching from a shallower to a deeper call stack + * the RSB may either underflow or use entries populated + * with userspace addresses. On CPUs where those concerns + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW +#endif + /* restore callee-saved registers */ popq %r15 popq %r14 @@ -423,13 +394,12 @@ ENTRY(ret_from_fork) movq %rsp, %rdi call syscall_return_slowpath /* returns with IRQs disabled */ TRACE_IRQS_ON /* user mode is traced as IRQS on */ - SWAPGS - jmp restore_regs_and_iret + jmp swapgs_restore_regs_and_return_to_usermode 1: /* kernel thread */ movq %r12, %rdi - call *%rbx + CALL_NOSPEC %rbx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() @@ -457,12 +427,13 @@ END(irq_entries_start) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY - pushfq - testl $X86_EFLAGS_IF, (%rsp) + pushq %rax + SAVE_FLAGS(CLBR_RAX) + testl $X86_EFLAGS_IF, %eax jz .Lokay_\@ ud2 .Lokay_\@: - addq $8, %rsp + popq %rax #endif .endm @@ -554,21 +525,22 @@ END(irq_entries_start) /* 0(%rsp): ~(interrupt number) */ .macro interrupt func cld - ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + + testb $3, CS-ORIG_RAX(%rsp) + jz 1f + SWAPGS + call switch_to_thread_stack +1: + + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER testb $3, CS(%rsp) jz 1f /* - * IRQ from user mode. Switch to kernel gsbase and inform context - * tracking that we're in kernel mode. - */ - SWAPGS - - /* + * IRQ from user mode. + * * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode * (which can take locks). Since TRACE_IRQS_OFF idempotent, @@ -612,8 +584,46 @@ GLOBAL(retint_user) mov %rsp,%rdi call prepare_exit_to_usermode TRACE_IRQS_IRETQ + +GLOBAL(swapgs_restore_regs_and_return_to_usermode) +#ifdef CONFIG_DEBUG_ENTRY + /* Assert that pt_regs indicates user mode. */ + testb $3, CS(%rsp) + jnz 1f + ud2 +1: +#endif + POP_REGS pop_rdi=0 + + /* + * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. + * Save old stack pointer and switch to trampoline stack. + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + + /* Copy the IRET frame to the trampoline stack. */ + pushq 6*8(%rdi) /* SS */ + pushq 5*8(%rdi) /* RSP */ + pushq 4*8(%rdi) /* EFLAGS */ + pushq 3*8(%rdi) /* CS */ + pushq 2*8(%rdi) /* RIP */ + + /* Push user RDI on the trampoline stack. */ + pushq (%rdi) + + /* + * We are on the trampoline stack. All regs except RDI are live. + * We can do future final exit work right here. 
+ */ + + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi + + /* Restore RDI. */ + popq %rdi SWAPGS - jmp restore_regs_and_iret + INTERRUPT_RETURN + /* Returning to kernel space */ retint_kernel: @@ -633,15 +643,16 @@ retint_kernel: */ TRACE_IRQS_IRETQ -/* - * At this label, code paths which return to kernel and to user, - * which come from interrupts/exception and from syscalls, merge. - */ -GLOBAL(restore_regs_and_iret) - RESTORE_EXTRA_REGS -restore_c_regs_and_iret: - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 +GLOBAL(restore_regs_and_return_to_kernel) +#ifdef CONFIG_DEBUG_ENTRY + /* Assert that pt_regs indicates kernel mode. */ + testb $3, CS(%rsp) + jz 1f + ud2 +1: +#endif + POP_REGS + addq $8, %rsp /* skip regs->orig_ax */ INTERRUPT_RETURN ENTRY(native_iret) @@ -689,7 +700,9 @@ native_irq_return_ldt: */ pushq %rdi /* Stash user RDI */ - SWAPGS + SWAPGS /* to kernel GS */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ + movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ movq (1*8)(%rsp), %rax /* user RIP */ @@ -705,7 +718,6 @@ native_irq_return_ldt: /* Now RAX == RSP. */ andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ - popq %rdi /* Restore user RDI */ /* * espfix_stack[31:16] == 0. The page tables are set up such that @@ -716,7 +728,11 @@ native_irq_return_ldt: * still points to an RO alias of the ESPFIX stack. */ orq PER_CPU_VAR(espfix_stack), %rax - SWAPGS + + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi + SWAPGS /* to user GS */ + popq %rdi /* Restore user RDI */ + movq %rax, %rsp UNWIND_HINT_IRET_REGS offset=8 @@ -805,7 +821,35 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt /* * Exception entry points. */ -#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) +#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) + +/* + * Switch to the thread stack. This is called with the IRET frame and + * orig_ax on the stack. (That is, RDI..R12 are not on the stack and + * space has not been allocated for them.) + */ +ENTRY(switch_to_thread_stack) + UNWIND_HINT_FUNC + + pushq %rdi + /* Need to switch before accessing the thread stack. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI + + pushq 7*8(%rdi) /* regs->ss */ + pushq 6*8(%rdi) /* regs->rsp */ + pushq 5*8(%rdi) /* regs->eflags */ + pushq 4*8(%rdi) /* regs->cs */ + pushq 3*8(%rdi) /* regs->ip */ + pushq 2*8(%rdi) /* regs->orig_ax */ + pushq 8(%rdi) /* return address */ + UNWIND_HINT_FUNC + + movq (%rdi), %rdi + ret +END(switch_to_thread_stack) .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ENTRY(\sym) @@ -818,17 +862,16 @@ ENTRY(\sym) ASM_CLAC - .ifeq \has_error_code + .if \has_error_code == 0 pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif - ALLOC_PT_GPREGS_ON_STACK + .if \paranoid < 2 + testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ + jnz .Lfrom_usermode_switch_stack_\@ + .endif .if \paranoid - .if \paranoid == 1 - testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ - jnz 1f - .endif call paranoid_entry .else call error_entry @@ -870,20 +913,15 @@ ENTRY(\sym) jmp error_exit .endif - .if \paranoid == 1 + .if \paranoid < 2 /* - * Paranoid entry from userspace. Switch stacks and treat it + * Entry from userspace. Switch stacks and treat it * as a normal entry. This means that paranoid handlers * run in real process context if user_mode(regs). 
*/ -1: +.Lfrom_usermode_switch_stack_\@: call error_entry - - movq %rsp, %rdi /* pt_regs pointer */ - call sync_regs - movq %rax, %rsp /* switch stack */ - movq %rsp, %rdi /* pt_regs pointer */ .if \has_error_code @@ -1037,9 +1075,7 @@ ENTRY(xen_failsafe_callback) addq $0x30, %rsp UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ - ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) @@ -1055,10 +1091,11 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK +idtentry int3 do_int3 has_error_code=0 idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN +idtentry xennmi do_nmi has_error_code=0 idtentry xendebug do_debug has_error_code=0 idtentry xenint3 do_int3 has_error_code=0 #endif @@ -1071,7 +1108,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1 #endif #ifdef CONFIG_X86_MCE -idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) +idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* @@ -1082,8 +1119,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 + PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx @@ -1092,7 +1128,11 @@ ENTRY(paranoid_entry) js 1f /* negative -> in kernel */ SWAPGS xorl %ebx, %ebx -1: ret + +1: + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + + ret END(paranoid_entry) /* @@ -1112,30 +1152,27 @@ ENTRY(paranoid_exit) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG testl %ebx, %ebx /* swapgs needed? */ - jnz paranoid_exit_no_swapgs + jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK - jmp paranoid_exit_restore -paranoid_exit_no_swapgs: + jmp .Lparanoid_exit_restore +.Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG -paranoid_exit_restore: - RESTORE_EXTRA_REGS - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 - INTERRUPT_RETURN + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 +.Lparanoid_exit_restore: + jmp restore_regs_and_return_to_kernel END(paranoid_exit) /* - * Save all registers in pt_regs, and switch gs if needed. + * Save all registers in pt_regs, and switch GS if needed. * Return: EBX=0: came from user mode; EBX=1: otherwise */ ENTRY(error_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 + PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 - xorl %ebx, %ebx testb $3, CS+8(%rsp) jz .Lerror_kernelspace @@ -1144,8 +1181,18 @@ ENTRY(error_entry) * from user mode due to an IRET fault. */ SWAPGS + /* We have user CR3. Change to kernel CR3. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax .Lerror_entry_from_usermode_after_swapgs: + /* Put us onto the real thread stack. */ + popq %r12 /* save return addr in %12 */ + movq %rsp, %rdi /* arg0 = pt_regs pointer */ + call sync_regs + movq %rax, %rsp /* switch stack */ + ENCODE_FRAME_POINTER + pushq %r12 + /* * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode @@ -1182,6 +1229,7 @@ ENTRY(error_entry) * .Lgs_change's error handler with kernel gsbase. 
*/ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done .Lbstep_iret: @@ -1191,10 +1239,11 @@ ENTRY(error_entry) .Lerror_bad_iret: /* - * We came from an IRET to user mode, so we have user gsbase. - * Switch to kernel gsbase: + * We came from an IRET to user mode, so we have user + * gsbase and CR3. Switch to kernel gsbase and CR3: */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax /* * Pretend that the exception came from user mode: set up pt_regs @@ -1223,10 +1272,17 @@ ENTRY(error_exit) jmp retint_user END(error_exit) -/* Runs on exception stack */ -/* XXX: broken on Xen PV */ +/* + * Runs on exception stack. Xen PV does not go through this path at all, + * so we can use real assembly here. + * + * Registers: + * %r14: Used to save/restore the CR3 of the interrupted context + * when PAGE_TABLE_ISOLATION is in use. Do not clobber. + */ ENTRY(nmi) UNWIND_HINT_IRET_REGS + /* * We allow breakpoints in NMIs. If a breakpoint occurs, then * the iretq it performs will take us out of NMI context. @@ -1284,8 +1340,9 @@ ENTRY(nmi) * stacks lest we corrupt the "NMI executing" variable. */ - SWAPGS_UNSAFE_STACK + swapgs cld + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 @@ -1296,22 +1353,7 @@ ENTRY(nmi) pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq (%rdx) /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq %rax /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - pushq %r11 /* pt_regs->r11 */ - pushq %rbx /* pt_regs->rbx */ - pushq %rbp /* pt_regs->rbp */ - pushq %r12 /* pt_regs->r12 */ - pushq %r13 /* pt_regs->r13 */ - pushq %r14 /* pt_regs->r14 */ - pushq %r15 /* pt_regs->r15 */ - UNWIND_HINT_REGS + PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER /* @@ -1328,8 +1370,7 @@ ENTRY(nmi) * Return back to user mode. We must *not* do the normal exit * work, because we don't want to enable interrupts. */ - SWAPGS - jmp restore_regs_and_iret + jmp swapgs_restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* @@ -1450,7 +1491,7 @@ nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ - INTERRUPT_RETURN + iretq first_nmi: /* Restore rdx. */ @@ -1481,7 +1522,7 @@ first_nmi: pushfq /* RFLAGS */ pushq $__KERNEL_CS /* CS */ pushq $1f /* RIP */ - INTERRUPT_RETURN /* continues at repeat_nmi below */ + iretq /* continues at repeat_nmi below */ UNWIND_HINT_IRET_REGS 1: #endif @@ -1522,7 +1563,6 @@ end_repeat_nmi: * frame to point back to repeat_nmi. */ pushq $-1 /* ORIG_RAX: no syscall to restart */ - ALLOC_PT_GPREGS_ON_STACK /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit @@ -1539,34 +1579,40 @@ end_repeat_nmi: movq $-1, %rsi call do_nmi + RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 + testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: - RESTORE_EXTRA_REGS - RESTORE_C_REGS + POP_REGS - /* Point RSP at the "iret" frame. */ - REMOVE_PT_GPREGS_FROM_STACK 6*8 + /* + * Skip orig_ax and the "outermost" frame to point RSP at the "iret" + * at the "iret" frame. + */ + addq $6*8, %rsp /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from - * the SYSCALL entry and exit paths. 
On a native kernel, we - * could just inspect RIP, but, on paravirt kernels, - * INTERRUPT_RETURN can translate into a jump into a - * hypercall page. + * the SYSCALL entry and exit paths. + * + * We arguably should just inspect RIP instead, but I (Andy) wrote + * this code when I had the misapprehension that Xen PV supported + * NMIs, and Xen PV would break that approach. */ std movq $0, 5*8(%rsp) /* clear "NMI executing" */ /* - * INTERRUPT_RETURN reads the "iret" frame and exits the NMI - * stack in a single instruction. We are returning to kernel - * mode, so this cannot result in a fault. + * iretq reads the "iret" frame and exits the NMI stack in a + * single instruction. We are returning to kernel mode, so this + * cannot result in a fault. Similarly, we don't need to worry + * about espfix64 on the way back to kernel mode. */ - INTERRUPT_RETURN + iretq END(nmi) ENTRY(ignore_sysret) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index b5c7a56ed256..364ea4a207be 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -48,7 +48,11 @@ */ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ - SWAPGS_UNSAFE_STACK + SWAPGS + + /* We are about to clobber %rsp anyway, clobbering here is OK */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* @@ -81,15 +85,25 @@ ENTRY(entry_SYSENTER_compat) pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorl %r12d, %r12d /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorl %r13d, %r13d /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorl %r15d, %r15d /* nospec r15 */ cld /* @@ -186,8 +200,13 @@ ENTRY(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs - /* Stash user ESP and switch to the kernel stack. */ + /* Stash user ESP */ movl %esp, %r8d + + /* Use %rsp as scratch reg. 
User ESP is stashed in r8 */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ @@ -205,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq %rbp /* pt_regs->cx (stashed in bp) */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorl %r12d, %r12d /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorl %r13d, %r13d /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorl %r15d, %r15d /* nospec r15 */ /* * User mode is traced as though IRQs are on, and SYSENTER @@ -256,10 +285,22 @@ sysret32_from_system_call: * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ - xorq %r8, %r8 - xorq %r9, %r9 - xorq %r10, %r10 movq RSP-ORIG_RAX(%rsp), %rsp + + /* + * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored + * on the process stack which is not mapped to userspace and + * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 + * switch until after after the last reference to the process + * stack. + * + * %r8/%r9 are zeroed before the sysret, thus safe to clobber. + */ + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 + + xorl %r8d, %r8d + xorl %r9d, %r9d + xorl %r10d, %r10d swapgs sysretl END(entry_SYSCALL_compat) @@ -306,23 +347,36 @@ ENTRY(entry_INT80_compat) */ movl %eax, %eax - /* Construct struct pt_regs on stack (iret frame is already on stack) */ pushq %rax /* pt_regs->orig_ax */ + + /* switch to thread stack expects orig_ax to be pushed */ + call switch_to_thread_stack + pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp */ pushq %r12 /* pt_regs->r12 */ + xorl %r12d, %r12d /* nospec r12 */ pushq %r13 /* pt_regs->r13 */ + xorl %r13d, %r13d /* nospec r13 */ pushq %r14 /* pt_regs->r14 */ + xorl %r14d, %r14d /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ + xorl %r15d, %r15d /* nospec r15 */ cld /* @@ -337,8 +391,7 @@ ENTRY(entry_INT80_compat) /* Go back to user mode. 
*/ TRACE_IRQS_ON - SWAPGS - jmp restore_regs_and_iret + jmp swapgs_restore_regs_and_return_to_usermode END(entry_INT80_compat) ENTRY(stub32_clone) diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index 9c09775e589d..c176d2fab1da 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -7,14 +7,11 @@ #include #include -#define __SYSCALL_64_QUAL_(sym) sym -#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym - -#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); #include #undef __SYSCALL_64 -#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym), +#define __SYSCALL_64(nr, sym, qual) [nr] = sym, extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile index 331f1dca5085..6fb9b57ed5ba 100644 --- a/arch/x86/entry/syscalls/Makefile +++ b/arch/x86/entry/syscalls/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -out := $(obj)/../../include/generated/asm -uapi := $(obj)/../../include/generated/uapi/asm +out := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm # Create output directory if not already present _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index f279ba2643dc..542392b6aab6 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -37,6 +37,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" @@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) WARN_ON_ONCE(address != regs->ip); + /* This should be unreachable in NATIVE mode. */ + if (WARN_ON(vsyscall_mode == NATIVE)) + return false; + if (vsyscall_mode == NONE) { warn_bad_vsyscall(KERN_INFO, regs, "vsyscall attempted with vsyscall=none"); @@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr) return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; } +/* + * The VSYSCALL page is the only user-accessible page in the kernel address + * range. Normally, the kernel page tables can have _PAGE_USER clear, but + * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls + * are enabled. + * + * Some day we may create a "minimal" vsyscall mode in which we emulate + * vsyscalls but leave the page not present. If so, we skip calling + * this. 
+ */ +void __init set_vsyscall_pgtable_user_bits(pgd_t *root) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_pgd(root, VSYSCALL_ADDR); + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); + p4d = p4d_offset(pgd, VSYSCALL_ADDR); +#if CONFIG_PGTABLE_LEVELS >= 5 + set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER)); +#endif + pud = pud_offset(p4d, VSYSCALL_ADDR); + set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); + pmd = pmd_offset(pud, VSYSCALL_ADDR); + set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); +} + void __init map_vsyscall(void) { extern char __vsyscall_page; unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); - if (vsyscall_mode != NONE) + if (vsyscall_mode != NONE) { __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, vsyscall_mode == NATIVE ? PAGE_KERNEL_VSYSCALL : PAGE_KERNEL_VVAR); + set_vsyscall_pgtable_user_bits(swapper_pg_dir); + } BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index a6eee5ac4f58..2aefacf5c5b2 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void) int ret; if (!x86_match_cpu(cpu_match)) - return 0; + return -ENODEV; if (!boot_cpu_has(X86_FEATURE_ACC_POWER)) return -ENODEV; diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 80534d3c2480..589af1eec7c1 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment) struct ldt_struct *ldt; /* IRQs are off, so this synchronizes with smp_store_release */ - ldt = lockless_dereference(current->active_mm->context.ldt); + ldt = READ_ONCE(current->active_mm->context.ldt); if (!ldt || idx >= ldt->nr_entries) return 0; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 141e07b06216..24ffa1e88cf9 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -582,6 +582,24 @@ static __init int bts_init(void) if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) return -ENODEV; + if (boot_cpu_has(X86_FEATURE_PTI)) { + /* + * BTS hardware writes through a virtual memory map we must + * either use the kernel physical map, or the user mapping of + * the AUX buffer. + * + * However, since this driver supports per-CPU and per-task inherit + * we cannot use the user mapping since it will not be availble + * if we're not running the owning process. + * + * With PTI we can't use the kernal map either, because its not + * there when we run userspace. + * + * For now, disable this driver when using PTI. 
+ */ + return -ENODEV; + } + bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | PERF_PMU_CAP_EXCLUSIVE; bts_pmu.task_ctx_nr = perf_sw_context; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9fb9a1f1e47b..9b18a227fff7 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2958,6 +2958,10 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event) if (event->attr.use_clockid) flags &= ~PERF_SAMPLE_TIME; + if (!event->attr.exclude_kernel) + flags &= ~PERF_SAMPLE_REGS_USER; + if (event->attr.sample_regs_user & ~PEBS_REGS) + flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); return flags; } @@ -3190,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left) X86_CONFIG(.event=0xc0, .umask=0x01)) { if (left < 128) left = 128; - left &= ~0x3fu; + left &= ~0x3fULL; } return left; } @@ -3555,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu) break; case INTEL_FAM6_SANDYBRIDGE_X: - switch (cpu_data(cpu).x86_mask) { + switch (cpu_data(cpu).x86_stepping) { case 6: rev = 0x618; break; case 7: rev = 0x70c; break; } @@ -3730,6 +3734,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); static struct attribute *hsw_events_attrs[] = { + EVENT_PTR(mem_ld_hsw), + EVENT_PTR(mem_st_hsw), + EVENT_PTR(td_slots_issued), + EVENT_PTR(td_slots_retired), + EVENT_PTR(td_fetch_bubbles), + EVENT_PTR(td_total_slots), + EVENT_PTR(td_total_slots_scale), + EVENT_PTR(td_recovery_bubbles), + EVENT_PTR(td_recovery_bubbles_scale), + NULL +}; + +static struct attribute *hsw_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_commit), EVENT_PTR(tx_abort), @@ -3742,18 +3759,16 @@ static struct attribute *hsw_events_attrs[] = { EVENT_PTR(el_conflict), EVENT_PTR(cycles_t), EVENT_PTR(cycles_ct), - EVENT_PTR(mem_ld_hsw), - EVENT_PTR(mem_st_hsw), - EVENT_PTR(td_slots_issued), - EVENT_PTR(td_slots_retired), - EVENT_PTR(td_fetch_bubbles), - EVENT_PTR(td_total_slots), - EVENT_PTR(td_total_slots_scale), - EVENT_PTR(td_recovery_bubbles), - EVENT_PTR(td_recovery_bubbles_scale), NULL }; +static __init struct attribute **get_hsw_events_attrs(void) +{ + return boot_cpu_has(X86_FEATURE_RTM) ? + merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) : + hsw_events_attrs; +} + static ssize_t freeze_on_smi_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -3832,6 +3847,8 @@ static struct attribute *intel_pmu_attrs[] = { __init int intel_pmu_init(void) { + struct attribute **extra_attr = NULL; + struct attribute **to_free = NULL; union cpuid10_edx edx; union cpuid10_eax eax; union cpuid10_ebx ebx; @@ -3839,7 +3856,6 @@ __init int intel_pmu_init(void) unsigned int unused; struct extra_reg *er; int version, i; - struct attribute **extra_attr = NULL; char *name; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { @@ -4182,7 +4198,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.lbr_double_abort = true; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
hsw_format_attr : nhm_format_attr; @@ -4221,7 +4237,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.limit_period = bdw_limit_period; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; @@ -4279,7 +4295,8 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; extra_attr = merge_attr(extra_attr, skl_format_attr); - x86_pmu.cpu_events = hsw_events_attrs; + to_free = extra_attr; + x86_pmu.cpu_events = get_hsw_events_attrs(); intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); pr_cont("Skylake events, "); @@ -4386,6 +4403,7 @@ __init int intel_pmu_init(void) pr_cont("full-width counters, "); } + kfree(to_free); return 0; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3674a4b6f8bd..8156e47da7ba 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -3,16 +3,19 @@ #include #include +#include #include +#include #include #include "../perf_event.h" +/* Waste a full page so it can be mapped into the cpu_entry_area */ +DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + /* The size of a BTS record in bytes: */ #define BTS_RECORD_SIZE 24 -#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) -#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) #define PEBS_FIXUP_SIZE PAGE_SIZE /* @@ -279,17 +282,67 @@ void fini_debug_store_on_cpu(int cpu) static DEFINE_PER_CPU(void *, insn_buffer); -static int alloc_pebs_buffer(int cpu) +static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + unsigned long start = (unsigned long)cea; + phys_addr_t pa; + size_t msz = 0; + + pa = virt_to_phys(addr); + + preempt_disable(); + for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, pa, prot); + + /* + * This is a cross-CPU update of the cpu_entry_area, we must shoot down + * all TLB entries for it. + */ + flush_tlb_kernel_range(start, start + size); + preempt_enable(); +} + +static void ds_clear_cea(void *cea, size_t size) +{ + unsigned long start = (unsigned long)cea; + size_t msz = 0; + + preempt_disable(); + for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); + + flush_tlb_kernel_range(start, start + size); + preempt_enable(); +} + +static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) +{ + unsigned int order = get_order(size); int node = cpu_to_node(cpu); - int max; - void *buffer, *ibuffer; + struct page *page; + + page = __alloc_pages_node(node, flags | __GFP_ZERO, order); + return page ? 
page_address(page) : NULL; +} + +static void dsfree_pages(const void *buffer, size_t size) +{ + if (buffer) + free_pages((unsigned long)buffer, get_order(size)); +} + +static int alloc_pebs_buffer(int cpu) +{ + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + size_t bsiz = x86_pmu.pebs_buffer_size; + int max, node = cpu_to_node(cpu); + void *buffer, *ibuffer, *cea; if (!x86_pmu.pebs) return 0; - buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); + buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); if (unlikely(!buffer)) return -ENOMEM; @@ -300,25 +353,27 @@ static int alloc_pebs_buffer(int cpu) if (x86_pmu.intel_cap.pebs_format < 2) { ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); if (!ibuffer) { - kfree(buffer); + dsfree_pages(buffer, bsiz); return -ENOMEM; } per_cpu(insn_buffer, cpu) = ibuffer; } - - max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size; - - ds->pebs_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_pebs_vaddr = buffer; + /* Update the cpu entry area mapping */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds->pebs_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL); ds->pebs_index = ds->pebs_buffer_base; - ds->pebs_absolute_maximum = ds->pebs_buffer_base + - max * x86_pmu.pebs_record_size; - + max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); + ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; return 0; } static void release_pebs_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.pebs) return; @@ -326,73 +381,70 @@ static void release_pebs_buffer(int cpu) kfree(per_cpu(insn_buffer, cpu)); per_cpu(insn_buffer, cpu) = NULL; - kfree((void *)(unsigned long)ds->pebs_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds_clear_cea(cea, x86_pmu.pebs_buffer_size); ds->pebs_buffer_base = 0; + dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); + hwev->ds_pebs_vaddr = NULL; } static int alloc_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - int node = cpu_to_node(cpu); - int max, thresh; - void *buffer; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *buffer, *cea; + int max; if (!x86_pmu.bts) return 0; - buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); + buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); if (unlikely(!buffer)) { WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); return -ENOMEM; } - - max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; - thresh = max / 16; - - ds->bts_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_bts_vaddr = buffer; + /* Update the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds->bts_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); ds->bts_index = ds->bts_buffer_base; - ds->bts_absolute_maximum = ds->bts_buffer_base + - max * BTS_RECORD_SIZE; - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - - thresh * BTS_RECORD_SIZE; - + max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE); + ds->bts_absolute_maximum = ds->bts_buffer_base + max; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16); return 0; } static void 
release_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.bts) return; - kfree((void *)(unsigned long)ds->bts_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds_clear_cea(cea, BTS_BUFFER_SIZE); ds->bts_buffer_base = 0; + dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); + hwev->ds_bts_vaddr = NULL; } static int alloc_ds_buffer(int cpu) { - int node = cpu_to_node(cpu); - struct debug_store *ds; - - ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); - if (unlikely(!ds)) - return -ENOMEM; + struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; + memset(ds, 0, sizeof(*ds)); per_cpu(cpu_hw_events, cpu).ds = ds; - return 0; } static void release_ds_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - return; - per_cpu(cpu_hw_events, cpu).ds = NULL; - kfree(ds); } void release_ds_buffers(void) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index ae64d0b69729..cf372b90557e 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void) * on PMU interrupt */ if (boot_cpu_data.x86_model == 28 - && boot_cpu_data.x86_mask < 10) { + && boot_cpu_data.x86_stepping < 10) { pr_cont("LBR disabled due to erratum"); return; } diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c index a5604c352930..408879b0c0d4 100644 --- a/arch/x86/events/intel/p6.c +++ b/arch/x86/events/intel/p6.c @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = { static __init void p6_pmu_rdpmc_quirk(void) { - if (boot_cpu_data.x86_mask < 9) { + if (boot_cpu_data.x86_stepping < 9) { /* * PPro erratum 26; fixed in stepping 9 and above. */ diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 95cb19f4e06f..8243fdbb9b9c 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3554,24 +3554,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = { NULL, }; +/* + * To determine the number of CHAs, it should read bits 27:0 in the CAPID6 + * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083. 
+ */ +#define SKX_CAPID6 0x9c +#define SKX_CHA_BIT_MASK GENMASK(27, 0) + static int skx_count_chabox(void) { - struct pci_dev *chabox_dev = NULL; - int bus, count = 0; + struct pci_dev *dev = NULL; + u32 val = 0; - while (1) { - chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev); - if (!chabox_dev) - break; - if (count == 0) - bus = chabox_dev->bus->number; - if (bus != chabox_dev->bus->number) - break; - count++; - } + dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev); + if (!dev) + goto out; - pci_dev_put(chabox_dev); - return count; + pci_read_config_dword(dev, SKX_CAPID6, &val); + val &= SKX_CHA_BIT_MASK; +out: + pci_dev_put(dev); + return hweight32(val); } void skx_uncore_cpu_init(void) @@ -3598,7 +3601,7 @@ static struct intel_uncore_type skx_uncore_imc = { }; static struct attribute *skx_upi_uncore_formats_attr[] = { - &format_attr_event_ext.attr, + &format_attr_event.attr, &format_attr_umask_ext.attr, &format_attr_edge.attr, &format_attr_inv.attr, diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 4196f81ec0e1..8e4ea143ed96 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -14,6 +14,8 @@ #include +#include + /* To enable MSR tracing please use the generic trace points. */ /* @@ -77,38 +79,41 @@ struct amd_nb { struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; -/* The maximal number of PEBS events: */ -#define MAX_PEBS_EVENTS 8 #define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1) /* * Flags PEBS can handle without an PMI. * * TID can only be handled by flushing at context switch. + * REGS_USER can be handled for events limited to ring 3. * */ #define PEBS_FREERUNNING_FLAGS \ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \ PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \ PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \ - PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR) - -/* - * A debug store configuration. - * - * We only support architectures that use 64bit fields. - */ -struct debug_store { - u64 bts_buffer_base; - u64 bts_index; - u64 bts_absolute_maximum; - u64 bts_interrupt_threshold; - u64 pebs_buffer_base; - u64 pebs_index; - u64 pebs_absolute_maximum; - u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; -}; + PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ + PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER) + +#define PEBS_REGS \ + (PERF_REG_X86_AX | \ + PERF_REG_X86_BX | \ + PERF_REG_X86_CX | \ + PERF_REG_X86_DX | \ + PERF_REG_X86_DI | \ + PERF_REG_X86_SI | \ + PERF_REG_X86_SP | \ + PERF_REG_X86_BP | \ + PERF_REG_X86_IP | \ + PERF_REG_X86_FLAGS | \ + PERF_REG_X86_R8 | \ + PERF_REG_X86_R9 | \ + PERF_REG_X86_R10 | \ + PERF_REG_X86_R11 | \ + PERF_REG_X86_R12 | \ + PERF_REG_X86_R13 | \ + PERF_REG_X86_R14 | \ + PERF_REG_X86_R15) /* * Per register state. 
@@ -194,6 +199,8 @@ struct cpu_hw_events { * Intel DebugStore bits */ struct debug_store *ds; + void *ds_pebs_vaddr; + void *ds_bts_vaddr; u64 pebs_enabled; int n_pebs; int n_large_pebs; diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a5db63f728a2..a0b86cf486e0 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -113,7 +113,7 @@ void hyperv_init(void) u64 guest_id; union hv_x64_msr_hypercall_contents hypercall_msr; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) return; /* Allocate percpu VP index */ diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 8d0ec9df1cbe..f077401869ee 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) if (boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86_model <= 0x05 && - boot_cpu_data.x86_mask < 0x0A) + boot_cpu_data.x86_stepping < 0x0A) return 1; else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E)) return 1; diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index dbfd0854651f..cf5961ca8677 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ - ".popsection" + ".popsection\n" #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ OLDINSTR_2(oldinstr, 1, 2) \ @@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ - ".popsection" + ".popsection\n" /* * Alternative instructions for different CPU types or capabilities. diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index 4d4015ddcf26..c356098b6fb9 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h @@ -7,6 +7,8 @@ #ifndef _ASM_X86_MACH_DEFAULT_APM_H #define _ASM_X86_MACH_DEFAULT_APM_H +#include + #ifdef APM_ZERO_SEGS # define APM_DO_ZERO_SEGS \ "pushl %%ds\n\t" \ @@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. */ + firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, "=S" (*esi) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); + firmware_restrict_branch_speculation_end(); } static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, @@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. 
*/ + firmware_restrict_branch_speculation_start(); __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" @@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, "=S" (si) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); + firmware_restrict_branch_speculation_end(); return error; } diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 5b0579abb398..3ac991d81e74 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h @@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned long *v) bool ok; unsigned int retry = RDRAND_RETRY_LOOPS; do { - asm volatile(RDRAND_LONG "\n\t" + asm volatile(RDRAND_LONG CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); if (ok) @@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned int *v) bool ok; unsigned int retry = RDRAND_RETRY_LOOPS; do { - asm volatile(RDRAND_INT "\n\t" + asm volatile(RDRAND_INT CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); if (ok) @@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned int *v) static inline bool rdseed_long(unsigned long *v) { bool ok; - asm volatile(RDSEED_LONG "\n\t" + asm volatile(RDSEED_LONG CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); return ok; @@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned long *v) static inline bool rdseed_int(unsigned int *v) { bool ok; - asm volatile(RDSEED_INT "\n\t" + asm volatile(RDSEED_INT CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); return ok; diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index ff700d81e91e..1908214b9125 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -11,7 +11,31 @@ #include #include #include +#include #ifndef CONFIG_X86_CMPXCHG64 extern void cmpxchg8b_emu(void); #endif + +#ifdef CONFIG_RETPOLINE +#ifdef CONFIG_X86_32 +#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void); +#else +#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void); +INDIRECT_THUNK(8) +INDIRECT_THUNK(9) +INDIRECT_THUNK(10) +INDIRECT_THUNK(11) +INDIRECT_THUNK(12) +INDIRECT_THUNK(13) +INDIRECT_THUNK(14) +INDIRECT_THUNK(15) +#endif +INDIRECT_THUNK(ax) +INDIRECT_THUNK(bx) +INDIRECT_THUNK(cx) +INDIRECT_THUNK(dx) +INDIRECT_THUNK(si) +INDIRECT_THUNK(di) +INDIRECT_THUNK(bp) +#endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 219faaec51df..386a6900e206 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -136,6 +136,7 @@ #endif #ifndef __ASSEMBLY__ +#ifndef __BPF__ /* * This output constraint should be used for any inline asm which has a "call" * instruction. 
Otherwise the asm may be inserted before the frame pointer @@ -145,5 +146,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #endif +#endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 01727dbc294a..4db77731e130 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -24,6 +24,34 @@ #define wmb() asm volatile("sfence" ::: "memory") #endif +/** + * array_index_mask_nospec() - generate a mask that is ~0UL when the + * bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * Returns: + * 0 - (index < size) + */ +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + asm ("cmp %1,%2; sbb %0,%0;" + :"=r" (mask) + :"g"(size),"r" (index) + :"cc"); + return mask; +} + +/* Override the default implementation from linux/nospec.h. */ +#define array_index_mask_nospec array_index_mask_nospec + +/* Prevent speculative execution past this barrier. */ +#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \ + "lfence", X86_FEATURE_LFENCE_RDTSC) + #ifdef CONFIG_X86_PPRO_FENCE #define dma_rmb() rmb() #else diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 2bcf47314959..3fa039855b8f 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -143,7 +143,7 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) { bool negative; - asm volatile(LOCK_PREFIX "andb %2,%1\n\t" + asm volatile(LOCK_PREFIX "andb %2,%1" CC_SET(s) : CC_OUT(s) (negative), ADDR : "ir" ((char) ~(1 << nr)) : "memory"); @@ -246,7 +246,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * { bool oldbit; - asm("bts %2,%1\n\t" + asm("bts %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -286,7 +286,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long { bool oldbit; - asm volatile("btr %2,%1\n\t" + asm volatile("btr %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -298,7 +298,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon { bool oldbit; - asm volatile("btc %2,%1\n\t" + asm volatile("btc %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr) : "memory"); @@ -329,7 +329,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l { bool oldbit; - asm volatile("bt %2,%1\n\t" + asm volatile("bt %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 34d99af43994..6804d6642767 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -5,23 +5,20 @@ #include /* - * Since some emulators terminate on UD2, we cannot use it for WARN. - * Since various instruction decoders disagree on the length of UD1, - * we cannot use it either. So use UD0 for WARN. + * Despite that some emulators terminate on UD2, we use it for WARN(). 
* - * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas - * our kernel decoder thinks it takes a ModRM byte, which seems consistent - * with various things like the Intel SDM instruction encoding rules) + * Since various instruction decoders/specs disagree on the encoding of + * UD0/UD1. */ -#define ASM_UD0 ".byte 0x0f, 0xff" +#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */ #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ #define ASM_UD2 ".byte 0x0f, 0x0b" #define INSN_UD0 0xff0f #define INSN_UD2 0x0b0f -#define LEN_UD0 2 +#define LEN_UD2 2 #ifdef CONFIG_GENERIC_BUG @@ -77,7 +74,11 @@ do { \ unreachable(); \ } while (0) -#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)) +#define __WARN_FLAGS(flags) \ +do { \ + _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ + annotate_reachable(); \ +} while (0) #include diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 9eef9cc64c68..2cbd75dd2fd3 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -7,6 +7,7 @@ */ #include #include +#include #include #include #include @@ -209,7 +210,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h new file mode 100644 index 000000000000..4a7884b8dca5 --- /dev/null +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _ASM_X86_CPU_ENTRY_AREA_H +#define _ASM_X86_CPU_ENTRY_AREA_H + +#include +#include +#include + +/* + * cpu_entry_area is a percpu region that contains things needed by the CPU + * and early entry/exit code. Real types aren't used for all fields here + * to avoid circular header dependencies. + * + * Every field is a virtual alias of some other allocated backing store. + * There is no direct allocation of a struct cpu_entry_area. + */ +struct cpu_entry_area { + char gdt[PAGE_SIZE]; + + /* + * The GDT is just below entry_stack and thus serves (on x86_64) as + * a a read-only guard page. + */ + struct entry_stack_page entry_stack_page; + + /* + * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because + * we need task switches to work, and task switches write to the TSS. + */ + struct tss_struct tss; + + char entry_trampoline[PAGE_SIZE]; + +#ifdef CONFIG_X86_64 + /* + * Exception stacks used for IST entries. + * + * In the future, this should have a separate slot for each stack + * with guard pages between them. + */ + char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; +#endif +#ifdef CONFIG_CPU_SUP_INTEL + /* + * Per CPU debug store for Intel performance monitoring. Wastes a + * full page at the moment. + */ + struct debug_store cpu_debug_store; + /* + * The actual PEBS/BTS buffers must be mapped to user space + * Reserve enough fixmap PTEs. 
+ */ + struct debug_store_buffers cpu_debug_buffers; +#endif +}; + +#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) +#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) + +DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); + +extern void setup_cpu_entry_areas(void); +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); + +#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE +#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) + +#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) + +#define CPU_ENTRY_AREA_MAP_SIZE \ + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) + +extern struct cpu_entry_area *get_cpu_entry_area(int cpu); + +static inline struct entry_stack *cpu_entry_stack(int cpu) +{ + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; +} + +#endif diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0dfa68438e80..70eddb3922ff 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -29,6 +29,7 @@ enum cpuid_leafs CPUID_8000_000A_EDX, CPUID_7_ECX, CPUID_8000_0007_EBX, + CPUID_7_EDX, }; #ifdef CONFIG_X86_FEATURE_NAMES @@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + BUILD_BUG_ON_ZERO(NCAPINTS != 19)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + BUILD_BUG_ON_ZERO(NCAPINTS != 19)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ @@ -126,16 +129,17 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) -#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) -#define setup_clear_cpu_cap(bit) do { \ - clear_cpu_cap(&boot_cpu_data, bit); \ - set_bit(bit, (unsigned long *)cpu_caps_cleared); \ -} while (0) + +extern void setup_clear_cpu_cap(unsigned int bit); +extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); + #define setup_force_cpu_cap(bit) do { \ set_cpu_cap(&boot_cpu_data, bit); \ set_bit(bit, (unsigned long *)cpu_caps_set); \ } while (0) +#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) + #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS) /* * Static testing of CPU features. Used the same as boot_cpu_has(). 
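Editorial aside, not part of the patch: the new cpu_entry_area.h header above only declares get_cpu_entry_area(); the lookup itself lives elsewhere in the tree (arch/x86/mm/cpu_entry_area.c in mainline). As a rough sketch of what the constants defined above imply, each CPU's area can be located by simple arithmetic on CPU_ENTRY_AREA_PER_CPU and CPU_ENTRY_AREA_SIZE; the function name below is hypothetical and the real implementation may differ.

/* Illustrative sketch only, not part of this patch. */
static inline struct cpu_entry_area *example_get_cpu_entry_area(int cpu)
{
	/*
	 * Every CPU owns one fixed-size, page-aligned slot in the
	 * cpu_entry_area region, so the virtual address is just
	 * base + cpu * slot size.
	 */
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;

	return (struct cpu_entry_area *)va;
}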
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 793690fbda36..23a65439c37c 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,173 +13,176 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 18 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NCAPINTS 19 /* N 32-bit words worth of info */ +#define NBUGINTS 1 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used * in /proc/cpuinfo instead of the macro name. If the string is "", * this feature bit is not displayed in /proc/cpuinfo at all. + * + * When adding new features here that depend on other features, + * please update the table in kernel/cpu/cpuid-deps.c as well. */ -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ -#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ -#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ - /* (plus FCMOVcc, FCOMI with FPU) */ -#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ -#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ -#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ -#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ -#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ -#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ -#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ -#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ -#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ +/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */ +#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ +#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ +#define 
X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ +#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ +#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ +#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ +#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ +#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ +#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ +#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ +#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ +#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ -#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ -#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! 
*/ +#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */ +#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ +#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ +#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */ +#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */ +#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ +#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ -#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ -#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ -#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ -#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ -#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ -#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ -#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ -#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ -#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ -#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ -#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ -#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 
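
For reference, every X86_FEATURE_* constant in this header packs a capability word index and a bit position as word*32 + bit, and each word corresponds to one CPUID leaf/register as the section comments state. A minimal stand-alone sketch of that decoding; decode_feature() is a hypothetical illustration, not a kernel API.

#include <stdio.h>

/* Same encoding as the header: word 1 (CPUID 0x80000001 EDX), bit 29. */
#define X86_FEATURE_LM (1*32 + 29)

static void decode_feature(unsigned int feature)
{
        unsigned int word = feature / 32;       /* which 32-bit capability word */
        unsigned int bit  = feature % 32;       /* bit position inside that word */

        printf("word %u, bit %u\n", word, bit);
}

int main(void)
{
        decode_feature(X86_FEATURE_LM);         /* prints "word 1, bit 29" */
        return 0;
}
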
-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ -#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ -#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ -#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ -#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ -#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ -#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ -#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ -#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ -#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ -#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ -#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ -#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ -#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ -#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ -#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ -#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ -#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ +#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ +#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ + +/* CPU types for specific tunings: */ +#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ +#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ +#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ +#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ +#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */ +#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */ +#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ +#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ +#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ +#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ +#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present 
feature */ +#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ +#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ +#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */ +#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ +#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ +#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ + +/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */ +#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ +#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ +#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ +#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */ +#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */ +#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ +#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */ +#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ +#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ +#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ +#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */ +#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */ +#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ +#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ +#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ +#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */ +#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ +#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ +#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */ +#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ +#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */ +#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */ +#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */ +#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */ +#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ -#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ -#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ -#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ -#define X86_FEATURE_PMM ( 5*32+12) /* PadLock 
Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ - -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ -#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ -#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ -#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ -#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ -#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ -#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ -#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ -#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ -#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ -#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ -#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ -#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ -#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ -#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ -#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ -#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ -#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ -#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ +#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ +#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ +#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ +#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ +#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ + +/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ +#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */ +#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ +#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ +#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ +#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ +#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ +#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ +#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ +#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ +#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX 
instructions */ +#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ +#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ +#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ +#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */ +#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */ +#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */ +#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */ +#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -187,146 +190,175 @@ * * Reuse free bits when adding new feature flags! */ -#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ -#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ -#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ -#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ -#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ -#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */ +#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ +#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ +#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ +#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ +#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ +#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ -#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ -#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ +#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ +#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define 
X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ -#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ +#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ /* Virtualization flags: Linux defined, word 8 */ -#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ - -#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ -#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ - - -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ -#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ -#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ -#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ -#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ -#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ -#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ -#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ -#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ -#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ -#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ -#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ -#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ -#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ -#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ -#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ -#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ -#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ -#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ -#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ -#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ -#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ -#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ - -/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ -#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ - -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ -#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ - -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* 
LLC occupancy monitoring if 1 */ -#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ -#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ - -/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ -#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ - -/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ -#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ -#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ -#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ -#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ -#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ -#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ -#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ - -/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ -#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ -#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ -#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ -#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ -#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ -#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ -#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ - -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ -#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ -#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ -#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ -#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ -#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ - -/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ -#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ -#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ -#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ +#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ +#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ +#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ + +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ +#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ + + +/* Intel-defined CPU features, CPUID level 
0x00000007:0 (EBX), word 9 */ +#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ +#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */ +#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ +#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ +#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ +#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ +#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ +#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ +#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ +#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ +#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ +#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ +#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */ +#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ +#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ +#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ +#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */ +#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ +#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ +#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ + +/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */ +#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */ +#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ +#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ +#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ + +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ +#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ + +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ +#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ +#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ + +/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ +#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ +#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ +#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ +#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ +#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ + +/* Thermal and Power Management 
Leaf, CPUID level 0x00000006 (EAX), word 14 */ +#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ +#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ + +/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ +#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ +#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ +#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ +#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ + +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ +#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ +#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ +#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ +#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ +#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ +#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ +#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ +#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ + +/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ +#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ + +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ +#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define 
X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ +#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ +#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ /* * BUG word(s) */ -#define X86_BUG(x) (NCAPINTS*32 + (x)) - -#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ -#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ -#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ -#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ -#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ -#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ -#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ -#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ +#define X86_BUG(x) (NCAPINTS*32 + (x)) + +#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ +#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ #ifdef CONFIG_X86_32 /* * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional * to avoid confusion. */ -#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ +#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ #endif -#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ -#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ -#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ -#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ +#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ +#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ +#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ + #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 0a3e808b9123..85e23bb7b34e 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -20,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + /* Set the ACCESS bit so it can be mapped RO */ + desc->type |= 1; desc->s = 1; desc->dpl = 0x3; @@ -60,17 +63,10 @@ static inline struct 
desc_struct *get_current_gdt_rw(void) return this_cpu_ptr(&gdt_page)->gdt; } -/* Get the fixmap index for a specific processor */ -static inline unsigned int get_cpu_gdt_ro_index(int cpu) -{ - return FIX_GDT_REMAP_BEGIN + cpu; -} - /* Provide the fixmap address of the remapped GDT */ static inline struct desc_struct *get_cpu_gdt_ro(int cpu) { - unsigned int idx = get_cpu_gdt_ro_index(cpu); - return (struct desc_struct *)__fix_to_virt(idx); + return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt; } /* Provide the current read-only GDT */ @@ -185,7 +181,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, #endif } -static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) +static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr) { struct desc_struct *d = get_cpu_gdt_rw(cpu); tss_desc tss; diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index c10c9128f54e..33833d1909af 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -16,6 +16,12 @@ # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) #endif +#ifdef CONFIG_X86_INTEL_UMIP +# define DISABLE_UMIP 0 +#else +# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31)) +#endif + #ifdef CONFIG_X86_64 # define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) @@ -44,6 +50,12 @@ # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) #endif +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define DISABLE_PTI 0 +#else +# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -54,7 +66,7 @@ #define DISABLED_MASK4 (DISABLE_PCID) #define DISABLED_MASK5 0 #define DISABLED_MASK6 0 -#define DISABLED_MASK7 0 +#define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) #define DISABLED_MASK10 0 @@ -63,8 +75,9 @@ #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 -#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) +#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP) #define DISABLED_MASK17 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define DISABLED_MASK18 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 836ca1178a6a..69f16f0729d0 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -7,7 +7,6 @@ * Documentation/DMA-API.txt for documentation. */ -#include #include #include #include diff --git a/arch/x86/include/asm/early_intel_th.h b/arch/x86/include/asm/early_intel_th.h new file mode 100644 index 000000000000..bf93609995c8 --- /dev/null +++ b/arch/x86/include/asm/early_intel_th.h @@ -0,0 +1,20 @@ +/* + * early_intel_th.h: Intel Trace Hub early printk + * + * (C) Copyright 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ +#ifndef _ASM_X86_EARLY_INTEL_TH_H +#define _ASM_X86_EARLY_INTEL_TH_H + +#ifdef CONFIG_INTEL_TH_EARLY_PRINTK +extern struct console intel_th_early_console; +extern void early_intel_th_init(const char *); +#endif /* CONFIG_INTEL_TH_EARLY_PRINTK */ + +#endif /* _ASM_X86_EARLY_INTEL_TH_H */ + diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 85f6ccb80b91..a399c1ebf6f0 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -6,6 +6,7 @@ #include #include #include +#include /* * We map the EFI regions needed for runtime services non-contiguously, @@ -36,8 +37,18 @@ extern asmlinkage unsigned long efi_call_phys(void *, ...); -#define arch_efi_call_virt_setup() kernel_fpu_begin() -#define arch_efi_call_virt_teardown() kernel_fpu_end() +#define arch_efi_call_virt_setup() \ +({ \ + kernel_fpu_begin(); \ + firmware_restrict_branch_speculation_start(); \ +}) + +#define arch_efi_call_virt_teardown() \ +({ \ + firmware_restrict_branch_speculation_end(); \ + kernel_fpu_end(); \ +}) + /* * Wrap all the virtual calls in a way that forces the parameters on the stack. @@ -73,6 +84,7 @@ struct efi_scratch { efi_sync_low_kernel_mappings(); \ preempt_disable(); \ __kernel_fpu_begin(); \ + firmware_restrict_branch_speculation_start(); \ \ if (efi_scratch.use_pgd) { \ efi_scratch.prev_cr3 = __read_cr3(); \ @@ -91,6 +103,7 @@ struct efi_scratch { __flush_tlb_all(); \ } \ \ + firmware_restrict_branch_speculation_end(); \ __kernel_fpu_end(); \ preempt_enable(); \ }) diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h index 0211029076ea..6777480d8a42 100644 --- a/arch/x86/include/asm/espfix.h +++ b/arch/x86/include/asm/espfix.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_ESPFIX_H #define _ASM_X86_ESPFIX_H -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_ESPFIX64 #include @@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); extern void init_espfix_bsp(void); extern void init_espfix_ap(int cpu); - -#endif /* CONFIG_X86_64 */ +#else +static inline void init_espfix_ap(int cpu) { } +#endif #endif /* _ASM_X86_ESPFIX_H */ diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index dcd9fb55e679..e203169931c7 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -44,7 +44,6 @@ extern unsigned long __FIXADDR_TOP; PAGE_SIZE) #endif - /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at @@ -84,7 +83,6 @@ enum fixed_addresses { FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, #endif - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ #ifdef CONFIG_X86_32 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, @@ -100,9 +98,12 @@ enum fixed_addresses { #ifdef CONFIG_X86_INTEL_MID FIX_LNW_VRTC, #endif - /* Fixmap entries to remap the GDTs, one per processor. 
*/ - FIX_GDT_REMAP_BEGIN, - FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, + +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, +#endif __end_of_permanent_fixed_addresses, @@ -136,8 +137,10 @@ enum fixed_addresses { extern void reserve_top_address(unsigned long reserve); -#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE) extern int fixmaps_set; diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 8ec99a55e6b9..bf253ad93bbc 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -34,6 +34,7 @@ extern asmlinkage void kvm_posted_intr_wakeup_ipi(void); extern asmlinkage void kvm_posted_intr_nested_ipi(void); extern asmlinkage void error_interrupt(void); extern asmlinkage void irq_work_interrupt(void); +extern asmlinkage void uv_bau_message_intr1(void); extern asmlinkage void spurious_interrupt(void); extern asmlinkage void thermal_interrupt(void); diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 0ead9dbb9130..9e114b8c901e 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -20,14 +20,23 @@ #ifndef _ASM_X86_HYPERVISOR_H #define _ASM_X86_HYPERVISOR_H +/* x86 hypervisor types */ +enum x86_hypervisor_type { + X86_HYPER_NATIVE = 0, + X86_HYPER_VMWARE, + X86_HYPER_MS_HYPERV, + X86_HYPER_XEN_PV, + X86_HYPER_XEN_HVM, + X86_HYPER_KVM, + X86_HYPER_ACRN, +}; + #ifdef CONFIG_HYPERVISOR_GUEST #include +#include #include -/* - * x86 hypervisor information - */ struct hypervisor_x86 { /* Hypervisor name */ const char *name; @@ -35,40 +44,27 @@ struct hypervisor_x86 { /* Detection routine */ uint32_t (*detect)(void); - /* Platform setup (run once per boot) */ - void (*init_platform)(void); - - /* X2APIC detection (run once per boot) */ - bool (*x2apic_available)(void); + /* Hypervisor type */ + enum x86_hypervisor_type type; - /* pin current vcpu to specified physical cpu (run rarely) */ - void (*pin_vcpu)(int); + /* init time callbacks */ + struct x86_hyper_init init; - /* called during init_mem_mapping() to setup early mappings. 
*/ - void (*init_mem_mapping)(void); + /* runtime callbacks */ + struct x86_hyper_runtime runtime; }; -extern const struct hypervisor_x86 *x86_hyper; - -/* Recognized hypervisors */ -extern const struct hypervisor_x86 x86_hyper_vmware; -extern const struct hypervisor_x86 x86_hyper_ms_hyperv; -extern const struct hypervisor_x86 x86_hyper_xen_pv; -extern const struct hypervisor_x86 x86_hyper_xen_hvm; -extern const struct hypervisor_x86 x86_hyper_kvm; - +extern enum x86_hypervisor_type x86_hyper_type; extern void init_hypervisor_platform(void); -extern bool hypervisor_x2apic_available(void); -extern void hypervisor_pin_vcpu(int cpu); - -static inline void hypervisor_init_mem_mapping(void) +static inline bool hypervisor_is_type(enum x86_hypervisor_type type) { - if (x86_hyper && x86_hyper->init_mem_mapping) - x86_hyper->init_mem_mapping(); + return x86_hyper_type == type; } #else static inline void init_hypervisor_platform(void) { } -static inline bool hypervisor_x2apic_available(void) { return false; } -static inline void hypervisor_init_mem_mapping(void) { } +static inline bool hypervisor_is_type(enum x86_hypervisor_type type) +{ + return type == X86_HYPER_NATIVE; +} #endif /* CONFIG_HYPERVISOR_GUEST */ #endif /* _ASM_X86_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 02aff0867211..1c78580e58be 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -97,6 +97,16 @@ #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) +/* Identifiers for segment registers */ +#define INAT_SEG_REG_IGNORE 0 +#define INAT_SEG_REG_DEFAULT 1 +#define INAT_SEG_REG_CS 2 +#define INAT_SEG_REG_SS 3 +#define INAT_SEG_REG_DS 4 +#define INAT_SEG_REG_ES 5 +#define INAT_SEG_REG_FS 6 +#define INAT_SEG_REG_GS 7 + /* Attribute search APIs */ extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); extern int inat_get_last_prefix_id(insn_byte_t last_pfx); diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h new file mode 100644 index 000000000000..e8c3e7cd1673 --- /dev/null +++ b/arch/x86/include/asm/insn-eval.h @@ -0,0 +1,24 @@ +#ifndef _ASM_X86_INSN_EVAL_H +#define _ASM_X86_INSN_EVAL_H +/* + * A collection of utility functions for x86 instruction analysis to be + * used in a kernel context. Useful when, for instance, making sense + * of the registers indicated by operands. + */ + +#include +#include +#include +#include + +#define INSN_CODE_SEG_ADDR_SZ(params) ((params >> 4) & 0xf) +#define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf) +#define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4)) + +void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); +int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); +unsigned long insn_get_seg_base(struct pt_regs *regs, struct insn *insn, + int regoff); +char insn_get_code_seg_defaults(struct pt_regs *regs); + +#endif /* _ASM_X86_INSN_EVAL_H */ diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h new file mode 100644 index 000000000000..62a9f4966b42 --- /dev/null +++ b/arch/x86/include/asm/intel_ds.h @@ -0,0 +1,36 @@ +#ifndef _ASM_INTEL_DS_H +#define _ASM_INTEL_DS_H + +#include + +#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) +#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) + +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 8 + +/* + * A debug store configuration. + * + * We only support architectures that use 64bit fields. 
+ */ +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; +} __aligned(PAGE_SIZE); + +DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + +struct debug_store_buffers { + char bts_buffer[BTS_BUFFER_SIZE]; + char pebs_buffer[PEBS_BUFFER_SIZE]; +}; + +#endif diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h new file mode 100644 index 000000000000..989cfa86de85 --- /dev/null +++ b/arch/x86/include/asm/invpcid.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INVPCID +#define _ASM_X86_INVPCID + +static inline void __invpcid(unsigned long pcid, unsigned long addr, + unsigned long type) +{ + struct { u64 d[2]; } desc = { { pcid, addr } }; + + /* + * The memory clobber is because the whole point is to invalidate + * stale TLB entries and, especially if we're flushing global + * mappings, we don't want the compiler to reorder any subsequent + * memory accesses before the TLB flush. + * + * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and + * invpcid (%rcx), %rax in long mode. + */ + asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" + : : "m" (desc), "a" (type), "c" (&desc) : "memory"); +} + +#define INVPCID_TYPE_INDIV_ADDR 0 +#define INVPCID_TYPE_SINGLE_CTXT 1 +#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 +#define INVPCID_TYPE_ALL_NON_GLOBAL 3 + +/* Flush all mappings for a given pcid and addr, not including globals. */ +static inline void invpcid_flush_one(unsigned long pcid, + unsigned long addr) +{ + __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); +} + +/* Flush all mappings for a given PCID, not including globals. */ +static inline void invpcid_flush_single_context(unsigned long pcid) +{ + __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); +} + +/* Flush all mappings, including globals, for all PCIDs. */ +static inline void invpcid_flush_all(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); +} + +/* Flush all mappings for all PCIDs except globals. 
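
For reference, the helpers in this new header map one-to-one onto the four architected INVPCID descriptor types defined above. A minimal illustrative caller, assuming X86_FEATURE_INVPCID has already been verified; invpcid_examples() is hypothetical and only restates which helper issues which invalidation type:

static void invpcid_examples(void)
{
        invpcid_flush_one(1, 0x7f0000001000UL); /* type 0: one address tagged with PCID 1 */
        invpcid_flush_single_context(1);        /* type 1: all of PCID 1, global pages kept */
        invpcid_flush_all();                    /* type 2: everything, including global pages */
        invpcid_flush_all_nonglobals();         /* type 3: everything except global pages */
}
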
*/ +static inline void invpcid_flush_all_nonglobals(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); +} + +#endif /* _ASM_X86_INVPCID */ diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c8ef23f2c28f..89f08955fff7 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void) swapgs; \ sysretl +#ifdef CONFIG_DEBUG_ENTRY +#define SAVE_FLAGS(x) pushfq; popq %rax +#endif #else #define INTERRUPT_RETURN iret #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index f86a8caa561e..395c9631e000 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_stack_regs(struct pt_regs *regs); extern void __show_regs(struct pt_regs *regs, int all); +extern void show_iret_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); extern void oops_end(unsigned long, struct pt_regs *, int signr); diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h deleted file mode 100644 index 945a0337fbcf..000000000000 --- a/arch/x86/include/asm/kmemcheck.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_X86_KMEMCHECK_H -#define ASM_X86_KMEMCHECK_H - -#include -#include - -#ifdef CONFIG_KMEMCHECK -bool kmemcheck_active(struct pt_regs *regs); - -void kmemcheck_show(struct pt_regs *regs); -void kmemcheck_hide(struct pt_regs *regs); - -bool kmemcheck_fault(struct pt_regs *regs, - unsigned long address, unsigned long error_code); -bool kmemcheck_trap(struct pt_regs *regs); -#else -static inline bool kmemcheck_active(struct pt_regs *regs) -{ - return false; -} - -static inline void kmemcheck_show(struct pt_regs *regs) -{ -} - -static inline void kmemcheck_hide(struct pt_regs *regs) -{ -} - -static inline bool kmemcheck_fault(struct pt_regs *regs, - unsigned long address, unsigned long error_code) -{ - return false; -} - -static inline bool kmemcheck_trap(struct pt_regs *regs) -{ - return false; -} -#endif /* CONFIG_KMEMCHECK */ - -#endif diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c73e493adf07..4f8b80199672 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1156,7 +1156,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, static inline int emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) { - return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); + return x86_emulate_instruction(vcpu, 0, + emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); } void kvm_enable_efer_bits(u64); @@ -1426,4 +1427,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #endif } +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); + #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index b1e8d8db921f..340070415c2c 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -346,6 +346,7 @@ enum smca_bank_types { SMCA_IF, /* Instruction Fetch */ SMCA_L2_CACHE, /* L2 Cache */ SMCA_DE, /* Decoder Unit */ + SMCA_RESERVED, /* Reserved */ SMCA_EX, /* Execution Unit */ SMCA_FP, /* Floating Point */ SMCA_L3_CACHE, /* L3 Cache */ @@ -376,6 +377,7 @@ struct smca_bank { 
extern struct smca_bank smca_banks[MAX_NR_BANKS]; extern const char *smca_get_long_name(enum smca_bank_types t); +extern bool amd_mce_is_memory_error(struct mce *m); extern int mce_threshold_create_device(unsigned int cpu); extern int mce_threshold_remove_device(unsigned int cpu); @@ -384,6 +386,7 @@ extern int mce_threshold_remove_device(unsigned int cpu); static inline int mce_threshold_create_device(unsigned int cpu) { return 0; }; static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; }; +static inline bool amd_mce_is_memory_error(struct mce *m) { return false; }; #endif diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 6a77c63540f7..e7d96c0766fe 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data); void __init sme_early_init(void); -void __init sme_encrypt_kernel(void); +void __init sme_encrypt_kernel(struct boot_params *bp); void __init sme_enable(struct boot_params *bp); /* Architecture __weak replacement functions */ @@ -61,7 +61,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { } static inline void __init sme_early_init(void) { } -static inline void __init sme_encrypt_kernel(void) { } +static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } #endif /* CONFIG_AMD_MEM_ENCRYPT */ diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 55520cec8b27..6cf0e4cb7b97 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -37,7 +37,13 @@ struct cpu_signature { struct device; -enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; +enum ucode_state { + UCODE_OK = 0, + UCODE_NEW, + UCODE_UPDATED, + UCODE_NFOUND, + UCODE_ERROR, +}; struct microcode_ops { enum ucode_state (*request_microcode_user) (int cpu, @@ -54,7 +60,7 @@ struct microcode_ops { * are being called. * See also the "Synchronization" section in microcode_core.c. */ - int (*apply_microcode) (int cpu); + enum ucode_state (*apply_microcode) (int cpu); int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); }; diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 9ea26f167497..5ff3e8af2c20 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -3,6 +3,7 @@ #define _ASM_X86_MMU_H #include +#include #include #include @@ -27,7 +28,8 @@ typedef struct { atomic64_t tlb_gen; #ifdef CONFIG_MODIFY_LDT_SYSCALL - struct ldt_struct *ldt; + struct rw_semaphore ldt_usr_sem; + struct ldt_struct *ldt; #endif #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6699fc441644..1de72ce514cd 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -50,22 +50,54 @@ struct ldt_struct { * call gates. On native, we could merge the ldt_struct and LDT * allocations, but it's not worth trying to optimize. */ - struct desc_struct *entries; - unsigned int nr_entries; + struct desc_struct *entries; + unsigned int nr_entries; + + /* + * If PTI is in use, then the entries array is not mapped while we're + * in user mode. The whole array will be aliased at the addressed + * given by ldt_slot_va(slot). We use two slots so that we can allocate + * and map, and enable a new LDT without invalidating the mapping + * of an older, still-in-use LDT. 
+ * + * slot will be -1 if this LDT doesn't have an alias mapping. + */ + int slot; }; +/* This is a multiple of PAGE_SIZE. */ +#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) + +static inline void *ldt_slot_va(int slot) +{ +#ifdef CONFIG_X86_64 + return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); +#else + BUG(); + return (void *)fix_to_virt(FIX_HOLE); +#endif +} + /* * Used for LDT copy/destruction. */ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); +static inline void init_new_context_ldt(struct mm_struct *mm) +{ + mm->context.ldt = NULL; + init_rwsem(&mm->context.ldt_usr_sem); +} +int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm); void destroy_context_ldt(struct mm_struct *mm); +void ldt_arch_exit_mmap(struct mm_struct *mm); #else /* CONFIG_MODIFY_LDT_SYSCALL */ -static inline int init_new_context_ldt(struct task_struct *tsk, - struct mm_struct *mm) +static inline void init_new_context_ldt(struct mm_struct *mm) { } +static inline int ldt_dup_context(struct mm_struct *oldmm, + struct mm_struct *mm) { return 0; } -static inline void destroy_context_ldt(struct mm_struct *mm) {} +static inline void destroy_context_ldt(struct mm_struct *mm) { } +static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } #endif static inline void load_mm_ldt(struct mm_struct *mm) @@ -73,8 +105,8 @@ static inline void load_mm_ldt(struct mm_struct *mm) #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; - /* lockless_dereference synchronizes with smp_store_release */ - ldt = lockless_dereference(mm->context.ldt); + /* READ_ONCE synchronizes with smp_store_release */ + ldt = READ_ONCE(mm->context.ldt); /* * Any change to mm->context.ldt is followed by an IPI to all @@ -90,10 +122,31 @@ static inline void load_mm_ldt(struct mm_struct *mm) * that we can see. */ - if (unlikely(ldt)) - set_ldt(ldt->entries, ldt->nr_entries); - else + if (unlikely(ldt)) { + if (static_cpu_has(X86_FEATURE_PTI)) { + if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { + /* + * Whoops -- either the new LDT isn't mapped + * (if slot == -1) or is mapped into a bogus + * slot (if slot > 1). + */ + clear_LDT(); + return; + } + + /* + * If page table isolation is enabled, ldt->entries + * will not be mapped in the userspace pagetables. + * Tell the CPU to access the LDT through the alias + * at ldt_slot_va(ldt->slot). 
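	 *
	 * As a rough illustration (assuming the usual LDT_ENTRIES of 8192
	 * and LDT_ENTRY_SIZE of 8 bytes, so LDT_SLOT_STRIDE is 64 KiB),
	 * the two aliases sit at fixed kernel addresses:
	 *
	 *   ldt_slot_va(0) == (void *)LDT_BASE_ADDR
	 *   ldt_slot_va(1) == (void *)(LDT_BASE_ADDR + 64 * 1024)
	 *
	 * so a fresh LDT can be mapped into the unused slot and switched
	 * to with set_ldt() while the old slot's alias stays usable.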
+ */ + set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); + } else { + set_ldt(ldt->entries, ldt->nr_entries); + } + } else { clear_LDT(); + } #else clear_LDT(); #endif @@ -132,18 +185,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + mutex_init(&mm->context.lock); + mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); atomic64_set(&mm->context.tlb_gen, 0); - #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { /* pkey 0 is the default and always allocated */ mm->context.pkey_allocation_map = 0x1; /* -1 means unallocated or invalid */ mm->context.execute_only_pkey = -1; } - #endif - return init_new_context_ldt(tsk, mm); +#endif + init_new_context_ldt(mm); + return 0; } static inline void destroy_context(struct mm_struct *mm) { @@ -176,15 +232,16 @@ do { \ } while (0) #endif -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { paravirt_arch_dup_mmap(oldmm, mm); + return ldt_dup_context(oldmm, mm); } static inline void arch_exit_mmap(struct mm_struct *mm) { paravirt_arch_exit_mmap(mm); + ldt_arch_exit_mmap(mm); } #ifdef CONFIG_X86_64 @@ -281,33 +338,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, return __pkru_allows_pkey(vma_pkey(vma), write); } -/* - * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID - * bits. This serves two purposes. It prevents a nasty situation in - * which PCID-unaware code saves CR3, loads some other value (with PCID - * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if - * the saved ASID was nonzero. It also means that any bugs involving - * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger - * deterministically. - */ - -static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) -{ - if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1); - } else { - VM_WARN_ON_ONCE(asid != 0); - return __sme_pa(mm->pgd); - } -} - -static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) -{ - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; -} - /* * This can be used from process context to figure out what the value of * CR3 is without needing to do a (slow) __read_cr3(). @@ -317,7 +347,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) */ static inline unsigned long __get_current_cr3_fast(void) { - unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), + unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* For now, be very restrictive about when this can be called. 
*/ diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 8546fafa21a9..7948a17febb4 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -6,7 +6,7 @@ #include struct mod_arch_specific { -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC unsigned int num_orcs; int *orc_unwind_ip; struct orc_entry *orc_unwind; diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 581bb54dd464..5119e4b555cc 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent @@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) return U64_MAX; __asm__ __volatile__("mov %4, %%r8\n" - "call *%5" + CALL_NOSPEC : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input_address) - : "r" (output_address), "m" (hv_hypercall_pg) + : "r" (output_address), + THUNK_TARGET(hv_hypercall_pg) : "cc", "memory", "r8", "r9", "r10", "r11"); #else u32 input_address_hi = upper_32_bits(input_address); @@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) if (!hv_hypercall_pg) return U64_MAX; - __asm__ __volatile__("call *%7" + __asm__ __volatile__(CALL_NOSPEC : "=A" (hv_status), "+c" (input_address_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input_address_hi), "D"(output_address_hi), "S"(output_address_lo), - "m" (hv_hypercall_pg) + THUNK_TARGET(hv_hypercall_pg) : "cc", "memory"); #endif /* !x86_64 */ return hv_status; @@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) #ifdef CONFIG_X86_64 { - __asm__ __volatile__("call *%4" + __asm__ __volatile__(CALL_NOSPEC : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input1) - : "m" (hv_hypercall_pg) + : THUNK_TARGET(hv_hypercall_pg) : "cc", "r8", "r9", "r10", "r11"); } #else @@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) u32 input1_hi = upper_32_bits(input1); u32 input1_lo = lower_32_bits(input1); - __asm__ __volatile__ ("call *%5" + __asm__ __volatile__ (CALL_NOSPEC : "=A"(hv_status), "+c"(input1_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input1_hi), - "m" (hv_hypercall_pg) + THUNK_TARGET(hv_hypercall_pg) : "cc", "edi", "esi"); } #endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ab022618a50a..eb83ff1bae8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -39,6 +39,13 @@ /* Intel MSRs. 
Some also available on other CPUs */ +#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ +#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ +#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ + +#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ +#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ + #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f @@ -57,6 +64,11 @@ #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) #define MSR_MTRRcap 0x000000fe + +#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a +#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ +#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ + #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e @@ -352,6 +364,9 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c +#define MSR_F10H_DECFG 0xc0011029 +#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 07962f5f6fba..30df295f6d94 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -214,8 +214,7 @@ static __always_inline unsigned long long rdtsc_ordered(void) * that some other imaginary CPU is updating continuously with a * time stamp. */ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); + barrier_nospec(); return rdtsc(); } diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h new file mode 100644 index 000000000000..f928ad9b143f --- /dev/null +++ b/arch/x86/include/asm/nospec-branch.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_X86_NOSPEC_BRANCH_H_ +#define _ASM_X86_NOSPEC_BRANCH_H_ + +#include +#include +#include +#include + +/* + * Fill the CPU return stack buffer. + * + * Each entry in the RSB, if used for a speculative 'ret', contains an + * infinite 'pause; lfence; jmp' loop to capture speculative execution. + * + * This is required in various cases for retpoline and IBRS-based + * mitigations for the Spectre variant 2 vulnerability. Sometimes to + * eliminate potentially bogus entries from the RSB, and sometimes + * purely to ensure that it doesn't get empty, which on some CPUs would + * allow predictions from other (unwanted!) sources to be used. + * + * We define a CPP macro such that it can be used from both .S files and + * inline assembly. It's possible to do a .macro and then include that + * from C via asm(".include ") but let's not go there. + */ + +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ +#define RSB_FILL_LOOPS 16 /* To avoid underflow */ + +/* + * Google experimented with loop-unrolling and this turned out to be + * the optimal version — two calls, each with their own speculation + * trap should their return address end up getting used, in a loop. + */ +#define __FILL_RETURN_BUFFER(reg, nr, sp) \ + mov $(nr/2), reg; \ +771: \ + call 772f; \ +773: /* speculation trap */ \ + pause; \ + lfence; \ + jmp 773b; \ +772: \ + call 774f; \ +775: /* speculation trap */ \ + pause; \ + lfence; \ + jmp 775b; \ +774: \ + dec reg; \ + jnz 771b; \ + add $(BITS_PER_LONG/8) * nr, sp; + +#ifdef __ASSEMBLY__ + +/* + * This should be used immediately before a retpoline alternative. 
It tells + * objtool where the retpolines are so that it can make sense of the control + * flow by just reading the original instruction(s) and ignoring the + * alternatives. + */ +.macro ANNOTATE_NOSPEC_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.nospec + .long .Lannotate_\@ - . + .popsection +.endm + +/* + * This should be used immediately before an indirect jump/call. It tells + * objtool the subsequent indirect jump/call is vouched safe for retpoline + * builds. + */ +.macro ANNOTATE_RETPOLINE_SAFE + .Lannotate_\@: + .pushsection .discard.retpoline_safe + _ASM_PTR .Lannotate_\@ + .popsection +.endm + +/* + * These are the bare retpoline primitives for indirect jmp and call. + * Do not use these directly; they only exist to make the ALTERNATIVE + * invocation below less ugly. + */ +.macro RETPOLINE_JMP reg:req + call .Ldo_rop_\@ +.Lspec_trap_\@: + pause + lfence + jmp .Lspec_trap_\@ +.Ldo_rop_\@: + mov \reg, (%_ASM_SP) + ret +.endm + +/* + * This is a wrapper around RETPOLINE_JMP so the called function in reg + * returns to the instruction after the macro. + */ +.macro RETPOLINE_CALL reg:req + jmp .Ldo_call_\@ +.Ldo_retpoline_jmp_\@: + RETPOLINE_JMP \reg +.Ldo_call_\@: + call .Ldo_retpoline_jmp_\@ +.endm + +/* + * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple + * indirect jmp/call which may be susceptible to the Spectre variant 2 + * attack. + */ +.macro JMP_NOSPEC reg:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ + __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD +#else + jmp *\reg +#endif +.endm + +.macro CALL_NOSPEC reg:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ + __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD +#else + call *\reg +#endif +.endm + + /* + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP + * monstrosity above, manually. + */ +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE "jmp .Lskip_rsb_\@", \ + __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ + \ftr +.Lskip_rsb_\@: +#endif +.endm + +#else /* __ASSEMBLY__ */ + +#define ANNOTATE_NOSPEC_ALTERNATIVE \ + "999:\n\t" \ + ".pushsection .discard.nospec\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + +#define ANNOTATE_RETPOLINE_SAFE \ + "999:\n\t" \ + ".pushsection .discard.retpoline_safe\n\t" \ + _ASM_PTR " 999b\n\t" \ + ".popsection\n\t" + +#if defined(CONFIG_X86_64) && defined(RETPOLINE) + +/* + * Since the inline asm uses the %V modifier which is only in newer GCC, + * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. + */ +# define CALL_NOSPEC \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE( \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ + X86_FEATURE_RETPOLINE) +# define THUNK_TARGET(addr) [thunk_target] "r" (addr) + +#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) +/* + * For i386 we use the original ret-equivalent retpoline, because + * otherwise we'll run out of registers. We don't care about CET + * here, anyway. 
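 *
 * Either way, a C call site is written the same way; as a minimal
 * sketch (the function pointer name fn is made up for illustration):
 *
 *   asm volatile(CALL_NOSPEC
 *                : ASM_CALL_CONSTRAINT
 *                : THUNK_TARGET(fn)
 *                : "memory");
 *
 * which degrades to a plain "call *fn" when retpolines are not
 * configured.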
+ */ +# define CALL_NOSPEC \ + ALTERNATIVE( \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + " jmp 904f;\n" \ + " .align 16\n" \ + "901: call 903f;\n" \ + "902: pause;\n" \ + " lfence;\n" \ + " jmp 902b;\n" \ + " .align 16\n" \ + "903: addl $4, %%esp;\n" \ + " pushl %[thunk_target];\n" \ + " ret;\n" \ + " .align 16\n" \ + "904: call 901b;\n", \ + X86_FEATURE_RETPOLINE) + +# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#else /* No retpoline for C / inline asm */ +# define CALL_NOSPEC "call *%[thunk_target]\n" +# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#endif + +/* The Spectre V2 mitigation variants */ +enum spectre_v2_mitigation { + SPECTRE_V2_NONE, + SPECTRE_V2_RETPOLINE_MINIMAL, + SPECTRE_V2_RETPOLINE_MINIMAL_AMD, + SPECTRE_V2_RETPOLINE_GENERIC, + SPECTRE_V2_RETPOLINE_AMD, + SPECTRE_V2_IBRS, +}; + +extern char __indirect_thunk_start[]; +extern char __indirect_thunk_end[]; + +/* + * On VMEXIT we must ensure that no RSB predictions learned in the guest + * can be followed in the host, by overwriting the RSB completely. Both + * retpoline and IBRS mitigations for Spectre v2 need this; only on future + * CPUs with IBRS_ALL *might* it be avoided. + */ +static inline void vmexit_fill_RSB(void) +{ +#ifdef CONFIG_RETPOLINE + unsigned long loops; + + asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE("jmp 910f", + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), + X86_FEATURE_RETPOLINE) + "910:" + : "=r" (loops), ASM_CALL_CONSTRAINT + : : "memory" ); +#endif +} + +#define alternative_msr_write(_msr, _val, _feature) \ + asm volatile(ALTERNATIVE("", \ + "movl %[msr], %%ecx\n\t" \ + "movl %[val], %%eax\n\t" \ + "movl $0, %%edx\n\t" \ + "wrmsr", \ + _feature) \ + : : [msr] "i" (_msr), [val] "i" (_val) \ + : "eax", "ecx", "edx", "memory") + +static inline void indirect_branch_prediction_barrier(void) +{ + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, + X86_FEATURE_USE_IBPB); +} + +/* + * With retpoline, we must use IBRS to restrict branch prediction + * before calling into firmware. + * + * (Implemented as CPP macros due to header hell.) 
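 *
 * A caller is expected to bracket the firmware call with the two
 * macros below; roughly (the efi_call() name is only illustrative):
 *
 *   firmware_restrict_branch_speculation_start();
 *   status = efi_call(...);
 *   firmware_restrict_branch_speculation_end();
 *
 * Preemption stays disabled across that window so the IBRS bit set
 * in MSR_IA32_SPEC_CTRL is cleared again on the same CPU.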
+ */ +#define firmware_restrict_branch_speculation_start() \ +do { \ + preempt_disable(); \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ + X86_FEATURE_USE_IBRS_FW); \ +} while (0) + +#define firmware_restrict_branch_speculation_end() \ +do { \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ + X86_FEATURE_USE_IBRS_FW); \ + preempt_enable(); \ +} while (0) + +#endif /* __ASSEMBLY__ */ + +/* + * Below is used in the eBPF JIT compiler and emits the byte sequence + * for the following assembly: + * + * With retpolines configured: + * + * callq do_rop + * spec_trap: + * pause + * lfence + * jmp spec_trap + * do_rop: + * mov %rax,(%rsp) + * retq + * + * Without retpolines configured: + * + * jmp *%rax + */ +#ifdef CONFIG_RETPOLINE +# define RETPOLINE_RAX_BPF_JIT_SIZE 17 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT1_off32(0xE8, 7); /* callq do_rop */ \ + /* spec_trap: */ \ + EMIT2(0xF3, 0x90); /* pause */ \ + EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ + EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ + /* do_rop: */ \ + EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ + EMIT1(0xC3); /* retq */ +#else +# define RETPOLINE_RAX_BPF_JIT_SIZE 2 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT2(0xFF, 0xE0); /* jmp *%rax */ +#endif + +#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb232..d652a3808065 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -52,10 +52,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); -#ifdef CONFIG_X86_MCE -#define arch_unmap_kpfn arch_unmap_kpfn -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index fd81228e8037..93be6ee2f2be 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -7,6 +7,7 @@ #ifdef CONFIG_PARAVIRT #include #include +#include #include @@ -16,10 +17,9 @@ #include #include -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void load_sp0(unsigned long sp0) { - PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); + PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0); } /* The paravirtualized CPUID instruction. */ @@ -273,6 +273,13 @@ static inline void slow_down_io(void) #endif } +static inline unsigned long cpu_khz_from_paravirt(void) +{ + if (pv_cpu_ops.cpu_khz == NULL) + return 0; + return PVOP_CALL0(unsigned long, pv_cpu_ops.cpu_khz); +} + static inline void paravirt_activate_mm(struct mm_struct *prev, struct mm_struct *next) { @@ -298,9 +305,9 @@ static inline void __flush_tlb_global(void) { PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); } -static inline void __flush_tlb_single(unsigned long addr) +static inline void __flush_tlb_one_user(unsigned long addr) { - PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); + PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr); } static inline void flush_tlb_others(const struct cpumask *cpumask, @@ -797,6 +804,12 @@ static inline notrace unsigned long arch_local_irq_save(void) return f; } +static inline void write_msi_msg_paravirt(struct msi_desc *entry, + struct msi_msg *msg) +{ + return PVOP_VCALL2(pv_irq_ops.write_msi, entry, msg); +} + /* Make sure as little as possible of this mess escapes. 
*/ #undef PARAVIRT_CALL @@ -880,23 +893,27 @@ extern void default_banner(void); #define INTERRUPT_RETURN \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) + ANNOTATE_RETPOLINE_SAFE; \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + ANNOTATE_RETPOLINE_SAFE; \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + ANNOTATE_RETPOLINE_SAFE; \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #ifdef CONFIG_X86_32 #define GET_CR0_INTO_EAX \ push %ecx; push %edx; \ + ANNOTATE_RETPOLINE_SAFE; \ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ pop %edx; pop %ecx #else /* !CONFIG_X86_32 */ @@ -918,16 +935,29 @@ extern void default_banner(void); */ #define SWAPGS \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ + ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ ) #define GET_CR2_INTO_RAX \ - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) + ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); #define USERGS_SYSRET64 \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) + ANNOTATE_RETPOLINE_SAFE; \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);) + +#ifdef CONFIG_DEBUG_ENTRY +#define SAVE_FLAGS(clobbers) \ + PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) +#endif + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 10cc3b9709fe..63f39a5c621d 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -43,6 +43,7 @@ #include #include #include +#include struct page; struct thread_struct; @@ -53,6 +54,8 @@ struct desc_struct; struct task_struct; struct cpumask; struct flush_tlb_info; +struct msi_desc; +struct msi_msg; /* * Wrapper type for pointers to code which uses the non-standard @@ -134,7 +137,7 @@ struct pv_cpu_ops { void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); void (*free_ldt)(struct desc_struct *ldt, unsigned entries); - void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); + void (*load_sp0)(unsigned long sp0); void (*set_iopl_mask)(unsigned mask); @@ -174,6 +177,8 @@ struct pv_cpu_ops { void (*start_context_switch)(struct task_struct *prev); void (*end_context_switch)(struct task_struct *next); + + unsigned long (*cpu_khz)(void); } __no_randomize_layout; struct pv_irq_ops { @@ -194,6 +199,7 @@ struct pv_irq_ops { void (*safe_halt)(void); void (*halt)(void); + void (*write_msi)(struct msi_desc *entry, struct msi_msg *msg); } __no_randomize_layout; struct pv_mmu_ops { @@ -217,7 +223,7 @@ struct pv_mmu_ops { /* TLB operations */ void (*flush_tlb_user)(void); void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); + void (*flush_tlb_one_user)(unsigned long addr); void 
(*flush_tlb_others)(const struct cpumask *cpus, const struct flush_tlb_info *info); @@ -392,7 +398,9 @@ int paravirt_disable_iospace(void); * offset into the paravirt_patch_template structure, and can therefore be * freely converted back into a structure offset. */ -#define PARAVIRT_CALL "call *%c[paravirt_opptr];" +#define PARAVIRT_CALL \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%c[paravirt_opptr];" /* * These macros are intended to wrap calls through one of the paravirt diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 377f1ffd18be..ba3c523aaf16 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr, { bool oldbit; - asm volatile("bt "__percpu_arg(2)",%1\n\t" + asm volatile("bt "__percpu_arg(2)",%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 4b5e1eafada7..aff42e1da6ee 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {} */ extern gfp_t __userpte_alloc_gfp; +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Instead of one PGD, we acquire two PGDs. Being order-1, it is + * both 8k in size and 8k-aligned. That lets us just flip bit 12 + * in a pointer to swap between the two 4k halves. + */ +#define PGD_ALLOCATION_ORDER 1 +#else +#define PGD_ALLOCATION_ORDER 0 +#endif + /* * Allocate and free page tables. */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index f735c3016325..5c790e93657d 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD]; int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user); void ptdump_walk_pgd_level_checkwx(void); #ifdef CONFIG_DEBUG_WX @@ -349,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) { pmdval_t v = native_pmd_val(pmd); - return __pmd(v | set); + return native_make_pmd(v | set); } static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) { pmdval_t v = native_pmd_val(pmd); - return __pmd(v & ~clear); + return native_make_pmd(v & ~clear); } static inline pmd_t pmd_mkold(pmd_t pmd) @@ -408,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set) { pudval_t v = native_pud_val(pud); - return __pud(v | set); + return native_make_pud(v | set); } static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) { pudval_t v = native_pud_val(pud); - return __pud(v & ~clear); + return native_make_pud(v & ~clear); } static inline pud_t pud_mkold(pud_t pud) @@ -667,11 +668,6 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a) return false; } -static inline int pte_hidden(pte_t pte) -{ - return pte_flags(pte) & _PAGE_HIDDEN; -} - static inline int pmd_present(pmd_t pmd) { /* @@ -846,7 +842,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) static inline int p4d_bad(p4d_t p4d) { - return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; + unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (p4d_flags(p4d) & ~ignore_flags) != 0; } #endif /* CONFIG_PGTABLE_LEVELS > 3 */ @@ -880,7 +881,12 
@@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + unsigned long ignore_flags = _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) @@ -909,7 +915,11 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address))) +/* + * a shortcut to get a pgd_t in a given mm + */ +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's @@ -1093,6 +1103,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); } +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return pud_flags(pud) & _PAGE_RW; +} + /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * @@ -1105,7 +1121,14 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { - memcpy(dst, src, count * sizeof(pgd_t)); + memcpy(dst, src, count * sizeof(pgd_t)); +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + /* Clone the user space pgd as well */ + memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src), + count * sizeof(pgd_t)); +#endif } #define PTE_SHIFT ilog2(PTRS_PER_PTE) diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index e67c0620aec2..b3ec519e3982 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[]; static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } void paging_init(void); +void sync_initial_page_table(void); /* * Define this if things work differently on an i386 and an i486: @@ -61,7 +62,7 @@ void paging_init(void); #define kpte_clear_flush(ptep, vaddr) \ do { \ pte_clear(&init_mm, (vaddr), (ptep)); \ - __flush_tlb_one((vaddr)); \ + __flush_tlb_one_kernel((vaddr)); \ } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index f2ca9b28fd68..0777e18a1d23 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -38,13 +38,23 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ #define LAST_PKMAP 1024 #endif -#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ - & PMD_MASK) +/* + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c + * to avoid include recursion hell + */ +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) + +#define CPU_ENTRY_AREA_BASE \ + ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \ + & PMD_MASK) + +#define PKMAP_BASE \ + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) #else -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) +# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) #endif #define MODULES_VADDR VMALLOC_START diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e9f05331e732..1149d2112b2e 
100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[]; #define swapper_pg_dir init_top_pgt extern void paging_init(void); +static inline void sync_initial_page_table(void) { } #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %p(%016lx)\n", \ @@ -131,9 +132,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp) #endif } +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages + * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and + * the user one is in the last 4k. To switch between them, you + * just need to flip the 12th bit in their addresses. + */ +#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT + +/* + * This generates better code than the inline assembly in + * __set_bit(). + */ +static inline void *ptr_set_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr |= BIT(bit); + return (void *)__ptr; +} +static inline void *ptr_clear_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr &= ~BIT(bit); + return (void *)__ptr; +} + +static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp) +{ + return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp) +{ + return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp) +{ + return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) +{ + return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} +#endif /* CONFIG_PAGE_TABLE_ISOLATION */ + +/* + * Page table pages are page-aligned. The lower half of the top + * level is used for userspace and the top half for the kernel. + * + * Returns true for parts of the PGD that map userspace and + * false for the parts that map the kernel. + */ +static inline bool pgdp_maps_userspace(void *__ptr) +{ + unsigned long ptr = (unsigned long)__ptr; + + return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2); +} + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd); + +/* + * Take a PGD location (pgdp) and a pgd value that needs to be set there. + * Populates the user and returns the resulting PGD that must be set in + * the kernel copy of the page tables. 
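 *
 * As a concrete sketch of the bit-flip helpers defined above (the
 * address is made up): with the order-1, 8k-aligned PGD allocation,
 * a kernel PGD page at 0xffff888004008000 has its user counterpart
 * at 0xffff888004009000, so for any entry pointer in the kernel half
 *
 *   kernel_to_user_pgdp(pgdp) == (pgd_t *)((unsigned long)pgdp | BIT(PTI_PGTABLE_SWITCH_BIT))
 *
 * and user_to_kernel_pgdp() simply clears that bit again.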
+ */ +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return pgd; + return __pti_set_user_pgd(pgdp, pgd); +} +#else +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + return pgd; +} +#endif + static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { +#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL) + p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd); +#else *p4dp = p4d; +#endif } static inline void native_p4d_clear(p4d_t *p4d) @@ -147,7 +236,11 @@ static inline void native_p4d_clear(p4d_t *p4d) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { +#ifdef CONFIG_PAGE_TABLE_ISOLATION + *pgdp = pti_set_user_pgd(pgdp, pgd); +#else *pgdp = pgd; +#endif } static inline void native_pgd_clear(pgd_t *pgd) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6d5f45dcd4a1..6b8f73dcbc2c 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -75,33 +75,52 @@ typedef struct { pteval_t pte; } pte_t; #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +/* + * See Documentation/x86/x86_64/mm.txt for a description of the memory map. + * + * Be very careful vs. KASLR when changing anything here. The KASLR address + * range must not overlap with anything except the KASAN shadow area, which + * is correct as KASAN disables KASLR. + */ +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) + #ifdef CONFIG_X86_5LEVEL -#define VMALLOC_SIZE_TB _AC(16384, UL) -#define __VMALLOC_BASE _AC(0xff92000000000000, UL) -#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define VMALLOC_SIZE_TB _AC(12800, UL) +# define __VMALLOC_BASE _AC(0xffa0000000000000, UL) +# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define LDT_PGD_ENTRY _AC(-112, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #else -#define VMALLOC_SIZE_TB _AC(32, UL) -#define __VMALLOC_BASE _AC(0xffffc90000000000, UL) -#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define VMALLOC_SIZE_TB _AC(32, UL) +# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) +# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define LDT_PGD_ENTRY _AC(-3, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif + #ifdef CONFIG_RANDOMIZE_MEMORY -#define VMALLOC_START vmalloc_base -#define VMEMMAP_START vmemmap_base +# define VMALLOC_START vmalloc_base +# define VMEMMAP_START vmemmap_base #else -#define VMALLOC_START __VMALLOC_BASE -#define VMEMMAP_START __VMEMMAP_BASE +# define VMALLOC_START __VMALLOC_BASE +# define VMEMMAP_START __VMEMMAP_BASE #endif /* CONFIG_RANDOMIZE_MEMORY */ -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) -#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) + +#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) + +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) /* The module sections ends with the start of the fixmap */ -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) -#define MODULES_LEN (MODULES_END - MODULES_VADDR) -#define ESPFIX_PGD_ENTRY _AC(-2, UL) -#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) -#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) -#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) +#define MODULES_END _AC(0xffffffffff000000, UL) +#define 
MODULES_LEN (MODULES_END - MODULES_VADDR) + +#define ESPFIX_PGD_ENTRY _AC(-2, UL) +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) + +#define CPU_ENTRY_AREA_PGD _AC(-4, UL) +#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) + +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) +#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) #define EARLY_DYNAMIC_PAGE_TABLES 64 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 59df7b47a434..246f15b4e64c 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -32,7 +32,6 @@ #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 -#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 @@ -79,18 +78,6 @@ #define _PAGE_KNL_ERRATUM_MASK 0 #endif -#ifdef CONFIG_KMEMCHECK -#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) -#else -#define _PAGE_HIDDEN (_AT(pteval_t, 0)) -#endif - -/* - * The same hidden bit is used by kmemcheck, but since kmemcheck - * works on kernel pages while soft-dirty engine on user space, - * they do not conflict with each other. - */ - #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else @@ -200,10 +187,9 @@ enum page_cache_mode { #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ _PAGE_DIRTY | _PAGE_ENC) +#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER) #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC) #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC) @@ -337,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud) #else #include +static inline pud_t native_make_pud(pudval_t val) +{ + return (pud_t) { .p4d.pgd = native_make_pgd(val) }; +} + static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.p4d.pgd); @@ -358,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) #else #include +static inline pmd_t native_make_pmd(pmdval_t val) +{ + return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; +} + static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.p4d.pgd); diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index 43212a43ee69..625a52a5594f 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -38,6 +38,11 @@ #define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull) #define CR3_PCID_MASK 0xFFFull #define CR3_NOFLUSH BIT_ULL(63) + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define X86_CR3_PTI_PCID_USER_BIT 11 +#endif + #else /* * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index bdac19ab2488..3222c7746cb1 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -91,7 +91,7 @@ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; - __u8 x86_mask; + __u8 x86_stepping; #ifdef CONFIG_X86_64 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ int x86_tlbsize; @@ -109,7 +109,7 @@ struct cpuinfo_x86 { char x86_vendor_id[16]; char x86_model_id[64]; /* in KB - valid for CPUS which support this call: */ - int x86_cache_size; 
+ unsigned int x86_cache_size; int x86_cache_alignment; /* In bytes */ /* Cache QoS architectural values: */ int x86_cache_max_rmid; /* max index */ @@ -162,9 +162,9 @@ enum cpuid_regs_idx { extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -extern __u32 cpu_caps_cleared[NCAPINTS]; -extern __u32 cpu_caps_set[NCAPINTS]; +extern struct x86_hw_tss doublefault_tss; +extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; +extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; #ifdef CONFIG_SMP DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); @@ -252,6 +252,11 @@ static inline void load_cr3(pgd_t *pgdir) write_cr3(__sme_pa(pgdir)); } +/* + * Note that while the legacy 'TSS' name comes from 'Task State Segment', + * on modern x86 CPUs the TSS also holds information important to 64-bit mode, + * unrelated to the task-switch mechanism: + */ #ifdef CONFIG_X86_32 /* This is the TSS defined by the hardware. */ struct x86_hw_tss { @@ -304,7 +309,13 @@ struct x86_hw_tss { struct x86_hw_tss { u32 reserved1; u64 sp0; + + /* + * We store cpu_current_top_of_stack in sp1 so it's always accessible. + * Linux does not use ring 1, so sp1 is not otherwise needed. + */ u64 sp1; + u64 sp2; u64 reserved2; u64 ist[7]; @@ -322,12 +333,22 @@ struct x86_hw_tss { #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) +#define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define INVALID_IO_BITMAP_OFFSET 0x8000 +struct entry_stack { + unsigned long words[64]; +}; + +struct entry_stack_page { + struct entry_stack stack; +} __aligned(PAGE_SIZE); + struct tss_struct { /* - * The hardware state: + * The fixed hardware portion. This must not cross a page boundary + * at risk of violating the SDM's advice and potentially triggering + * errata. */ struct x86_hw_tss x86_tss; @@ -338,18 +359,9 @@ struct tss_struct { * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; +} __aligned(PAGE_SIZE); -#ifdef CONFIG_X86_32 - /* - * Space for the temporary SYSENTER stack. - */ - unsigned long SYSENTER_stack_canary; - unsigned long SYSENTER_stack[64]; -#endif - -} ____cacheline_aligned; - -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); +DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw); /* * sizeof(unsigned long) coming from an extra "long" at the end @@ -363,6 +375,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); #ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); +#else +/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. 
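 * As a sketch, current_top_of_stack() then boils down to
 *
 *   this_cpu_read_stable(cpu_tss_rw.x86_tss.sp1)
 *
 * while asm entry code can read the same value at the TSS_sp1 offset
 * from the per-CPU cpu_tss_rw.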
*/ +#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1 #endif /* @@ -431,7 +446,9 @@ typedef struct { struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; +#ifdef CONFIG_X86_32 unsigned long sp0; +#endif unsigned long sp; #ifdef CONFIG_X86_32 unsigned long sysenter_cs; @@ -442,8 +459,6 @@ struct thread_struct { unsigned short gsindex; #endif - u32 status; /* thread synchronous flags */ - #ifdef CONFIG_X86_64 unsigned long fsbase; unsigned long gsbase; @@ -518,16 +533,9 @@ static inline void native_set_iopl_mask(unsigned mask) } static inline void -native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) +native_load_sp0(unsigned long sp0) { - tss->x86_tss.sp0 = thread->sp0; -#ifdef CONFIG_X86_32 - /* Only happens when SEP is enabled, no need to test "SEP"arately: */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } -#endif + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } static inline void native_swapgs(void) @@ -539,12 +547,18 @@ static inline void native_swapgs(void) static inline unsigned long current_top_of_stack(void) { -#ifdef CONFIG_X86_64 - return this_cpu_read_stable(cpu_tss.x86_tss.sp0); -#else - /* sp0 on x86_32 is special in and around vm86 mode. */ + /* + * We can't read directly from tss.sp0: sp0 on x86_32 is special in + * and around vm86 mode and sp0 on x86_64 is special because of the + * entry trampoline. + */ return this_cpu_read_stable(cpu_current_top_of_stack); -#endif +} + +static inline bool on_thread_stack(void) +{ + return (unsigned long)(current_top_of_stack() - + current_stack_pointer) < THREAD_SIZE; } #ifdef CONFIG_PARAVIRT @@ -552,10 +566,9 @@ static inline unsigned long current_top_of_stack(void) #else #define __cpuid native_cpuid -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void load_sp0(unsigned long sp0) { - native_load_sp0(tss, thread); + native_load_sp0(sp0); } #define set_iopl_mask native_set_iopl_mask @@ -804,6 +817,15 @@ static inline void spin_lock_prefetch(const void *x) #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ TOP_OF_KERNEL_STACK_PADDING) +#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1)) + +#define task_pt_regs(task) \ +({ \ + unsigned long __ptr = (unsigned long)task_stack_page(task); \ + __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ + ((struct pt_regs *)__ptr) - 1; \ +}) + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). @@ -823,34 +845,26 @@ static inline void spin_lock_prefetch(const void *x) .addr_limit = KERNEL_DS, \ } -/* - * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack. - * This is necessary to guarantee that the entire "struct pt_regs" - * is accessible even if the CPU haven't stored the SS/ESP registers - * on the stack (interrupt gate does not save these registers - * when switching to the same priv ring). - * Therefore beware: accessing the ss/esp fields of the - * "struct pt_regs" is possible, but they may contain the - * completely wrong values. - */ -#define task_pt_regs(task) \ -({ \ - unsigned long __ptr = (unsigned long)task_stack_page(task); \ - __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ - ((struct pt_regs *)__ptr) - 1; \ -}) - #define KSTK_ESP(task) (task_pt_regs(task)->sp) #else /* - * User space process size. 47bits minus one guard page. 
The guard - * page is necessary on Intel CPUs: if a SYSCALL instruction is at - * the highest possible canonical userspace address, then that - * syscall will enter the kernel with a non-canonical return - * address, and SYSRET will explode dangerously. We avoid this - * particular problem by preventing anything from being mapped - * at the maximum canonical address. + * User space process size. This is the first address outside the user range. + * There are a few constraints that determine this: + * + * On Intel CPUs, if a SYSCALL instruction is at the highest canonical + * address, then that syscall will enter the kernel with a + * non-canonical return address, and SYSRET will explode dangerously. + * We avoid this particular problem by preventing anything executable + * from being mapped at the maximum canonical address. + * + * On AMD CPUs in the Ryzen family, there's a nasty bug in which the + * CPUs malfunction if they execute code from the highest canonical page. + * They'll speculate right off the end of the canonical space, and + * bad things happen. This is worked around in the same way as the + * Intel problem. + * + * With page table isolation enabled, we map the LDT in ... [stay tuned] */ #define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) @@ -873,11 +887,9 @@ static inline void spin_lock_prefetch(const void *x) #define STACK_TOP_MAX TASK_SIZE_MAX #define INIT_THREAD { \ - .sp0 = TOP_OF_INIT_STACK, \ .addr_limit = KERNEL_DS, \ } -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) extern unsigned long KSTK_ESP(struct task_struct *task); #endif /* CONFIG_X86_64 */ @@ -956,4 +968,5 @@ bool xen_set_default_idle(void); void stop_this_cpu(void *dummy); void df_debug(struct pt_regs *regs, long error_code); +void microcode_check(void); #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h new file mode 100644 index 000000000000..0b5ef05b2d2d --- /dev/null +++ b/arch/x86/include/asm/pti.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _ASM_X86_PTI_H +#define _ASM_X86_PTI_H +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +extern void pti_init(void); +extern void pti_check_boottime_disable(void); +#else +static inline void pti_check_boottime_disable(void) { } +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_X86_PTI_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index c0e3c45cf6ab..14131dd06b29 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -136,9 +136,9 @@ static inline int v8086_mode(struct pt_regs *regs) #endif } -#ifdef CONFIG_X86_64 static inline bool user_64bit_mode(struct pt_regs *regs) { +#ifdef CONFIG_X86_64 #ifndef CONFIG_PARAVIRT /* * On non-paravirt systems, this is the only long mode CPL 3 @@ -149,8 +149,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs) /* Headers are too twisted for this to go in paravirt.h. */ return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; #endif +#else /* !CONFIG_X86_64 */ + return false; +#endif } +#ifdef CONFIG_X86_64 #define current_user_stack_pointer() current_pt_regs()->sp #define compat_user_stack_pointer() current_pt_regs()->sp #endif diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index ff871210b9f2..d65171120e90 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -15,7 +15,7 @@ * back to the regular execution flow in .text. 
*/ #define _REFCOUNT_EXCEPTION \ - ".pushsection .text.unlikely\n" \ + ".pushsection .text..refcount\n" \ "111:\tlea %[counter], %%" _ASM_CX "\n" \ "112:\t" ASM_UD0 "\n" \ ASM_UNREACHABLE \ @@ -67,13 +67,13 @@ static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, "er", i, "%0", e); + r->refs.counter, "er", i, "%0", e, "cx"); } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) { GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, "%0", e); + r->refs.counter, "%0", e, "cx"); } static __always_inline __must_check diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index d91ba04dd007..fb3a6de7440b 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -106,6 +106,7 @@ #define REQUIRED_MASK15 0 #define REQUIRED_MASK16 (NEED_LA57) #define REQUIRED_MASK17 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define REQUIRED_MASK18 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index d8f3a6ae9f6c..4914a3e7c803 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -2,8 +2,7 @@ #ifndef _ASM_X86_RMWcc #define _ASM_X86_RMWcc -#define __CLOBBERS_MEM "memory" -#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx" +#define __CLOBBERS_MEM(clb...) "memory", ## clb #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) @@ -29,7 +28,7 @@ cc_label: \ #define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ do { \ bool c; \ - asm volatile (fullop ";" CC_SET(cc) \ + asm volatile (fullop CC_SET(cc) \ : [counter] "+m" (var), CC_OUT(cc) (c) \ : __VA_ARGS__ : clobbers); \ return c; \ @@ -40,18 +39,19 @@ do { \ #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ - __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM) + __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) -#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \ +#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\ __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ - __CLOBBERS_MEM_CC_CX) + __CLOBBERS_MEM(clobbers)) #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ - __CLOBBERS_MEM, vcon (val)) + __CLOBBERS_MEM(), vcon (val)) -#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \ +#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \ + clobbers...) 
\ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ - __CLOBBERS_MEM_CC_CX, vcon (val)) + __CLOBBERS_MEM(clobbers), vcon (val)) #endif /* _ASM_X86_RMWcc */ diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index d6baf23782bc..5c019d23d06b 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h @@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[]; #if defined(CONFIG_X86_64) extern char __end_rodata_hpage_align[]; +extern char __entry_trampoline_start[], __entry_trampoline_end[]; #endif #endif /* _ASM_X86_SECTIONS_H */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index b20f9d623f9c..8f09012b92e7 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -236,11 +236,23 @@ */ #define EARLY_IDT_HANDLER_SIZE 9 +/* + * xen_early_idt_handler_array is for Xen pv guests: for each entry in + * early_idt_handler_array it contains a prequel in the form of + * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to + * max 8 bytes. + */ +#define XEN_EARLY_IDT_HANDLER_SIZE 8 + #ifndef __ASSEMBLY__ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; extern void early_ignore_irq(void); +#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) +extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE]; +#endif + /* * Load a segment. Fall back on loading the zero segment if something goes * wrong. This variant assumes that loading zero fully clears the segment. diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 8da111b3c342..f73706878772 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -16,6 +16,7 @@ enum stack_type { STACK_TYPE_TASK, STACK_TYPE_IRQ, STACK_TYPE_SOFTIRQ, + STACK_TYPE_ENTRY, STACK_TYPE_EXCEPTION, STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, }; @@ -28,6 +29,8 @@ struct stack_info { bool in_task_stack(unsigned long *stack, struct task_struct *task, struct stack_info *info); +bool in_entry_stack(unsigned long *stack, struct stack_info *info); + int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask); diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 076502241eae..55d392c6bd29 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) * No 3D Now! */ -#ifndef CONFIG_KMEMCHECK - #if (__GNUC__ >= 4) #define memcpy(t, f, n) __builtin_memcpy(t, f, n) #else @@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) ? __constant_memcpy((t), (f), (n)) \ : __memcpy((t), (f), (n))) #endif -#else -/* - * kmemcheck becomes very happy if we use the REP instructions unconditionally, - * because it means that we know both memory operands in advance. 
- */ -#define memcpy(t, f, n) __memcpy((t), (f), (n)) -#endif #endif #endif /* !CONFIG_FORTIFY_SOURCE */ diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 0b1b4445f4c5..533f74c300c2 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); #ifndef CONFIG_FORTIFY_SOURCE -#ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ ({ \ @@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len); __ret; \ }) #endif -#else -/* - * kmemcheck becomes very happy if we use the REP instructions unconditionally, - * because it means that we know both memory operands in advance. - */ -#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) -#endif #endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMSET diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 899084b70412..9b6df68d8fd1 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_SWITCH_TO_H #define _ASM_X86_SWITCH_TO_H +#include + struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to_asm(struct task_struct *prev, @@ -73,4 +75,28 @@ do { \ ((last) = __switch_to_asm((prev), (next))); \ } while (0) +#ifdef CONFIG_X86_32 +static inline void refresh_sysenter_cs(struct thread_struct *thread) +{ + /* Only happens when SEP is enabled, no need to test "SEP"arately: */ + if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs)) + return; + + this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs); + wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); +} +#endif + +/* This is used when switching tasks or entering/exiting vm86 mode. */ +static inline void update_sp0(struct task_struct *task) +{ + /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */ +#ifdef CONFIG_X86_32 + load_sp0(task->thread.sp0); +#else + if (static_cpu_has(X86_FEATURE_XENPV)) + load_sp0(task_top_of_stack(task)); +#endif +} + #endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index e3c95e8e61c5..03eedc21246d 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -60,7 +60,7 @@ static inline long syscall_get_error(struct task_struct *task, * TS_COMPAT is set for 32-bit syscall entries and then * remains set until we return to user mode. */ - if (task->thread.status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (task->thread_info.status & (TS_COMPAT|TS_I386_REGS_POKED)) /* * Sign-extend the value so (int)-EFOO becomes (long)-EFOO * and will match correctly in comparisons. 
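To make the sign-extension above concrete: a compat task's negative return
value only occupies the low 32 bits of the saved register, so it has to be
widened through (int) before it can be compared against (long)-EFOO. A
minimal sketch with -ENOENT:

	long ax  = 0xfffffffe;		/* -ENOENT as left in a 32-bit register image */
	long err = (long)(int)ax;	/* sign-extend through int */
	/* ax reads back as 4294967294 while err == -2 == -ENOENT */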
@@ -116,7 +116,7 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread.status & TS_COMPAT) + if (task->thread_info.status & TS_COMPAT) switch (i) { case 0: if (!n--) break; @@ -177,7 +177,7 @@ static inline void syscall_set_arguments(struct task_struct *task, const unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread.status & TS_COMPAT) + if (task->thread_info.status & TS_COMPAT) switch (i) { case 0: if (!n--) break; diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 91dfcafe27a6..bad25bb80679 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); asmlinkage long sys_iopl(unsigned int); /* kernel/ldt.c */ -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); +asmlinkage long sys_modify_ldt(int, void __user *, unsigned long); /* kernel/signal.c */ asmlinkage long sys_rt_sigreturn(void); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 70f425947dc5..eda3b6823ca4 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -55,6 +55,7 @@ struct task_struct; struct thread_info { unsigned long flags; /* low level flags */ + u32 status; /* thread synchronous flags */ }; #define INIT_THREAD_INFO(tsk) \ @@ -207,7 +208,7 @@ static inline int arch_within_stack_frames(const void * const stack, #else /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_64 -# define cpu_current_top_of_stack (cpu_tss + TSS_sp0) +# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1) #endif #endif @@ -221,7 +222,7 @@ static inline int arch_within_stack_frames(const void * const stack, #define in_ia32_syscall() true #else #define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \ - current->thread.status & TS_COMPAT) + current_thread_info()->status & TS_COMPAT) #endif /* diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 509046cfa5ce..704f31315dde 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -9,70 +9,130 @@ #include #include #include +#include +#include +#include -static inline void __invpcid(unsigned long pcid, unsigned long addr, - unsigned long type) -{ - struct { u64 d[2]; } desc = { { pcid, addr } }; +/* + * The x86 feature is called PCID (Process Context IDentifier). It is similar + * to what is traditionally called ASID on the RISC processors. + * + * We don't use the traditional ASID implementation, where each process/mm gets + * its own ASID and flush/restart when we run out of ASID space. + * + * Instead we have a small per-cpu array of ASIDs and cache the last few mm's + * that came by on this CPU, allowing cheaper switch_mm between processes on + * this CPU. + * + * We end up with different spaces for different things. To avoid confusion we + * use different names for each of them: + * + * ASID - [0, TLB_NR_DYN_ASIDS-1] + * the canonical identifier for an mm + * + * kPCID - [1, TLB_NR_DYN_ASIDS] + * the value we write into the PCID part of CR3; corresponds to the + * ASID+1, because PCID 0 is special. + * + * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS] + * for KPTI each mm has two address spaces and thus needs two + * PCID values, but we can still do with a single ASID denomination + * for each mm. Corresponds to kPCID + 2048. 
+ * + */ - /* - * The memory clobber is because the whole point is to invalidate - * stale TLB entries and, especially if we're flushing global - * mappings, we don't want the compiler to reorder any subsequent - * memory accesses before the TLB flush. - * - * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and - * invpcid (%rcx), %rax in long mode. - */ - asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" - : : "m" (desc), "a" (type), "c" (&desc) : "memory"); -} +/* There are 12 bits of space for ASIDS in CR3 */ +#define CR3_HW_ASID_BITS 12 -#define INVPCID_TYPE_INDIV_ADDR 0 -#define INVPCID_TYPE_SINGLE_CTXT 1 -#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 -#define INVPCID_TYPE_ALL_NON_GLOBAL 3 +/* + * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for + * user/kernel switches + */ +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define PTI_CONSUMED_PCID_BITS 1 +#else +# define PTI_CONSUMED_PCID_BITS 0 +#endif -/* Flush all mappings for a given pcid and addr, not including globals. */ -static inline void invpcid_flush_one(unsigned long pcid, - unsigned long addr) -{ - __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); -} +#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS) + +/* + * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account + * for them being zero-based. Another -1 is because PCID 0 is reserved for + * use by non-PCID-aware users. + */ +#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2) + +/* + * 6 because 6 should be plenty and struct tlb_state will fit in two cache + * lines. + */ +#define TLB_NR_DYN_ASIDS 6 -/* Flush all mappings for a given PCID, not including globals. */ -static inline void invpcid_flush_single_context(unsigned long pcid) +/* + * Given @asid, compute kPCID + */ +static inline u16 kern_pcid(u16 asid) { - __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + /* + * Make sure that the dynamic ASID space does not confict with the + * bit we are using to switch between user and kernel ASIDs. + */ + BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); + + /* + * The ASID being passed in here should have respected the + * MAX_ASID_AVAILABLE and thus never have the switch bit set. + */ + VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); +#endif + /* + * The dynamically-assigned ASIDs that get passed in are small + * (context.tlb_gen); - smp_mb__after_atomic(); - - return new_tlb_gen; + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); + VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID)); + return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; } #ifdef CONFIG_PARAVIRT @@ -80,7 +140,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) #else #define __flush_tlb() __native_flush_tlb() #define __flush_tlb_global() __native_flush_tlb_global() -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr) #endif static inline bool tlb_defer_switch_to_init_mm(void) @@ -99,12 +159,6 @@ static inline bool tlb_defer_switch_to_init_mm(void) return !static_cpu_has(X86_FEATURE_PCID); } -/* - * 6 because 6 should be plenty and struct tlb_state will fit in - * two cache lines. 
- */ -#define TLB_NR_DYN_ASIDS 6 - struct tlb_context { u64 ctx_id; u64 tlb_gen; @@ -120,6 +174,8 @@ struct tlb_state { struct mm_struct *loaded_mm; u16 loaded_mm_asid; u16 next_asid; + /* last user mm's ctx id */ + u64 last_ctx_id; /* * We can be in one of several states: @@ -138,6 +194,24 @@ struct tlb_state { */ bool is_lazy; + /* + * If set we changed the page tables in such a way that we + * needed an invalidation of all contexts (aka. PCIDs / ASIDs). + * This tells us to go invalidate all the non-loaded ctxs[] + * on the next context switch. + * + * The current ctx was kept up-to-date as it ran and does not + * need to be invalidated. + */ + bool invalidate_other; + + /* + * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate + * the corresponding user PCID needs a flush next time we + * switch to it; see SWITCH_TO_USER_CR3. + */ + unsigned short user_pcid_flush_mask; + /* * Access to this CR4 shadow and to H/W CR4 is protected by * disabling interrupts when modifying either one. @@ -215,6 +289,14 @@ static inline unsigned long cr4_read_shadow(void) return this_cpu_read(cpu_tlbstate.cr4); } +/* + * Mark all other ASIDs as invalid, preserves the current. + */ +static inline void invalidate_other_asid(void) +{ + this_cpu_write(cpu_tlbstate.invalidate_other, true); +} + /* * Save some of cr4 feature set we're using (e.g. Pentium 4MB * enable and PPro Global page enable), so that any CPU's that boot @@ -234,37 +316,63 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) extern void initialize_tlbstate_and_flush(void); -static inline void __native_flush_tlb(void) +/* + * Given an ASID, flush the corresponding user ASID. We can delay this + * until the next time we switch to it. + * + * See SWITCH_TO_USER_CR3. + */ +static inline void invalidate_user_asid(u16 asid) { + /* There is no user ASID if address space separation is off */ + if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + return; + /* - * If current->mm == NULL then we borrow a mm which may change during a - * task switch and therefore we must not be preempted while we write CR3 - * back: + * We only have a single ASID if PCID is off and the CR3 + * write will have flushed it. */ - preempt_disable(); - native_write_cr3(__native_read_cr3()); - preempt_enable(); + if (!cpu_feature_enabled(X86_FEATURE_PCID)) + return; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + __set_bit(kern_pcid(asid), + (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask)); } -static inline void __native_flush_tlb_global_irq_disabled(void) +/* + * flush the entire current user mapping + */ +static inline void __native_flush_tlb(void) { - unsigned long cr4; + /* + * Preemption or interrupts must be disabled to protect the access + * to the per CPU variable and to prevent being preempted between + * read_cr3() and write_cr3(). + */ + WARN_ON_ONCE(preemptible()); - cr4 = this_cpu_read(cpu_tlbstate.cr4); - /* clear PGE */ - native_write_cr4(cr4 & ~X86_CR4_PGE); - /* write old PGE again and flush TLBs */ - native_write_cr4(cr4); + invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); + + /* If current->mm == NULL then the read_cr3() "borrows" an mm */ + native_write_cr3(__native_read_cr3()); } +/* + * flush everything + */ static inline void __native_flush_tlb_global(void) { - unsigned long flags; + unsigned long cr4, flags; if (static_cpu_has(X86_FEATURE_INVPCID)) { /* * Using INVPCID is considerably faster than a pair of writes * to CR4 sandwiched inside an IRQ flag save/restore. 
+ * + * Note, this works with CR4.PCIDE=0 or 1. */ invpcid_flush_all(); return; @@ -277,36 +385,82 @@ static inline void __native_flush_tlb_global(void) */ raw_local_irq_save(flags); - __native_flush_tlb_global_irq_disabled(); + cr4 = this_cpu_read(cpu_tlbstate.cr4); + /* toggle PGE */ + native_write_cr4(cr4 ^ X86_CR4_PGE); + /* write old PGE again and flush TLBs */ + native_write_cr4(cr4); raw_local_irq_restore(flags); } -static inline void __native_flush_tlb_single(unsigned long addr) +/* + * flush one page in the user mapping + */ +static inline void __native_flush_tlb_one_user(unsigned long addr) { + u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1. + * Just use invalidate_user_asid() in case we are called early. + */ + if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) + invalidate_user_asid(loaded_mm_asid); + else + invpcid_flush_one(user_pcid(loaded_mm_asid), addr); } +/* + * flush everything + */ static inline void __flush_tlb_all(void) { - if (boot_cpu_has(X86_FEATURE_PGE)) + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); - else + } else { + /* + * !PGE -> !PCID (setup_pcid()), thus every flush is total. + */ __flush_tlb(); + } +} + +/* + * flush one page in the kernel mapping + */ +static inline void __flush_tlb_one_kernel(unsigned long addr) +{ + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); /* - * Note: if we somehow had PCID but not PGE, then this wouldn't work -- - * we'd end up flushing kernel translations for the current ASID but - * we might fail to flush kernel translations for other cached ASIDs. + * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its + * paravirt equivalent. Even with PCID, this is sufficient: we only + * use PCID if we also use global PTEs for the kernel mapping, and + * INVLPG flushes global translations across all address spaces. * - * To avoid this issue, we force PCID off if PGE is off. + * If PTI is on, then the kernel is mapped with non-global PTEs, and + * __flush_tlb_one_user() will flush the given address for the current + * kernel address space and for its usermode counterpart, but it does + * not flush it for other address spaces. */ -} + __flush_tlb_one_user(addr); -static inline void __flush_tlb_one(unsigned long addr) -{ - count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); - __flush_tlb_single(addr); + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * See above. We need to propagate the flush to all other address + * spaces. In principle, we only need to propagate it to kernelmode + * address spaces, but the extra bookkeeping we would need is not + * worth it. + */ + invalidate_other_asid(); } #define TLB_FLUSH_ALL -1UL @@ -367,6 +521,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) void native_flush_tlb_others(const struct cpumask *cpumask, const struct flush_tlb_info *info); +static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) +{ + /* + * Bump the generation count. This also serves as a full barrier + * that synchronizes with switch_mm(): callers are required to order + * their read of mm_cpumask after their writes to the paging + * structures. 
+ */ + return atomic64_inc_return(&mm->context.tlb_gen); +} + static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm) { diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index fa60398bbc3a..069c04be1507 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -34,11 +34,6 @@ DECLARE_EVENT_CLASS(x86_fpu, ) ); -DEFINE_EVENT(x86_fpu, x86_fpu_state, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_before_save, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) @@ -74,11 +69,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_activate_state, TP_ARGS(fpu) ); -DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_init_state, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index b0cced97a6ce..3de69330e6c5 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -38,9 +38,9 @@ asmlinkage void simd_coprocessor_error(void); #if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) asmlinkage void xen_divide_error(void); +asmlinkage void xen_xennmi(void); asmlinkage void xen_xendebug(void); asmlinkage void xen_xenint3(void); -asmlinkage void xen_nmi(void); asmlinkage void xen_overflow(void); asmlinkage void xen_bounds(void); asmlinkage void xen_invalid_op(void); @@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long); dotraplinkage void do_stack_segment(struct pt_regs *, long); #ifdef CONFIG_X86_64 dotraplinkage void do_double_fault(struct pt_regs *, long); -asmlinkage struct pt_regs *sync_regs(struct pt_regs *); #endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); @@ -89,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *, long); #endif +dotraplinkage void do_mce(struct pt_regs *, long); static inline int get_si_code(unsigned long condition) { @@ -145,4 +145,22 @@ enum { X86_TRAP_IRET = 32, /* 32, IRET Exception */ }; +/* + * Page fault error code bits: + * + * bit 0 == 0: no page found 1: protection fault + * bit 1 == 0: read access 1: write access + * bit 2 == 0: kernel-mode access 1: user-mode access + * bit 3 == 1: use of reserved bit detected + * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access + */ +enum x86_pf_error_code { + X86_PF_PROT = 1 << 0, + X86_PF_WRITE = 1 << 1, + X86_PF_USER = 1 << 2, + X86_PF_RSVD = 1 << 3, + X86_PF_INSTR = 1 << 4, + X86_PF_PK = 1 << 5, +}; #endif /* _ASM_X86_TRAPS_H */ diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 8da0efb13544..a62413de1e2f 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -62,6 +62,15 @@ extern int notsc_setup(char *); extern void tsc_save_sched_clock_state(void); extern void tsc_restore_sched_clock_state(void); +#ifdef CONFIG_PARAVIRT +#include +#else +static inline unsigned long cpu_khz_from_paravirt(void) +{ + return 0; +} +#endif + unsigned long cpu_khz_from_msr(void); #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 574dff4d2913..aae77eb8491c 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -124,6 +124,11 @@ extern int __get_user_bad(void); #define __uaccess_begin() stac() #define 
__uaccess_end() clac() +#define __uaccess_begin_nospec() \ +({ \ + stac(); \ + barrier_nospec(); \ +}) /* * This is a type: either unsigned long, if the argument fits into @@ -445,7 +450,7 @@ do { \ ({ \ int __gu_err; \ __inttype(*(ptr)) __gu_val; \ - __uaccess_begin(); \ + __uaccess_begin_nospec(); \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ __uaccess_end(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ @@ -487,6 +492,10 @@ struct __large_struct { unsigned long buf[100]; }; __uaccess_begin(); \ barrier(); +#define uaccess_try_nospec do { \ + current->thread.uaccess_err = 0; \ + __uaccess_begin_nospec(); \ + #define uaccess_catch(err) \ __uaccess_end(); \ (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \ @@ -548,7 +557,7 @@ struct __large_struct { unsigned long buf[100]; }; * get_user_ex(...); * } get_user_catch(err) */ -#define get_user_try uaccess_try +#define get_user_try uaccess_try_nospec #define get_user_catch(err) uaccess_catch(err) #define get_user_ex(x, ptr) do { \ @@ -582,7 +591,7 @@ extern void __cmpxchg_wrong_size(void) __typeof__(ptr) __uval = (uval); \ __typeof__(*(ptr)) __old = (old); \ __typeof__(*(ptr)) __new = (new); \ - __uaccess_begin(); \ + __uaccess_begin_nospec(); \ switch (size) { \ case 1: \ { \ diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 72950401b223..ba2dc1930630 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -29,21 +29,21 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) switch (n) { case 1: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u8 *)to, from, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u16 *)to, from, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u32 *)to, from, ret, "l", "k", "=r", 4); __uaccess_end(); diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index f07ef3c575db..62546b3a398e 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -55,31 +55,31 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size) return copy_user_generic(dst, (__force void *)src, size); switch (size) { case 1: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, ret, "l", "k", "=r", 4); __uaccess_end(); return ret; case 8: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 8); __uaccess_end(); return ret; case 10: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 10); if (likely(!ret)) @@ -89,7 +89,7 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size) __uaccess_end(); return ret; case 16: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 16); if (likely(!ret)) diff --git a/arch/x86/include/asm/umip.h 
b/arch/x86/include/asm/umip.h new file mode 100644 index 000000000000..db43f2a0d92c --- /dev/null +++ b/arch/x86/include/asm/umip.h @@ -0,0 +1,12 @@ +#ifndef _ASM_X86_UMIP_H +#define _ASM_X86_UMIP_H + +#include +#include + +#ifdef CONFIG_X86_INTEL_UMIP +bool fixup_umip_exception(struct pt_regs *regs); +#else +static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; } +#endif /* CONFIG_X86_INTEL_UMIP */ +#endif /* _ASM_X86_UMIP_H */ diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 87adc0d38c4a..1f86e1b0a5cd 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -7,17 +7,20 @@ #include #include +#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip)) +#define IRET_FRAME_SIZE (sizeof(struct pt_regs) - IRET_FRAME_OFFSET) + struct unwind_state { struct stack_info stack_info; unsigned long stack_mask; struct task_struct *task; int graph_idx; bool error; -#if defined(CONFIG_ORC_UNWINDER) +#if defined(CONFIG_UNWINDER_ORC) bool signal, full_regs; unsigned long sp, bp, ip; struct pt_regs *regs; -#elif defined(CONFIG_FRAME_POINTER_UNWINDER) +#elif defined(CONFIG_UNWINDER_FRAME_POINTER) bool got_irq; unsigned long *bp, *orig_sp, ip; struct pt_regs *regs; @@ -51,22 +54,35 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, __unwind_start(state, task, regs, first_frame); } -#if defined(CONFIG_ORC_UNWINDER) || defined(CONFIG_FRAME_POINTER_UNWINDER) -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) +/* + * If 'partial' returns true, only the iret frame registers are valid. + */ +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { if (unwind_done(state)) return NULL; + if (partial) { +#ifdef CONFIG_UNWINDER_ORC + *partial = !state->full_regs; +#else + *partial = false; +#endif + } + return state->regs; } #else -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { return NULL; } #endif -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC void unwind_init(void); void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index caec8417539f..7c300299e12e 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -352,6 +352,7 @@ enum vmcs_field { #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ +#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ /* GUEST_INTERRUPTIBILITY_INFO flags. */ diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index d9a7c659009c..b986b2ca688a 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -7,6 +7,7 @@ #ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); +extern void set_vsyscall_pgtable_user_bits(pgd_t *root); /* * Called on instruction fetch fault in vsyscall page. 
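
The unwind.h hunk above changes unwind_get_entry_regs() to take a second 'partial' output parameter, which is true when only the iret frame portion of the returned pt_regs is trustworthy (the ORC unwinder can land on a frame where the general purpose registers were never saved). A minimal caller sketch follows, assuming the usual unwind_start()/unwind_next_frame() loop; the helper name dump_entry_frames() is invented for illustration and is not part of this patch.

#include <linux/printk.h>
#include <asm/unwind.h>

static void dump_entry_frames(struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		struct pt_regs *entry_regs;
		bool partial;

		entry_regs = unwind_get_entry_regs(&state, &partial);
		if (!entry_regs)
			continue;

		/*
		 * When partial is true only the iret frame members of
		 * *entry_regs (ip, cs, flags, sp, ss) are valid, so do not
		 * touch the general purpose registers in that case.
		 */
		printk(KERN_INFO "entry regs at %pS%s\n",
		       (void *)entry_regs->ip,
		       partial ? " (partial iret frame)" : "");
	}
}

The out-parameter design lets existing callers pass NULL and keep their old behaviour, while stack dumpers that want to print register state can skip the unsaved registers instead of printing stale values.
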
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 8a1ebf9540dd..ad15a0fda917 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -114,6 +114,18 @@ struct x86_init_pci { void (*fixup_irqs)(void); }; +/** + * struct x86_hyper_init - x86 hypervisor init functions + * @init_platform: platform setup + * @x2apic_available: X2APIC detection + * @init_mem_mapping: setup early mappings during init_mem_mapping() + */ +struct x86_hyper_init { + void (*init_platform)(void); + bool (*x2apic_available)(void); + void (*init_mem_mapping)(void); +}; + /** * struct x86_init_ops - functions for platform specific setup * @@ -127,6 +139,7 @@ struct x86_init_ops { struct x86_init_timers timers; struct x86_init_iommu iommu; struct x86_init_pci pci; + struct x86_hyper_init hyper; }; /** @@ -199,6 +212,15 @@ struct x86_legacy_features { struct x86_legacy_devices devices; }; +/** + * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks + * + * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely) + */ +struct x86_hyper_runtime { + void (*pin_vcpu)(int cpu); +}; + /** * struct x86_platform_ops - platform specific runtime functions * @calibrate_cpu: calibrate CPU @@ -218,6 +240,7 @@ struct x86_legacy_features { * possible in x86_early_init_platform_quirks() by * only using the current x86_hardware_subarch * semantics. + * @hyper: x86 hypervisor specific runtime callbacks */ struct x86_platform_ops { unsigned long (*calibrate_cpu)(void); @@ -233,6 +256,7 @@ struct x86_platform_ops { void (*apic_post_init)(void); struct x86_legacy_features legacy; void (*set_legacy_features)(void); + struct x86_hyper_runtime hyper; }; struct pci_dev; diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 7cb282e9e587..bfd882617613 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -217,9 +218,9 @@ privcmd_call(unsigned call, __HYPERCALL_5ARG(a1, a2, a3, a4, a5); stac(); - asm volatile("call *%[call]" + asm volatile(CALL_NOSPEC : __HYPERCALL_5PARAM - : [call] "a" (&hypercall_page[call]) + : [thunk_target] "a" (&hypercall_page[call]) : __HYPERCALL_CLOBBER5); clac(); diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 1f5c5161ead6..45c8605467f1 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h @@ -1,7 +1,4 @@ -#ifdef CONFIG_KMEMCHECK -/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ -# include -#elif !defined(_ASM_X86_XOR_H) +#ifndef _ASM_X86_XOR_H #define _ASM_X86_XOR_H /* diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 554aa8f24f91..341db0462b85 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -25,6 +25,7 @@ #define KVM_FEATURE_STEAL_TIME 5 #define KVM_FEATURE_PV_EOI 6 #define KVM_FEATURE_PV_UNHALT 7 +#define KVM_FEATURE_ASYNC_PF_VMEXIT 10 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. 
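
The x86_init.h hunk above adds per-hypervisor init hooks (struct x86_hyper_init, reachable as x86_init.hyper) and a runtime hook (struct x86_hyper_runtime, reachable as x86_platform.hyper). A hedged sketch of how a guest-support module might fill them in is shown below; every myhv_* name is hypothetical, and only the structure members come from the patch.

#include <asm/x86_init.h>

static void myhv_init_platform(void)
{
	/* detect hypervisor features, register a PV clocksource, etc. */
}

static bool myhv_x2apic_available(void)
{
	/* report whether the hypervisor provides x2APIC emulation */
	return true;
}

static void myhv_init_mem_mapping(void)
{
	/* set up early mappings needed while init_mem_mapping() runs */
}

static void myhv_pin_vcpu(int cpu)
{
	/* ask the hypervisor to pin the current vCPU to physical CPU 'cpu' */
}

static void __init myhv_init(void)
{
	/* init-time hooks, consulted during early platform bring-up */
	x86_init.hyper.init_platform    = myhv_init_platform;
	x86_init.hyper.x2apic_available = myhv_x2apic_available;
	x86_init.hyper.init_mem_mapping = myhv_init_mem_mapping;

	/* runtime hook, called rarely (e.g. around MSR-based calibration) */
	x86_platform.hyper.pin_vcpu     = myhv_pin_vcpu;
}

Splitting the callbacks this way keeps one-shot setup work in x86_init (which can be discarded after boot) while the rarely used pin_vcpu callback lives in x86_platform, matching the apic.c change above that now consults x86_init.hyper.x2apic_available() instead of a standalone helper.
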
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 91723461dc1f..435db58a7bad 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -30,6 +30,7 @@ struct mce { __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ __u64 ppin; /* Protected Processor Inventory Number */ + __u32 microcode;/* Microcode revision */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 6f3355399665..bcba3c643e63 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -78,7 +78,12 @@ #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) -#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ + +#define X86_CR3_PCID_BITS 12 +#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL)) + +#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */ +#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT) /* * Intel CPU features in CR4 @@ -105,6 +110,8 @@ #define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT) #define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */ #define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT) +#define X86_CR4_UMIP_BIT 11 /* enable UMIP support */ +#define X86_CR4_UMIP _BITUL(X86_CR4_UMIP_BIT) #define X86_CR4_LA57_BIT 12 /* enable 5-level page tables */ #define X86_CR4_LA57 _BITUL(X86_CR4_LA57_BIT) #define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */ @@ -152,5 +159,8 @@ #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc +#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ + X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ + X86_CR0_PG) #endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5f70044340ff..81bb565f4497 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -25,9 +25,9 @@ endif KASAN_SANITIZE_head$(BITS).o := n KASAN_SANITIZE_dumpstack.o := n KASAN_SANITIZE_dumpstack_$(BITS).o := n -KASAN_SANITIZE_stacktrace.o := n +KASAN_SANITIZE_stacktrace.o := n +KASAN_SANITIZE_paravirt.o := n -OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y OBJECT_FILES_NON_STANDARD_test_nx.o := y @@ -127,10 +127,11 @@ obj-$(CONFIG_EFI) += sysfb_efi.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o +obj-$(CONFIG_X86_INTEL_UMIP) += umip.o -obj-$(CONFIG_ORC_UNWINDER) += unwind_orc.o -obj-$(CONFIG_FRAME_POINTER_UNWINDER) += unwind_frame.o -obj-$(CONFIG_GUESS_UNWINDER) += unwind_guess.o +obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o +obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o +obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o ### # 64 bit specific files diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index ea3046e0b0cf..28d70ac93faf 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -55,5 +55,5 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) void arch_apei_flush_tlb_one(unsigned long addr) { - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 079535e53e2a..9c2a002d9297 100644 --- 
a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -342,13 +342,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 +static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, + u8 trigger, u32 gsi); + static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { - int ioapic; - int pin; - struct mpc_intsrc mp_irq; - /* * Check bus_irq boundary. */ @@ -357,14 +356,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, return; } - /* - * Convert 'gsi' to 'ioapic.pin'. - */ - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) - return; - pin = mp_find_ioapic_pin(ioapic, gsi); - /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE @@ -373,16 +364,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, if ((bus_irq == 0) && (trigger == 3)) trigger = 1; - mp_irq.type = MP_INTSRC; - mp_irq.irqtype = mp_INT; - mp_irq.irqflag = (trigger << 2) | polarity; - mp_irq.srcbus = MP_ISA_BUS; - mp_irq.srcbusirq = bus_irq; /* IRQ */ - mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ - mp_irq.dstirq = pin; /* INTIN# */ - - mp_save_irq(&mp_irq); - + if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0) + return; /* * Reset default identity mapping if gsi is also an legacy IRQ, * otherwise there will be more than one entry with the same GSI @@ -429,6 +412,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, return 0; } +static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, + u8 trigger, u32 gsi) +{ + struct mpc_intsrc mp_irq; + int ioapic, pin; + + /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */ + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) { + pr_warn("Failed to find ioapic for gsi : %u\n", gsi); + return ioapic; + } + + pin = mp_find_ioapic_pin(ioapic, gsi); + + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (trigger << 2) | polarity; + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.srcbusirq = bus_irq; + mp_irq.dstapic = mpc_ioapic_id(ioapic); + mp_irq.dstirq = pin; + + mp_save_irq(&mp_irq); + + return 0; +} + static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { @@ -473,7 +484,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; - mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + if (bus_irq < NR_IRQS_LEGACY) + mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + else + mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi); + acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 3344d3382e91..b034826a0b3b 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -46,17 +46,6 @@ static int __init setup_noreplace_smp(char *str) } __setup("noreplace-smp", setup_noreplace_smp); -#ifdef CONFIG_PARAVIRT -static int __initdata_or_module noreplace_paravirt = 0; - -static int __init setup_noreplace_paravirt(char *str) -{ - noreplace_paravirt = 1; - return 1; -} -__setup("noreplace-paravirt", setup_noreplace_paravirt); -#endif - #define DPRINTK(fmt, args...) 
\ do { \ if (debug_alternative) \ @@ -344,9 +333,12 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) { unsigned long flags; + int i; - if (instr[0] != 0x90) - return; + for (i = 0; i < a->padlen; i++) { + if (instr[i] != 0x90) + return; + } local_irq_save(flags); add_nops(instr + (a->instrlen - a->padlen), a->padlen); @@ -596,9 +588,6 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *p; char insnbuf[MAX_PATCH_LEN]; - if (noreplace_paravirt) - return; - for (p = start; p < end; p++) { unsigned int used; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6db28f17ff28..c88e0b127810 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -235,7 +235,7 @@ int amd_cache_northbridges(void) if (boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model >= 0x8 && (boot_cpu_data.x86_model > 0x9 || - boot_cpu_data.x86_mask >= 0x1)) + boot_cpu_data.x86_stepping >= 0x1)) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; if (boot_cpu_data.x86 == 0x15) diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index f5d92bc3b884..2c4d5ece7456 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -30,6 +30,7 @@ #include #include #include +#include /* * Using 512M as goal, in case kexec will load kernel_big @@ -56,6 +57,33 @@ int fallback_aper_force __initdata; int fix_aperture __initdata = 1; +#ifdef CONFIG_PROC_VMCORE +/* + * If the first kernel maps the aperture over e820 RAM, the kdump kernel will + * use the same range because it will remain configured in the northbridge. + * Trying to dump this area via /proc/vmcore may crash the machine, so exclude + * it from vmcore. + */ +static unsigned long aperture_pfn_start, aperture_page_count; + +static int gart_oldmem_pfn_is_ram(unsigned long pfn) +{ + return likely((pfn < aperture_pfn_start) || + (pfn >= aperture_pfn_start + aperture_page_count)); +} + +static void exclude_from_vmcore(u64 aper_base, u32 aper_order) +{ + aperture_pfn_start = aper_base >> PAGE_SHIFT; + aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; + WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); +} +#else +static void exclude_from_vmcore(u64 aper_base, u32 aper_order) +{ +} +#endif + /* This code runs before the PCI subsystem is initialized, so just access the northbridge directly. */ @@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void) out: if (!fix && !fallback_aper_force) { - if (last_aper_base) + if (last_aper_base) { + /* + * If this is the kdump kernel, the first kernel + * may have allocated the range over its e820 RAM + * and fixed up the northbridge + */ + exclude_from_vmcore(last_aper_base, last_aper_order); + return 1; + } return 0; } @@ -473,6 +509,14 @@ int __init gart_iommu_hole_init(void) return 0; } + /* + * If this is the kdump kernel _and_ the first kernel did not + * configure the aperture in the northbridge, this range may + * overlap with the first kernel's memory. We can't access the + * range through vmcore even though it should be part of the dump. 
+ */ + exclude_from_vmcore(aper_alloc, aper_order); + /* Fix up the north bridges */ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { int bus, dev_base, dev_limit; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ff891772c9f8..5942aa5f569b 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -553,7 +553,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static u32 hsx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x3a; /* EP */ case 0x04: return 0x0f; /* EX */ } @@ -563,7 +563,7 @@ static u32 hsx_deadline_rev(void) static u32 bdx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x00000011; case 0x03: return 0x0700000e; case 0x04: return 0x0f00000c; @@ -575,7 +575,7 @@ static u32 bdx_deadline_rev(void) static u32 skx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x03: return 0x01000136; case 0x04: return 0x02000014; } @@ -1645,7 +1645,7 @@ static __init void try_to_enable_x2apic(int remap_mode) * under KVM */ if (max_physical_apicid > 255 || - !hypervisor_x2apic_available()) { + !x86_init.hyper.x2apic_available()) { pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); x2apic_disable(); return; diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 88c214e75a6b..2ce1c708b8ee 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -369,8 +369,11 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irq_data->hwirq = virq + i; err = assign_irq_vector_policy(virq + i, node, data, info, irq_data); - if (err) + if (err) { + irq_data->chip_data = NULL; + free_apic_chip_data(data); goto error; + } /* * If the apic destination mode is physical, then the * effective affinity is restricted to a single target @@ -383,7 +386,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, return 0; error: - x86_vector_free_irqs(domain, virq, i + 1); + x86_vector_free_irqs(domain, virq, i); return err; } diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 0d57bb9079c9..c0b694810ff4 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -920,9 +920,8 @@ static __init void uv_rtc_init(void) /* * percpu heartbeat timer */ -static void uv_heartbeat(unsigned long ignored) +static void uv_heartbeat(struct timer_list *timer) { - struct timer_list *timer = &uv_scir_info->timer; unsigned char bits = uv_scir_info->state; /* Flip heartbeat bit: */ @@ -947,7 +946,7 @@ static int uv_heartbeat_enable(unsigned int cpu) struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer; uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); - setup_pinned_timer(timer, uv_heartbeat, cpu); + timer_setup(timer, uv_heartbeat, TIMER_PINNED); timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; add_timer_on(timer, cpu); uv_cpu_scir_info(cpu)->enabled = 1; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index e4b0d92b3ae0..2a7fd56e67b3 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -2389,6 +2389,7 @@ static int __init apm_init(void) if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { + cpuidle_poll_state_init(&apm_idle_driver); if (!cpuidle_register_driver(&apm_idle_driver)) if 
(cpuidle_register_device(&apm_cpuidle_device)) cpuidle_unregister_driver(&apm_idle_driver); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 8ea78275480d..76417a9aab73 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef CONFIG_XEN #include @@ -93,4 +94,13 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + + /* TLB state for the entry code */ + OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); + + /* Layout info for cpu_entry_area */ + OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); + OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); + OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page); + DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack)); } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index dedf428b20b6..f91ba53e06c8 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -18,7 +18,7 @@ void foo(void) OFFSET(CPUINFO_x86, cpuinfo_x86, x86); OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); @@ -47,13 +47,8 @@ void foo(void) BLANK(); /* Offset from the sysenter stack to tss.sp0 */ - DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - - offsetofend(struct tss_struct, SYSENTER_stack)); - - /* Offset from cpu_tss to SYSENTER_stack */ - OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); - /* Size of SYSENTER_stack */ - DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); + DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - + offsetofend(struct cpu_entry_area, entry_stack_page.stack)); #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 630212fa9b9d..bf51e51d808d 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -23,6 +23,9 @@ int main(void) #ifdef CONFIG_PARAVIRT OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); +#ifdef CONFIG_DEBUG_ENTRY + OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl); +#endif BLANK(); #endif @@ -63,6 +66,7 @@ int main(void) OFFSET(TSS_ist, tss_struct, x86_tss.ist); OFFSET(TSS_sp0, tss_struct, x86_tss.sp0); + OFFSET(TSS_sp1, tss_struct, x86_tss.sp1); BLANK(); #ifdef CONFIG_CC_STACKPROTECTOR diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index c60922a66385..570e8bb1f386 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -22,7 +22,8 @@ obj-y += common.o obj-y += rdrand.o obj-y += match.o obj-y += bugs.o -obj-$(CONFIG_CPU_FREQ) += aperfmperf.o +obj-y += aperfmperf.o +obj-y += cpuid-deps.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d58184b7cd44..e7d5a7883632 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if (c->x86_model == 6 && c->x86_mask == 1) { + if 
(c->x86_model == 6 && c->x86_stepping == 1) { const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); @@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) /* K6 with old style WHCR */ if (c->x86_model < 8 || - (c->x86_model == 8 && c->x86_mask < 8)) { + (c->x86_model == 8 && c->x86_stepping < 8)) { /* We can only write allocate on the low 508Mb */ if (mbytes > 508) mbytes = 508; @@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if ((c->x86_model == 8 && c->x86_mask > 7) || + if ((c->x86_model == 8 && c->x86_stepping > 7) || c->x86_model == 9 || c->x86_model == 13) { /* The more serious chips .. */ @@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx * As per AMD technical note 27212 0.2 */ - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { rdmsr(MSR_K7_CLK_CTL, l, h); if ((l & 0xfff00000) != 0x20000000) { pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", @@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * but they are not certified as MP capable. */ /* Athlon 660/661 is valid. */ - if ((c->x86_model == 6) && ((c->x86_mask == 0) || - (c->x86_mask == 1))) + if ((c->x86_model == 6) && ((c->x86_stepping == 0) || + (c->x86_stepping == 1))) return; /* Duron 670 is valid */ - if ((c->x86_model == 7) && (c->x86_mask == 0)) + if ((c->x86_model == 7) && (c->x86_stepping == 0)) return; /* @@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for * more. */ - if (((c->x86_model == 6) && (c->x86_mask >= 2)) || - ((c->x86_model == 7) && (c->x86_mask >= 1)) || + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || + ((c->x86_model == 7) && (c->x86_stepping >= 1)) || (c->x86_model > 7)) if (cpu_has(c, X86_FEATURE_MP)) return; @@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) /* Set MTRR capability flag if appropriate */ if (c->x86 == 5) if (c->x86_model == 13 || c->x86_model == 9 || - (c->x86_model == 8 && c->x86_mask >= 8)) + (c->x86_model == 8 && c->x86_stepping >= 8)) set_cpu_cap(c, X86_FEATURE_K6_MTRR); #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) @@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c) * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects * all up to and including B1. */ - if (c->x86_model <= 1 && c->x86_mask <= 1) + if (c->x86_model <= 1 && c->x86_stepping <= 1) set_cpu_cap(c, X86_FEATURE_CPB); } @@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x17: init_amd_zn(c); break; } - /* Enable workaround for FXSAVE leak */ - if (c->x86 >= 6) + /* + * Enable workaround for FXSAVE leak on CPUs + * without a XSaveErPtr feature + */ + if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR))) set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); cpu_detect_cache_sizes(c); @@ -826,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_K8); if (cpu_has(c, X86_FEATURE_XMM2)) { - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + unsigned long long val; + int ret; + + /* + * A serializing LFENCE has less overhead than MFENCE, so + * use it for execution serialization. On families which + * don't have that MSR, LFENCE is already serializing. + * msr_set_bit() uses the safe accessors, too, even if the MSR + * is not present. 
+ */ + msr_set_bit(MSR_F10H_DECFG, + MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + + /* + * Verify that the MSR write was successful (could be running + * under a hypervisor) and only then assume that LFENCE is + * serializing. + */ + ret = rdmsrl_safe(MSR_F10H_DECFG, &val); + if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); + } else { + /* MFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + } } /* @@ -853,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) /* AMD errata T13 (order #21922) */ if ((c->x86 == 6)) { /* Duron Rev A0 */ - if (c->x86_model == 3 && c->x86_mask == 0) + if (c->x86_model == 3 && c->x86_stepping == 0) size = 64; /* Tbird rev A1/A2 */ if (c->x86_model == 4 && - (c->x86_mask == 0 || c->x86_mask == 1)) + (c->x86_stepping == 0 || c->x86_stepping == 1)) size = 256; } return size; @@ -994,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) } /* OSVW unavailable or ID unknown, match family-model-stepping range */ - ms = (cpu->x86_model << 4) | cpu->x86_mask; + ms = (cpu->x86_model << 4) | cpu->x86_stepping; while ((range = *erratum++)) if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && (ms >= AMD_MODEL_RANGE_START(range)) && diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 0ee83321a313..7eba34df54c3 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -14,6 +14,8 @@ #include #include +#include "cpu.h" + struct aperfmperf_sample { unsigned int khz; ktime_t time; @@ -24,7 +26,7 @@ struct aperfmperf_sample { static DEFINE_PER_CPU(struct aperfmperf_sample, samples); #define APERFMPERF_CACHE_THRESHOLD_MS 10 -#define APERFMPERF_REFRESH_DELAY_MS 20 +#define APERFMPERF_REFRESH_DELAY_MS 10 #define APERFMPERF_STALE_THRESHOLD_MS 1000 /* @@ -38,14 +40,8 @@ static void aperfmperf_snapshot_khz(void *dummy) u64 aperf, aperf_delta; u64 mperf, mperf_delta; struct aperfmperf_sample *s = this_cpu_ptr(&samples); - ktime_t now = ktime_get(); - s64 time_delta = ktime_ms_delta(now, s->time); unsigned long flags; - /* Don't bother re-computing within the cache threshold time. */ - if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) - return; - local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); @@ -61,31 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy) if (mperf_delta == 0) return; - s->time = now; + s->time = ktime_get(); s->aperf = aperf; s->mperf = mperf; + s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); +} - /* If the previous iteration was too long ago, discard it. */ - if (time_delta > APERFMPERF_STALE_THRESHOLD_MS) - s->khz = 0; - else - s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); +static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait) +{ + s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu)); + + /* Don't bother re-computing within the cache threshold time. */ + if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) + return true; + + smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait); + + /* Return false if the previous iteration was too long ago. 
*/ + return time_delta <= APERFMPERF_STALE_THRESHOLD_MS; } -unsigned int arch_freq_get_on_cpu(int cpu) +unsigned int aperfmperf_get_khz(int cpu) { - unsigned int khz; + if (!cpu_khz) + return 0; + + if (!static_cpu_has(X86_FEATURE_APERFMPERF)) + return 0; + aperfmperf_snapshot_cpu(cpu, ktime_get(), true); + return per_cpu(samples.khz, cpu); +} + +void arch_freq_prepare_all(void) +{ + ktime_t now = ktime_get(); + bool wait = false; + int cpu; + + if (!cpu_khz) + return; + + if (!static_cpu_has(X86_FEATURE_APERFMPERF)) + return; + + for_each_online_cpu(cpu) + if (!aperfmperf_snapshot_cpu(cpu, now, false)) + wait = true; + + if (wait) + msleep(APERFMPERF_REFRESH_DELAY_MS); +} + +unsigned int arch_freq_get_on_cpu(int cpu) +{ if (!cpu_khz) return 0; if (!static_cpu_has(X86_FEATURE_APERFMPERF)) return 0; - smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); - khz = per_cpu(samples.khz, cpu); - if (khz) - return khz; + if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true)) + return per_cpu(samples.khz, cpu); msleep(APERFMPERF_REFRESH_DELAY_MS); smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ba0b2424c9b0..bfca937bdcc3 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -10,6 +10,11 @@ */ #include #include +#include +#include + +#include +#include #include #include #include @@ -19,6 +24,9 @@ #include #include #include +#include + +static void __init spectre_v2_select_mitigation(void); void __init check_bugs(void) { @@ -29,6 +37,9 @@ void __init check_bugs(void) print_cpu_info(&boot_cpu_data); } + /* Select the proper spectre mitigation before patching alternatives */ + spectre_v2_select_mitigation(); + #ifdef CONFIG_X86_32 /* * Check whether we are able to run this kernel safely on SMP. @@ -60,3 +71,273 @@ void __init check_bugs(void) set_memory_4k((unsigned long)__va(0), 1); #endif } + +/* The kernel command line selection */ +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE, + SPECTRE_V2_CMD_AUTO, + SPECTRE_V2_CMD_FORCE, + SPECTRE_V2_CMD_RETPOLINE, + SPECTRE_V2_CMD_RETPOLINE_GENERIC, + SPECTRE_V2_CMD_RETPOLINE_AMD, +}; + +static const char *spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", + [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", + [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", + [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", +}; + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt + +static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; + +#ifdef RETPOLINE +static bool spectre_v2_bad_module; + +bool retpoline_module_ok(bool has_retpoline) +{ + if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) + return true; + + pr_err("System may be vulnerable to spectre v2\n"); + spectre_v2_bad_module = true; + return false; +} + +static inline const char *spectre_v2_module_string(void) +{ + return spectre_v2_bad_module ? 
" - vulnerable module loaded" : ""; +} +#else +static inline const char *spectre_v2_module_string(void) { return ""; } +#endif + +static void __init spec2_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + pr_info("%s selected on command line.\n", reason); +} + +static void __init spec2_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + pr_info("%s selected on command line.\n", reason); +} + +static inline bool retp_compiler(void) +{ + return __is_defined(RETPOLINE); +} + +static inline bool match_option(const char *arg, int arglen, const char *opt) +{ + int len = strlen(opt); + + return len == arglen && !strncmp(arg, opt, len); +} + +static const struct { + const char *option; + enum spectre_v2_mitigation_cmd cmd; + bool secure; +} mitigation_options[] = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, + { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "auto", SPECTRE_V2_CMD_AUTO, false }, +}; + +static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) +{ + char arg[20]; + int ret, i; + enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; + + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + return SPECTRE_V2_CMD_NONE; + else { + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { + if (!match_option(arg, ret, mitigation_options[i].option)) + continue; + cmd = mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", arg); + return SPECTRE_V2_CMD_AUTO; + } + } + + if ((cmd == SPECTRE_V2_CMD_RETPOLINE || + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && + !IS_ENABLED(CONFIG_RETPOLINE)) { + pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); + return SPECTRE_V2_CMD_AUTO; + } + + if (mitigation_options[i].secure) + spec2_print_if_secure(mitigation_options[i].option); + else + spec2_print_if_insecure(mitigation_options[i].option); + + return cmd; +} + +/* Check for Skylake-like CPUs (for RSB handling) */ +static bool __init is_skylake_era(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6) { + switch (boot_cpu_data.x86_model) { + case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_DESKTOP: + return true; + } + } + return false; +} + +static void __init spectre_v2_select_mitigation(void) +{ + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); + enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; + + /* + * If the CPU is not affected and the command line mode is NONE or AUTO + * then nothing to do. 
+ */ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && + (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) + return; + + switch (cmd) { + case SPECTRE_V2_CMD_NONE: + return; + + case SPECTRE_V2_CMD_FORCE: + case SPECTRE_V2_CMD_AUTO: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_auto; + break; + case SPECTRE_V2_CMD_RETPOLINE_AMD: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_amd; + break; + case SPECTRE_V2_CMD_RETPOLINE_GENERIC: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_generic; + break; + case SPECTRE_V2_CMD_RETPOLINE: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_auto; + break; + } + pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); + return; + +retpoline_auto: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + retpoline_amd: + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { + pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); + goto retpoline_generic; + } + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : + SPECTRE_V2_RETPOLINE_MINIMAL_AMD; + setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + } else { + retpoline_generic: + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : + SPECTRE_V2_RETPOLINE_MINIMAL; + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + } + + spectre_v2_enabled = mode; + pr_info("%s\n", spectre_v2_strings[mode]); + + /* + * If neither SMEP nor PTI are available, there is a risk of + * hitting userspace addresses in the RSB after a context switch + * from a shallow call stack to a deeper one. To prevent this fill + * the entire RSB, even when using IBRS. + * + * Skylake era CPUs have a separate issue with *underflow* of the + * RSB, when they will predict 'ret' targets from the generic BTB. + * The proper mitigation for this is IBRS. If IBRS is not supported + * or deactivated in favour of retpolines the RSB fill on context + * switch is required. + */ + if ((!boot_cpu_has(X86_FEATURE_PTI) && + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); + pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); + } + + /* Initialize Indirect Branch Prediction Barrier if supported */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); + } + + /* + * Retpoline means the kernel is safe because it has no indirect + * branches. But firmware isn't, so use IBRS to protect that. 
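The RSB comment above reduces to a small predicate. As an illustration only, here is a standalone version with the feature tests passed in as plain booleans rather than cpufeature bits; need_rsb_ctxsw() is a made-up helper name, the kernel simply forces X86_FEATURE_RSB_CTXSW inline:

#include <stdbool.h>
#include <stdio.h>

/*
 * Fill the RSB on context switch when neither PTI nor SMEP can keep stale
 * user-space return targets out of reach, or when running on a Skylake-era
 * part that falls back to the BTB on RSB underflow.
 */
static bool need_rsb_ctxsw(bool has_pti, bool has_smep, bool skylake_era)
{
        return (!has_pti && !has_smep) || skylake_era;
}

int main(void)
{
        printf("no PTI, no SMEP    -> %d\n", need_rsb_ctxsw(false, false, false));
        printf("PTI only           -> %d\n", need_rsb_ctxsw(true, false, false));
        printf("Skylake, PTI+SMEP  -> %d\n", need_rsb_ctxsw(true, true, true));
        return 0;
}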
+ */ + if (boot_cpu_has(X86_FEATURE_IBRS)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); + pr_info("Enabling Restricted Speculation for firmware calls\n"); + } +} + +#undef pr_fmt + +#ifdef CONFIG_SYSFS +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + return sprintf(buf, "Not affected\n"); + if (boot_cpu_has(X86_FEATURE_PTI)) + return sprintf(buf, "Mitigation: PTI\n"); + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) + return sprintf(buf, "Not affected\n"); + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + spectre_v2_module_string()); +} +#endif diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 68bc6d9b3132..595be776727d 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, X86_FEATURE_TSC); break; case 8: - switch (c->x86_mask) { + switch (c->x86_stepping) { default: name = "2"; break; @@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) * - Note, it seems this may only be in engineering samples. */ if ((c->x86 == 6) && (c->x86_model == 9) && - (c->x86_mask == 1) && (size == 65)) + (c->x86_stepping == 1) && (size == 65)) size -= 1; return size; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c9176bae7fd8..746a60e160fb 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -47,6 +47,8 @@ #include #include #include +#include +#include #ifdef CONFIG_X86_LOCAL_APIC #include @@ -329,6 +331,28 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) } } +static __always_inline void setup_umip(struct cpuinfo_x86 *c) +{ + /* Check the boot processor, plus build option for UMIP. */ + if (!cpu_feature_enabled(X86_FEATURE_UMIP)) + goto out; + + /* Check the current processor's cpuid bits. */ + if (!cpu_has(c, X86_FEATURE_UMIP)) + goto out; + + cr4_set_bits(X86_CR4_UMIP); + + return; + +out: + /* + * Make sure UMIP is disabled in case it was enabled in a + * previous boot (e.g., via kexec). + */ + cr4_clear_bits(X86_CR4_UMIP); +} + /* * Protection Keys are not available in 32-bit mode. */ @@ -452,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c) return NULL; /* Not found */ } -__u32 cpu_caps_cleared[NCAPINTS]; -__u32 cpu_caps_set[NCAPINTS]; +__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; +__u32 cpu_caps_set[NCAPINTS + NBUGINTS]; void load_percpu_segment(int cpu) { @@ -466,28 +490,23 @@ void load_percpu_segment(int cpu) load_stack_canary_segment(); } -/* Setup the fixmap mapping only once per-processor */ -static inline void setup_fixmap_gdt(int cpu) -{ -#ifdef CONFIG_X86_64 - /* On 64-bit systems, we use a read-only fixmap GDT. 
*/ - pgprot_t prot = PAGE_KERNEL_RO; -#else - /* - * On native 32-bit systems, the GDT cannot be read-only because - * our double fault handler uses a task gate, and entering through - * a task gate needs to change an available TSS to busy. If the GDT - * is read-only, that will triple fault. - * - * On Xen PV, the GDT must be read-only because the hypervisor requires - * it. - */ - pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ? - PAGE_KERNEL_RO : PAGE_KERNEL; +#ifdef CONFIG_X86_32 +/* The 32-bit entry code needs to find cpu_entry_area. */ +DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); #endif - __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot); -} +#ifdef CONFIG_X86_64 +/* + * Special IST stacks which the CPU switches to when it calls + * an IST-marked descriptor entry. Up to 7 stacks (hardware + * limit), all of them are 4K, except the debug stack which + * is 8K. + */ +static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, + [DEBUG_STACK - 1] = DEBUG_STKSZ +}; +#endif /* Load the original GDT from the per-cpu structure */ void load_direct_gdt(int cpu) @@ -710,7 +729,7 @@ void cpu_detect(struct cpuinfo_x86 *c) cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = x86_family(tfms); c->x86_model = x86_model(tfms); - c->x86_mask = x86_stepping(tfms); + c->x86_stepping = x86_stepping(tfms); if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; @@ -723,12 +742,32 @@ static void apply_forced_caps(struct cpuinfo_x86 *c) { int i; - for (i = 0; i < NCAPINTS; i++) { + for (i = 0; i < NCAPINTS + NBUGINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; } } +static void init_speculation_control(struct cpuinfo_x86 *c) +{ + /* + * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, + * and they also have a different bit for STIBP support. Also, + * a hypervisor might have set the individual AMD bits even on + * Intel CPUs, for finer-grained selection of what's available. + * + * We use the AMD bits in 0x8000_0008 EBX as the generic hardware + * features, which are visible in /proc/cpuinfo and used by the + * kernel. So set those accordingly from the Intel bits. + */ + if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { + set_cpu_cap(c, X86_FEATURE_IBRS); + set_cpu_cap(c, X86_FEATURE_IBPB); + } + if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) + set_cpu_cap(c, X86_FEATURE_STIBP); +} + void get_cpu_cap(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; @@ -750,6 +789,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_7_0_EBX] = ebx; c->x86_capability[CPUID_7_ECX] = ecx; + c->x86_capability[CPUID_7_EDX] = edx; } /* Extended state features: level 0x0000000d */ @@ -822,6 +862,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); init_scattered_cpuid_features(c); + init_speculation_control(c); /* * Clear/Set all flags overridden by options, after probe. 
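init_speculation_control() above folds the Intel CPUID bits into the AMD-style flags that the rest of the kernel tests. A rough userspace sketch of that derivation, with one bitmask standing in for x86_capability[]; the FEAT_* bit positions are invented for the example:

#include <stdio.h>

/* Illustrative bit positions only; the real bits live in x86_capability[]. */
#define FEAT_SPEC_CTRL   (1u << 0)      /* Intel bit implying IBRS and IBPB */
#define FEAT_INTEL_STIBP (1u << 1)
#define FEAT_IBRS        (1u << 2)      /* generic flags used by the kernel */
#define FEAT_IBPB        (1u << 3)
#define FEAT_STIBP       (1u << 4)

static unsigned int derive_speculation_control(unsigned int caps)
{
        if (caps & FEAT_SPEC_CTRL)
                caps |= FEAT_IBRS | FEAT_IBPB;
        if (caps & FEAT_INTEL_STIBP)
                caps |= FEAT_STIBP;
        return caps;
}

int main(void)
{
        /* e.g. an Intel part advertising only the combined SPEC_CTRL bit */
        unsigned int caps = derive_speculation_control(FEAT_SPEC_CTRL);

        printf("IBRS=%d IBPB=%d STIBP=%d\n",
               !!(caps & FEAT_IBRS), !!(caps & FEAT_IBPB), !!(caps & FEAT_STIBP));
        return 0;
}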
@@ -857,6 +898,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #endif } +static const __initconst struct x86_cpu_id cpu_no_speculation[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_CENTAUR, 5 }, + { X86_VENDOR_INTEL, 5 }, + { X86_VENDOR_NSC, 5 }, + { X86_VENDOR_ANY, 4 }, + {} +}; + +static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { + { X86_VENDOR_AMD }, + {} +}; + +static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c) +{ + u64 ia32_cap = 0; + + if (x86_match_cpu(cpu_no_meltdown)) + return false; + + if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + + /* Rogue Data Cache Load? No! */ + if (ia32_cap & ARCH_CAP_RDCL_NO) + return false; + + return true; +} + /* * Do minimum CPU detection early. * Fields really needed: vendor, cpuid_level, family, model, mask, @@ -903,6 +979,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) } setup_force_cpu_cap(X86_FEATURE_ALWAYS); + + if (!x86_match_cpu(cpu_no_speculation)) { + if (cpu_vulnerable_to_meltdown(c)) + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + } + fpu__init_system(c); #ifdef CONFIG_X86_32 @@ -1098,9 +1182,9 @@ static void identify_cpu(struct cpuinfo_x86 *c) int i; c->loops_per_jiffy = loops_per_jiffy; - c->x86_cache_size = -1; + c->x86_cache_size = 0; c->x86_vendor = X86_VENDOR_UNKNOWN; - c->x86_model = c->x86_mask = 0; /* So far unknown... */ + c->x86_model = c->x86_stepping = 0; /* So far unknown... */ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; @@ -1147,9 +1231,10 @@ static void identify_cpu(struct cpuinfo_x86 *c) /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); - /* Set up SMEP/SMAP */ + /* Set up SMEP/SMAP/UMIP */ setup_smep(c); setup_smap(c); + setup_umip(c); /* * The vendor-specific functions might have changed features. @@ -1225,7 +1310,7 @@ void enable_sep_cpu(void) return; cpu = get_cpu(); - tss = &per_cpu(cpu_tss, cpu); + tss = &per_cpu(cpu_tss_rw, cpu); /* * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- @@ -1234,11 +1319,7 @@ void enable_sep_cpu(void) tss->x86_tss.ss1 = __KERNEL_CS; wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); - - wrmsr(MSR_IA32_SYSENTER_ESP, - (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack), - 0); - + wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); put_cpu(); @@ -1295,24 +1376,22 @@ void print_cpu_info(struct cpuinfo_x86 *c) pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); - if (c->x86_mask || c->cpuid_level >= 0) - pr_cont(", stepping: 0x%x)\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + pr_cont(", stepping: 0x%x)\n", c->x86_stepping); else pr_cont(")\n"); } -static __init int setup_disablecpuid(char *arg) +/* + * clearcpuid= was already parsed in fpu__init_parse_early_param. + * But we need to keep a dummy __setup around otherwise it would + * show up as an environment variable for init. 
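cpu_vulnerable_to_meltdown() above exempts AMD parts and any CPU whose IA32_ARCH_CAPABILITIES value advertises RDCL_NO. A standalone sketch of the same decision; the MSR content is passed in as a plain integer, and the bit-0 position assumed here for ARCH_CAP_RDCL_NO is not spelled out in this hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_RDCL_NO (1ULL << 0)    /* assumed: "Rogue Data Cache Load" impossible */

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

static bool vulnerable_to_meltdown(enum vendor v, bool has_arch_caps, uint64_t ia32_cap)
{
        if (v == VENDOR_AMD)                    /* whitelisted via cpu_no_meltdown[] */
                return false;
        if (has_arch_caps && (ia32_cap & ARCH_CAP_RDCL_NO))
                return false;                   /* hardware says RDCL cannot happen */
        return true;
}

int main(void)
{
        printf("AMD             -> %d\n", vulnerable_to_meltdown(VENDOR_AMD, false, 0));
        printf("Intel + RDCL_NO -> %d\n", vulnerable_to_meltdown(VENDOR_INTEL, true, ARCH_CAP_RDCL_NO));
        printf("Intel, legacy   -> %d\n", vulnerable_to_meltdown(VENDOR_INTEL, false, 0));
        return 0;
}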
+ */ +static __init int setup_clearcpuid(char *arg) { - int bit; - - if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) - setup_clear_cpu_cap(bit); - else - return 0; - return 1; } -__setup("clearcpuid=", setup_disablecpuid); +__setup("clearcpuid=", setup_clearcpuid); #ifdef CONFIG_X86_64 DEFINE_PER_CPU_FIRST(union irq_stack_union, @@ -1334,25 +1413,22 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); -/* - * Special IST stacks which the CPU switches to when it calls - * an IST-marked descriptor entry. Up to 7 stacks (hardware - * limit), all of them are 4K, except the debug stack which - * is 8K. - */ -static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { - [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, - [DEBUG_STACK - 1] = DEBUG_STKSZ -}; - -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks - [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); - /* May not be marked __init: used by software suspend */ void syscall_init(void) { + extern char _entry_trampoline[]; + extern char entry_SYSCALL_64_trampoline[]; + + int cpu = smp_processor_id(); + unsigned long SYSCALL64_entry_trampoline = + (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline + + (entry_SYSCALL_64_trampoline - _entry_trampoline); + wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); - wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); + if (static_cpu_has(X86_FEATURE_PTI)) + wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); + else + wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); @@ -1363,7 +1439,7 @@ void syscall_init(void) * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). */ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); + wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); @@ -1507,7 +1583,7 @@ void cpu_init(void) if (cpu) load_ucode_ap(); - t = &per_cpu(cpu_tss, cpu); + t = &per_cpu(cpu_tss_rw, cpu); oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA @@ -1546,7 +1622,7 @@ void cpu_init(void) * set up and load the per-CPU TSS */ if (!oist->ist[0]) { - char *estacks = per_cpu(exception_stacks, cpu); + char *estacks = get_cpu_entry_area(cpu)->exception_stacks; for (v = 0; v < N_EXCEPTION_STACKS; v++) { estacks += exception_stack_sizes[v]; @@ -1557,7 +1633,7 @@ void cpu_init(void) } } - t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; /* * <= is required because the CPU will access up to @@ -1572,9 +1648,14 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); - load_sp0(t, ¤t->thread); - set_tss_desc(cpu, t); + /* + * Initialize the TSS. sp0 points to the entry trampoline stack + * regardless of what task is running. 
+ */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); + load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); + load_mm_ldt(&init_mm); clear_all_debug_regs(); @@ -1585,7 +1666,6 @@ void cpu_init(void) if (is_uv_system()) uv_cpu_init(); - setup_fixmap_gdt(cpu); load_fixmap_gdt(cpu); } @@ -1595,8 +1675,7 @@ void cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct *t = &per_cpu(cpu_tss, cpu); - struct thread_struct *thread = &curr->thread; + struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); wait_for_master_cpu(cpu); @@ -1627,12 +1706,16 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); - load_sp0(t, thread); - set_tss_desc(cpu, t); + /* + * Initialize the TSS. Don't bother initializing sp0, as the initial + * task never enters user mode. + */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); + load_mm_ldt(&init_mm); - t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; #ifdef CONFIG_DOUBLEFAULT /* Set up doublefault TSS pointer in the GDT */ @@ -1644,7 +1727,6 @@ void cpu_init(void) fpu__init_cpu(); - setup_fixmap_gdt(cpu); load_fixmap_gdt(cpu); } #endif @@ -1665,3 +1747,33 @@ static int __init init_cpu_syscore(void) return 0; } core_initcall(init_cpu_syscore); + +/* + * The microcode loader calls this upon late microcode load to recheck features, + * only when microcode has been updated. Caller holds microcode_mutex and CPU + * hotplug lock. + */ +void microcode_check(void) +{ + struct cpuinfo_x86 info; + + perf_check_microcode(); + + /* Reload CPUID max function as it might've changed. */ + info.cpuid_level = cpuid_eax(0); + + /* + * Copy all capability leafs to pick up the synthetic ones so that + * memcmp() below doesn't fail on that. The ones coming from CPUID will + * get overwritten in get_cpu_cap(). + */ + memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); + + get_cpu_cap(&info); + + if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) + return; + + pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); + pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); +} diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index f52a370b6c00..e806b11a99af 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -47,4 +47,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); + +unsigned int aperfmperf_get_khz(int cpu); + #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c new file mode 100644 index 000000000000..904b0a3c4e53 --- /dev/null +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -0,0 +1,121 @@ +/* Declare dependencies between CPUIDs */ +#include +#include +#include +#include + +struct cpuid_dep { + unsigned int feature; + unsigned int depends; +}; + +/* + * Table of CPUID features that depend on others. + * + * This only includes dependencies that can be usefully disabled, not + * features part of the base set (like FPU). + * + * Note this all is not __init / __initdata because it can be + * called from cpu hotplug. 
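microcode_check() above detects feature changes by snapshotting the capability words, re-reading them, and memcmp()ing the two copies. A minimal userspace sketch of that compare-after-reload idea, with plain arrays in place of cpuinfo_x86 and a fake reload that flips one bit:

#include <stdio.h>
#include <string.h>

#define NWORDS 4

/* Pretend re-reading CPUID after the update turned on one extra bit. */
static void reread_cpu_caps(unsigned int caps[NWORDS])
{
        caps[2] |= 1u << 5;
}

int main(void)
{
        unsigned int boot_caps[NWORDS] = { 0x1234, 0x0, 0x80, 0xff };
        unsigned int new_caps[NWORDS];

        /* Start from the boot copy so synthetic bits survive the compare. */
        memcpy(new_caps, boot_caps, sizeof(new_caps));
        reread_cpu_caps(new_caps);

        if (memcmp(new_caps, boot_caps, sizeof(new_caps)))
                printf("CPU features changed after microcode load\n");
        else
                printf("no feature change\n");
        return 0;
}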
It shouldn't do anything in this case, + * but it's difficult to tell that to the init reference checker. + */ +const static struct cpuid_dep cpuid_deps[] = { + { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, + { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, + { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, + { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, + { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, + { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM2, X86_FEATURE_XMM }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, + { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, + { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, + { X86_FEATURE_AES, X86_FEATURE_XMM2 }, + { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, + { X86_FEATURE_FMA, X86_FEATURE_AVX }, + { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, + { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + {} +}; + +static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) +{ + /* + * Note: This could use the non atomic __*_bit() variants, but the + * rest of the cpufeature code uses atomics as well, so keep it for + * consistency. Cleanup all of it separately. + */ + if (!c) { + clear_cpu_cap(&boot_cpu_data, feature); + set_bit(feature, (unsigned long *)cpu_caps_cleared); + } else { + clear_bit(feature, (unsigned long *)c->x86_capability); + } +} + +/* Take the capabilities and the BUG bits into account */ +#define MAX_FEATURE_BITS ((NCAPINTS + NBUGINTS) * sizeof(u32) * 8) + +static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) +{ + DECLARE_BITMAP(disable, MAX_FEATURE_BITS); + const struct cpuid_dep *d; + bool changed; + + if (WARN_ON(feature >= MAX_FEATURE_BITS)) + return; + + clear_feature(c, feature); + + /* Collect all features to disable, handling dependencies */ + memset(disable, 0, sizeof(disable)); + __set_bit(feature, disable); + + /* Loop until we get a stable state. 
*/ + do { + changed = false; + for (d = cpuid_deps; d->feature; d++) { + if (!test_bit(d->depends, disable)) + continue; + if (__test_and_set_bit(d->feature, disable)) + continue; + + changed = true; + clear_feature(c, d->feature); + } + } while (changed); +} + +void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) +{ + do_clear_cpu_cap(c, feature); +} + +void setup_clear_cpu_cap(unsigned int feature) +{ + do_clear_cpu_cap(NULL, feature); +} diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 6b4bb335641f..8949b7ae6d92 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) /* common case step number/rev -- exceptions handled below */ c->x86_model = (dir1 >> 4) + 1; - c->x86_mask = dir1 & 0xf; + c->x86_stepping = dir1 & 0xf; /* Now cook; the original recipe is by Channing Corn, from Cyrix. * We do the same thing for each generation: we work out diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 4fa90006ac68..cf6252f08f85 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -26,6 +26,13 @@ #include #include +extern const struct hypervisor_x86 x86_hyper_vmware; +extern const struct hypervisor_x86 x86_hyper_ms_hyperv; +extern const struct hypervisor_x86 x86_hyper_xen_pv; +extern const struct hypervisor_x86 x86_hyper_xen_hvm; +extern const struct hypervisor_x86 x86_hyper_kvm; +extern const struct hypervisor_x86 x86_hyper_acrn; + static const __initconst struct hypervisor_x86 * const hypervisors[] = { #ifdef CONFIG_XEN_PV @@ -39,56 +46,57 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #ifdef CONFIG_KVM_GUEST &x86_hyper_kvm, #endif +#ifdef CONFIG_ACRN_HV + &x86_hyper_acrn, +#endif }; -const struct hypervisor_x86 *x86_hyper; -EXPORT_SYMBOL(x86_hyper); +enum x86_hypervisor_type x86_hyper_type; +EXPORT_SYMBOL(x86_hyper_type); -static inline void __init +static inline const struct hypervisor_x86 * __init detect_hypervisor_vendor(void) { - const struct hypervisor_x86 *h, * const *p; + const struct hypervisor_x86 *h = NULL, * const *p; uint32_t pri, max_pri = 0; for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { - h = *p; - pri = h->detect(); - if (pri != 0 && pri > max_pri) { + pri = (*p)->detect(); + if (pri > max_pri) { max_pri = pri; - x86_hyper = h; + h = *p; } } - if (max_pri) - pr_info("Hypervisor detected: %s\n", x86_hyper->name); + if (h) + pr_info("Hypervisor detected: %s\n", h->name); + + return h; } -void __init init_hypervisor_platform(void) +static void __init copy_array(const void *src, void *target, unsigned int size) { + unsigned int i, n = size / sizeof(void *); + const void * const *from = (const void * const *)src; + const void **to = (const void **)target; - detect_hypervisor_vendor(); - - if (!x86_hyper) - return; - - if (x86_hyper->init_platform) - x86_hyper->init_platform(); + for (i = 0; i < n; i++) + if (from[i]) + to[i] = from[i]; } -bool __init hypervisor_x2apic_available(void) +void __init init_hypervisor_platform(void) { - return x86_hyper && - x86_hyper->x2apic_available && - x86_hyper->x2apic_available(); -} + const struct hypervisor_x86 *h; -void hypervisor_pin_vcpu(int cpu) -{ - if (!x86_hyper) + h = detect_hypervisor_vendor(); + + if (!h) return; - if (x86_hyper->pin_vcpu) - x86_hyper->pin_vcpu(cpu); - else - WARN_ONCE(1, "vcpu pinning requested but not supported!\n"); + copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); + 
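do_clear_cpu_cap() above keeps sweeping the dependency table until no new feature gets disabled. The same fixpoint loop as a tiny standalone program over an invented three-entry dependency chain:

#include <stdbool.h>
#include <stdio.h>

enum { FEAT_XSAVE, FEAT_AVX, FEAT_AVX2, FEAT_FMA, NFEAT };

static const struct { int feature, depends; } deps[] = {
        { FEAT_AVX,  FEAT_XSAVE },
        { FEAT_AVX2, FEAT_AVX   },
        { FEAT_FMA,  FEAT_AVX   },
};

int main(void)
{
        bool disable[NFEAT] = { false };
        bool changed;
        unsigned int i;

        disable[FEAT_XSAVE] = true;     /* the feature we were asked to clear */

        /* Loop until a stable state: whatever depends on a cleared bit goes too. */
        do {
                changed = false;
                for (i = 0; i < sizeof(deps) / sizeof(deps[0]); i++) {
                        if (!disable[deps[i].depends] || disable[deps[i].feature])
                                continue;
                        disable[deps[i].feature] = true;
                        changed = true;
                }
        } while (changed);

        for (i = 0; i < NFEAT; i++)
                printf("feature %u: %s\n", i, disable[i] ? "cleared" : "kept");
        return 0;
}

The loop terminates because a pass can only ever set bits, so at most one pass per feature makes progress.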
copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); + + x86_hyper_type = h->type; + x86_init.hyper.init_platform(); } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b720dacac051..c3af167d0a70 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -102,6 +102,62 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) ELF_HWCAP2 |= HWCAP2_RING3MWAIT; } +/* + * Early microcode releases for the Spectre v2 mitigation were broken. + * Information taken from; + * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf + * - https://kb.vmware.com/s/article/52345 + * - Microcode revisions observed in the wild + * - Release note from 20180108 microcode release + */ +struct sku_microcode { + u8 model; + u8 stepping; + u32 microcode; +}; +static const struct sku_microcode spectre_bad_microcodes[] = { + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, + { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, + { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, + { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, + /* Observed in the wild */ + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, +}; + +static bool bad_spectre_microcode(struct cpuinfo_x86 *c) +{ + int i; + + /* + * We know that the hypervisor lie to us on the microcode version so + * we may as well hope that it is running the correct version. + */ + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return false; + + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) + return (c->microcode <= spectre_bad_microcodes[i].microcode); + } + return false; +} + static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; @@ -122,6 +178,19 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c->microcode = intel_get_microcode_revision(); + /* Now if any of them are set, check the blacklist and clear the lot */ + if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || + cpu_has(c, X86_FEATURE_INTEL_STIBP) || + cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || + cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { + pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); + setup_clear_cpu_cap(X86_FEATURE_IBRS); + setup_clear_cpu_cap(X86_FEATURE_IBPB); + setup_clear_cpu_cap(X86_FEATURE_STIBP); + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); + setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); + } + /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * @@ -130,7 +199,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. 
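bad_spectre_microcode() above is a linear scan keyed on model and stepping, with a "running revision <= last known bad revision" compare. A standalone sketch using two entries quoted from spectre_bad_microcodes[]; the numeric model values standing in for INTEL_FAM6_SKYLAKE_X and INTEL_FAM6_BROADWELL_X are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAM6_SKYLAKE_X   0x55   /* assumed model numbers for the illustration */
#define FAM6_BROADWELL_X 0x4F

struct sku_microcode {
        uint8_t  model;
        uint8_t  stepping;
        uint32_t microcode;     /* highest known-bad revision */
};

/* Two entries quoted from the table in the patch. */
static const struct sku_microcode bad[] = {
        { FAM6_SKYLAKE_X,   0x03, 0x0100013e },
        { FAM6_BROADWELL_X, 0x01, 0x0b000025 },
};

static bool bad_spectre_microcode(uint8_t model, uint8_t stepping, uint32_t rev)
{
        unsigned int i;

        for (i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
                if (model == bad[i].model && stepping == bad[i].stepping)
                        return rev <= bad[i].microcode;
        return false;
}

int main(void)
{
        printf("SKX stepping 3, rev 0x0100013e -> %d\n",
               bad_spectre_microcode(FAM6_SKYLAKE_X, 0x03, 0x0100013e));
        printf("SKX stepping 3, rev 0x02000043 -> %d\n",
               bad_spectre_microcode(FAM6_SKYLAKE_X, 0x03, 0x02000043));
        return 0;
}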
*/ - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && c->microcode < 0x20e) { pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); @@ -146,7 +215,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 - && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) c->x86_phys_bits = 36; /* @@ -187,21 +256,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); -#ifdef CONFIG_KMEMCHECK - /* - * P4s have a "fast strings" feature which causes single- - * stepping REP instructions to only generate a #DB on - * cache-line boundaries. - * - * Ingo Molnar reported a Pentium D (model 6) and a Xeon - * (model 2) with the same problem. - */ - if (c->x86 == 15) - if (msr_clear_bit(MSR_IA32_MISC_ENABLE, - MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0) - pr_info("kmemcheck: Disabling fast string operations\n"); -#endif - /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. @@ -259,7 +313,7 @@ int ppro_with_ram_bug(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask < 8) { + boot_cpu_data.x86_stepping < 8) { pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } @@ -276,7 +330,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c) * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && - c->x86_mask >= 1 && c->x86_mask <= 4 && + c->x86_stepping >= 1 && c->x86_stepping <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs @@ -319,7 +373,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* @@ -337,7 +391,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * P4 Xeon erratum 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { pr_info("CPU: C0 stepping P4 Xeon detected.\n"); @@ -352,7 +406,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * Specification Update"). 
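The SEP quirk above packs family, model and stepping into one integer so a single compare expresses "older than family 6, model 3, stepping 3". A small demonstration of that packing:

#include <stdio.h>

/* Same packing as the (c->x86<<8 | c->x86_model<<4 | c->x86_stepping) check. */
static unsigned int fms(unsigned int family, unsigned int model, unsigned int stepping)
{
        return family << 8 | model << 4 | stepping;
}

int main(void)
{
        /* Family 6, model 1, stepping 7: below 0x633, so SEP would be cleared. */
        printf("0x%03x < 0x633 -> clear SEP: %d\n", fms(6, 1, 7), fms(6, 1, 7) < 0x633);
        /* Family 6, model 3, stepping 3: the first combination that keeps SEP. */
        printf("0x%03x < 0x633 -> clear SEP: %d\n", fms(6, 3, 3), fms(6, 3, 3) < 0x633);
        return 0;
}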
*/ if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && - (c->x86_mask < 0x6 || c->x86_mask == 0xb)) + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) set_cpu_bug(c, X86_BUG_11AP); @@ -599,7 +653,7 @@ static void init_intel(struct cpuinfo_x86 *c) case 6: if (l2 == 128) p = "Celeron (Mendocino)"; - else if (c->x86_mask == 0 || c->x86_mask == 5) + else if (c->x86_stepping == 0 || c->x86_stepping == 5) p = "Celeron-A"; break; diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index cd5fc61ba450..18dd8f22e353 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -267,6 +267,7 @@ static void rdt_get_cdp_l3_config(int type) r->num_closid = r_l3->num_closid / 2; r->cache.cbm_len = r_l3->cache.cbm_len; r->default_ctrl = r_l3->default_ctrl; + r->cache.shareable_bits = r_l3->cache.shareable_bits; r->data_width = (r->cache.cbm_len + 3) / 4; r->alloc_capable = true; /* @@ -524,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) */ if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); - kfree(d->ctrl_val); - kfree(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); list_del(&d->list); if (is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); @@ -544,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cancel_delayed_work(&d->cqm_limbo); } + kfree(d->ctrl_val); + kfree(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); kfree(d); return; } @@ -770,7 +771,7 @@ static __init void rdt_quirks(void) cache_alloc_hsw_probe(); break; case INTEL_FAM6_SKYLAKE_X: - if (boot_cpu_data.x86_mask <= 4) + if (boot_cpu_data.x86_stepping <= 4) set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); } } diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index a869d4a073c5..7be35b600299 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1081,6 +1081,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, struct dentry *dentry; int ret; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* * resctrl file system can only be mounted once. @@ -1130,12 +1131,12 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, goto out_mondata; if (rdt_alloc_capable) - static_branch_enable(&rdt_alloc_enable_key); + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); if (rdt_mon_capable) - static_branch_enable(&rdt_mon_enable_key); + static_branch_enable_cpuslocked(&rdt_mon_enable_key); if (rdt_alloc_capable || rdt_mon_capable) - static_branch_enable(&rdt_enable_key); + static_branch_enable_cpuslocked(&rdt_enable_key); if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3]; @@ -1157,6 +1158,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, cdp_disable(); out: mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return dentry; } @@ -1295,9 +1297,7 @@ static void rmdir_all_sub(void) kfree(rdtgrp); } /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - get_online_cpus(); update_closid_rmid(cpu_online_mask, &rdtgroup_default); - put_online_cpus(); kernfs_remove(kn_info); kernfs_remove(kn_mongrp); @@ -1308,6 +1308,7 @@ static void rdt_kill_sb(struct super_block *sb) { struct rdt_resource *r; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /*Put everything back to default values. 
*/ @@ -1315,11 +1316,12 @@ static void rdt_kill_sb(struct super_block *sb) reset_all_ctrls(r); cdp_disable(); rmdir_all_sub(); - static_branch_disable(&rdt_alloc_enable_key); - static_branch_disable(&rdt_mon_enable_key); - static_branch_disable(&rdt_enable_key); + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_disable_cpuslocked(&rdt_enable_key); kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } static struct file_system_type rdt_fs_type = { diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index aa0d5df9dc60..e956eb267061 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } extern struct mca_config mca_cfg; +#ifndef CONFIG_X86_64 +/* + * On 32-bit systems it would be difficult to safely unmap a poison page + * from the kernel 1:1 map because there are no non-canonical addresses that + * we can use to refer to the address without risking a speculative access. + * However, this isn't much of an issue because: + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which + * are only mapped into the kernel as needed + * 2) Few people would run a 32-bit kernel on a machine that supports + * recoverable errors because they have too much memory to boot 32-bit. + */ +static inline void mce_unmap_kpfn(unsigned long pfn) {} +#define mce_unmap_kpfn mce_unmap_kpfn +#endif + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 87cc9ab7a13c..4b8187639c2d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -245,6 +245,9 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc if (m->status & MCI_STATUS_UC) { + if (ctx == IN_KERNEL) + return MCE_PANIC_SEVERITY; + /* * On older systems where overflow_recov flag is not present, we * should simply panic if an error overflow occurs. If @@ -255,10 +258,6 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc if (mce_flags.smca) return mce_severity_amd_smca(m, ctx); - /* software can try to contain */ - if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL)) - return MCE_PANIC_SEVERITY; - /* kill current process */ return MCE_AR_SEVERITY; } else { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3b413065c613..28d27de08545 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -57,6 +57,9 @@ static DEFINE_MUTEX(mce_log_mutex); +/* sysfs synchronization */ +static DEFINE_MUTEX(mce_sysfs_mutex); + #define CREATE_TRACE_POINTS #include @@ -106,6 +109,10 @@ static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn); +#endif + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
@@ -127,6 +134,8 @@ void mce_setup(struct mce *m) if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) rdmsrl(MSR_PPIN, m->ppin); + + m->microcode = boot_cpu_data.microcode; } DEFINE_PER_CPU(struct mce, injectm); @@ -259,7 +268,7 @@ static void __print_mce(struct mce *m) */ pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, - cpu_data(m->extcpu).microcode); + m->microcode); } static void print_mce(struct mce *m) @@ -503,10 +512,8 @@ static int mce_usable_address(struct mce *m) bool mce_is_memory_error(struct mce *m) { if (m->cpuvendor == X86_VENDOR_AMD) { - /* ErrCodeExt[20:16] */ - u8 xec = (m->status >> 16) & 0x1f; + return amd_mce_is_memory_error(m); - return (xec == 0x0 || xec == 0x8); } else if (m->cpuvendor == X86_VENDOR_INTEL) { /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -582,7 +589,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - memory_failure(pfn, MCE_VECTOR, 0); + if (memory_failure(pfn, MCE_VECTOR, 0)) + mce_unmap_kpfn(pfn); } return NOTIFY_OK; @@ -1049,12 +1057,13 @@ static int do_memory_failure(struct mce *m) ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags); if (ret) pr_err("Memory error not recovered"); + else + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); return ret; } -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) - -void arch_unmap_kpfn(unsigned long pfn) +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn) { unsigned long decoy_addr; @@ -1065,7 +1074,7 @@ void arch_unmap_kpfn(unsigned long pfn) * We would like to just call: * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a - * speculative access to the posion page because we'd have + * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address @@ -1090,7 +1099,6 @@ void arch_unmap_kpfn(unsigned long pfn) if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); - } #endif @@ -1788,6 +1796,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check; +dotraplinkage void do_mce(struct pt_regs *regs, long error_code) +{ + machine_check_vector(regs, error_code); +} + /* * Called for each booted CPU to set up machine checks. 
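The comment above stops right at the trick: instead of handing set_memory_np() the real 1:1-mapping address, the kernel builds a non-canonical alias so the true virtual address never lands in a register. An illustrative userspace computation of such an alias by flipping bit 63; the direct-map base used here is only an example value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_OFFSET 0xffff888000000000ULL   /* example direct-map base */
#define PAGE_SHIFT 12

/* x86-64 canonical addresses have bits 63..47 either all set or all clear. */
static bool is_canonical(uint64_t addr)
{
        uint64_t top = addr >> 47;

        return top == 0 || top == 0x1ffff;
}

int main(void)
{
        uint64_t pfn   = 0x123456;
        uint64_t real  = EXAMPLE_PAGE_OFFSET + (pfn << PAGE_SHIFT);
        uint64_t decoy = real ^ (1ULL << 63);   /* same low bits, bit 63 flipped */

        printf("real  %#llx canonical=%d\n", (unsigned long long)real, is_canonical(real));
        printf("decoy %#llx canonical=%d\n", (unsigned long long)decoy, is_canonical(decoy));
        return 0;
}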
* Must be called with preempt off: @@ -2071,6 +2084,7 @@ static ssize_t set_ignore_ce(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ @@ -2083,6 +2097,8 @@ static ssize_t set_ignore_ce(struct device *s, on_each_cpu(mce_enable_ce, (void *)1, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2095,6 +2111,7 @@ static ssize_t set_cmci_disabled(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ @@ -2106,6 +2123,8 @@ static ssize_t set_cmci_disabled(struct device *s, on_each_cpu(mce_enable_ce, NULL, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2113,8 +2132,19 @@ static ssize_t store_int_with_restart(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { - ssize_t ret = device_store_int(s, attr, buf, size); + unsigned long old_check_interval = check_interval; + ssize_t ret = device_store_ulong(s, attr, buf, size); + + if (check_interval == old_check_interval) + return ret; + + if (check_interval < 1) + check_interval = 1; + + mutex_lock(&mce_sysfs_mutex); mce_restart(); + mutex_unlock(&mce_sysfs_mutex); + return ret; } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 486f640b02ef..259c75d7a2a0 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -82,6 +82,7 @@ static struct smca_bank_name smca_names[] = { [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, [SMCA_DE] = { "decode_unit", "Decode Unit" }, + [SMCA_RESERVED] = { "reserved", "Reserved" }, [SMCA_EX] = { "execution_unit", "Execution Unit" }, [SMCA_FP] = { "floating_point", "Floating Point Unit" }, [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, @@ -110,9 +111,26 @@ const char *smca_get_long_name(enum smca_bank_types t) } EXPORT_SYMBOL_GPL(smca_get_long_name); +static enum smca_bank_types smca_get_bank_type(unsigned int bank) +{ + struct smca_bank *b; + + if (bank >= MAX_NR_BANKS) + return N_SMCA_BANK_TYPES; + + b = &smca_banks[bank]; + if (!b->hwid) + return N_SMCA_BANK_TYPES; + + return b->hwid->bank_type; +} + static struct smca_hwid smca_hwid_mcatypes[] = { /* { bank_type, hwid_mcatype, xec_bitmap } */ + /* Reserved type */ + { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, + /* ZN Core (HWID=0xB0) MCA types */ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, @@ -416,7 +434,25 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi { u32 addr = 0, offset = 0; + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) + return addr; + + /* Get address from already initialized block. 
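store_int_with_restart() above now restarts MCE polling only when check_interval actually changed, and clamps it to at least one second. A standalone sketch of that setter pattern, with a global long standing in for the sysfs-backed value and a printf in place of mce_restart(); the 300 second default is illustrative:

#include <stdio.h>
#include <stdlib.h>

static long check_interval = 300;       /* seconds */

static void mce_restart(void)
{
        printf("restarting poll timer, interval %ld s\n", check_interval);
}

static void store_interval(const char *buf)
{
        long old = check_interval;

        check_interval = strtol(buf, NULL, 0);

        if (check_interval == old)      /* unchanged: nothing to do */
                return;

        if (check_interval < 1)         /* clamp to a sane minimum */
                check_interval = 1;

        mce_restart();
}

int main(void)
{
        store_interval("300");  /* same value, no restart      */
        store_interval("0");    /* clamped to 1, then restart  */
        store_interval("60");   /* changed, restart            */
        return 0;
}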
*/ + if (per_cpu(threshold_banks, cpu)) { + struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank]; + + if (bankp && bankp->blocks) { + struct threshold_block *blockp = &bankp->blocks[block]; + + if (blockp) + return blockp->address; + } + } + if (mce_flags.smca) { + if (smca_get_bank_type(bank) == SMCA_RESERVED) + return addr; + if (!block) { addr = MSR_AMD64_SMCA_MCx_MISC(bank); } else { @@ -738,6 +774,17 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) } EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr); +bool amd_mce_is_memory_error(struct mce *m) +{ + /* ErrCodeExt[20:16] */ + u8 xec = (m->status >> 16) & 0x1f; + + if (mce_flags.smca) + return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0; + + return m->bank == 4 && xec == 0x8; +} + static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) { struct mce m; @@ -1036,7 +1083,7 @@ static struct kobj_type threshold_ktype = { static const char *get_name(unsigned int bank, struct threshold_block *b) { - unsigned int bank_type; + enum smca_bank_types bank_type; if (!mce_flags.smca) { if (b && bank == 4) @@ -1045,11 +1092,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b) return th_names[bank]; } - if (!smca_banks[bank].hwid) + bank_type = smca_get_bank_type(bank); + if (bank_type >= N_SMCA_BANK_TYPES) return NULL; - bank_type = smca_banks[bank].hwid->bank_type; - if (b && bank_type == SMCA_UMC) { if (b->block < ARRAY_SIZE(smca_umc_block_names)) return smca_umc_block_names[b->block]; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c6daec4bdba5..48179928ff38 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) return -EINVAL; ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); - if (ret != UCODE_OK) + if (ret > UCODE_UPDATED) return -EINVAL; return 0; @@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 +#define F17H_MPB_MAX_SIZE 3200 switch (family) { case 0x14: @@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, case 0x16: max_size = F16H_MPB_MAX_SIZE; break; + case 0x17: + max_size = F17H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; @@ -494,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, return patch_size; } -static int apply_microcode_amd(int cpu) +static enum ucode_state apply_microcode_amd(int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_amd *mc_amd; @@ -508,7 +512,7 @@ static int apply_microcode_amd(int cpu) p = find_patch(cpu); if (!p) - return 0; + return UCODE_NFOUND; mc_amd = p->data; uci->mc = p->data; @@ -519,13 +523,13 @@ static int apply_microcode_amd(int cpu) if (rev >= mc_amd->hdr.patch_id) { c->microcode = rev; uci->cpu_sig.rev = rev; - return 0; + return UCODE_OK; } if (__apply_microcode_amd(mc_amd)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); - return -1; + return UCODE_ERROR; } pr_info("CPU%d: new patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); @@ -533,7 +537,7 @@ static int apply_microcode_amd(int cpu) uci->cpu_sig.rev = mc_amd->hdr.patch_id; c->microcode = mc_amd->hdr.patch_id; - return 0; + return UCODE_UPDATED; } static int install_equiv_cpu_table(const u8 *buf) @@ 
-679,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, static enum ucode_state load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) { + struct ucode_patch *p; enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); ret = __load_microcode_amd(family, data, size); - - if (ret != UCODE_OK) + if (ret != UCODE_OK) { cleanup(); + return ret; + } -#ifdef CONFIG_X86_32 - /* save BSP's matching patch for early load */ - if (save) { - struct ucode_patch *p = find_patch(0); - if (p) { - memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), - PATCH_MAX_SIZE)); - } + p = find_patch(0); + if (!p) { + return ret; + } else { + if (boot_cpu_data.microcode == p->patch_id) + return ret; + + ret = UCODE_NEW; } -#endif + + /* save BSP's matching patch for early load */ + if (!save) + return ret; + + memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); + memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE)); + return ret; } diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index c4fa4a85d4cb..021c90464cc2 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -22,13 +22,16 @@ #define pr_fmt(fmt) "microcode: " fmt #include +#include #include #include #include #include #include +#include #include #include +#include #include #include @@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache); */ static DEFINE_MUTEX(microcode_mutex); +/* + * Serialize late loading so that CPUs get updated one-by-one. + */ +static DEFINE_SPINLOCK(update_lock); + struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; struct cpu_info_ctx { @@ -239,7 +247,7 @@ static int __init save_microcode_in_initrd(void) break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) - return save_microcode_in_initrd_amd(cpuid_eax(1)); + ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; default: break; @@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu) return ret; } -struct apply_microcode_ctx { - int err; -}; - static void apply_microcode_local(void *arg) { - struct apply_microcode_ctx *ctx = arg; + enum ucode_state *err = arg; - ctx->err = microcode_ops->apply_microcode(smp_processor_id()); + *err = microcode_ops->apply_microcode(smp_processor_id()); } static int apply_microcode_on_target(int cpu) { - struct apply_microcode_ctx ctx = { .err = 0 }; + enum ucode_state err; int ret; - ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); - if (!ret) - ret = ctx.err; - + ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); + if (!ret) { + if (err == UCODE_ERROR) + ret = 1; + } return ret; } @@ -489,31 +494,124 @@ static void __exit microcode_dev_exit(void) /* fake device for request_firmware */ static struct platform_device *microcode_pdev; -static int reload_for_cpu(int cpu) +/* + * Late loading dance. Why the heavy-handed stomp_machine effort? + * + * - HT siblings must be idle and not execute other code while the other sibling + * is loading microcode in order to avoid any negative interactions caused by + * the loading. + * + * - In addition, microcode update on the cores must be serialized until this + * requirement can be relaxed in the future. Right now, this is conservative + * and good. 
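load_microcode_amd() above only reports UCODE_NEW when the cached patch for the boot CPU differs from the revision it is already running, which is what later gates the whole late-load path. A sketch of just that classification; the enum ordering is assumed and only the OK/NEW distinction matters here:

#include <stdint.h>
#include <stdio.h>

enum ucode_state { UCODE_OK, UCODE_NEW, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

/* Is the freshly parsed container worth a late load on this system? */
static enum ucode_state classify_patch(int have_patch, uint32_t patch_id,
                                       uint32_t running_rev)
{
        if (!have_patch)
                return UCODE_OK;        /* nothing cached for the boot CPU */
        if (running_rev == patch_id)
                return UCODE_OK;        /* already running this revision   */
        return UCODE_NEW;
}

int main(void)
{
        printf("same rev  -> %d\n", classify_patch(1, 0x08001250, 0x08001250));
        printf("newer rev -> %d\n", classify_patch(1, 0x08001250, 0x0800120f));
        return 0;
}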
+ */ +#define SPINUNIT 100 /* 100 nsec */ + +static int check_online_cpus(void) { - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - enum ucode_state ustate; - int err = 0; + if (num_online_cpus() == num_present_cpus()) + return 0; - if (!uci->valid) - return err; + pr_err("Not all CPUs online, aborting microcode update.\n"); - ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, true); - if (ustate == UCODE_OK) - apply_microcode_on_target(cpu); - else - if (ustate == UCODE_ERROR) - err = -EINVAL; - return err; + return -EINVAL; +} + +static atomic_t late_cpus_in; +static atomic_t late_cpus_out; + +static int __wait_for_cpus(atomic_t *t, long long timeout) +{ + int all_cpus = num_online_cpus(); + + atomic_inc(t); + + while (atomic_read(t) < all_cpus) { + if (timeout < SPINUNIT) { + pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", + all_cpus - atomic_read(t)); + return 1; + } + + ndelay(SPINUNIT); + timeout -= SPINUNIT; + + touch_nmi_watchdog(); + } + return 0; +} + +/* + * Returns: + * < 0 - on error + * 0 - no update done + * 1 - microcode was updated + */ +static int __reload_late(void *info) +{ + int cpu = smp_processor_id(); + enum ucode_state err; + int ret = 0; + + /* + * Wait for all CPUs to arrive. A load will not be attempted unless all + * CPUs show up. + * */ + if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) + return -1; + + spin_lock(&update_lock); + apply_microcode_local(&err); + spin_unlock(&update_lock); + + if (err > UCODE_NFOUND) { + pr_warn("Error reloading microcode on CPU %d\n", cpu); + return -1; + /* siblings return UCODE_OK because their engine got updated already */ + } else if (err == UCODE_UPDATED || err == UCODE_OK) { + ret = 1; + } else { + return ret; + } + + /* + * Increase the wait timeout to a safe value here since we're + * serializing the microcode update and that could take a while on a + * large number of CPUs. And that is fine as the *actual* timeout will + * be determined by the last CPU finished updating and thus cut short. + */ + if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus())) + panic("Timeout during microcode update!\n"); + + return ret; +} + +/* + * Reload microcode late on all CPUs. Wait for a sec until they + * all gather together. 
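__wait_for_cpus() above is an atomic counter that every CPU bumps and then spins on until it reaches the number of online CPUs, with a timeout. A userspace approximation of the two-phase in/out rendezvous using C11 atomics and threads; the nanosecond timeout is reduced to a bounded spin count, and the actual update is just a message (the kernel serializes it with a spinlock). Build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int late_cpus_in;
static atomic_int late_cpus_out;

/* Announce arrival, then wait (bounded) until every "CPU" has arrived. */
static int wait_for_cpus(atomic_int *t)
{
        long spins = 0;

        atomic_fetch_add(t, 1);
        while (atomic_load(t) < NCPUS) {
                if (++spins > 100000000L)
                        return 1;       /* stand-in for the nsec timeout */
        }
        return 0;
}

static void *reload_late(void *arg)
{
        int cpu = (int)(long)arg;

        if (wait_for_cpus(&late_cpus_in))       /* phase 1: everyone shows up */
                return NULL;

        printf("cpu %d: applying update\n", cpu);

        wait_for_cpus(&late_cpus_out);          /* phase 2: everyone is done  */
        return NULL;
}

int main(void)
{
        pthread_t th[NCPUS];
        long i;

        for (i = 0; i < NCPUS; i++)
                pthread_create(&th[i], NULL, reload_late, (void *)i);
        for (i = 0; i < NCPUS; i++)
                pthread_join(th[i], NULL);
        return 0;
}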
+ */ +static int microcode_reload_late(void) +{ + int ret; + + atomic_set(&late_cpus_in, 0); + atomic_set(&late_cpus_out, 0); + + ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + if (ret > 0) + microcode_check(); + + return ret; } static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { + enum ucode_state tmp_ret = UCODE_OK; + int bsp = boot_cpu_data.cpu_index; unsigned long val; - int cpu; - ssize_t ret = 0, tmp_ret; + ssize_t ret = 0; ret = kstrtoul(buf, 0, &val); if (ret) @@ -522,23 +620,24 @@ static ssize_t reload_store(struct device *dev, if (val != 1) return size; + tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev, true); + if (tmp_ret != UCODE_NEW) + return size; + get_online_cpus(); - mutex_lock(µcode_mutex); - for_each_online_cpu(cpu) { - tmp_ret = reload_for_cpu(cpu); - if (tmp_ret != 0) - pr_warn("Error reloading microcode on CPU %d\n", cpu); - /* save retval of the first encountered reload error */ - if (!ret) - ret = tmp_ret; - } - if (!ret) - perf_check_microcode(); + ret = check_online_cpus(); + if (ret) + goto put; + + mutex_lock(µcode_mutex); + ret = microcode_reload_late(); mutex_unlock(µcode_mutex); + +put: put_online_cpus(); - if (!ret) + if (ret >= 0) ret = size; return ret; @@ -606,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw) if (system_state != SYSTEM_RUNNING) return UCODE_NFOUND; - ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, - refresh_fw); - - if (ustate == UCODE_OK) { + ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, refresh_fw); + if (ustate == UCODE_NEW) { pr_debug("CPU%d updated upon init\n", cpu); apply_microcode_on_target(cpu); } diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 7dbcb7adf797..32b8e5724f96 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. */ static struct microcode_intel *intel_ucode_patch; +/* last level cache size per core */ +static int llc_size_per_core; + static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, unsigned int s2, unsigned int p2) { @@ -565,15 +568,6 @@ static void print_ucode(struct ucode_cpu_info *uci) } #else -/* - * Flush global tlb. We only do this in x86_64 where paging has been enabled - * already and PGE should be enabled as well. - */ -static inline void flush_tlb_early(void) -{ - __native_flush_tlb_global_irq_disabled(); -} - static inline void print_ucode(struct ucode_cpu_info *uci) { struct microcode_intel *mc; @@ -595,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) if (!mc) return 0; + /* + * Save us the MSR write below - which is a particular expensive + * operation - when the other hyperthread has updated the microcode + * already. + */ + rev = intel_get_microcode_revision(); + if (rev >= mc->hdr.rev) { + uci->cpu_sig.rev = rev; + return UCODE_OK; + } + + /* + * Writeback and invalidate caches before updating microcode to avoid + * internal issues depending on what the microcode is updating. 
+ */ + native_wbinvd(); + /* write microcode via MSR 0x79 */ native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); @@ -602,10 +613,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) if (rev != mc->hdr.rev) return -1; -#ifdef CONFIG_X86_64 - /* Flush global tlb. This is precaution. */ - flush_tlb_early(); -#endif uci->cpu_sig.rev = rev; if (early) @@ -782,27 +789,44 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) return 0; } -static int apply_microcode_intel(int cpu) +static enum ucode_state apply_microcode_intel(int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_intel *mc; - struct ucode_cpu_info *uci; - struct cpuinfo_x86 *c; static int prev_rev; u32 rev; /* We should bind the task to the CPU */ if (WARN_ON(raw_smp_processor_id() != cpu)) - return -1; + return UCODE_ERROR; - uci = ucode_cpu_info + cpu; - mc = uci->mc; + /* Look for a newer patch in our cache: */ + mc = find_patch(uci); if (!mc) { - /* Look for a newer patch in our cache: */ - mc = find_patch(uci); + mc = uci->mc; if (!mc) - return 0; + return UCODE_NFOUND; } + /* + * Save us the MSR write below - which is a particular expensive + * operation - when the other hyperthread has updated the microcode + * already. + */ + rev = intel_get_microcode_revision(); + if (rev >= mc->hdr.rev) { + uci->cpu_sig.rev = rev; + c->microcode = rev; + return UCODE_OK; + } + + /* + * Writeback and invalidate caches before updating microcode to avoid + * internal issues depending on what the microcode is updating. + */ + native_wbinvd(); + /* write microcode via MSR 0x79 */ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); @@ -811,7 +835,7 @@ static int apply_microcode_intel(int cpu) if (rev != mc->hdr.rev) { pr_err("CPU%d update to revision 0x%x failed\n", cpu, mc->hdr.rev); - return -1; + return UCODE_ERROR; } if (rev != prev_rev) { @@ -823,12 +847,10 @@ static int apply_microcode_intel(int cpu) prev_rev = rev; } - c = &cpu_data(cpu); - uci->cpu_sig.rev = rev; c->microcode = rev; - return 0; + return UCODE_UPDATED; } static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, @@ -840,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, unsigned int leftover = size; unsigned int curr_mc_size = 0, new_mc_size = 0; unsigned int csig, cpf; + enum ucode_state ret = UCODE_OK; while (leftover) { struct microcode_header_intel mc_header; @@ -881,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, new_mc = mc; new_mc_size = mc_size; mc = NULL; /* trigger new vmalloc */ + ret = UCODE_NEW; } ucode_ptr += mc_size; @@ -910,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); - return UCODE_OK; + return ret; } static int get_ucode_fw(void *to, const void *from, size_t n) @@ -923,8 +947,19 @@ static bool is_blacklisted(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); - if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { - pr_err_once("late loading on model 79 is disabled.\n"); + /* + * Late loading on model 79 with microcode revision less than 0x0b000021 + * and LLC size per core bigger than 2.5MB may result in a system hang. + * This behavior is documented in item BDF90, #334165 (Intel Xeon + * Processor E7-8800/4800 v4 Product Family). 
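The erratum description above combines model, stepping, per-core LLC size and running microcode revision into one late-load blacklist test. A standalone predicate with the stepping, size and revision constants quoted from the patch; the numeric Broadwell-X model value and the sample cache size are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAM6_BROADWELL_X 0x4F   /* model 79, assumed numeric value */

/* Refuse late loading on parts hit by erratum BDF90. */
static bool blacklisted_for_late_load(unsigned int model, unsigned int stepping,
                                      long llc_size_per_core, uint32_t microcode)
{
        return model == FAM6_BROADWELL_X &&
               stepping == 0x01 &&
               llc_size_per_core > 2621440 &&   /* more than 2.5 MB per core */
               microcode < 0x0b000021;
}

int main(void)
{
        long llc_per_core = 3L * 1024 * 1024;   /* hypothetical 3 MB per core */

        printf("old ucode -> blacklisted: %d\n",
               blacklisted_for_late_load(FAM6_BROADWELL_X, 0x01, llc_per_core, 0x0b00001f));
        printf("new ucode -> blacklisted: %d\n",
               blacklisted_for_late_load(FAM6_BROADWELL_X, 0x01, llc_per_core, 0x0b000021));
        return 0;
}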
+ */ + if (c->x86 == 6 && + c->x86_model == INTEL_FAM6_BROADWELL_X && + c->x86_stepping == 0x01 && + llc_size_per_core > 2621440 && + c->microcode < 0x0b000021) { + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); + pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); return true; } @@ -943,7 +978,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, return UCODE_NFOUND; sprintf(name, "intel-ucode/%02x-%02x-%02x", - c->x86, c->x86_model, c->x86_mask); + c->x86, c->x86_model, c->x86_stepping); if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); @@ -979,6 +1014,15 @@ static struct microcode_ops microcode_intel_ops = { .apply_microcode = apply_microcode_intel, }; +static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) +{ + u64 llc_size = c->x86_cache_size * 1024ULL; + + do_div(llc_size, c->x86_max_cores); + + return (int)llc_size; +} + struct microcode_ops * __init init_intel_microcode(void) { struct cpuinfo_x86 *c = &boot_cpu_data; @@ -989,5 +1033,7 @@ struct microcode_ops * __init init_intel_microcode(void) return NULL; } + llc_size_per_core = calc_llc_size_per_core(c); + return µcode_intel_ops; } diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 236324e83a3a..85eb5fc180c8 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -254,9 +254,9 @@ static void __init ms_hyperv_init_platform(void) #endif } -const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { +const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft Hyper-V", .detect = ms_hyperv_platform, - .init_platform = ms_hyperv_init_platform, + .type = X86_HYPER_MS_HYPERV, + .init.init_platform = ms_hyperv_init_platform, }; -EXPORT_SYMBOL(x86_hyper_ms_hyperv); diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fdc55215d44d..e12ee86906c6 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, */ if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask <= 7) { + boot_cpu_data.x86_stepping <= 7) { if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); return -EINVAL; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 40d5a8a75212..7468de429087 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -711,8 +711,8 @@ void __init mtrr_bp_init(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 0x3 && - (boot_cpu_data.x86_mask == 0x3 || - boot_cpu_data.x86_mask == 0x4)) + (boot_cpu_data.x86_stepping == 0x3 || + boot_cpu_data.x86_stepping == 0x4)) phys_addr = 36; size_or_mask = SIZE_OR_MASK_BITS(phys_addr); diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 6b7e17bf0b71..2c8522a39ed5 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -5,6 +5,8 @@ #include #include +#include "cpu.h" + /* * Get CPU information for use by the procfs. */ @@ -70,16 +72,18 @@ static int show_cpuinfo(struct seq_file *m, void *v) c->x86_model, c->x86_model_id[0] ? 
c->x86_model_id : "unknown"); - if (c->x86_mask || c->cpuid_level >= 0) - seq_printf(m, "stepping\t: %d\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + seq_printf(m, "stepping\t: %d\n", c->x86_stepping); else seq_puts(m, "stepping\t: unknown\n"); if (c->microcode) seq_printf(m, "microcode\t: 0x%x\n", c->microcode); if (cpu_has(c, X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get(cpu); + unsigned int freq = aperfmperf_get_khz(cpu); + if (!freq) + freq = cpufreq_quick_get(cpu); if (!freq) freq = cpu_khz; seq_printf(m, "cpu MHz\t\t: %u.%03u\n", @@ -87,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) } /* Cache size */ - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + if (c->x86_cache_size) + seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); show_cpuinfo_core(m, c, cpu); show_cpuinfo_misc(m, c); diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 05459ad3db46..df11f5d604be 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -21,9 +21,6 @@ struct cpuid_bit { static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, - { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 }, - { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, - { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 }, { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 40ed26852ebd..8e005329648b 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -205,10 +205,10 @@ static bool __init vmware_legacy_x2apic_available(void) (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; } -const __refconst struct hypervisor_x86 x86_hyper_vmware = { +const __initconst struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, - .init_platform = vmware_platform_setup, - .x2apic_available = vmware_legacy_x2apic_available, + .type = X86_HYPER_VMWARE, + .init.init_platform = vmware_platform_setup, + .init.x2apic_available = vmware_legacy_x2apic_available, }; -EXPORT_SYMBOL(x86_hyper_vmware); diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 0e662c55ae90..0b8cedb20d6d 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -50,25 +50,23 @@ static void doublefault_fn(void) cpu_relax(); } -struct tss_struct doublefault_tss __cacheline_aligned = { - .x86_tss = { - .sp0 = STACK_START, - .ss0 = __KERNEL_DS, - .ldt = 0, - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, - - .ip = (unsigned long) doublefault_fn, - /* 0x2 bit is always set */ - .flags = X86_EFLAGS_SF | 0x2, - .sp = STACK_START, - .es = __USER_DS, - .cs = __KERNEL_CS, - .ss = __KERNEL_DS, - .ds = __USER_DS, - .fs = __KERNEL_PERCPU, - - .__cr3 = __pa_nodebug(swapper_pg_dir), - } +struct x86_hw_tss doublefault_tss __cacheline_aligned = { + .sp0 = STACK_START, + .ss0 = __KERNEL_DS, + .ldt = 0, + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, + + .ip = (unsigned long) doublefault_fn, + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, + .es = __USER_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, + .ds = __USER_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), }; /* dummy for do_double_fault() call 
*/ diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f13b4c00a5de..afbecff161d1 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -43,6 +44,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, return true; } +bool in_entry_stack(unsigned long *stack, struct stack_info *info) +{ + struct entry_stack *ss = cpu_entry_stack(smp_processor_id()); + + void *begin = ss; + void *end = ss + 1; + + if ((void *)stack < begin || (void *)stack >= end) + return false; + + info->type = STACK_TYPE_ENTRY; + info->begin = begin; + info->end = end; + info->next_sp = NULL; + + return true; +} + static void printk_stack_address(unsigned long address, int reliable, char *log_lvl) { @@ -50,6 +69,39 @@ static void printk_stack_address(unsigned long address, int reliable, printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); } +void show_iret_regs(struct pt_regs *regs) +{ + printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip); + printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss, + regs->sp, regs->flags); +} + +static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, + bool partial) +{ + /* + * These on_stack() checks aren't strictly necessary: the unwind code + * has already validated the 'regs' pointer. The checks are done for + * ordering reasons: if the registers are on the next stack, we don't + * want to print them out yet. Otherwise they'll be shown as part of + * the wrong stack. Later, when show_trace_log_lvl() switches to the + * next stack, this function will be called again with the same regs so + * they can be printed in the right context. + */ + if (!partial && on_stack(info, regs, sizeof(*regs))) { + __show_regs(regs, 0); + + } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, + IRET_FRAME_SIZE)) { + /* + * When an interrupt or exception occurs in entry code, the + * full pt_regs might not have been saved yet. In that case + * just print the iret frame. + */ + show_iret_regs(regs); + } +} + void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl) { @@ -57,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, struct stack_info stack_info = {0}; unsigned long visit_mask = 0; int graph_idx = 0; + bool partial; printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); stack = stack ? : get_stack_pointer(task, regs); + regs = unwind_get_entry_regs(&state, &partial); /* * Iterate through the stacks, starting with the current stack pointer. @@ -71,31 +125,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - task stack * - interrupt stack * - HW exception stacks (double fault, nmi, debug, mce) + * - entry stack * - * x86-32 can have up to three stacks: + * x86-32 can have up to four stacks: * - task stack * - softirq stack * - hardirq stack + * - entry stack */ - for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { + for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { const char *stack_name; - /* - * If we overflowed the task stack into a guard page, jump back - * to the bottom of the usable stack. 
- */ - if (task_stack_page(task) - (void *)stack < PAGE_SIZE) - stack = task_stack_page(task); - - if (get_stack_info(stack, task, &stack_info, &visit_mask)) - break; + if (get_stack_info(stack, task, &stack_info, &visit_mask)) { + /* + * We weren't on a valid stack. It's possible that + * we overflowed a valid stack into a guard page. + * See if the next page up is valid so that we can + * generate some kind of backtrace if this happens. + */ + stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack); + if (get_stack_info(stack, task, &stack_info, &visit_mask)) + break; + } stack_name = stack_type_name(stack_info.type); if (stack_name) printk("%s <%s>\n", log_lvl, stack_name); - if (regs && on_stack(&stack_info, regs, sizeof(*regs))) - __show_regs(regs, 0); + if (regs) + show_regs_if_on_stack(&stack_info, regs, partial); /* * Scan the stack, printing any text addresses we find. At the @@ -119,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, /* * Don't print regs->ip again if it was already printed - * by __show_regs() below. + * by show_regs_if_on_stack(). */ if (regs && stack == ®s->ip) goto next; @@ -154,9 +212,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unwind_next_frame(&state); /* if the frame has entry regs, print them */ - regs = unwind_get_entry_regs(&state); - if (regs && on_stack(&stack_info, regs, sizeof(*regs))) - __show_regs(regs, 0); + regs = unwind_get_entry_regs(&state, &partial); + if (regs) + show_regs_if_on_stack(&stack_info, regs, partial); } if (stack_name) @@ -252,11 +310,13 @@ int __die(const char *str, struct pt_regs *regs, long err) unsigned long sp; #endif printk(KERN_DEFAULT - "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter, + "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", IS_ENABLED(CONFIG_SMP) ? " SMP" : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", - IS_ENABLED(CONFIG_KASAN) ? " KASAN" : ""); + IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "", + IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ? + (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : ""); if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index daefae83a3aa..04170f63e3a1 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_SOFTIRQ) return "SOFTIRQ"; + if (type == STACK_TYPE_ENTRY) + return "ENTRY_TRAMPOLINE"; + return NULL; } @@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (task != current) goto unknown; + if (in_entry_stack(stack, info)) + goto recursion_check; + if (in_hardirq_stack(stack, info)) goto recursion_check; diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 88ce2ffdb110..563e28d14f2c 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -37,6 +37,15 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_IRQ) return "IRQ"; + if (type == STACK_TYPE_ENTRY) { + /* + * On 64-bit, we have a generic entry stack that we + * use for all the kernel entry points, including + * SYSENTER. 
+ */ + return "ENTRY_TRAMPOLINE"; + } + if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) return exception_stack_names[type - STACK_TYPE_EXCEPTION]; @@ -115,6 +124,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (in_irq_stack(stack, info)) goto recursion_check; + if (in_entry_stack(stack, info)) + goto recursion_check; + goto unknown; recursion_check: diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 1e82f787c160..c87560e1e3ef 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_SKL_IDS(&gen9_early_ops), INTEL_BXT_IDS(&gen9_early_ops), INTEL_KBL_IDS(&gen9_early_ops), + INTEL_CFL_IDS(&gen9_early_ops), INTEL_GLK_IDS(&gen9_early_ops), INTEL_CNL_IDS(&gen9_early_ops), }; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7..f65d32c7040c 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Simple VGA output */ #define VGABASE (__ISA_IO_base + 0xb8000) @@ -387,6 +388,12 @@ static int __init setup_early_printk(char *buf) if (!strncmp(buf, "xdbc", 4)) early_xdbc_parse_parameter(buf + 4); #endif +#ifdef CONFIG_INTEL_TH_EARLY_PRINTK + if (!strncmp(buf, "intelth", 7)) { + early_intel_th_init(buf + 7); + early_console_register(&intel_th_early_console, keep); + } +#endif buf++; } diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 9c4e7ba6870c..cbded50ee601 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -57,7 +57,7 @@ # error "Need more virtual address space for the ESPFIX hack" #endif -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) /* This contains the *bottom* address of the espfix stack */ DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 7affb7e3d9a5..6abd83572b01 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -249,6 +249,10 @@ static void __init fpu__init_system_ctx_switch(void) */ static void __init fpu__init_parse_early_param(void) { + char arg[32]; + char *argptr = arg; + int bit; + if (cmdline_find_option_bool(boot_command_line, "no387")) setup_clear_cpu_cap(X86_FEATURE_FPU); @@ -266,6 +270,13 @@ static void __init fpu__init_parse_early_param(void) if (cmdline_find_option_bool(boot_command_line, "noxsaves")) setup_clear_cpu_cap(X86_FEATURE_XSAVES); + + if (cmdline_find_option(boot_command_line, "clearcpuid", arg, + sizeof(arg)) && + get_option(&argptr, &bit) && + bit >= 0 && + bit < NCAPINTS * 32) + setup_clear_cpu_cap(bit); } /* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index f1d5476c9022..87a57b7642d3 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -15,6 +15,7 @@ #include #include +#include /* * Although we spell it out in here, the Processor Trace @@ -36,6 +37,19 @@ static const char *xfeature_names[] = "unknown xstate feature" , }; +static short xsave_cpuid_features[] __initdata = { + X86_FEATURE_FPU, + X86_FEATURE_XMM, + X86_FEATURE_AVX, + X86_FEATURE_MPX, + X86_FEATURE_MPX, + X86_FEATURE_AVX512F, + X86_FEATURE_AVX512F, + X86_FEATURE_AVX512F, + X86_FEATURE_INTEL_PT, + X86_FEATURE_PKU, +}; + /* * Mask of xstate features supported by the CPU and the kernel: */ @@ -59,26 +73,6 @@ 
unsigned int fpu_user_xstate_size; void fpu__xstate_clear_all_cpu_caps(void) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); - setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); - setup_clear_cpu_cap(X86_FEATURE_XSAVEC); - setup_clear_cpu_cap(X86_FEATURE_XSAVES); - setup_clear_cpu_cap(X86_FEATURE_AVX); - setup_clear_cpu_cap(X86_FEATURE_AVX2); - setup_clear_cpu_cap(X86_FEATURE_AVX512F); - setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA); - setup_clear_cpu_cap(X86_FEATURE_AVX512PF); - setup_clear_cpu_cap(X86_FEATURE_AVX512ER); - setup_clear_cpu_cap(X86_FEATURE_AVX512CD); - setup_clear_cpu_cap(X86_FEATURE_AVX512DQ); - setup_clear_cpu_cap(X86_FEATURE_AVX512BW); - setup_clear_cpu_cap(X86_FEATURE_AVX512VL); - setup_clear_cpu_cap(X86_FEATURE_MPX); - setup_clear_cpu_cap(X86_FEATURE_XGETBV1); - setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI); - setup_clear_cpu_cap(X86_FEATURE_PKU); - setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW); - setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS); - setup_clear_cpu_cap(X86_FEATURE_AVX512_VPOPCNTDQ); } /* @@ -726,6 +720,7 @@ void __init fpu__init_system_xstate(void) unsigned int eax, ebx, ecx, edx; static int on_boot_cpu __initdata = 1; int err; + int i; WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; @@ -759,6 +754,14 @@ void __init fpu__init_system_xstate(void) goto out_disable; } + /* + * Clear XSAVE features that are disabled in the normal CPUID. + */ + for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) { + if (!boot_cpu_has(xsave_cpuid_features[i])) + xfeatures_mask &= ~BIT(i); + } + xfeatures_mask &= fpu__get_supported_xfeatures_mask(); /* Enable xstate instructions to be able to continue with initialization: */ diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index b6c6468e10bc..4c8440de3355 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -8,6 +8,7 @@ #include #include #include +#include #ifdef CC_USING_FENTRY # define function_hook __fentry__ @@ -197,7 +198,8 @@ ftrace_stub: movl 0x4(%ebp), %edx subl $MCOUNT_INSN_SIZE, %eax - call *ftrace_trace_function + movl ftrace_trace_function, %ecx + CALL_NOSPEC %ecx popl %edx popl %ecx @@ -241,5 +243,5 @@ return_to_handler: movl %eax, %ecx popl %edx popl %eax - jmp *%ecx + JMP_NOSPEC %ecx #endif diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index c832291d948a..7cb8ba08beb9 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -7,7 +7,7 @@ #include #include #include - +#include .code64 .section .entry.text, "ax" @@ -286,8 +286,8 @@ trace: * ip and parent ip are used and the list function is called when * function tracing is enabled. 
*/ - call *ftrace_trace_function - + movq ftrace_trace_function, %r8 + CALL_NOSPEC %r8 restore_mcount_regs jmp fgraph_trace @@ -329,5 +329,5 @@ GLOBAL(return_to_handler) movq 8(%rsp), %rdx movq (%rsp), %rax addq $24, %rsp - jmp *%rdi + JMP_NOSPEC %rdi #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 6a5d757b9cfd..7ba5d819ebe3 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr, p = fixup_pointer(&phys_base, physaddr); *p += load_delta - sme_get_me_mask(); - /* Encrypt the kernel (if SME is active) */ - sme_encrypt_kernel(); + /* Encrypt the kernel and related (if SME is active) */ + sme_encrypt_kernel(bp); /* * Return the SME encryption mask (if SME is active) to be used as a diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index f1d528bb66a6..b59e4fb40fd9 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -37,7 +37,7 @@ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model -#define X86_MASK new_cpu_data+CPUINFO_x86_mask +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability @@ -212,9 +212,6 @@ ENTRY(startup_32_smp) #endif .Ldefault_entry: -#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ - X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ - X86_CR0_PG) movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 @@ -335,7 +332,7 @@ ENTRY(startup_32_smp) shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask mask revision - movb %cl,X86_MASK + movb %cl,X86_STEPPING movl %edx,X86_CAPABILITY .Lis486: @@ -402,7 +399,7 @@ ENTRY(early_idt_handler_array) # 24(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 6dde3f3fc1f8..0f545b3cf926 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -23,6 +23,7 @@ #include #include "../entry/calling.h" #include +#include #ifdef CONFIG_PARAVIRT #include @@ -38,11 +39,12 @@ * */ -#define p4d_index(x) (((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE) PGD_START_KERNEL = pgd_index(__START_KERNEL_map) +#endif L3_START_KERNEL = pud_index(__START_KERNEL_map) .text @@ -50,6 +52,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map) .code64 .globl startup_64 startup_64: + UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table @@ -89,6 +92,7 @@ startup_64: addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f ENTRY(secondary_startup_64) + UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. 
@@ -131,8 +135,10 @@ ENTRY(secondary_startup_64) /* Ensure I am executing from virtual addresses */ movq $1f, %rax + ANNOTATE_RETPOLINE_SAFE jmp *%rax 1: + UNWIND_HINT_EMPTY /* Check if nx is implemented */ movl $0x80000001, %eax @@ -150,9 +156,6 @@ ENTRY(secondary_startup_64) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ -#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ - X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ - X86_CR0_PG) movl $CR0_STATE, %eax /* Make changes effective */ movq %rax, %cr0 @@ -235,7 +238,7 @@ ENTRY(secondary_startup_64) pushq %rax # target address in negative space lretq .Lafter_lret: -ENDPROC(secondary_startup_64) +END(secondary_startup_64) #include "verify_cpu.S" @@ -247,6 +250,7 @@ ENDPROC(secondary_startup_64) */ ENTRY(start_cpu0) movq initial_stack(%rip), %rsp + UNWIND_HINT_EMPTY jmp .Ljump_to_C_code ENDPROC(start_cpu0) #endif @@ -266,26 +270,24 @@ ENDPROC(start_cpu0) .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS __FINITDATA -bad_address: - jmp bad_address - __INIT ENTRY(early_idt_handler_array) - # 104(%rsp) %rflags - # 96(%rsp) %cs - # 88(%rsp) %rip - # 80(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 - pushq $0 # Dummy error code, to make stack frame uniform + .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 + UNWIND_HINT_IRET_REGS + pushq $0 # Dummy error code, to make stack frame uniform + .else + UNWIND_HINT_IRET_REGS offset=8 .endif pushq $i # 72(%rsp) Vector number jmp early_idt_handler_common + UNWIND_HINT_IRET_REGS i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handler_array) + UNWIND_HINT_IRET_REGS offset=16 +END(early_idt_handler_array) early_idt_handler_common: /* @@ -313,6 +315,7 @@ early_idt_handler_common: pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ + UNWIND_HINT_REGS cmpq $14,%rsi /* Page fault? */ jnz 10f @@ -327,8 +330,8 @@ early_idt_handler_common: 20: decl early_recursion_flag(%rip) - jmp restore_regs_and_iret -ENDPROC(early_idt_handler_common) + jmp restore_regs_and_return_to_kernel +END(early_idt_handler_common) __INITDATA @@ -340,6 +343,27 @@ GLOBAL(early_recursion_flag) .balign PAGE_SIZE; \ GLOBAL(name) +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Each PGD needs to be 8k long and 8k aligned. We do not + * ever go out to userspace with these, so we do not + * strictly *need* the second page, but this allows us to + * have a single set_pgd() implementation that does not + * need to worry about whether it has 4k or 8k to work + * with. 
+ * + * This ensures PGDs are 8k long: + */ +#define PTI_USER_PGD_FILL 512 +/* This ensures they are 8k-aligned: */ +#define NEXT_PGD_PAGE(name) \ + .balign 2 * PAGE_SIZE; \ +GLOBAL(name) +#else +#define NEXT_PGD_PAGE(name) NEXT_PAGE(name) +#define PTI_USER_PGD_FILL 0 +#endif + /* Automate the creation of 1 to 1 mapping pmd entries */ #define PMDS(START, PERM, COUNT) \ i = 0 ; \ @@ -349,30 +373,29 @@ GLOBAL(name) .endr __INITDATA -NEXT_PAGE(early_top_pgt) +NEXT_PGD_PAGE(early_top_pgt) .fill 511,8,0 #ifdef CONFIG_X86_5LEVEL .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #else .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #endif + .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 .data -#ifndef CONFIG_XEN -NEXT_PAGE(init_top_pgt) - .fill 512,8,0 -#else -NEXT_PAGE(init_top_pgt) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) +NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC + .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC @@ -382,6 +405,10 @@ NEXT_PAGE(level2_ident_pgt) * Don't set NX because code runs from these pages. */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +#else +NEXT_PGD_PAGE(init_top_pgt) + .fill 512,8,0 + .fill PTI_USER_PGD_FILL,8,0 #endif #ifdef CONFIG_X86_5LEVEL @@ -435,7 +462,7 @@ ENTRY(phys_base) EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" - + __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 014cb2fc47ff..0c5256653d6c 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -56,7 +56,7 @@ struct idt_data { * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ -static const __initdata struct idt_data early_idts[] = { +static const __initconst struct idt_data early_idts[] = { INTG(X86_TRAP_DB, debug), SYSG(X86_TRAP_BP, int3), #ifdef CONFIG_X86_32 @@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = { * the traps which use them are reinitialized with IST after cpu_init() has * set up TSS. */ -static const __initdata struct idt_data def_idts[] = { +static const __initconst struct idt_data def_idts[] = { INTG(X86_TRAP_DE, divide_error), INTG(X86_TRAP_NMI, nmi), INTG(X86_TRAP_BR, bounds), @@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = { /* * The APIC and SMP idt entries */ -static const __initdata struct idt_data apic_idts[] = { +static const __initconst struct idt_data apic_idts[] = { #ifdef CONFIG_SMP INTG(RESCHEDULE_VECTOR, reschedule_interrupt), INTG(CALL_FUNCTION_VECTOR, call_function_interrupt), @@ -140,6 +140,9 @@ static const __initdata struct idt_data apic_idts[] = { # ifdef CONFIG_IRQ_WORK INTG(IRQ_WORK_VECTOR, irq_work_interrupt), # endif +#ifdef CONFIG_X86_UV + INTG(UV_BAU_MESSAGE, uv_bau_message_intr1), +#endif INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt), INTG(ERROR_APIC_VECTOR, error_interrupt), #endif @@ -150,7 +153,7 @@ static const __initdata struct idt_data apic_idts[] = { * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). 
*/ -static const __initdata struct idt_data early_pf_idts[] = { +static const __initconst struct idt_data early_pf_idts[] = { INTG(X86_TRAP_PF, page_fault), }; @@ -158,9 +161,8 @@ static const __initdata struct idt_data early_pf_idts[] = { * Override for the debug_idt. Same as the default, but with interrupt * stack set to DEFAULT_STACK (0). Required for NMI trap handling. */ -static const __initdata struct idt_data dbg_idts[] = { +static const __initconst struct idt_data dbg_idts[] = { INTG(X86_TRAP_DB, debug), - INTG(X86_TRAP_BP, int3), }; #endif @@ -180,10 +182,9 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss; * The exceptions which use Interrupt stacks. They are setup after * cpu_init() when the TSS has been initialized. */ -static const __initdata struct idt_data ist_idts[] = { +static const __initconst struct idt_data ist_idts[] = { ISTG(X86_TRAP_DB, debug, DEBUG_STACK), ISTG(X86_TRAP_NMI, nmi, NMI_STACK), - SISTG(X86_TRAP_BP, int3, DEBUG_STACK), ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK), #ifdef CONFIG_X86_MCE ISTG(X86_TRAP_MC, &machine_check, MCE_STACK), diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 3feb648781c4..2f723301eb58 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(cpu_tss, get_cpu()); + tss = &per_cpu(cpu_tss_rw, get_cpu()); if (turn_on) bitmap_clear(t->io_bitmap_ptr, from, num); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 52089c043160..aa9d51eea9d0 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) /* high bit used in ret_from_ code */ unsigned vector = ~regs->orig_ax; - /* - * NB: Unlike exception entries, IRQ entries do not reliably - * handle context tracking in the low-level entry code. This is - * because syscall entries execute briefly with IRQs on before - * updating context tracking state, so we can take an IRQ from - * kernel mode with CONTEXT_USER. The low-level entry code only - * updates the context if we came from user mode, so we won't - * switch to CONTEXT_KERNEL. We'll fix that once the syscall - * code is cleaned up enough that we can cleanly defer enabling - * IRQs. - */ - entering_irq(); /* entering_irq() tells RCU that we're not quiescent. Check it. 
*/ diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a83b3346a0e1..c1bdbd3d3232 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -20,6 +20,7 @@ #include #include +#include #ifdef CONFIG_DEBUG_STACKOVERFLOW @@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack); static void call_on_stack(void *func, void *stack) { asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + CALL_NOSPEC "movl %%ebx,%%esp \n" : "=b" (stack) : "0" (stack), - "D"(func) + [thunk_target] "D"(func) : "memory", "cc", "edx", "ecx", "eax"); } @@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) call_on_stack(print_stack_overflow, isp); asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + CALL_NOSPEC "movl %%ebx,%%esp \n" : "=a" (arg1), "=b" (isp) : "0" (desc), "1" (isp), - "D" (desc->handle_irq) + [thunk_target] "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 020efbf5786b..d86e344f5b3d 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs) if (regs->sp >= estack_top && regs->sp <= estack_bottom) return; - WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n", + WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n", current->comm, curbase, regs->sp, irq_stack_top, irq_stack_bottom, - estack_top, estack_bottom); + estack_top, estack_bottom, (void *)regs->ip); if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 0742491cbb73..ce06ec9c2323 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1149,10 +1149,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler); bool arch_within_kprobe_blacklist(unsigned long addr) { + bool is_in_entry_trampoline_section = false; + +#ifdef CONFIG_X86_64 + is_in_entry_trampoline_section = + (addr >= (unsigned long)__entry_trampoline_start && + addr < (unsigned long)__entry_trampoline_end); +#endif return (addr >= (unsigned long)__kprobes_text_start && addr < (unsigned long)__kprobes_text_end) || (addr >= (unsigned long)__entry_text_start && - addr < (unsigned long)__entry_text_end); + addr < (unsigned long)__entry_text_end) || + is_in_entry_trampoline_section; } int __init arch_init_kprobes(void) diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 041f7b6dfa0f..bcfee4f69b0e 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -26,7 +26,7 @@ #include "common.h" static nokprobe_inline -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, +void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, unsigned long orig_ip) { /* @@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, __this_cpu_write(current_kprobe, NULL); if (orig_ip) regs->ip = orig_ip; - return 1; } int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb, 0); - else - return 0; + if (kprobe_ftrace(p)) { + __skip_singlestep(p, regs, kcb, 0); + preempt_enable_no_resched(); + return 1; + } + return 0; } 
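/*
 * Editor's note -- a minimal sketch (not part of the patch) of the preemption
 * pairing that the ftrace-based kprobe path relies on after this change and
 * the kprobe_ftrace_handler() change that follows:
 *
 *   preempt_disable();                        // taken in kprobe_ftrace_handler()
 *   if (!p->pre_handler || !p->pre_handler(p, regs)) {
 *           __skip_singlestep(p, regs, kcb, orig_ip);
 *           preempt_enable_no_resched();      // balanced on the normal path
 *   }
 *   // If pre_handler() returns non-zero, the handler keeps the preempt count
 *   // at +1 and is expected to drop it later, mirroring the trap-based flow.
 */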
NOKPROBE_SYMBOL(skip_singlestep); -/* Ftrace callback handler for kprobes */ +/* Ftrace callback handler for kprobes -- called under preepmt disabed */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs) { @@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); + /* To emulate trap based kprobes, preempt_disable here */ + preempt_disable(); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) + if (!p->pre_handler || !p->pre_handler(p, regs)) { __skip_singlestep(p, regs, kcb, orig_ip); + preempt_enable_no_resched(); + } /* * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe. + * resets current kprobe, and keep preempt count +1. */ } end: diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 4f98aad38237..3668f28cf5fc 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "common.h" @@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) } /* Check whether insn is indirect jump */ -static int insn_is_indirect_jump(struct insn *insn) +static int __insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); } +static int insn_is_indirect_jump(struct insn *insn) +{ + int ret = __insn_is_indirect_jump(insn); + +#ifdef CONFIG_RETPOLINE + /* + * Jump to x86_indirect_thunk_* is treated as an indirect jump. + * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with + * older gcc may use indirect jump. So we add this check instead of + * replace indirect-jump check. 
+ */ + if (!ret) + ret = insn_jump_into_range(insn, + (unsigned long)__indirect_thunk_start, + (unsigned long)__indirect_thunk_end - + (unsigned long)__indirect_thunk_start); +#endif + return ret; +} + /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 8bb9594d0761..652bdd867782 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void) #endif pa |= KVM_ASYNC_PF_ENABLED; - /* Async page fault support for L1 hypervisor is optional */ - if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN, - (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0) - wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) + pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; + + wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(apf_reason.enabled, 1); printk(KERN_INFO"KVM setup async PF for cpu %d\n", smp_processor_id()); @@ -544,12 +544,12 @@ static uint32_t __init kvm_detect(void) return kvm_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_kvm __refconst = { +const __initconst struct hypervisor_x86 x86_hyper_kvm = { .name = "KVM", .detect = kvm_detect, - .x2apic_available = kvm_para_available, + .type = X86_HYPER_KVM, + .init.x2apic_available = kvm_para_available, }; -EXPORT_SYMBOL_GPL(x86_hyper_kvm); static __init int activate_jump_labels(void) { diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 4d17bacf4030..26d713ecad34 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -5,6 +5,11 @@ * Copyright (C) 2002 Andi Kleen * * This handles calls from both 32bit and 64bit mode. + * + * Lock order: + * contex.ldt_usr_sem + * mmap_sem + * context.lock */ #include @@ -13,11 +18,13 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -41,17 +48,15 @@ static void refresh_ldt_segments(void) #endif } -/* context.lock is held for us, so we don't need any locking. */ +/* context.lock is held by the task which issued the smp function call */ static void flush_ldt(void *__mm) { struct mm_struct *mm = __mm; - mm_context_t *pc; if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) return; - pc = &mm->context; - set_ldt(pc->ldt->entries, pc->ldt->nr_entries); + load_mm_ldt(mm); refresh_ldt_segments(); } @@ -88,25 +93,143 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) return NULL; } + /* The new LDT isn't aliased for PTI yet. */ + new_ldt->slot = -1; + new_ldt->nr_entries = num_entries; return new_ldt; } +/* + * If PTI is enabled, this maps the LDT into the kernelmode and + * usermode tables for the given mm. + * + * There is no corresponding unmap function. Even if the LDT is freed, we + * leave the PTEs around until the slot is reused or the mm is destroyed. + * This is harmless: the LDT is always in ordinary memory, and no one will + * access the freed slot. + * + * If we wanted to unmap freed LDTs, we'd also need to do a flush to make + * it useful, and the flush would slow down modify_ldt(). + */ +static int +map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + bool is_vmalloc, had_top_level_entry; + unsigned long va; + spinlock_t *ptl; + pgd_t *pgd; + int i; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return 0; + + /* + * Any given ldt_struct should have map_ldt_struct() called at most + * once. 
+ */ + WARN_ON(ldt->slot != -1); + + /* + * Did we already have the top level entry allocated? We can't + * use pgd_none() for this because it doens't do anything on + * 4-level page table kernels. + */ + pgd = pgd_offset(mm, LDT_BASE_ADDR); + had_top_level_entry = (pgd->pgd != 0); + + is_vmalloc = is_vmalloc_addr(ldt->entries); + + for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { + unsigned long offset = i << PAGE_SHIFT; + const void *src = (char *)ldt->entries + offset; + unsigned long pfn; + pte_t pte, *ptep; + + va = (unsigned long)ldt_slot_va(slot) + offset; + pfn = is_vmalloc ? vmalloc_to_pfn(src) : + page_to_pfn(virt_to_page(src)); + /* + * Treat the PTI LDT range as a *userspace* range. + * get_locked_pte() will allocate all needed pagetables + * and account for them in this mm. + */ + ptep = get_locked_pte(mm, va, &ptl); + if (!ptep) + return -ENOMEM; + /* + * Map it RO so the easy to find address is not a primary + * target via some kernel interface which misses a + * permission check. + */ + pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)); + set_pte_at(mm, va, ptep, pte); + pte_unmap_unlock(ptep, ptl); + } + + if (mm->context.ldt) { + /* + * We already had an LDT. The top-level entry should already + * have been allocated and synchronized with the usermode + * tables. + */ + WARN_ON(!had_top_level_entry); + if (static_cpu_has(X86_FEATURE_PTI)) + WARN_ON(!kernel_to_user_pgdp(pgd)->pgd); + } else { + /* + * This is the first time we're mapping an LDT for this process. + * Sync the pgd to the usermode tables. + */ + WARN_ON(had_top_level_entry); + if (static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON(kernel_to_user_pgdp(pgd)->pgd); + set_pgd(kernel_to_user_pgdp(pgd), *pgd); + } + } + + va = (unsigned long)ldt_slot_va(slot); + flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0); + + ldt->slot = slot; +#endif + return 0; +} + +static void free_ldt_pgtables(struct mm_struct *mm) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + struct mmu_gather tlb; + unsigned long start = LDT_BASE_ADDR; + unsigned long end = start + (1UL << PGDIR_SHIFT); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + tlb_gather_mmu(&tlb, mm, start, end); + free_pgd_range(&tlb, start, end, start, end); + tlb_finish_mmu(&tlb, start, end); +#endif +} + /* After calling this, the LDT is immutable. */ static void finalize_ldt_struct(struct ldt_struct *ldt) { paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); } -/* context.lock is held */ -static void install_ldt(struct mm_struct *current_mm, - struct ldt_struct *ldt) +static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt) { - /* Synchronizes with lockless_dereference in load_mm_ldt. */ - smp_store_release(¤t_mm->context.ldt, ldt); + mutex_lock(&mm->context.lock); + + /* Synchronizes with READ_ONCE in load_mm_ldt. */ + smp_store_release(&mm->context.ldt, ldt); + + /* Activate the LDT for all CPUs using currents mm. */ + on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true); - /* Activate the LDT for all CPUs using current_mm. */ - on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); + mutex_unlock(&mm->context.lock); } static void free_ldt_struct(struct ldt_struct *ldt) @@ -123,27 +246,20 @@ static void free_ldt_struct(struct ldt_struct *ldt) } /* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. + * Called on fork from arch_dup_mmap(). Just copy the current LDT state, + * the new task is not running, so nothing can be installed. 
*/ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) +int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ldt_struct *new_ldt; - struct mm_struct *old_mm; int retval = 0; - mutex_init(&mm->context.lock); - old_mm = current->mm; - if (!old_mm) { - mm->context.ldt = NULL; + if (!old_mm) return 0; - } mutex_lock(&old_mm->context.lock); - if (!old_mm->context.ldt) { - mm->context.ldt = NULL; + if (!old_mm->context.ldt) goto out_unlock; - } new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); if (!new_ldt) { @@ -155,6 +271,12 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) new_ldt->nr_entries * LDT_ENTRY_SIZE); finalize_ldt_struct(new_ldt); + retval = map_ldt_struct(mm, new_ldt, 0); + if (retval) { + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } mm->context.ldt = new_ldt; out_unlock: @@ -173,13 +295,18 @@ void destroy_context_ldt(struct mm_struct *mm) mm->context.ldt = NULL; } +void ldt_arch_exit_mmap(struct mm_struct *mm) +{ + free_ldt_pgtables(mm); +} + static int read_ldt(void __user *ptr, unsigned long bytecount) { struct mm_struct *mm = current->mm; unsigned long entries_size; int retval; - mutex_lock(&mm->context.lock); + down_read(&mm->context.ldt_usr_sem); if (!mm->context.ldt) { retval = 0; @@ -208,7 +335,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount) retval = bytecount; out_unlock: - mutex_unlock(&mm->context.lock); + up_read(&mm->context.ldt_usr_sem); return retval; } @@ -268,7 +395,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) ldt.avl = 0; } - mutex_lock(&mm->context.lock); + if (down_write_killable(&mm->context.ldt_usr_sem)) + return -EINTR; old_ldt = mm->context.ldt; old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; @@ -285,18 +413,37 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) new_ldt->entries[ldt_info.entry_number] = ldt; finalize_ldt_struct(new_ldt); + /* + * If we are using PTI, map the new LDT into the userspace pagetables. + * If there is already an LDT, use the other slot so that other CPUs + * will continue to use the old LDT until install_ldt() switches + * them over to the new LDT. + */ + error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0); + if (error) { + /* + * This only can fail for the first LDT setup. If an LDT is + * already installed then the PTE page is already + * populated. Mop up a half populated page table. + */ + if (!WARN_ON_ONCE(old_ldt)) + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } + install_ldt(mm, new_ldt); free_ldt_struct(old_ldt); error = 0; out_unlock: - mutex_unlock(&mm->context.lock); + up_write(&mm->context.ldt_usr_sem); out: return error; } -asmlinkage int sys_modify_ldt(int func, void __user *ptr, - unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { int ret = -ENOSYS; @@ -314,5 +461,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr, ret = write_ldt(ptr, bytecount, 0); break; } - return ret; + /* + * The SYSCALL_DEFINE() macros give us an 'unsigned long' + * return type, but tht ABI for sys_modify_ldt() expects + * 'int'. This cast gives us an int-sized value in %rax + * for the return code. The 'unsigned' is necessary so + * the compiler does not try to sign-extend the negative + * return codes into the high half of the register when + * taking the value from int->long. 
+ */ + return (unsigned int)ret; } diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 00bc751c861c..edfede768688 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -48,8 +48,6 @@ static void load_segments(void) "\tmovl $"STR(__KERNEL_DS)",%%eax\n" "\tmovl %%eax,%%ds\n" "\tmovl %%eax,%%es\n" - "\tmovl %%eax,%%fs\n" - "\tmovl %%eax,%%gs\n" "\tmovl %%eax,%%ss\n" : : : "eax", "memory"); #undef STR @@ -232,8 +230,8 @@ void machine_kexec(struct kimage *image) * The gdt & idt are now invalid. * If you want to load them you must set up your own idt & gdt. */ - set_gdt(phys_to_virt(0), 0); idt_invalidate(phys_to_virt(0)); + set_gdt(phys_to_virt(0), 0); /* now call it */ image->start = relocate_kernel_ptr((unsigned long)image->head, diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 1f790cf9d38f..3b7427aa7d85 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -542,6 +542,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: value -= (u64)address; *(u32 *)location = value; break; diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index da0c160e5589..f58336af095c 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: if (*(u32 *)loc != 0) goto invalid_relocation; val -= (u64)loc; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 410c5dadcee3..bc6bc6689e68 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) processor.apicver = mpc_default_type > 4 ? 
0x10 : 0x01; processor.cpuflag = CPU_ENABLED; processor.cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping; processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; processor.reserved[0] = 0; processor.reserved[1] = 0; @@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) } static unsigned long mpf_base; +static bool mpf_found; static unsigned long __init get_mpc_size(unsigned long physptr) { @@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early) if (!smp_found_config) return; - if (!mpf_base) + if (!mpf_found) return; if (acpi_lapic && early) @@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) smp_found_config = 1; #endif mpf_base = base; + mpf_found = true; pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", base, base + sizeof(*mpf) - 1, mpf); @@ -858,7 +860,7 @@ static int __init update_mp_table(void) if (!enable_update_mptable) return 0; - if (!mpf_base) + if (!mpf_found) return 0; mpf = early_memremap(mpf_base, sizeof(*mpf)); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 19a3e8f961c7..1ef5645ae673 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -190,9 +191,9 @@ static void native_flush_tlb_global(void) __native_flush_tlb_global(); } -static void native_flush_tlb_single(unsigned long addr) +static void native_flush_tlb_one_user(unsigned long addr) { - __native_flush_tlb_single(addr); + __native_flush_tlb_one_user(addr); } struct static_key paravirt_steal_enabled; @@ -319,6 +320,9 @@ __visible struct pv_irq_ops pv_irq_ops = { .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), .safe_halt = native_safe_halt, .halt = native_halt, +#ifdef CONFIG_PCI_MSI + .write_msi = native_write_msi_msg, +#endif }; __visible struct pv_cpu_ops pv_cpu_ops = { @@ -367,6 +371,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .start_context_switch = paravirt_nop, .end_context_switch = paravirt_nop, + + .cpu_khz = paravirt_nop, }; /* At this point, native_get/set_debugreg has real function entries */ @@ -391,7 +397,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .flush_tlb_user = native_flush_tlb, .flush_tlb_kernel = native_flush_tlb_global, - .flush_tlb_single = native_flush_tlb_single, + .flush_tlb_one_user = native_flush_tlb_one_user, .flush_tlb_others = native_flush_tlb_others, .pgd_alloc = __paravirt_pgd_alloc, diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index ac0be8283325..9edadabf04f6 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); -DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); @@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, PATCH_SITE(pv_mmu_ops, read_cr2); PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); - PATCH_SITE(pv_mmu_ops, flush_tlb_single); PATCH_SITE(pv_cpu_ops, wbinvd); #if defined(CONFIG_PARAVIRT_SPINLOCKS) case 
PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c67685337c5a..8bd1d8292cf7 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -47,9 +47,25 @@ * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { +__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = { .x86_tss = { - .sp0 = TOP_OF_INIT_STACK, + /* + * .sp0 is only used when entering ring 0 from a lower + * privilege level. Since the init task never runs anything + * but ring 0 code, there is no need for a valid value here. + * Poison it. + */ + .sp0 = (1UL << (BITS_PER_LONG-1)) + 1, + +#ifdef CONFIG_X86_64 + /* + * .sp1 is cpu_current_top_of_stack. The init task never + * runs user code, but cpu_current_top_of_stack should still + * be well defined before the first context switch. + */ + .sp1 = TOP_OF_INIT_STACK, +#endif + #ifdef CONFIG_X86_32 .ss0 = __KERNEL_DS, .ss1 = __KERNEL_CS, @@ -65,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { */ .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, #endif -#ifdef CONFIG_X86_32 - .SYSENTER_stack_canary = STACK_END_MAGIC, -#endif }; -EXPORT_PER_CPU_SYMBOL(cpu_tss); +EXPORT_PER_CPU_SYMBOL(cpu_tss_rw); DEFINE_PER_CPU(bool, __tss_limit_invalid); EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); @@ -98,7 +111,7 @@ void exit_thread(struct task_struct *tsk) struct fpu *fpu = &t->fpu; if (bp) { - struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu()); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu()); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); @@ -367,19 +380,24 @@ void stop_this_cpu(void *dummy) disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); + /* + * Use wbinvd on processors that support SME. This provides support + * for performing a successful kexec when going from SME inactive + * to SME active (or vice-versa). The cache must be cleared so that + * if there are entries with the same physical address, both with and + * without the encryption bit, they don't race each other when flushed + * and potentially end up with the wrong entry being committed to + * memory. + */ + if (boot_cpu_has(X86_FEATURE_SME)) + native_wbinvd(); for (;;) { /* - * Use wbinvd followed by hlt to stop the processor. This - * provides support for kexec on a processor that supports - * SME. With kexec, going from SME inactive to SME active - * requires clearing cache entries so that addresses without - * the encryption bit set don't corrupt the same physical - * address that has the encryption bit set when caches are - * flushed. To achieve this a wbinvd is performed followed by - * a hlt. Even if the processor is not in the kexec/SME - * scenario this only adds a wbinvd to a halting processor. + * Use native_halt() so that memory contents don't change + * (stack usage and variables) after possibly issuing the + * native_wbinvd() above. 
*/ - asm volatile("wbinvd; hlt" : : : "memory"); + native_halt(); } } diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 11966251cd42..5224c6099184 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -284,9 +284,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Reload esp0 and cpu_current_top_of_stack. This changes - * current_thread_info(). + * current_thread_info(). Refresh the SYSENTER configuration in + * case prev or next is vm86. */ - load_sp0(tss, next); + update_sp0(next_p); + refresh_sysenter_cs(next); this_cpu_write(cpu_current_top_of_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 302e7b2572d1..9eb448c7859d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all) unsigned int fsindex, gsindex; unsigned int ds, cs, es; - printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip); - printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss, - regs->sp, regs->flags); + show_iret_regs(regs); + if (regs->orig_ax != -1) pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); else @@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all) printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", regs->r13, regs->r14, regs->r15); + if (!all) + return; + asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%cs,%0" : "=r" (cs)); asm("movl %%es,%0" : "=r" (es)); @@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all) rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); - if (!all) - return; - cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); @@ -274,7 +273,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct inactive_task_frame *frame; struct task_struct *me = current; - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; @@ -401,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); @@ -463,9 +461,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * Switch the PDA and FPU contexts. */ this_cpu_write(current_task, next_p); + this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); - /* Reload esp0 and ss1. This changes current_thread_info(). */ - load_sp0(tss, next); + /* Reload sp0. */ + update_sp0(next_p); /* * Now maybe reload the debug registers and handle I/O bitmaps @@ -558,7 +557,7 @@ static void __set_personality_x32(void) * Pretend to come from a x32 execve. 
*/ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; - current->thread.status &= ~TS_COMPAT; + current_thread_info()->status &= ~TS_COMPAT; #endif } @@ -572,7 +571,7 @@ static void __set_personality_ia32(void) current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; - current->thread.status |= TS_COMPAT; + current_thread_info()->status |= TS_COMPAT; #endif } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index f37d18124648..ed5c4cdf0a34 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -935,7 +935,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value) */ regs->orig_ax = value; if (syscall_get_nr(child, regs) >= 0) - child->thread.status |= TS_I386_REGS_POKED; + child->thread_info.status |= TS_I386_REGS_POKED; break; case offsetof(struct user32, regs.eflags): diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 307d3bac5f04..11eda21eb697 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -68,6 +68,9 @@ relocate_kernel: movq %cr4, %rax movq %rax, CR4(%r11) + /* Save CR4. Required to enable the right paging mode later. */ + movq %rax, %r13 + /* zero out flags, and disable interrupts */ pushq $0 popfq @@ -126,8 +129,13 @@ identity_mapped: /* * Set cr4 to a known state: * - physical address extension enabled + * - 5-level paging, if it was enabled before */ movl $X86_CR4_PAE, %eax + testq $X86_CR4_LA57, %r13 + jz 1f + orl $X86_CR4_LA57, %eax +1: movq %rax, %cr4 jmp 1f diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0957dd73d127..efbcf5283520 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -376,14 +376,6 @@ static void __init reserve_initrd(void) !ramdisk_image || !ramdisk_size) return; /* No initrd provided by bootloader */ - /* - * If SME is active, this memory will be marked encrypted by the - * kernel when it is accessed (including relocation). However, the - * ramdisk image was loaded decrypted by the bootloader, so make - * sure that it is encrypted before accessing it. - */ - sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image); - initrd_start = 0; mapped_size = memblock_mem_size(max_pfn_mapped); @@ -936,9 +928,6 @@ void __init setup_arch(char **cmdline_p) set_bit(EFI_BOOT, &efi.flags); set_bit(EFI_64BIT, &efi.flags); } - - if (efi_enabled(EFI_BOOT)) - efi_memblock_x86_reserve_range(); #endif x86_init.oem.arch_setup(); @@ -992,6 +981,8 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + if (efi_enabled(EFI_BOOT)) + efi_memblock_x86_reserve_range(); #ifdef CONFIG_MEMORY_HOTPLUG /* * Memory used by the kernel cannot be hot-removed because Linux @@ -1247,20 +1238,13 @@ void __init setup_arch(char **cmdline_p) kasan_init(); -#ifdef CONFIG_X86_32 - /* sync back kernel address range */ - clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - KERNEL_PGD_PTRS); - /* - * sync back low identity map too. It is used for example - * in the 32-bit EFI stub. + * Sync back kernel address range. + * + * FIXME: Can the later sync in setup_cpu_entry_areas() replace + * this call? 
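+ * + * Rough sketch only, based on the 32-bit code removed just below: the helper is expected to cover the equivalent of + * + * clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, + * swapper_pg_dir + KERNEL_PGD_BOUNDARY, + * KERNEL_PGD_PTRS); + * clone_pgd_range(initial_page_table, + * swapper_pg_dir + KERNEL_PGD_BOUNDARY, + * min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));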
*/ - clone_pgd_range(initial_page_table, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); -#endif + sync_initial_page_table(); tboot_probe(); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 497aa766fab3..ea554f812ee1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void) /* Setup cpu initialized, callin, callout masks */ setup_cpu_local_masks(); -#ifdef CONFIG_X86_32 /* * Sync back kernel address range again. We already did this in * setup_arch(), but percpu data also needs to be available in * the smpboot asm. We can't reliably pick up percpu mappings * using vmalloc_fault(), because exception dispatch needs * percpu data. + * + * FIXME: Can the later sync in setup_cpu_entry_areas() replace + * this call? */ - clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - KERNEL_PGD_PTRS); - - /* - * sync back low identity map too. It is used for example - * in the 32-bit EFI stub. - */ - clone_pgd_range(initial_page_table, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); -#endif + sync_initial_page_table(); } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b9e00e8f1c9b..4cdc0b27ec82 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -787,7 +787,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) * than the tracee. */ #ifdef CONFIG_IA32_EMULATION - if (current->thread.status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 5c574dff4c1a..2f7ab6eb647d 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -117,6 +117,19 @@ static atomic_t stopping_cpu = ATOMIC_INIT(-1); static bool smp_no_nmi_ipi = false; +static DEFINE_PER_CPU(struct pt_regs, cpu_regs); + +/* Store regs of this CPU for RAM dump decoding help */ +static inline void store_regs(struct pt_regs *regs) +{ + struct pt_regs *print_regs; + print_regs = &get_cpu_var(cpu_regs); + crash_setup_regs(print_regs, regs); + + /* Flush CPU cache */ + wbinvd(); +} + /* * this function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -163,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); @@ -173,9 +187,10 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) * this function calls the 'stop' function on all other CPUs in the system. 
*/ -asmlinkage __visible void smp_reboot_interrupt(void) +__visible void smp_reboot_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); irq_exit(); @@ -247,6 +262,7 @@ static void native_stop_other_cpus(int wait) } finish: + store_regs(NULL); local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 65a0ccdc3050..2651ca2112c4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -128,25 +128,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(0xa, 0xf); spin_unlock_irqrestore(&rtc_lock, flags); - local_flush_tlb(); - pr_debug("1.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4; - pr_debug("2.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf; - pr_debug("3.\n"); } static inline void smpboot_restore_warm_reset_vector(void) { unsigned long flags; - /* - * Install writable page 0 entry to set BIOS data area. - */ - local_flush_tlb(); - /* * Paranoid: Set warm reset code and vector here back * to default values. @@ -239,7 +230,7 @@ static void notrace start_secondary(void *unused) load_cr3(swapper_pg_dir); __flush_tlb_all(); #endif - + load_current_idt(); cpu_init(); x86_cpuinit.early_percpu_clock_init(); preempt_disable(); @@ -962,8 +953,7 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle) #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); - per_cpu(cpu_current_top_of_stack, cpu) = - (unsigned long)task_stack_page(idle) + THREAD_SIZE; + per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle); #else initial_gs = per_cpu_offset(cpu); #endif @@ -991,12 +981,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, initial_code = (unsigned long)start_secondary; initial_stack = idle->thread.sp; - /* - * Enable the espfix hack for this CPU - */ -#ifdef CONFIG_X86_ESPFIX64 + /* Enable the espfix hack for this CPU */ init_espfix_ap(cpu); -#endif /* So we see what's up */ announce_cpu(cpu, apicid); diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 8dabd7bf1673..4565f31bd398 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -98,7 +98,7 @@ static int __save_stack_trace_reliable(struct stack_trace *trace, for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); unwind_next_frame(&state)) { - regs = unwind_get_entry_regs(&state); + regs = unwind_get_entry_regs(&state, NULL); if (regs) { /* * Kernel mode registers on the stack indicate an @@ -160,8 +160,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk, { int ret; + /* + * If the task doesn't have a stack (e.g., a zombie), the stack is + * "reliably" empty. + */ if (!try_get_task_stack(tsk)) - return -EINVAL; + return 0; ret = __save_stack_trace_reliable(trace, tsk); diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index a4eb27918ceb..a2486f444073 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, return -1; set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); pte_unmap(pte); + + /* + * PTI poisons low addresses in the kernel page tables in the + * name of making them unusable for userspace. 
To execute + * code at such a low address, the poison must be cleared. + * + * Note: 'pgd' actually gets set in p4d_alloc() _or_ + * pud_alloc() depending on 4/5-level paging. + */ + pgd->pgd &= ~_PAGE_NX; + return 0; } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 9a9c9b076955..a5b802a12212 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx, cpu = get_cpu(); while (n-- > 0) { - if (LDT_empty(info) || LDT_zero(info)) { + if (LDT_empty(info) || LDT_zero(info)) memset(desc, 0, sizeof(*desc)); - } else { + else fill_ldt(desc, info); - - /* - * Always set the accessed bit so that the CPU - * doesn't try to write to the (read-only) GDT. - */ - desc->type |= 1; - } ++info; ++desc; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5a6b8f809792..23be58eb5608 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -42,7 +42,6 @@ #include #endif -#include #include #include #include @@ -52,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +60,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include @@ -141,8 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) * will catch asm bugs and any attempt to use ist_preempt_enable * from double_fault. */ - BUG_ON((unsigned long)(current_top_of_stack() - - current_stack_pointer) >= THREAD_SIZE); + BUG_ON(!on_thread_stack()); preempt_enable_no_resched(); } @@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr) break; case BUG_TRAP_TYPE_WARN: - regs->ip += LEN_UD0; + regs->ip += LEN_UD2; return 1; } @@ -349,23 +349,42 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) /* * If IRET takes a non-IST fault on the espfix64 stack, then we - * end up promoting it to a doublefault. In that case, modify - * the stack to make it look like we just entered the #GP - * handler from user space, similar to bad_iret. + * end up promoting it to a doublefault. In that case, take + * advantage of the fact that we're not using the normal (TSS.sp0) + * stack right now. We can write a fake #GP(0) frame at TSS.sp0 + * and then modify our own IRET frame so that, when we return, + * we land directly at the #GP(0) vector with the stack already + * set up according to its expectations. + * + * The net result is that our #GP handler will think that we + * entered from usermode with the bad user context. * * No need for ist_enter here because we don't use RCU. */ - if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { - struct pt_regs *normal_regs = task_pt_regs(current); + struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; + + /* + * regs->sp points to the failing IRET frame on the + * ESPFIX64 stack. Copy it to the entry stack. This fills + * in gpregs->ss through gpregs->ip. + * + */ + memmove(&gpregs->ip, (void *)regs->sp, 5*8); + gpregs->orig_ax = 0; /* Missing (lost) #GP error code */ - /* Fake a #GP(0) from userspace. */ - memmove(&normal_regs->ip, (void *)regs->sp, 5*8); - normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ + /* + * Adjust our frame so that we return straight to the #GP + * vector with the expected RSP value. 
This is safe because + we won't enable interrupts or schedule before we invoke + general_protection, so nothing will clobber the stack + frame we just set up. + */ regs->ip = (unsigned long)general_protection; - regs->sp = (unsigned long)&normal_regs->orig_ax; + regs->sp = (unsigned long)&gpregs->orig_ax; return; } @@ -390,7 +409,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) * * Processors update CR2 whenever a page fault is detected. If a * second page fault occurs while an earlier page fault is being - * deliv- ered, the faulting linear address of the second fault will + * delivered, the faulting linear address of the second fault will * overwrite the contents of CR2 (replacing the previous * address). These updates to CR2 occur even if the page fault * results in a double fault or occurs during the delivery of a @@ -518,6 +537,10 @@ do_general_protection(struct pt_regs *regs, long error_code) RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); cond_local_irq_enable(regs); + if (static_cpu_has(X86_FEATURE_UMIP)) + if (user_mode(regs) && fixup_umip_exception(regs)) + return; + if (v8086_mode(regs)) { local_irq_enable(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); @@ -553,7 +576,6 @@ do_general_protection(struct pt_regs *regs, long error_code) } NOKPROBE_SYMBOL(do_general_protection); -/* May run on IST stack. */ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_DYNAMIC_FTRACE @@ -568,6 +590,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) if (poke_int3_handler(regs)) return; + /* + * Use ist_enter despite the fact that we don't use an IST stack. + * We can be called from a kprobe in non-CONTEXT_KERNEL kernel + * mode or even during context tracking state changes. + * + * This means that we can't schedule. That's okay. + */ ist_enter(regs); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP @@ -585,15 +614,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) SIGTRAP) == NOTIFY_STOP) goto exit; - /* - * Let others (NMI) know that the debug stack is in use - * as we may switch to the interrupt stack. - */ - debug_stack_usage_inc(); cond_local_irq_enable(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); cond_local_irq_disable(regs); - debug_stack_usage_dec(); + exit: ist_exit(regs); } @@ -601,14 +625,15 @@ NOKPROBE_SYMBOL(do_int3); #ifdef CONFIG_X86_64 /* - * Help handler running on IST stack to switch off the IST stack if the - * interrupted code was in user mode. The actual stack switch is done in - * entry_64.S + * Help handler running on a per-cpu (IST or entry trampoline) stack + * to switch to the normal thread stack if the interrupted code was in + * user mode. The actual stack switch is done in entry_64.S */ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) { - struct pt_regs *regs = task_pt_regs(current); - *regs = *eregs; + struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1; + if (regs != eregs) + *regs = *eregs; return regs; } NOKPROBE_SYMBOL(sync_regs); @@ -624,13 +649,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) /* * This is called from entry_64.S early in handling a fault * caused by a bad iret to user mode.
To handle the fault - * correctly, we want move our stack frame to task_pt_regs - * and we want to pretend that the exception came from the - * iret target. + * correctly, we want to move our stack frame to where it would + * be had we entered directly on the entry stack (rather than + * just below the IRET frame) and we want to pretend that the + * exception came from the IRET target. */ struct bad_iret_stack *new_stack = - container_of(task_pt_regs(current), - struct bad_iret_stack, regs); + (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* Copy the IRET target to the new stack. */ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); @@ -744,10 +769,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) if (!dr6 && user_mode(regs)) user_icebp = 1; - /* Catch kmemcheck conditions! */ - if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) - goto exit; - /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; @@ -795,14 +816,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: -#if defined(CONFIG_X86_32) - /* - * This is the most likely code path that involves non-trivial use - * of the SYSENTER stack. Check that we haven't overrun it. - */ - WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC, - "Overran or corrupted SYSENTER stack\n"); -#endif ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); @@ -929,6 +942,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) void __init trap_init(void) { + /* Init cpu_entry_area before IST entries are set up */ + setup_cpu_entry_areas(); + idt_setup_traps(); /* @@ -936,8 +952,9 @@ void __init trap_init(void) * "sidt" instruction will not leak the location of the kernel, and * to defend the IDT against arbitrary memory write vulnerabilities. * It will be reloaded in cpu_init() */ - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); - idt_descr.address = fix_to_virt(FIX_RO_IDT); + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), + PAGE_KERNEL_RO); + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; /* * Should be a barrier for any external CPU state: diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ad2b925a808e..9cef315a29ea 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void) case INTEL_FAM6_KABYLAKE_DESKTOP: crystal_khz = 24000; /* 24.0 MHz */ break; - case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_ATOM_DENVERTON: crystal_khz = 25000; /* 25.0 MHz */ break; @@ -612,6 +611,8 @@ unsigned long native_calibrate_tsc(void) } } + if (crystal_khz == 0) + return 0; /* * TSC frequency determined by CPUID is a "hardware reported" * frequency and is the most accurate one so far we have. This @@ -656,6 +657,10 @@ unsigned long native_calibrate_cpu(void) unsigned long flags, latch, ms, fast_calibrate; int hpet = is_hpet_enabled(), i, loopmin; + fast_calibrate = cpu_khz_from_paravirt(); + if (fast_calibrate) + return fast_calibrate; + fast_calibrate = cpu_khz_from_cpuid(); if (fast_calibrate) return fast_calibrate; diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c new file mode 100644 index 000000000000..6e38b8f5d305 --- /dev/null +++ b/arch/x86/kernel/umip.c @@ -0,0 +1,344 @@ +/* + * umip.c Emulation for instruction protected by the Intel User-Mode + * Instruction Prevention feature + * + * Copyright (c) 2017, Intel Corporation. 
+ * Ricardo Neri + */ + +#include +#include +#include +#include +#include +#include + +/** DOC: Emulation for User-Mode Instruction Prevention (UMIP) + * + * The feature User-Mode Instruction Prevention present in recent Intel + * processors prevents a group of instructions (sgdt, sidt, sldt, smsw, and str) + * from being executed with CPL > 0. If they are attempted, a general protection fault is + * issued. + * + * Rather than relaying to user space the general protection fault caused by + * the UMIP-protected instructions (in the form of a SIGSEGV signal), the fault can be + * trapped and the result of such instructions emulated to provide dummy values. + * This allows us to both conserve the current kernel behavior and avoid revealing the + * system resources that UMIP intends to protect (i.e., the locations of the + * global descriptor and interrupt descriptor tables, the segment selectors of + * the local descriptor table, the value of the task state register and the + * contents of the CR0 register). + * + * This emulation is needed because certain applications (e.g., WineHQ and + * DOSEMU2) rely on this subset of instructions to function. + * + * The instructions protected by UMIP can be split into two groups: those which + * return a kernel memory address (sgdt and sidt) and those which return a + * value (sldt, str and smsw). + * + * For the instructions that return a kernel memory address, applications + * such as WineHQ rely on the result being located in the kernel memory space, + * not the actual location of the table. The result is emulated as a hard-coded + * value that lies close to the top of the kernel memory. The limits for the GDT + * and the IDT are set to zero. + * + * Given that sldt and str are not commonly used in programs that run on WineHQ + * or DOSEMU2, they are not emulated. + * + * The instruction smsw is emulated to return the value that the register CR0 + * has at boot time, as set in head_32. + * + * Also, emulation is provided only for 32-bit processes; 64-bit processes + * that attempt to use the instructions that UMIP protects will receive the + * SIGSEGV signal issued as a consequence of the general protection fault. + * + * Care is taken to appropriately emulate the results when segmentation is + * used. That is, rather than relying on USER_DS and USER_CS, the function + * insn_get_addr_ref() inspects the segment descriptor pointed to by the + * registers in pt_regs. This ensures that we correctly obtain the segment + * base address and the address and operand sizes even if the user space + * application uses a local descriptor table. + */ + +#define UMIP_DUMMY_GDT_BASE 0xfffe0000 +#define UMIP_DUMMY_IDT_BASE 0xffff0000 + +/* + * The SGDT and SIDT instructions store the contents of the global descriptor + * table and interrupt table registers, respectively. The destination is a + * memory operand of X+2 bytes. X bytes are used to store the base address of + * the table and 2 bytes are used to store the limit. In 32-bit processes, the + * only processes for which emulation is provided, X has a value of 4. + */ +#define UMIP_GDT_IDT_BASE_SIZE 4 +#define UMIP_GDT_IDT_LIMIT_SIZE 2 + +#define UMIP_INST_SGDT 0 /* 0F 01 /0 */ +#define UMIP_INST_SIDT 1 /* 0F 01 /1 */ +#define UMIP_INST_SMSW 3 /* 0F 01 /4 */ + +/** + * identify_insn() - Identify a UMIP-protected instruction + * @insn: Instruction structure with opcode and ModRM byte. + * + * From the instruction opcode and the reg part of the ModRM byte, identify, + * if any, a UMIP-protected instruction.
+ * + * Return: a constant that identifies a specific UMIP-protected instruction. + * -EINVAL when not a UMIP-protected instruction. + */ +static int identify_insn(struct insn *insn) +{ + /* By getting modrm we also get the opcode. */ + insn_get_modrm(insn); + + /* All the instructions of interest start with 0x0f. */ + if (insn->opcode.bytes[0] != 0xf) + return -EINVAL; + + if (insn->opcode.bytes[1] == 0x1) { + switch (X86_MODRM_REG(insn->modrm.value)) { + case 0: + return UMIP_INST_SGDT; + case 1: + return UMIP_INST_SIDT; + case 4: + return UMIP_INST_SMSW; + default: + return -EINVAL; + } + } + /* SLDT and STR are not emulated */ + return -EINVAL; +} + +/** + * emulate_umip_insn() - Emulate UMIP instructions with dummy values + * @insn: Instruction structure with operands + * @umip_inst: Instruction to emulate + * @data: Buffer into which the dummy values will be copied + * @data_size: Size of the emulated result + * + * Emulate an instruction protected by UMIP. The result of the emulation + * is saved in the provided buffer. The size of the result depends on both + * the instruction and type of operand (register vs memory address). Thus, + * the size of the result needs to be updated. + * + * Return: 0 on success, -EINVAL on error while emulating. + */ +static int emulate_umip_insn(struct insn *insn, int umip_inst, + unsigned char *data, int *data_size) +{ + unsigned long dummy_base_addr, dummy_value; + unsigned short dummy_limit = 0; + + if (!data || !data_size || !insn) + return -EINVAL; + /* + * These two instructions return the base address and limit of the + * global and interrupt descriptor tables, respectively. According to the + * Intel Software Developer's Manual, the base address can be 24-bit, + * 32-bit or 64-bit. Limit is always 16-bit. If the operand size is + * 16-bit, the returned value of the base address is supposed to be a + * zero-extended 24-bit number. However, it seems that a 32-bit number + * is always returned irrespective of the operand size. + */ + + if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) { + /* SGDT and SIDT do not use register operands. */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + return -EINVAL; + + if (umip_inst == UMIP_INST_SGDT) + dummy_base_addr = UMIP_DUMMY_GDT_BASE; + else + dummy_base_addr = UMIP_DUMMY_IDT_BASE; + + *data_size = UMIP_GDT_IDT_LIMIT_SIZE + UMIP_GDT_IDT_BASE_SIZE; + + memcpy(data + 2, &dummy_base_addr, UMIP_GDT_IDT_BASE_SIZE); + memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); + + } else if (umip_inst == UMIP_INST_SMSW) { + dummy_value = CR0_STATE; + + /* + * Even though the CR0 register has 4 bytes, the number + * of bytes to be copied in the result buffer is determined + * by whether the operand is a register or a memory location. + * If the operand is a register, return as many bytes as the operand + * size. If the operand is memory, return only the two least + * significant bytes of CR0. + */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + *data_size = insn->opnd_bytes; + else + *data_size = 2; + + memcpy(data, &dummy_value, *data_size); + /* STR and SLDT are not emulated */ + } else { + return -EINVAL; + } + + return 0; +} + +/** + * force_sig_info_umip_fault() - Force a SIGSEGV with SEGV_MAPERR + * @addr: Address that caused the signal + * @regs: Register set containing the instruction pointer + * + * Force a SIGSEGV signal with SEGV_MAPERR as the error code.
This function is + * intended to be used to provide a segmentation fault when the result of the + * UMIP emulation could not be copied to user space memory. + * + * Return: none + */ +static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) +{ + siginfo_t info; + struct task_struct *tsk = current; + + tsk->thread.cr2 = (unsigned long)addr; + tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE; + tsk->thread.trap_nr = X86_TRAP_PF; + + info.si_signo = SIGSEGV; + info.si_errno = 0; + info.si_code = SEGV_MAPERR; + info.si_addr = addr; + force_sig_info(SIGSEGV, &info, tsk); + + if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV))) + return; + + pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n", + tsk->comm, task_pid_nr(tsk), regs->ip, + regs->sp, X86_PF_USER | X86_PF_WRITE, + regs->ip); +} + +/** + * fixup_umip_exception() - Fixup #GP faults caused by UMIP + * @regs: Registers as saved when entering the #GP trap + * + * The instructions sgdt, sidt, str, smsw, sldt cause a general protection + * fault if executed with CPL > 0 (i.e., from user space). If the offending + * user-space process is 32-bit, this function fixes the exception up and + * provides dummy values for sgdt, sidt and smsw; str and sldt are not + * fixed up. 64-bit user-space processes are not fixed up either. + * + * If operands are memory addresses, results are copied to user-space + * memory as indicated by the instruction pointed to by EIP, using the + * registers indicated in the instruction operands. If operands are registers, + * results are copied into the context that was saved when entering kernel mode. + * + * Return: true if emulation was successful; false if not. + */ +bool fixup_umip_exception(struct pt_regs *regs) +{ + int not_copied, nr_copied, reg_offset, dummy_data_size, umip_inst; + /* 10 bytes is the maximum size of the result of UMIP instructions */ + unsigned char dummy_data[10] = { 0 }; + unsigned long seg_base, *reg_addr; + unsigned char buf[MAX_INSN_SIZE]; + void __user *uaddr; + struct insn insn; + char seg_defs; + + /* Do not emulate 64-bit processes. */ + if (user_64bit_mode(regs)) + return false; + + /* + * Use the segment base in case user space used a different code + * segment, either in protected (e.g., from an LDT), virtual-8086 + * or long (via the FS or GS registers) modes. In most cases + * seg_base will be zero, as in USER_CS. + */ + seg_base = insn_get_seg_base(regs, NULL, offsetof(struct pt_regs, ip)); + if (seg_base == -1L) + return false; + + not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip), + sizeof(buf)); + nr_copied = sizeof(buf) - not_copied; + + /* + * The copy_from_user above could have failed if user code is protected + * by a memory protection key. Give up on emulation in such a case. + * Should we issue a page fault? + */ + if (!nr_copied) + return false; + + insn_init(&insn, buf, nr_copied, user_64bit_mode(regs)); + + /* + * Override the default operand and address sizes with what is specified + * in the code segment descriptor. The instruction decoder only sets + * the address size to either 4 or 8 bytes and does nothing + * for the operand bytes. This is OK for most cases, but we could + * have special cases where, for instance, a 16-bit code segment + * descriptor is used. + * If there is an address override prefix, the instruction decoder + * correctly updates these values, even for 16-bit defaults.
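+ * + * For illustration only (the exact encoding is private to + * insn_get_code_seg_defaults()): with a 16-bit protected-mode code segment, the + * overrides below should end up as insn.addr_bytes = 2 and insn.opnd_bytes = 2 + * instead of the decoder's 4-byte defaults.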
+ */ + seg_defs = insn_get_code_seg_defaults(regs); + if (seg_defs == -EINVAL) + return false; + + insn.addr_bytes = (unsigned char)INSN_CODE_SEG_ADDR_SZ(seg_defs); + insn.opnd_bytes = (unsigned char)INSN_CODE_SEG_OPND_SZ(seg_defs); + + insn_get_length(&insn); + if (nr_copied < insn.length) + return false; + + umip_inst = identify_insn(&insn); + if (umip_inst < 0) + return false; + + if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size)) + return false; + + /* + * If operand is a register, write result to the copy of the register + * value that was pushed to the stack when entering into kernel mode. + * Upon exit, the value we write will be restored to the actual hardware + * register. + */ + if (X86_MODRM_MOD(insn.modrm.value) == 3) { + reg_offset = insn_get_modrm_rm_off(&insn, regs); + + /* + * Negative values are usually errors. In memory addressing, + * the exception is -EDOM. Since we expect a register operand, + * all negative values are errors. + */ + if (reg_offset < 0) + return false; + + reg_addr = (unsigned long *)((unsigned long)regs + reg_offset); + memcpy(reg_addr, dummy_data, dummy_data_size); + } else { + uaddr = insn_get_addr_ref(&insn, regs); + if ((unsigned long)uaddr == -1L) + return false; + + nr_copied = copy_to_user(uaddr, dummy_data, dummy_data_size); + if (nr_copied > 0) { + /* + * If copy fails, send a signal and tell caller that + * fault was fixed up. + */ + force_sig_info_umip_fault(uaddr, regs); + return true; + } + } + + /* increase IP to let the program keep going */ + regs->ip += insn.length; + return true; +} diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index a3f973b2c97a..be86a865087a 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) return NULL; } -static bool stack_access_ok(struct unwind_state *state, unsigned long addr, +static bool stack_access_ok(struct unwind_state *state, unsigned long _addr, size_t len) { struct stack_info *info = &state->stack_info; + void *addr = (void *)_addr; - /* - * If the address isn't on the current stack, switch to the next one. - * - * We may have to traverse multiple stacks to deal with the possibility - * that info->next_sp could point to an empty stack and the address - * could be on a subsequent stack. - */ - while (!on_stack(info, (void *)addr, len)) - if (get_stack_info(info->next_sp, state->task, info, - &state->stack_mask)) - return false; + if (!on_stack(info, addr, len) && + (get_stack_info(addr, state->task, info, &state->stack_mask))) + return false; return true; } @@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr, return true; } -#define REGS_SIZE (sizeof(struct pt_regs)) -#define SP_OFFSET (offsetof(struct pt_regs, sp)) -#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip)) -#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip)) - static bool deref_stack_regs(struct unwind_state *state, unsigned long addr, - unsigned long *ip, unsigned long *sp, bool full) + unsigned long *ip, unsigned long *sp) { - size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE; - size_t sp_offset = full ? 
SP_OFFSET : IRET_SP_OFFSET; - struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE); - - if (IS_ENABLED(CONFIG_X86_64)) { - if (!stack_access_ok(state, addr, regs_size)) - return false; + struct pt_regs *regs = (struct pt_regs *)addr; - *ip = regs->ip; - *sp = regs->sp; + /* x86-32 support will be more complicated due to the &regs->sp hack */ + BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32)); - return true; - } - - if (!stack_access_ok(state, addr, sp_offset)) + if (!stack_access_ok(state, addr, sizeof(struct pt_regs))) return false; *ip = regs->ip; + *sp = regs->sp; + return true; +} - if (user_mode(regs)) { - if (!stack_access_ok(state, addr + sp_offset, - REGS_SIZE - SP_OFFSET)) - return false; +static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr, + unsigned long *ip, unsigned long *sp) +{ + struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET; - *sp = regs->sp; - } else - *sp = (unsigned long)&regs->sp; + if (!stack_access_ok(state, addr, IRET_FRAME_SIZE)) + return false; + *ip = regs->ip; + *sp = regs->sp; return true; } @@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state) unsigned long ip_p, sp, orig_ip, prev_sp = state->sp; enum stack_type prev_type = state->stack_info.type; struct orc_entry *orc; - struct pt_regs *ptregs; bool indirect = false; if (unwind_done(state)) @@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_TYPE_REGS: - if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { + if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { orc_warn("can't dereference registers at %p for ip %pB\n", (void *)sp, (void *)orig_ip); goto done; @@ -447,20 +429,14 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_TYPE_REGS_IRET: - if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { + if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { orc_warn("can't dereference iret registers at %p for ip %pB\n", (void *)sp, (void *)orig_ip); goto done; } - ptregs = container_of((void *)sp, struct pt_regs, ip); - if ((unsigned long)ptregs >= prev_sp && - on_stack(&state->stack_info, ptregs, REGS_SIZE)) { - state->regs = ptregs; - state->full_regs = false; - } else - state->regs = NULL; - + state->regs = (void *)sp - IRET_FRAME_OFFSET; + state->full_regs = false; state->signal = true; break; @@ -553,8 +529,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, } if (get_stack_info((unsigned long *)state->sp, state->task, - &state->stack_info, &state->stack_mask)) - return; + &state->stack_info, &state->stack_mask)) { + /* + * We weren't on a valid stack. It's possible that + * we overflowed a valid stack into a guard page. + * See if the next page up is valid so that we can + * generate some kind of backtrace if this happens.
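+ * For example, if state->sp overflowed into the single guard page sitting just + * below a vmapped stack, PAGE_ALIGN(state->sp) used below should land on the + * lowest byte of the valid stack above it (an illustrative assumption about the + * guard page layout, not additional behaviour).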
+ */ + void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); + if (get_stack_info(next_page, state->task, &state->stack_info, + &state->stack_mask)) + return; + } /* * The caller can provide the address of the first frame directly diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 495c776de4b4..a3755d293a48 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -271,12 +271,15 @@ static bool is_prefix_bad(struct insn *insn) int i; for (i = 0; i < insn->prefixes.nbytes; i++) { - switch (insn->prefixes.bytes[i]) { - case 0x26: /* INAT_PFX_ES */ - case 0x2E: /* INAT_PFX_CS */ - case 0x36: /* INAT_PFX_DS */ - case 0x3E: /* INAT_PFX_SS */ - case 0xF0: /* INAT_PFX_LOCK */ + insn_attr_t attr; + + attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + switch (attr) { + case INAT_MAKE_PREFIX(INAT_PFX_ES): + case INAT_MAKE_PREFIX(INAT_PFX_CS): + case INAT_MAKE_PREFIX(INAT_PFX_DS): + case INAT_MAKE_PREFIX(INAT_PFX_SS): + case INAT_MAKE_PREFIX(INAT_PFX_LOCK): return true; } } diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 014ea59aa153..3d3c2f71f617 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -33,7 +33,7 @@ #include #include -verify_cpu: +ENTRY(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf @@ -139,3 +139,4 @@ verify_cpu: popf # Restore caller passed flags xorl %eax, %eax ret +ENDPROC(verify_cpu) diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 68244742ecb0..9d0b5af7db91 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -55,6 +55,7 @@ #include #include #include +#include /* * Known problems: @@ -94,7 +95,6 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) { - struct tss_struct *tss; struct task_struct *tsk = current; struct vm86plus_struct __user *user; struct vm86 *vm86 = current->thread.vm86; @@ -146,12 +146,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) do_exit(SIGSEGV); } - tss = &per_cpu(cpu_tss, get_cpu()); + preempt_disable(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, &tsk->thread); + update_sp0(tsk); + refresh_sysenter_cs(&tsk->thread); vm86->saved_sp0 = 0; - put_cpu(); + preempt_enable(); memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); @@ -237,7 +238,6 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { - struct tss_struct *tss; struct task_struct *tsk = current; struct vm86 *vm86 = tsk->thread.vm86; struct kernel_vm86_regs vm86regs; @@ -365,15 +365,17 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) vm86->saved_sp0 = tsk->thread.sp0; lazy_save_gs(vm86->regs32.gs); - tss = &per_cpu(cpu_tss, get_cpu()); /* make room for real-mode segments */ + preempt_disable(); tsk->thread.sp0 += 16; - if (static_cpu_has(X86_FEATURE_SEP)) + if (static_cpu_has(X86_FEATURE_SEP)) { tsk->thread.sysenter_cs = 0; + refresh_sysenter_cs(&tsk->thread); + } - load_sp0(tss, &tsk->thread); - put_cpu(); + update_sp0(tsk); + preempt_enable(); if (vm86->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); @@ -725,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) return; check_vip: - if (VEFLAGS & X86_EFLAGS_VIP) { + if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == + (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { save_v86_state(regs, VM86_STI); return; } diff --git
a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index a4009fb9be87..b854ebf5851b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -61,11 +61,17 @@ jiffies_64 = jiffies; . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; +#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); +#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); + #else #define X64_ALIGN_RODATA_BEGIN #define X64_ALIGN_RODATA_END +#define ALIGN_ENTRY_TEXT_BEGIN +#define ALIGN_ENTRY_TEXT_END + #endif PHDRS { @@ -102,11 +108,30 @@ SECTIONS CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT + ALIGN_ENTRY_TEXT_BEGIN ENTRY_TEXT IRQENTRY_TEXT + ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) + +#ifdef CONFIG_X86_64 + . = ALIGN(PAGE_SIZE); + VMLINUX_SYMBOL(__entry_trampoline_start) = .; + _entry_trampoline = .; + *(.entry_trampoline) + . = ALIGN(PAGE_SIZE); + VMLINUX_SYMBOL(__entry_trampoline_end) = .; + ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); +#endif + +#ifdef CONFIG_RETPOLINE + __indirect_thunk_start = .; + *(.text.__x86.indirect_thunk) + __indirect_thunk_end = .; +#endif + /* End of text section */ _etext = .; } :text = 0x9090 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index a088b2c47f73..5b2d10c1973a 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -28,6 +28,8 @@ void x86_init_noop(void) { } void __init x86_init_uint_noop(unsigned int unused) { } int __init iommu_init_noop(void) { return 0; } void iommu_shutdown_noop(void) { } +bool __init bool_x86_init_noop(void) { return false; } +void x86_op_int_noop(int cpu) { } /* * The platform setup functions are preset with the default functions @@ -81,6 +83,12 @@ struct x86_init_ops x86_init __initdata = { .init_irq = x86_default_pci_init_irq, .fixup_irqs = x86_default_pci_fixup_irqs, }, + + .hyper = { + .init_platform = x86_init_noop, + .x2apic_available = bool_x86_init_noop, + .init_mem_mapping = x86_init_noop, + }, }; struct x86_cpuinit_ops x86_cpuinit = { @@ -101,6 +109,7 @@ struct x86_platform_ops x86_platform __ro_after_init = { .get_nmi_reason = default_get_nmi_reason, .save_sched_clock_state = tsc_save_sched_clock_state, .restore_sched_clock_state = tsc_restore_sched_clock_state, + .hyper.pin_vcpu = x86_op_int_noop, }; EXPORT_SYMBOL_GPL(x86_platform); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 0099e10eb045..4f544f2a7b06 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -67,9 +67,7 @@ u64 kvm_supported_xcr0(void) #define F(x) bit(X86_FEATURE_##x) -/* These are scattered features in cpufeatures.h. 
*/ -#define KVM_CPUID_BIT_AVX512_4VNNIW 2 -#define KVM_CPUID_BIT_AVX512_4FMAPS 3 +/* For scattered features from cpufeatures.h; we currently expose none */ #define KF(x) bit(KVM_CPUID_BIT_##x) int kvm_update_cpuid(struct kvm_vcpu *vcpu) @@ -367,6 +365,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); + /* cpuid 0x80000008.ebx */ + const u32 kvm_cpuid_8000_0008_ebx_x86_features = + F(IBPB) | F(IBRS); + /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | @@ -392,7 +394,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = - KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS); + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | + F(ARCH_CAPABILITIES); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); @@ -477,7 +480,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); entry->edx &= kvm_cpuid_7_0_edx_x86_features; - entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX); + cpuid_mask(&entry->edx, CPUID_7_EDX); } else { entry->ebx = 0; entry->ecx = 0; @@ -594,7 +597,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | - (1 << KVM_FEATURE_PV_UNHALT); + (1 << KVM_FEATURE_PV_UNHALT) | + (1 << KVM_FEATURE_ASYNC_PF_VMEXIT); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); @@ -627,7 +631,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (!g_phys_as) g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); - entry->ebx = entry->edx = 0; + entry->edx = 0; + /* IBRS and IBPB aren't necessarily present in hardware cpuid */ + if (boot_cpu_has(X86_FEATURE_IBPB)) + entry->ebx |= F(IBPB); + if (boot_cpu_has(X86_FEATURE_IBRS)) + entry->ebx |= F(IBRS); + entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; + cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); break; } case 0x80000019: diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index cdc70a3a6583..9a327d5b6d1f 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, - [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX}, + [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX}, @@ -54,6 +54,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, [CPUID_7_ECX] = { 7, 0, CPUID_ECX}, [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, + [CPUID_7_EDX] = { 7, 0, CPUID_EDX}, }; static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d90cdc77e077..fab073b19528 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "x86.h" #include "tss.h" @@ -1021,8 +1022,8 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned 
long flags) void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; - asm("push %[flags]; popf; call *%[fastop]" - : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); + asm("push %[flags]; popf; " CALL_NOSPEC + : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags)); return rc; } @@ -2404,9 +2405,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) } static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, - u64 cr0, u64 cr4) + u64 cr0, u64 cr3, u64 cr4) { int bad; + u64 pcid; + + /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ + pcid = 0; + if (cr4 & X86_CR4_PCIDE) { + pcid = cr3 & 0xfff; + cr3 &= ~0xfff; + } + + bad = ctxt->ops->set_cr(ctxt, 3, cr3); + if (bad) + return X86EMUL_UNHANDLEABLE; /* * First enable PAE, long mode needs it before CR0.PG = 1 is set. @@ -2425,6 +2438,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, bad = ctxt->ops->set_cr(ctxt, 4, cr4); if (bad) return X86EMUL_UNHANDLEABLE; + if (pcid) { + bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); + if (bad) + return X86EMUL_UNHANDLEABLE; + } + } return X86EMUL_CONTINUE; @@ -2435,11 +2454,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) struct desc_struct desc; struct desc_ptr dt; u16 selector; - u32 val, cr0, cr4; + u32 val, cr0, cr3, cr4; int i; cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); - ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); + cr3 = GET_SMSTATE(u32, smbase, 0x7ff8); ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); @@ -2481,14 +2500,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); - return rsm_enter_protected_mode(ctxt, cr0, cr4); + return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); } static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) { struct desc_struct desc; struct desc_ptr dt; - u64 val, cr0, cr4; + u64 val, cr0, cr3, cr4; u32 base3; u16 selector; int i, r; @@ -2505,7 +2524,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); cr0 = GET_SMSTATE(u64, smbase, 0x7f58); - ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); + cr3 = GET_SMSTATE(u64, smbase, 0x7f50); cr4 = GET_SMSTATE(u64, smbase, 0x7f48); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); val = GET_SMSTATE(u64, smbase, 0x7ed0); @@ -2533,7 +2552,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) dt.address = GET_SMSTATE(u64, smbase, 0x7e68); ctxt->ops->set_gdt(ctxt, &dt); - r = rsm_enter_protected_mode(ctxt, cr0, cr4); + r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); if (r != X86EMUL_CONTINUE) return r; @@ -4005,6 +4024,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt) fxstate_size(ctxt)); } +/* + * FXRSTOR might restore XMM registers not provided by the guest. Fill + * in the host registers (via FXSAVE) instead, so they won't be modified. + * (preemption has to stay disabled until FXRSTOR). + * + * Use noinline to keep the stack for other functions called by callers small. 
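+ * + * For example (illustrative, based on the sizes used below): a non-64-bit guest + * image only provides XMM0-XMM7, so used_size stops short of __fxstate_size(16) + * and the memcpy() tops up the XMM8-XMM15 area from the host's own FXSAVE image.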
+ */ +static noinline int fxregs_fixup(struct fxregs_state *fx_state, + const size_t used_size) +{ + struct fxregs_state fx_tmp; + int rc; + + rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp)); + memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size, + __fxstate_size(16) - used_size); + + return rc; +} + static int em_fxrstor(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; @@ -4015,19 +4054,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; + size = fxstate_size(ctxt); + rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); + if (rc != X86EMUL_CONTINUE) + return rc; + ctxt->ops->get_fpu(ctxt); - size = fxstate_size(ctxt); if (size < __fxstate_size(16)) { - rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); + rc = fxregs_fixup(&fx_state, size); if (rc != X86EMUL_CONTINUE) goto out; } - rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); - if (rc != X86EMUL_CONTINUE) - goto out; - if (fx_state.mxcsr >> 16) { rc = emulate_gp(ctxt, 0); goto out; @@ -4991,6 +5030,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; + u16 dummy; + struct desc_struct desc; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; @@ -5009,6 +5050,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: + def_op_bytes = def_ad_bytes = 2; + ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); + if (desc.d) + def_op_bytes = def_ad_bytes = 4; + break; case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; @@ -5305,9 +5351,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; - asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" + asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), - [fastop]"+S"(fop), ASM_CALL_CONSTRAINT + [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index bdff437acbcb..9d270ba9643c 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) index == RTC_GSI) { if (kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode) || - (e->fields.trig_mode == IOAPIC_EDGE_TRIG && - kvm_apic_pending_eoi(vcpu, e->fields.vector))) + kvm_apic_pending_eoi(vcpu, e->fields.vector)) __set_bit(e->fields.vector, ioapic_handled_vectors); } @@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; bool mask_before, mask_after; + int old_remote_irr, old_delivery_status; union kvm_ioapic_redirect_entry *e; switch (ioapic->ioregsel) { @@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) return; e = &ioapic->redirtbl[index]; mask_before = e->fields.mask; + /* Preserve read-only fields */ + old_remote_irr = e->fields.remote_irr; + old_delivery_status = e->fields.delivery_status; if (ioapic->ioregsel & 1) { e->bits &= 0xffffffff; e->bits |= (u64) val << 32; } else { e->bits &= ~0xffffffffULL; e->bits |= (u32) val; - e->fields.remote_irr = 0; } + e->fields.remote_irr = 
old_remote_irr; + e->fields.delivery_status = old_delivery_status; + + /* + * Some OSes (Linux, Xen) assume that Remote IRR bit will + * be cleared by IOAPIC hardware when the entry is configured + * as edge-triggered. This behavior is used to simulate an + * explicit EOI on IOAPICs that don't have the EOI register. + */ + if (e->fields.trig_mode == IOAPIC_EDGE_TRIG) + e->fields.remote_irr = 0; + mask_after = e->fields.mask; if (mask_before != mask_after) kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 36c90d631096..8cfdb6484fd0 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) recalculate_apic_map(apic->vcpu->kvm); } +static inline u32 kvm_apic_calc_x2apic_ldr(u32 id) +{ + return ((id >> 4) << 16) | (1 << (id & 0xf)); +} + static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id) { - u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); + u32 ldr = kvm_apic_calc_x2apic_ldr(id); WARN_ON_ONCE(id != apic->vcpu->vcpu_id); @@ -1939,14 +1944,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) { - struct kvm_lapic *apic; + struct kvm_lapic *apic = vcpu->arch.apic; int i; - apic_debug("%s\n", __func__); + if (!apic) + return; - ASSERT(vcpu); - apic = vcpu->arch.apic; - ASSERT(apic != NULL); + apic_debug("%s\n", __func__); /* Stop the timer in case it's a reset to an active apic */ hrtimer_cancel(&apic->lapic_timer.timer); @@ -2102,7 +2106,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) */ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ - kvm_lapic_reset(vcpu, false); kvm_iodevice_init(&apic->dev, &apic_mmio_ops); return 0; @@ -2196,6 +2199,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, { if (apic_x2apic_mode(vcpu->arch.apic)) { u32 *id = (u32 *)(s->regs + APIC_ID); + u32 *ldr = (u32 *)(s->regs + APIC_LDR); if (vcpu->kvm->arch.x2apic_format) { if (*id != vcpu->vcpu_id) @@ -2206,6 +2210,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, else *id <<= 24; } + + /* In x2APIC mode, the LDR is fixed and based on the id */ + if (set) + *ldr = kvm_apic_calc_x2apic_ldr(*id); } return 0; @@ -2501,7 +2509,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) pe = xchg(&apic->pending_events, 0); if (test_bit(KVM_APIC_INIT, &pe)) { - kvm_lapic_reset(vcpu, true); kvm_vcpu_reset(vcpu, true); if (kvm_vcpu_is_bsp(apic->vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7a69cf053711..f438e0c4aa8c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -150,6 +150,20 @@ module_param(dbg, bool, 0644); /* make pte_list_desc fit well in cache line */ #define PTE_LIST_EXT 3 +/* + * Return values of handle_mmio_page_fault and mmu.page_fault: + * RET_PF_RETRY: let CPU fault again on the address. + * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For handle_mmio_page_fault only: + * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. 
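+ * + * As a rough illustration (not the exact code, see kvm_mmu_page_fault() further + * down in this patch), a caller is expected to map these along the lines of: + * + * if (r == RET_PF_RETRY) + * return 1; (let the vCPU retry the access) + * if (r == RET_PF_EMULATE) + * goto emulate; (hand the instruction to the emulator)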
+ */ +enum { + RET_PF_RETRY = 0, + RET_PF_EMULATE = 1, + RET_PF_INVALID = 2, +}; + struct pte_list_desc { u64 *sptes[PTE_LIST_EXT]; struct pte_list_desc *more; @@ -2744,8 +2758,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, else pte_access &= ~ACC_WRITE_MASK; + if (!kvm_is_mmio_pfn(pfn)) + spte |= shadow_me_mask; + spte |= (u64)pfn << PAGE_SHIFT; - spte |= shadow_me_mask; if (pte_access & ACC_WRITE_MASK) { @@ -2794,13 +2810,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, return ret; } -static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, - int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, - bool speculative, bool host_writable) +static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, + int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, + bool speculative, bool host_writable) { int was_rmapped = 0; int rmap_count; - bool emulate = false; + int ret = RET_PF_RETRY; pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, *sptep, write_fault, gfn); @@ -2830,12 +2846,12 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, true, host_writable)) { if (write_fault) - emulate = true; + ret = RET_PF_EMULATE; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } if (unlikely(is_mmio_spte(*sptep))) - emulate = true; + ret = RET_PF_EMULATE; pgprintk("%s: setting spte %llx\n", __func__, *sptep); pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", @@ -2855,7 +2871,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, kvm_release_pfn_clean(pfn); - return emulate; + return ret; } static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, @@ -2994,17 +3010,16 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) * Do not cache the mmio info caused by writing the readonly gfn * into the spte otherwise read access on readonly gfn also can * caused mmio page fault and treat it as mmio access. - * Return 1 to tell kvm to emulate it. 
*/ if (pfn == KVM_PFN_ERR_RO_FAULT) - return 1; + return RET_PF_EMULATE; if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); - return 0; + return RET_PF_RETRY; } - return -EFAULT; + return RET_PF_EMULATE; } static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, @@ -3286,13 +3301,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, } if (fast_page_fault(vcpu, v, level, error_code)) - return 0; + return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) return r; @@ -3312,7 +3327,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } @@ -3382,7 +3397,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if(make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, 0, 0, vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); @@ -3397,7 +3412,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); @@ -3437,7 +3452,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, 0, vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); @@ -3474,7 +3489,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, 0, ACC_ALL); @@ -3659,54 +3674,38 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) return reserved; } -/* - * Return values of handle_mmio_page_fault: - * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction - * directly. - * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page - * fault path update the mmio spte. - * RET_MMIO_PF_RETRY: let CPU fault again on the address. - * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). 
- */ -enum { - RET_MMIO_PF_EMULATE = 1, - RET_MMIO_PF_INVALID = 2, - RET_MMIO_PF_RETRY = 0, - RET_MMIO_PF_BUG = -1 -}; - static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; bool reserved; if (mmio_info_in_cache(vcpu, addr, direct)) - return RET_MMIO_PF_EMULATE; + return RET_PF_EMULATE; reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); if (WARN_ON(reserved)) - return RET_MMIO_PF_BUG; + return -EINVAL; if (is_mmio_spte(spte)) { gfn_t gfn = get_mmio_spte_gfn(spte); unsigned access = get_mmio_spte_access(spte); if (!check_mmio_spte(vcpu, spte)) - return RET_MMIO_PF_INVALID; + return RET_PF_INVALID; if (direct) addr = 0; trace_handle_mmio_page_fault(addr, gfn, access); vcpu_cache_mmio_info(vcpu, addr, gfn, access); - return RET_MMIO_PF_EMULATE; + return RET_PF_EMULATE; } /* * If the page table is zapped by other cpus, let CPU fault again on * the address. */ - return RET_MMIO_PF_RETRY; + return RET_PF_RETRY; } EXPORT_SYMBOL_GPL(handle_mmio_page_fault); @@ -3756,7 +3755,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return 1; + return RET_PF_EMULATE; r = mmu_topup_memory_caches(vcpu); if (r) @@ -3784,7 +3783,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) { if (unlikely(!lapic_in_kernel(vcpu) || - kvm_event_needs_reinjection(vcpu))) + kvm_event_needs_reinjection(vcpu) || + vcpu->arch.exception.pending)) return false; if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) @@ -3876,7 +3876,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return 1; + return RET_PF_EMULATE; r = mmu_topup_memory_caches(vcpu); if (r) @@ -3893,13 +3893,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, } if (fast_page_fault(vcpu, gpa, level, error_code)) - return 0; + return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) return r; @@ -3919,7 +3919,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } static void nonpaging_init_context(struct kvm_vcpu *vcpu, @@ -4918,25 +4918,25 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, vcpu->arch.gpa_val = cr2; } + r = RET_PF_INVALID; if (unlikely(error_code & PFERR_RSVD_MASK)) { r = handle_mmio_page_fault(vcpu, cr2, direct); - if (r == RET_MMIO_PF_EMULATE) { + if (r == RET_PF_EMULATE) { emulation_type = 0; goto emulate; } - if (r == RET_MMIO_PF_RETRY) - return 1; - if (r < 0) - return r; - /* Must be RET_MMIO_PF_INVALID. 
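The RET_MMIO_PF_* values removed above were local to the MMIO path; the RET_PF_* values introduced earlier in this patch are shared with mmu.page_fault so a single caller can dispatch on one set of return codes. Below is a minimal sketch of that dispatch, mirroring the kvm_mmu_page_fault() hunk further down; the *_stub functions and handle_fault() are hypothetical stand-ins, not KVM internals.

#include <stdio.h>

enum { RET_PF_RETRY = 0, RET_PF_EMULATE = 1, RET_PF_INVALID = 2 };

static int mmio_fault_stub(int spte_is_stale)
{
        /* A cached MMIO access asks for emulation; a stale SPTE defers to
         * the regular fault path by returning RET_PF_INVALID. */
        return spte_is_stale ? RET_PF_INVALID : RET_PF_EMULATE;
}

static int page_fault_stub(void)
{
        return RET_PF_RETRY;        /* fault fixed, the guest just retries */
}

static int handle_fault(int reserved_bits_set, int spte_is_stale)
{
        int r = RET_PF_INVALID;

        if (reserved_bits_set)
                r = mmio_fault_stub(spte_is_stale);
        if (r == RET_PF_INVALID)
                r = page_fault_stub();
        if (r == RET_PF_RETRY)
                return 1;           /* re-enter the guest */
        if (r < 0)
                return r;           /* real error, e.g. -EINVAL */
        return 0;                   /* RET_PF_EMULATE: emulate the access */
}

int main(void)
{
        printf("mmio hit   -> %d\n", handle_fault(1, 0));   /* emulate */
        printf("stale spte -> %d\n", handle_fault(1, 1));   /* retry   */
        printf("normal pf  -> %d\n", handle_fault(0, 0));   /* retry   */
        return 0;
}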
*/ } - r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), - false); + if (r == RET_PF_INVALID) { + r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), + false); + WARN_ON(r == RET_PF_INVALID); + } + + if (r == RET_PF_RETRY) + return 1; if (r < 0) return r; - if (!r) - return 1; /* * Before emulating the instruction, check if the error code @@ -5062,7 +5062,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); /* The caller should hold mmu-lock before calling this function. */ -static bool +static __always_inline bool slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) @@ -5092,7 +5092,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, return flush; } -static bool +static __always_inline bool slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, bool lock_flush_tlb) @@ -5103,7 +5103,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5111,7 +5111,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5119,7 +5119,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5476,13 +5476,13 @@ int kvm_mmu_module_init(void) pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), - 0, 0, NULL); + 0, SLAB_ACCOUNT, NULL); if (!pte_list_desc_cache) goto nomem; mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", sizeof(struct kvm_mmu_page), - 0, 0, NULL); + 0, SLAB_ACCOUNT, NULL); if (!mmu_page_header_cache) goto nomem; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index f18d1f8d332b..5abae72266b7 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -593,7 +593,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct kvm_mmu_page *sp = NULL; struct kvm_shadow_walk_iterator it; unsigned direct_access, access = gw->pt_access; - int top_level, emulate; + int top_level, ret; direct_access = gw->pte_access; @@ -659,15 +659,15 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, } clear_sp_write_flooding_count(it.sptep); - emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, - it.level, gw->gfn, pfn, prefault, map_writable); + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, + it.level, gw->gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); - return emulate; + return ret; out_gpte_changed: kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } /* @@ -762,12 +762,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (!prefault) inject_page_fault(vcpu, &walker.fault); - return 0; + return RET_PF_RETRY; } if 
(page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { shadow_page_table_clear_flood(vcpu, addr); - return 1; + return RET_PF_EMULATE; } vcpu->arch.write_fault_to_shadow_pgtable = false; @@ -789,7 +789,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) return r; @@ -834,7 +834,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0e68f0b3cbf7..9fb0daf628cb 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -45,6 +45,8 @@ #include #include #include +#include +#include #include #include "trace.h" @@ -183,6 +185,8 @@ struct vcpu_svm { u64 gs_base; } host; + u64 spec_ctrl; + u32 *msrpm; ulong nmi_iret_rip; @@ -248,6 +252,8 @@ static const struct svm_direct_access_msrs { { .index = MSR_CSTAR, .always = true }, { .index = MSR_SYSCALL_MASK, .always = true }, #endif + { .index = MSR_IA32_SPEC_CTRL, .always = false }, + { .index = MSR_IA32_PRED_CMD, .always = false }, { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, @@ -528,6 +534,7 @@ struct svm_cpu_data { struct kvm_ldttss_desc *tss_desc; struct page *save_area; + struct vmcb *current_vmcb; }; static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); @@ -879,6 +886,25 @@ static bool valid_msr_intercept(u32 index) return false; } +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr) +{ + u8 bit_write; + unsigned long tmp; + u32 offset; + u32 *msrpm; + + msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm: + to_svm(vcpu)->msrpm; + + offset = svm_msrpm_offset(msr); + bit_write = 2 * (msr & 0x0f) + 1; + tmp = msrpm[offset]; + + BUG_ON(offset == MSR_INVALID); + + return !!test_bit(bit_write, &tmp); +} + static void set_msr_interception(u32 *msrpm, unsigned msr, int read, int write) { @@ -1584,6 +1610,8 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u32 dummy; u32 eax = 1; + svm->spec_ctrl = 0; + if (!init_event) { svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; @@ -1705,11 +1733,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); + /* + * The vmcb page can be recycled, causing a false negative in + * svm_vcpu_load(). So do a full IBPB now. 
+ */ + indirect_branch_prediction_barrier(); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_svm *svm = to_svm(vcpu); + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); int i; if (unlikely(cpu != vcpu->cpu)) { @@ -1738,6 +1772,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (static_cpu_has(X86_FEATURE_RDTSCP)) wrmsrl(MSR_TSC_AUX, svm->tsc_aux); + if (sd->current_vmcb != svm->vmcb) { + sd->current_vmcb = svm->vmcb; + indirect_branch_prediction_barrier(); + } avic_vcpu_load(vcpu, cpu); } @@ -2189,6 +2227,8 @@ static int ud_interception(struct vcpu_svm *svm) int er; er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; @@ -3576,6 +3616,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_VM_CR: msr_info->data = svm->nested.vm_cr_msr; break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) + return 1; + + msr_info->data = svm->spec_ctrl; + break; case MSR_IA32_UCODE_REV: msr_info->data = 0x01000065; break; @@ -3657,9 +3704,59 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { + case MSR_IA32_CR_PAT: + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vcpu->arch.pat = data; + svm->vmcb->save.g_pat = data; + mark_dirty(svm->vmcb, VMCB_NPT); + break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; + case MSR_IA32_SPEC_CTRL: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) + return 1; + + svm->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_svm_vmrun_msrpm. + * We update the L1 MSR bit as well since it will end up + * touching the MSR anyway now. + */ + set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); + break; + case MSR_IA32_PRED_CMD: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBPB)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + if (is_guest_mode(vcpu)) + break; + set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); + break; case MSR_STAR: svm->vmcb->save.star = data; break; @@ -4912,6 +5009,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + if (svm->spec_ctrl) + native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); + asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" @@ -4955,6 +5061,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" +#endif + /* + * Clear host registers marked as clobbered to prevent + * speculative use. 
+ */ + "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" + "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" + "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" + "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" + "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" +#ifdef CONFIG_X86_64 + "xor %%r8, %%r8 \n\t" + "xor %%r9, %%r9 \n\t" + "xor %%r10, %%r10 \n\t" + "xor %%r11, %%r11 \n\t" + "xor %%r12, %%r12 \n\t" + "xor %%r13, %%r13 \n\t" + "xor %%r14, %%r14 \n\t" + "xor %%r15, %%r15 \n\t" #endif "pop %%" _ASM_BP : @@ -4985,6 +5110,30 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* + * We do not use IBRS in the kernel. If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. + * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. + */ + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + + if (svm->spec_ctrl) + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); + + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, svm->host.gs_base); #else diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a6f4f095f8f4..ae4803b213d0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "kvm_cache_regs.h" #include "x86.h" @@ -50,6 +51,8 @@ #include #include #include +#include +#include #include "trace.h" #include "pmu.h" @@ -107,6 +110,14 @@ static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); +#define MSR_TYPE_R 1 +#define MSR_TYPE_W 2 +#define MSR_TYPE_RW 3 + +#define MSR_BITMAP_MODE_X2APIC 1 +#define MSR_BITMAP_MODE_X2APIC_APICV 2 +#define MSR_BITMAP_MODE_LM 4 + #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL /* Guest_tsc -> host_tsc conversion requires 64-bit division. */ @@ -181,7 +192,6 @@ module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 -#define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; @@ -202,6 +212,11 @@ struct loaded_vmcs { bool nmi_known_unmasked; unsigned long vmcs_host_cr3; /* May not match real cr3 */ unsigned long vmcs_host_cr4; /* May not match real cr4 */ + /* Support for vnmi-less CPUs */ + int soft_vnmi_blocked; + ktime_t entry_time; + s64 vnmi_blocked_time; + unsigned long *msr_bitmap; struct list_head loaded_vmcss_on_cpu_link; }; @@ -218,7 +233,7 @@ struct shared_msr_entry { * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. - * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the + * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). 
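The svm_vcpu_run() hunks above wrap the guest run with a conditional restore of MSR_IA32_SPEC_CTRL before vmentry and a save-then-clear after vmexit, reading the hardware value back only when the write is not intercepted. The user-space sketch below assumes stand-in rd/wr helpers and a fake MSR variable so the flow can actually run; it is an illustration of the pattern, not the KVM code itself.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static uint64_t hw_spec_ctrl;                        /* pretend MSR */
static uint64_t rdmsr_spec_ctrl(void) { return hw_spec_ctrl; }
static void     wrmsr_spec_ctrl(uint64_t v) { hw_spec_ctrl = v; }

struct vcpu { uint64_t spec_ctrl; bool write_intercepted; };

static void vmentry(struct vcpu *v)
{
        /* Only touch the MSR if the guest has made it non-zero. */
        if (v->spec_ctrl)
                wrmsr_spec_ctrl(v->spec_ctrl);
        /* ... run the guest ... */
}

static void vmexit(struct vcpu *v)
{
        /*
         * If the guest can write the MSR directly (no intercept), the value
         * in hardware may have changed while it ran: save it, then clear it
         * so host code never runs with the guest's IBRS setting.
         */
        if (!v->write_intercepted)
                v->spec_ctrl = rdmsr_spec_ctrl();
        if (v->spec_ctrl)
                wrmsr_spec_ctrl(0);
}

int main(void)
{
        struct vcpu v = { .spec_ctrl = 0, .write_intercepted = false };

        vmentry(&v);
        hw_spec_ctrl = 1;           /* guest sets IBRS while running */
        vmexit(&v);
        printf("saved=%llu hw=%llu\n",
               (unsigned long long)v.spec_ctrl,
               (unsigned long long)hw_spec_ctrl);    /* saved=1 hw=0 */
        return 0;
}

The same save/restore pattern appears again in the vmx_vcpu_run() hunk later in this patch.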
@@ -401,13 +416,6 @@ struct __packed vmcs12 { */ #define VMCS12_SIZE 0x1000 -/* Used to remember the last vmcs02 used for some recently used vmcs12s */ -struct vmcs02_list { - struct list_head list; - gpa_t vmptr; - struct loaded_vmcs vmcs02; -}; - /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -432,15 +440,15 @@ struct nested_vmx { */ bool sync_shadow_vmcs; - /* vmcs02_list cache of VMCSs recently used to run L2 guests */ - struct list_head vmcs02_pool; - int vmcs02_num; bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; + + struct loaded_vmcs vmcs02; + /* - * Guest pages referred to in vmcs02 with host-physical pointers, so - * we must keep them pinned while L2 runs. + * Guest pages referred to in the vmcs02 with host-physical + * pointers, so we must keep them pinned while L2 runs. */ struct page *apic_access_page; struct page *virtual_apic_page; @@ -449,8 +457,6 @@ struct nested_vmx { bool pi_pending; u16 posted_intr_nv; - unsigned long *msr_bitmap; - struct hrtimer preemption_timer; bool preemption_timer_expired; @@ -565,6 +571,7 @@ struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; + u8 msr_bitmap_mode; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; @@ -576,6 +583,10 @@ struct vcpu_vmx { u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif + + u64 arch_capabilities; + u64 spec_ctrl; + u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; u32 secondary_exec_control; @@ -882,13 +893,18 @@ static const unsigned short vmcs_field_to_offset_table[] = { static inline short vmcs_field_to_offset(unsigned long field) { - BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); + const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table); + unsigned short offset; - if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || - vmcs_field_to_offset_table[field] == 0) + BUILD_BUG_ON(size > SHRT_MAX); + if (field >= size) return -ENOENT; - return vmcs_field_to_offset_table[field]; + field = array_index_nospec(field, size); + offset = vmcs_field_to_offset_table[field]; + if (offset == 0) + return -ENOENT; + return offset; } static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) @@ -914,6 +930,9 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); +static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -933,12 +952,6 @@ static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); enum { VMX_IO_BITMAP_A, VMX_IO_BITMAP_B, - VMX_MSR_BITMAP_LEGACY, - VMX_MSR_BITMAP_LONGMODE, - VMX_MSR_BITMAP_LEGACY_X2APIC_APICV, - VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV, - VMX_MSR_BITMAP_LEGACY_X2APIC, - VMX_MSR_BITMAP_LONGMODE_X2APIC, VMX_VMREAD_BITMAP, VMX_VMWRITE_BITMAP, VMX_BITMAP_NR @@ -948,12 +961,6 @@ static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A]) #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B]) -#define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY]) -#define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE]) -#define 
vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV]) -#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV]) -#define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC]) -#define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC]) #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) @@ -1064,6 +1071,13 @@ static inline bool is_machine_check(u32 intr_info) (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } +/* Undocumented: icebp/int1 */ +static inline bool is_icebp(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); +} + static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; @@ -1286,6 +1300,11 @@ static inline bool cpu_has_vmx_invpcid(void) SECONDARY_EXEC_ENABLE_INVPCID; } +static inline bool cpu_has_virtual_nmis(void) +{ + return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; +} + static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -1343,11 +1362,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) (vmcs12->secondary_vm_exec_control & bit); } -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; -} - static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & @@ -1900,6 +1914,52 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) vmcs_write32(EXCEPTION_BITMAP, eb); } +/* + * Check if MSR is intercepted for currently loaded MSR bitmap. + */ +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + +/* + * Check if MSR is intercepted for L01 MSR bitmap. + */ +static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { @@ -2278,6 +2338,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); + indirect_branch_prediction_barrier(); } if (!already_loaded) { @@ -2291,7 +2352,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) * processors. See 22.2.4. 
*/ vmcs_writel(HOST_TR_BASE, - (unsigned long)this_cpu_ptr(&cpu_tss)); + (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ /* @@ -2554,36 +2615,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) vmx->guest_msrs[from] = tmp; } -static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) -{ - unsigned long *msr_bitmap; - - if (is_guest_mode(vcpu)) - msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap; - else if (cpu_has_secondary_exec_ctrls() && - (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { - if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv; - else - msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv; - } else { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode_x2apic; - else - msr_bitmap = vmx_msr_bitmap_legacy_x2apic; - } - } else { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode; - else - msr_bitmap = vmx_msr_bitmap_legacy; - } - - vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); -} - /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy @@ -2624,7 +2655,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(&vmx->vcpu); + vmx_update_msr_bitmap(&vmx->vcpu); } /* @@ -2841,8 +2872,9 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) * Advertise EPTP switching unconditionally * since we emulate it */ - vmx->nested.nested_vmx_vmfunc_controls = - VMX_VMFUNC_EPTP_SWITCHING; + if (enable_ept) + vmx->nested.nested_vmx_vmfunc_controls = + VMX_VMFUNC_EPTP_SWITCHING; } /* @@ -3259,6 +3291,20 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: msr_info->data = guest_read_tsc(vcpu); break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + msr_info->data = to_vmx(vcpu)->spec_ctrl; + break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) + return 1; + msr_info->data = to_vmx(vcpu)->arch_capabilities; + break; case MSR_IA32_SYSENTER_CS: msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); break; @@ -3366,6 +3412,70 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) + return 1; + + vmx->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. We update the vmcs01 here for L1 as well + * since it will end up touching the MSR anyway now. 
+ */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_RW); + break; + case MSR_IA32_PRED_CMD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. + */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, + MSR_TYPE_W); + break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated) + return 1; + vmx->arch_capabilities = data; + break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) @@ -3699,9 +3809,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) &_vmexit_control) < 0) return -EIO; - min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | - PIN_BASED_VIRTUAL_NMIS; - opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER; + min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; + opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | + PIN_BASED_VMX_PREEMPTION_TIMER; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; @@ -3808,11 +3918,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) return vmcs; } -static struct vmcs *alloc_vmcs(void) -{ - return alloc_vmcs_cpu(raw_smp_processor_id()); -} - static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); @@ -3828,9 +3933,38 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; + if (loaded_vmcs->msr_bitmap) + free_page((unsigned long)loaded_vmcs->msr_bitmap); WARN_ON(loaded_vmcs->shadow_vmcs != NULL); } +static struct vmcs *alloc_vmcs(void) +{ + return alloc_vmcs_cpu(raw_smp_processor_id()); +} + +static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) +{ + loaded_vmcs->vmcs = alloc_vmcs(); + if (!loaded_vmcs->vmcs) + return -ENOMEM; + + loaded_vmcs->shadow_vmcs = NULL; + loaded_vmcs_init(loaded_vmcs); + + if (cpu_has_vmx_msr_bitmap()) { + loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!loaded_vmcs->msr_bitmap) + goto out_vmcs; + memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); + } + return 0; + +out_vmcs: + free_loaded_vmcs(loaded_vmcs); + return -ENOMEM; +} + static void free_kvm_area(void) { int cpu; @@ -4903,10 +5037,8 @@ static void free_vpid(int vpid) spin_unlock(&vmx_vpid_lock); } -#define MSR_TYPE_R 1 -#define MSR_TYPE_W 2 -static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type) +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) { int f = sizeof(unsigned long); @@ -4940,6 +5072,50 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, } } +static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return; + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). 
Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. + */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R) + /* read-low */ + __set_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __set_bit(msr, msr_bitmap + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + /* read-high */ + __set_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __set_bit(msr, msr_bitmap + 0xc00 / f); + + } +} + +static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type, bool value) +{ + if (value) + vmx_enable_intercept_for_msr(msr_bitmap, msr, type); + else + vmx_disable_intercept_for_msr(msr_bitmap, msr, type); +} + /* * If a msr is allowed by L0, we should check whether it is allowed by L1. * The corresponding bit will be cleared unless both of L0 and L1 allow it. @@ -4986,28 +5162,68 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, } } -static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) +static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) { - if (!longmode_only) - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, - msr, MSR_TYPE_R | MSR_TYPE_W); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, - msr, MSR_TYPE_R | MSR_TYPE_W); + u8 mode = 0; + + if (cpu_has_secondary_exec_ctrls() && + (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { + mode |= MSR_BITMAP_MODE_X2APIC; + if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) + mode |= MSR_BITMAP_MODE_X2APIC_APICV; + } + + if (is_long_mode(vcpu)) + mode |= MSR_BITMAP_MODE_LM; + + return mode; } -static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active) +#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) + +static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, + u8 mode) { - if (apicv_active) { - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv, - msr, type); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv, - msr, type); - } else { - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, - msr, type); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, - msr, type); + int msr; + + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; + msr_bitmap[word + (0x800 / sizeof(long))] = ~0; } + + if (mode & MSR_BITMAP_MODE_X2APIC) { + /* + * TPR reads and writes can be virtualized even if virtual interrupt + * delivery is not in use. 
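The intercept helpers above hard-code the VMX MSR-bitmap layout: one 4 KiB page with read bits for MSRs 0x0-0x1fff at byte offset 0x000, read bits for 0xc0000000-0xc0001fff at 0x400, and the corresponding write bits at 0x800 and 0xc00. The small sketch below is plain C rather than kernel code; it computes the absolute bit position for a given MSR and access type under those assumptions. The MSR numbers in main() are the architectural ones for IA32_SPEC_CTRL (0x48) and MSR_KERNEL_GS_BASE (0xc0000102).

#include <stdint.h>
#include <stdio.h>

/* Returns the absolute bit number within the 4 KiB bitmap page, or -1 if
 * the MSR is outside the two controllable ranges (always intercepted). */
static long msr_bitmap_bit(uint32_t msr, int write)
{
        uint32_t base;

        if (msr <= 0x1fff)
                base = write ? 0x800 : 0x000;
        else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                base = write ? 0xc00 : 0x400;
        else
                return -1;

        return (long)base * 8 + (msr & 0x1fff);
}

int main(void)
{
        printf("SPEC_CTRL write bit:      %ld\n", msr_bitmap_bit(0x48, 1));
        printf("KERNEL_GS_BASE write bit: %ld\n", msr_bitmap_bit(0xc0000102, 1));
        return 0;
}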
+ */ + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); + if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { + vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); + } + } +} + +static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; + u8 mode = vmx_msr_bitmap_mode(vcpu); + u8 changed = mode ^ vmx->msr_bitmap_mode; + + if (!changed) + return; + + vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW, + !(mode & MSR_BITMAP_MODE_LM)); + + if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) + vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); + + vmx->msr_bitmap_mode = mode; } static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) @@ -5114,14 +5330,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { - /* the PIR and ON have been set by L1. */ - kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); + /* the PIR and ON have been set by L1. */ + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) + kvm_vcpu_kick(vcpu); return 0; } return -1; @@ -5255,7 +5472,7 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) } if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); } static u32 vmx_exec_control(struct vcpu_vmx *vmx) @@ -5442,7 +5659,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); + vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ @@ -5520,6 +5737,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities); vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); @@ -5550,6 +5769,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u64 cr0; vmx->rmode.vm86_active = 0; + vmx->spec_ctrl = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); @@ -5592,7 +5812,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } - vmcs_writel(GUEST_RFLAGS, 0x02); + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); @@ -5667,7 +5887,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) static void enable_nmi_window(struct kvm_vcpu *vcpu) { - if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { + if (!cpu_has_virtual_nmis() || + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } @@ -5707,6 +5928,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!cpu_has_virtual_nmis()) { + /* + * Tracking the NMI-blocked state in software is built upon + * finding the next open IRQ window. 
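vmx_update_msr_bitmap() above recomputes a small mode word (x2APIC, APICv, long mode), XORs it against the cached msr_bitmap_mode, and rewrites the bitmap only when something changed, touching the x2APIC range only when those particular bits flipped. Below is a hedged sketch of that lazy-update pattern with stand-in per-vCPU state and printf in place of the real bitmap writes; the struct and helpers are illustrative, not KVM structures.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MODE_X2APIC        1u
#define MODE_X2APIC_APICV  2u
#define MODE_LM            4u

struct vcpu_state {
        bool x2apic;
        bool apicv_active;
        bool long_mode;
        uint8_t msr_bitmap_mode;        /* cached mode from the last update */
};

static uint8_t compute_mode(const struct vcpu_state *v)
{
        uint8_t mode = 0;

        if (v->x2apic) {
                mode |= MODE_X2APIC;
                if (v->apicv_active)
                        mode |= MODE_X2APIC_APICV;
        }
        if (v->long_mode)
                mode |= MODE_LM;
        return mode;
}

static void update_msr_bitmap(struct vcpu_state *v)
{
        uint8_t mode = compute_mode(v);
        uint8_t changed = mode ^ v->msr_bitmap_mode;

        if (!changed)
                return;                         /* bitmap already matches */

        /* Cheap per-MSR toggles are simply re-applied on any change... */
        printf("KERNEL_GS_BASE intercept: %s\n",
               (mode & MODE_LM) ? "off" : "on");

        /* ...but the whole x2APIC range is rewritten only when needed. */
        if (changed & (MODE_X2APIC | MODE_X2APIC_APICV))
                printf("rewrite x2APIC MSR range for mode %u\n", (unsigned)mode);

        v->msr_bitmap_mode = mode;              /* remember what is encoded */
}

int main(void)
{
        struct vcpu_state v = { .x2apic = true, .apicv_active = false,
                                .long_mode = true, .msr_bitmap_mode = 0 };
        update_msr_bitmap(&v);          /* first call rewrites both groups */
        update_msr_bitmap(&v);          /* second call is a no-op */
        return 0;
}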
This, in turn, depends on + * well-behaving guests: They have to keep IRQs disabled at + * least as long as the NMI handler runs. Otherwise we may + * cause NMI nesting, maybe breaking the guest. But as this is + * highly unlikely, we can live with the residual risk. + */ + vmx->loaded_vmcs->soft_vnmi_blocked = 1; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; @@ -5725,6 +5959,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); bool masked; + if (!cpu_has_virtual_nmis()) + return vmx->loaded_vmcs->soft_vnmi_blocked; if (vmx->loaded_vmcs->nmi_known_unmasked) return false; masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; @@ -5736,13 +5972,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); - vmx->loaded_vmcs->nmi_known_unmasked = !masked; - if (masked) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); + if (!cpu_has_virtual_nmis()) { + if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { + vmx->loaded_vmcs->soft_vnmi_blocked = masked; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + } else { + vmx->loaded_vmcs->nmi_known_unmasked = !masked; + if (masked) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + } } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) @@ -5750,6 +5993,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) if (to_vmx(vcpu)->nested.nested_run_pending) return 0; + if (!cpu_has_virtual_nmis() && + to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) + return 0; + return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); @@ -5878,11 +6125,9 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; /* already handled by vmx_vcpu_run() */ if (is_invalid_opcode(intr_info)) { - if (is_guest_mode(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; @@ -5931,7 +6176,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; - if (!(dr6 & ~DR6_RESERVED)) /* icebp */ + if (is_icebp(intr_info)) skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); @@ -6478,6 +6723,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) * AAK134, BY25. 
*/ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); @@ -6564,7 +6810,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_EVENT, vcpu)) return 1; - err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); + err = emulate_instruction(vcpu, 0); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; @@ -6699,7 +6945,7 @@ void vmx_enable_tdp(void) static __init int hardware_setup(void) { - int r = -ENOMEM, i, msr; + int r = -ENOMEM, i; rdmsrl_safe(MSR_EFER, &host_efer); @@ -6712,22 +6958,13 @@ static __init int hardware_setup(void) goto out; } - vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); - /* - * Allow direct access to the PC debug port (it is often used for I/O - * delays, but the vmexits simply slow things down). - */ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); - clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); - memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); - memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); - if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out; @@ -6790,42 +7027,8 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } - vmx_disable_intercept_for_msr(MSR_FS_BASE, false); - vmx_disable_intercept_for_msr(MSR_GS_BASE, false); - vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); - - memcpy(vmx_msr_bitmap_legacy_x2apic_apicv, - vmx_msr_bitmap_legacy, PAGE_SIZE); - memcpy(vmx_msr_bitmap_longmode_x2apic_apicv, - vmx_msr_bitmap_longmode, PAGE_SIZE); - memcpy(vmx_msr_bitmap_legacy_x2apic, - vmx_msr_bitmap_legacy, PAGE_SIZE); - memcpy(vmx_msr_bitmap_longmode_x2apic, - vmx_msr_bitmap_longmode, PAGE_SIZE); - set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ - for (msr = 0x800; msr <= 0x8ff; msr++) { - if (msr == 0x839 /* TMCCT */) - continue; - vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true); - } - - /* - * TPR reads and writes can be virtualized even if virtual interrupt - * delivery is not in use. - */ - vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true); - vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false); - - /* EOI */ - vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true); - /* SELF-IPI */ - vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true); - if (enable_ept) vmx_enable_tdp(); else @@ -6928,94 +7131,6 @@ static int handle_monitor(struct kvm_vcpu *vcpu) return handle_nop(vcpu); } -/* - * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. - * We could reuse a single VMCS for all the L2 guests, but we also want the - * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this - * allows keeping them loaded on the processor, and in the future will allow - * optimizations where prepare_vmcs02 doesn't need to set all the fields on - * every entry if they never change. - * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE - * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. - * - * The following functions allocate and free a vmcs02 in this pool. 
- */ - -/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ -static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) -{ - struct vmcs02_list *item; - list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) - if (item->vmptr == vmx->nested.current_vmptr) { - list_move(&item->list, &vmx->nested.vmcs02_pool); - return &item->vmcs02; - } - - if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { - /* Recycle the least recently used VMCS. */ - item = list_last_entry(&vmx->nested.vmcs02_pool, - struct vmcs02_list, list); - item->vmptr = vmx->nested.current_vmptr; - list_move(&item->list, &vmx->nested.vmcs02_pool); - return &item->vmcs02; - } - - /* Create a new VMCS */ - item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); - if (!item) - return NULL; - item->vmcs02.vmcs = alloc_vmcs(); - item->vmcs02.shadow_vmcs = NULL; - if (!item->vmcs02.vmcs) { - kfree(item); - return NULL; - } - loaded_vmcs_init(&item->vmcs02); - item->vmptr = vmx->nested.current_vmptr; - list_add(&(item->list), &(vmx->nested.vmcs02_pool)); - vmx->nested.vmcs02_num++; - return &item->vmcs02; -} - -/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ -static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) -{ - struct vmcs02_list *item; - list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) - if (item->vmptr == vmptr) { - free_loaded_vmcs(&item->vmcs02); - list_del(&item->list); - kfree(item); - vmx->nested.vmcs02_num--; - return; - } -} - -/* - * Free all VMCSs saved for this vcpu, except the one pointed by - * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs - * must be &vmx->vmcs01. - */ -static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) -{ - struct vmcs02_list *item, *n; - - WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); - list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { - /* - * Something will leak if the above WARN triggers. Better than - * a use-after-free. 
- */ - if (vmx->loaded_vmcs == &item->vmcs02) - continue; - - free_loaded_vmcs(&item->vmcs02); - list_del(&item->list); - kfree(item); - vmx->nested.vmcs02_num--; - } -} - /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), * set the success or error code of an emulated VMX instruction, as specified @@ -7196,13 +7311,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; + int r; - if (cpu_has_vmx_msr_bitmap()) { - vmx->nested.msr_bitmap = - (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx->nested.msr_bitmap) - goto out_msr_bitmap; - } + r = alloc_loaded_vmcs(&vmx->nested.vmcs02); + if (r < 0) + goto out_vmcs02; vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_vmcs12) @@ -7219,9 +7332,6 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) vmx->vmcs01.shadow_vmcs = shadow_vmcs; } - INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); - vmx->nested.vmcs02_num = 0; - hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; @@ -7233,9 +7343,9 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) kfree(vmx->nested.cached_vmcs12); out_cached_vmcs12: - free_page((unsigned long)vmx->nested.msr_bitmap); + free_loaded_vmcs(&vmx->nested.vmcs02); -out_msr_bitmap: +out_vmcs02: return -ENOMEM; } @@ -7377,10 +7487,6 @@ static void free_nested(struct vcpu_vmx *vmx) free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; - if (vmx->nested.msr_bitmap) { - free_page((unsigned long)vmx->nested.msr_bitmap); - vmx->nested.msr_bitmap = NULL; - } if (enable_shadow_vmcs) { vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); @@ -7388,7 +7494,7 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->vmcs01.shadow_vmcs = NULL; } kfree(vmx->nested.cached_vmcs12); - /* Unpin physical memory we referred to in current vmcs02 */ + /* Unpin physical memory we referred to in the vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; @@ -7404,7 +7510,7 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->nested.pi_desc = NULL; } - nested_free_all_saved_vmcss(vmx); + free_loaded_vmcs(&vmx->nested.vmcs02); } /* Emulate the VMXOFF instruction */ @@ -7447,8 +7553,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) vmptr + offsetof(struct vmcs12, launch_state), &zero, sizeof(zero)); - nested_free_vmcs02(vmx, vmptr); - nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } @@ -7978,6 +8082,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); @@ -8359,10 +8464,11 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) /* * The host physical addresses of some pages of guest memory - * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU - * may write to these pages via their host physical address while - * L2 is running, bypassing any address-translation-based dirty - * tracking (e.g. EPT write protection). + * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC + * Page). 
The CPU may write to these pages via their host + * physical address while L2 is running, bypassing any + * address-translation-based dirty tracking (e.g. EPT write + * protection). * * Mark them dirty on every exit from L2 to prevent them from * getting out of sync with dirty tracking. @@ -8822,6 +8928,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) return 0; } + if (unlikely(!cpu_has_virtual_nmis() && + vmx->loaded_vmcs->soft_vnmi_blocked)) { + if (vmx_interrupt_allowed(vcpu)) { + vmx->loaded_vmcs->soft_vnmi_blocked = 0; + } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && + vcpu->arch.nmi_pending) { + /* + * This CPU don't support us in finding the end of an + * NMI-blocked window if the guest runs with IRQs + * disabled. So we pull the trigger after 1 s of + * futile waiting, but inform the user about this. + */ + printk(KERN_WARNING "%s: Breaking out of NMI-blocked " + "state on VCPU %d after 1 s timeout\n", + __func__, vcpu->vcpu_id); + vmx->loaded_vmcs->soft_vnmi_blocked = 0; + } + } + if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); @@ -8877,7 +9002,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) @@ -9063,14 +9188,14 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) #endif "pushf\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" - "call *%[entry]\n\t" + CALL_NOSPEC : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif ASM_CALL_CONSTRAINT : - [entry]"r"(entry), + THUNK_TARGET(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); @@ -9104,33 +9229,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; - if (vmx->loaded_vmcs->nmi_known_unmasked) - return; - /* - * Can't use vmx->exit_intr_info since we're not sure what - * the exit reason is. - */ - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; - vector = exit_intr_info & INTR_INFO_VECTOR_MASK; - /* - * SDM 3: 27.7.1.2 (September 2008) - * Re-set bit "block by NMI" before VM entry if vmexit caused by - * a guest IRET fault. - * SDM 3: 23.2.2 (September 2008) - * Bit 12 is undefined in any of the following cases: - * If the VM exit sets the valid bit in the IDT-vectoring - * information field. - * If the VM exit is due to a double fault. - */ - if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && - vector != DF_VECTOR && !idtv_info_valid) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) - & GUEST_INTR_STATE_NMI); + if (cpu_has_virtual_nmis()) { + if (vmx->loaded_vmcs->nmi_known_unmasked) + return; + /* + * Can't use vmx->exit_intr_info since we're not sure what + * the exit reason is. + */ + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; + vector = exit_intr_info & INTR_INFO_VECTOR_MASK; + /* + * SDM 3: 27.7.1.2 (September 2008) + * Re-set bit "block by NMI" before VM entry if vmexit caused by + * a guest IRET fault. + * SDM 3: 23.2.2 (September 2008) + * Bit 12 is undefined in any of the following cases: + * If the VM exit sets the valid bit in the IDT-vectoring + * information field. 
+ * If the VM exit is due to a double fault. + */ + if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && + vector != DF_VECTOR && !idtv_info_valid) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) + & GUEST_INTR_STATE_NMI); + } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->vnmi_blocked_time += + ktime_to_ns(ktime_sub(ktime_get(), + vmx->loaded_vmcs->entry_time)); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, @@ -9247,6 +9377,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr3, cr4; + /* Record the guest's net vcpu time for enforced NMI injections. */ + if (unlikely(!cpu_has_virtual_nmis() && + vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->entry_time = ktime_get(); + /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) @@ -9297,6 +9432,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_arm_hv_timer(vcpu); + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + if (vmx->spec_ctrl) + native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ @@ -9345,6 +9489,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" + "setbe %c[fail](%0)\n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" @@ -9361,12 +9506,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" + "xor %%eax, %%eax \n\t" + "xor %%ebx, %%ebx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" - "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" @@ -9403,6 +9559,30 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* + * We do not use IBRS in the kernel. If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. + * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. 
+ */ + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + + if (vmx->spec_ctrl) + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); + + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr); @@ -9514,6 +9694,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + unsigned long *msr_bitmap; int cpu; if (!vmx) @@ -9546,13 +9727,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (!vmx->guest_msrs) goto free_pml; - vmx->loaded_vmcs = &vmx->vmcs01; - vmx->loaded_vmcs->vmcs = alloc_vmcs(); - vmx->loaded_vmcs->shadow_vmcs = NULL; - if (!vmx->loaded_vmcs->vmcs) + err = alloc_loaded_vmcs(&vmx->vmcs01); + if (err < 0) goto free_msrs; - loaded_vmcs_init(vmx->loaded_vmcs); + msr_bitmap = vmx->vmcs01.msr_bitmap; + vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); + vmx->msr_bitmap_mode = 0; + + vmx->loaded_vmcs = &vmx->vmcs01; cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; @@ -9946,7 +10134,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, if (cpu_has_vmx_msr_bitmap() && nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) - ; + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_USE_MSR_BITMAPS); else vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_USE_MSR_BITMAPS); @@ -10021,10 +10210,25 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, int msr; struct page *page; unsigned long *msr_bitmap_l1; - unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap; + unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; + /* + * pred_cmd & spec_ctrl are trying to verify two things: + * + * 1. L0 gave a permission to L1 to actually passthrough the MSR. This + * ensures that we do not accidentally generate an L02 MSR bitmap + * from the L12 MSR bitmap that is too permissive. + * 2. That L1 or L2s have actually used the MSR. This avoids + * unnecessarily merging of the bitmap if the MSR is unused. This + * works properly because we only update the L01 MSR bitmap lazily. + * So even if L0 should pass L1 these MSRs, the L01 bitmap is only + * updated to reflect this when L1 (or its L2s) actually write to + * the MSR. + */ + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); - /* This shortcut is ok because we support only x2APIC MSRs so far. 
*/ - if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && + !pred_cmd && !spec_ctrl) return false; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); @@ -10057,6 +10261,19 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, MSR_TYPE_W); } } + + if (spec_ctrl) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_R | MSR_TYPE_W); + + if (pred_cmd) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_PRED_CMD, + MSR_TYPE_W); + kunmap(page); kvm_release_page_clean(page); @@ -10598,6 +10815,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); + if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the @@ -10814,20 +11034,15 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct loaded_vmcs *vmcs02; u32 msr_entry_idx; u32 exit_qual; - vmcs02 = nested_get_current_vmcs02(vmx); - if (!vmcs02) - return -ENOMEM; - enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); - vmx_switch_vmcs(vcpu, vmcs02); + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); vmx_segment_cache_clear(vmx); if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { @@ -11031,29 +11246,27 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; - - if (kvm_event_needs_reinjection(vcpu)) - return -EBUSY; + bool block_nested_events = + vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); if (vcpu->arch.exception.pending && nested_vmx_check_exception(vcpu, &exit_qual)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); - vcpu->arch.exception.pending = false; return 0; } if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | @@ -11069,7 +11282,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); return 0; @@ -11256,6 +11469,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_clear_interrupt_queue(vcpu); } +static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 entry_failure_code; + + nested_ept_uninit_mmu_context(vcpu); + + /* + * Only PDPTE load can fail as the value of cr3 was checked on entry and + * couldn't have changed. 
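+	 *
+	 * A failure here can therefore only be a PDPTE load problem, which is
+	 * why it is reported as VMX_ABORT_LOAD_HOST_PDPTE_FAIL below.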
+ */ + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; +} + /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified @@ -11269,7 +11500,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; - u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; @@ -11296,17 +11526,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); vmx_set_cr4(vcpu, vmcs12->host_cr4); - nested_ept_uninit_mmu_context(vcpu); - - /* - * Only PDPTE load can fail as the value of cr3 was checked on entry and - * couldn't have changed. - */ - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; + load_vmcs12_mmu_host_state(vcpu, vmcs12); if (enable_vpid) { /* @@ -11325,6 +11545,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) @@ -11388,7 +11610,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_write64(GUEST_IA32_DEBUGCTL, 0); if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) @@ -11434,10 +11656,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); - /* if no vmcs02 cache requested, remove the one we used */ - if (VMCS02_POOL_SIZE == 0) - nested_free_vmcs02(vmx, vmx->nested.current_vmptr); - /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); @@ -11530,6 +11748,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, * accordingly. 
*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + + load_vmcs12_mmu_host_state(vcpu, vmcs12); + /* * The emulated instruction was already skipped in * nested_vmx_run, but the updated RIP was never diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03869eb7fcd6..b9afb4784d12 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1006,6 +1006,7 @@ static u32 msrs_to_save[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, + MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES }; static unsigned num_msrs_to_save; @@ -1795,10 +1796,13 @@ u64 get_kvmclock_ns(struct kvm *kvm) /* both __this_cpu_read() and rdtsc() should be on the same cpu */ get_cpu(); - kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, - &hv_clock.tsc_shift, - &hv_clock.tsc_to_system_mul); - ret = __pvclock_read_cycles(&hv_clock, rdtsc()); + if (__this_cpu_read(cpu_tsc_khz)) { + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, + &hv_clock.tsc_shift, + &hv_clock.tsc_to_system_mul); + ret = __pvclock_read_cycles(&hv_clock, rdtsc()); + } else + ret = ktime_get_boot_ns() + ka->kvmclock_offset; put_cpu(); @@ -1830,6 +1834,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) */ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + if (guest_hv_clock.version & 1) + ++guest_hv_clock.version; /* first time write, random junk */ + vcpu->hv_clock.version = guest_hv_clock.version + 1; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, @@ -2919,6 +2926,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = rdtsc(); + /* + * If userspace has set any breakpoints or watchpoints, dr6 is restored + * on every vmexit, but if not, we might have a stale dr6 from the + * guest. do_debug expects dr6 to be cleared after it runs, do the same. 
+ */ + set_debugreg(0, 6); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, @@ -4359,7 +4372,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) addr, n, v)) && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) break; - trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); + trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); handled += n; addr += n; len -= n; @@ -4618,7 +4631,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, - vcpu->mmio_fragments[0].gpa, *(u64 *)val); + vcpu->mmio_fragments[0].gpa, val); vcpu->mmio_read_completed = 0; return 1; } @@ -4640,14 +4653,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { - trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { - trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); + trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); return X86EMUL_IO_NEEDED; } @@ -5413,7 +5426,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; - r = EMULATE_FAIL; + r = EMULATE_USER_EXIT; } kvm_queue_exception(vcpu, UD_VECTOR); @@ -5705,6 +5718,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; + if (ctxt->have_exception && inject_emulated_exception(vcpu)) + return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); @@ -6740,6 +6755,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) kvm_x86_ops->tlb_flush(vcpu); } +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) +{ + unsigned long apic_address; + + /* + * The physical address of apic access page is stored in the VMCS. + * Update it when it becomes invalid. 
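+	 *
+	 * If the host virtual address backing the APIC access page lies inside
+	 * the invalidated range, ask every vCPU to reload it.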
+ */ + apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); + if (start <= apic_address && apic_address < end) + kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { struct page *page = NULL; @@ -7223,12 +7252,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct fpu *fpu = ¤t->thread.fpu; int r; - sigset_t sigsaved; fpu__initialize(fpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { if (kvm_run->immediate_exit) { @@ -7271,8 +7298,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) out: post_kvm_run_save(vcpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } @@ -7340,7 +7366,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) #endif kvm_rip_write(vcpu, regs->rip); - kvm_set_rflags(vcpu, regs->rflags); + kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); vcpu->arch.exception.pending = false; @@ -7454,6 +7480,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, } EXPORT_SYMBOL_GPL(kvm_task_switch); +int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { + /* + * When EFER.LME and CR0.PG are set, the processor is in + * 64-bit mode (though maybe in a 32-bit code segment). + * CR4.PAE and EFER.LMA must be set. + */ + if (!(sregs->cr4 & X86_CR4_PAE) + || !(sregs->efer & EFER_LMA)) + return -EINVAL; + } else { + /* + * Not in 64-bit mode: EFER.LMA is clear and the code + * segment cannot be 64-bit. 
+ */ + if (sregs->efer & EFER_LMA || sregs->cs.l) + return -EINVAL; + } + + return 0; +} + int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { @@ -7466,6 +7515,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; + if (kvm_valid_sregs(vcpu, sregs)) + return -EINVAL; + apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; if (kvm_set_apic_base(vcpu, &apic_base_msr)) @@ -7769,6 +7821,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { + kvm_lapic_reset(vcpu, init_event); + vcpu->arch.hflags = 0; vcpu->arch.smi_pending = 0; @@ -8197,10 +8251,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) return r; } - if (!size) { - r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); - WARN_ON(r < 0); - } + if (!size) + vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); return 0; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index d0b95b7a90b4..6d112d8f799c 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -12,6 +12,7 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { + vcpu->arch.exception.pending = false; vcpu->arch.exception.injected = false; } diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 457f681ef379..f23934bbaf4e 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -24,8 +24,9 @@ lib-y := delay.o misc.o cmdline.o cpu.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o -lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o +lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o +lib-$(CONFIG_RETPOLINE) += retpoline.o obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4d34bb548b41..46e71a74e612 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -29,7 +29,8 @@ #include #include #include - +#include + /* * computes a partial checksum, e.g. for TCP/UDP fragments */ @@ -156,7 +157,7 @@ ENTRY(csum_partial) negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi - jmp *%ebx + JMP_NOSPEC %ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax @@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic) andl $-32,%edx lea 3f(%ebx,%ebx), %ebx testl %esi, %esi - jmp *%ebx + JMP_NOSPEC %ebx 1: addl $64,%esi addl $64,%edi SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c index d6f848d1211d..2dd1fe13a37b 100644 --- a/arch/x86/lib/cpu.c +++ b/arch/x86/lib/cpu.c @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig) { unsigned int fam, model; - fam = x86_family(sig); + fam = x86_family(sig); model = (sig >> 4) & 0xf; diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 553f8fd23cc4..4846eff7e4c8 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops) delay = min_t(u64, MWAITX_MAX_LOOPS, loops); /* - * Use cpu_tss as a cacheline-aligned, seldomly + * Use cpu_tss_rw as a cacheline-aligned, seldomly * accessed per-cpu variable as the monitor target. 
*/ - __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0); + __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0); /* * AMD, like Intel, supports the EAX hint and EAX=0xf diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index c97d935a29e8..49b167f73215 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -40,6 +40,8 @@ ENTRY(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 1: movzbl (%_ASM_AX),%edx xor %eax,%eax @@ -54,6 +56,8 @@ ENTRY(__get_user_2) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 2: movzwl -1(%_ASM_AX),%edx xor %eax,%eax @@ -68,6 +72,8 @@ ENTRY(__get_user_4) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 3: movl -3(%_ASM_AX),%edx xor %eax,%eax @@ -83,6 +89,8 @@ ENTRY(__get_user_8) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 4: movq -7(%_ASM_AX),%rdx xor %eax,%eax @@ -94,6 +102,8 @@ ENTRY(__get_user_8) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 4: movl -7(%_ASM_AX),%edx 5: movl -3(%_ASM_AX),%ecx diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c new file mode 100644 index 000000000000..e52857d64cac --- /dev/null +++ b/arch/x86/lib/insn-eval.c @@ -0,0 +1,1127 @@ +/* + * Utility functions for x86 operand and address decoding + * + * Copyright (C) Intel Corporation 2017 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "insn: " fmt + +enum reg_type { + REG_TYPE_RM = 0, + REG_TYPE_INDEX, + REG_TYPE_BASE, +}; + +/** + * is_string_insn() - Determine if instruction is a string instruction + * @insn: Instruction structure containing the opcode + * + * Return: true if the instruction, determined by the opcode, is any of the + * string instructions as defined in the Intel Software Development manual. + * False otherwise. + */ +static bool is_string_insn(struct insn *insn) +{ + insn_get_opcode(insn); + + /* All string instructions have a 1-byte opcode. */ + if (insn->opcode.nbytes != 1) + return false; + + switch (insn->opcode.bytes[0]) { + case 0x6c ... 0x6f: /* INS, OUTS */ + case 0xa4 ... 0xa7: /* MOVS, CMPS */ + case 0xaa ... 0xaf: /* STOS, LODS, SCAS */ + return true; + default: + return false; + } +} + +/** + * get_overridden_seg_reg() - obtain segment register to use from prefixes + * @insn: Instruction structure with segment override prefixes + * @regs: Structure with register values as seen when entering kernel mode + * @regoff: Operand offset, in pt_regs, used to deterimine segment register + * + * The segment register to which an effective address refers depends on + * a) whether running in long mode (in such a case semgment override prefixes + * are ignored. 
b) Whether segment override prefixes must be ignored for certain + * registers: always use CS when the register is (R|E)IP; always use ES when + * operand register is (E)DI with a string instruction as defined in the Intel + * documentation. c) If segment overrides prefixes are found in the instruction + * prefixes. d) Use the default segment register associated with the operand + * register. + * + * This function returns the overridden segment register to use, if any, as per + * the conditions described above. Please note that this function + * does not return the value in the segment register (i.e., the segment + * selector). The segment selector needs to be obtained using + * get_segment_selector() and passing the segment register resolved by + * this function. + * + * Return: A constant identifying the segment register to use, among CS, SS, DS, + * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. + * INAT_SEG_REG_DEFAULT is returned if no segment override prefixes were found + * and the default segment register shall be used. -EINVAL in case of error. + */ +static int get_overridden_seg_reg(struct insn *insn, struct pt_regs *regs, + int regoff) +{ + int i; + int sel_overrides = 0; + int seg_register = INAT_SEG_REG_DEFAULT; + + /* + * Segment override prefixes should not be used for (E)IP. Check this + * case first as we might not have (and not needed at all) a + * valid insn structure to evaluate segment override prefixes. + */ + if (regoff == offsetof(struct pt_regs, ip)) { + if (user_64bit_mode(regs)) + return INAT_SEG_REG_IGNORE; + else + return INAT_SEG_REG_DEFAULT; + } + + if (!insn) + return -EINVAL; + + insn_get_prefixes(insn); + + /* Look for any segment override prefixes. */ + for (i = 0; i < insn->prefixes.nbytes; i++) { + insn_attr_t attr; + + attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + switch (attr) { + case INAT_MAKE_PREFIX(INAT_PFX_CS): + seg_register = INAT_SEG_REG_CS; + sel_overrides++; + break; + case INAT_MAKE_PREFIX(INAT_PFX_SS): + seg_register = INAT_SEG_REG_SS; + sel_overrides++; + break; + case INAT_MAKE_PREFIX(INAT_PFX_DS): + seg_register = INAT_SEG_REG_DS; + sel_overrides++; + break; + case INAT_MAKE_PREFIX(INAT_PFX_ES): + seg_register = INAT_SEG_REG_ES; + sel_overrides++; + break; + case INAT_MAKE_PREFIX(INAT_PFX_FS): + seg_register = INAT_SEG_REG_FS; + sel_overrides++; + break; + case INAT_MAKE_PREFIX(INAT_PFX_GS): + seg_register = INAT_SEG_REG_GS; + sel_overrides++; + break; + /* No default action needed. */ + } + } + + /* + * In long mode, segment override prefixes are ignored, except for + * overrides for FS and GS. + */ + if (user_64bit_mode(regs)) { + if (seg_register != INAT_SEG_REG_FS && + seg_register != INAT_SEG_REG_GS) + return INAT_SEG_REG_IGNORE; + /* More than one segment override prefix leads to undefined behavior. */ + } else if (sel_overrides > 1) { + return -EINVAL; + /* + * Segment override prefixes are always ignored for string instructions + * that involve the use the (E)DI register. 
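+	 * Returning INAT_SEG_REG_DEFAULT here lets resolve_seg_register() fall
+	 * back to the architectural default, which is ES for (E)DI in a string
+	 * instruction.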
+ */ + } else if ((regoff == offsetof(struct pt_regs, di)) && + is_string_insn(insn)) { + return INAT_SEG_REG_DEFAULT; + } + + return seg_register; +} + +/** + * resolve_seg_register() - obtain segment register + * @insn: Instruction structure with segment override prefixes + * @regs: Structure with register values as seen when entering kernel mode + * @regoff: Operand offset, in pt_regs, used to deterimine segment register + * + * Determine the segment register associated with the operands and, if + * applicable, prefixes and the instruction pointed by insn. The function first + * checks if the segment register shall be ignored or has been overridden in the + * instruction prefixes. Otherwise, it resolves the segment register to use + * based on the defaults described in the Intel documentation. + * + * The operand register, regoff, is represented as the offset from the base of + * pt_regs. Also, regoff can be -EDOM for cases in which registers are not + * used as operands (e.g., displacement-only memory addressing). + * + * Return: A constant identifying the segment register to use, among CS, SS, DS, + * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode. + * -EINVAL in case of error. + */ +static int resolve_seg_register(struct insn *insn, struct pt_regs *regs, + int regoff) +{ + int seg_reg; + + seg_reg = get_overridden_seg_reg(insn, regs, regoff); + + if (seg_reg < 0) + return seg_reg; + + if (seg_reg == INAT_SEG_REG_IGNORE) + return seg_reg; + + if (seg_reg != INAT_SEG_REG_DEFAULT) + return seg_reg; + + /* + * If we are here, we use the default segment register as described + * in the Intel documentation: + * + DS for all references involving (E)AX, (E)CX, (E)DX, (E)BX, and + * (E)SI. + * + If used in a string instruction, ES for (E)DI. Otherwise, DS. + * + AX, CX and DX are not valid register operands in 16-bit address + * encodings but are valid for 32-bit and 64-bit encodings. + * + -EDOM is reserved to identify for cases in which no register + * is used (i.e., displacement-only addressing). Use DS. + * + SS for (E)SP or (E)BP. + * + CS for (E)IP. + */ + + switch (regoff) { + case offsetof(struct pt_regs, ax): + case offsetof(struct pt_regs, cx): + case offsetof(struct pt_regs, dx): + /* Need insn to verify address size. */ + if (!insn || insn->addr_bytes == 2) + return -EINVAL; + case -EDOM: + case offsetof(struct pt_regs, bx): + case offsetof(struct pt_regs, si): + return INAT_SEG_REG_DS; + case offsetof(struct pt_regs, di): + /* Need insn to see if insn is string instruction. */ + if (!insn) + return -EINVAL; + if (is_string_insn(insn)) + return INAT_SEG_REG_ES; + return INAT_SEG_REG_DS; + case offsetof(struct pt_regs, bp): + case offsetof(struct pt_regs, sp): + return INAT_SEG_REG_SS; + case offsetof(struct pt_regs, ip): + return INAT_SEG_REG_CS; + default: + return -EINVAL; + } +} + +/** + * get_segment_selector() - obtain segment selector + * @regs: Structure with register values as seen when entering kernel mode + * @seg_reg: Segment register to use + * + * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment + * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or + * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained + * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU + * registers. This done for only for completeness as in CONFIG_X86_64 segment + * registers are ignored. + * + * Return: Value of the segment selector, including null when running in + * long mode. 
-1 on error. + */ +static short get_segment_selector(struct pt_regs *regs, int seg_reg) +{ +#ifdef CONFIG_X86_64 + unsigned short sel; + + switch (seg_reg) { + case INAT_SEG_REG_IGNORE: + return 0; + case INAT_SEG_REG_CS: + return (unsigned short)(regs->cs & 0xffff); + case INAT_SEG_REG_SS: + return (unsigned short)(regs->ss & 0xffff); + case INAT_SEG_REG_DS: + savesegment(ds, sel); + return sel; + case INAT_SEG_REG_ES: + savesegment(es, sel); + return sel; + case INAT_SEG_REG_FS: + savesegment(fs, sel); + return sel; + case INAT_SEG_REG_GS: + savesegment(gs, sel); + return sel; + default: + return -EINVAL; + } +#else /* CONFIG_X86_32 */ + struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; + + if (v8086_mode(regs)) { + switch (seg_reg) { + case INAT_SEG_REG_CS: + return (unsigned short)(regs->cs & 0xffff); + case INAT_SEG_REG_SS: + return (unsigned short)(regs->ss & 0xffff); + case INAT_SEG_REG_DS: + return vm86regs->ds; + case INAT_SEG_REG_ES: + return vm86regs->es; + case INAT_SEG_REG_FS: + return vm86regs->fs; + case INAT_SEG_REG_GS: + return vm86regs->gs; + case INAT_SEG_REG_IGNORE: + /* fall through */ + default: + return -EINVAL; + } + } + + switch (seg_reg) { + case INAT_SEG_REG_CS: + return (unsigned short)(regs->cs & 0xffff); + case INAT_SEG_REG_SS: + return (unsigned short)(regs->ss & 0xffff); + case INAT_SEG_REG_DS: + return (unsigned short)(regs->ds & 0xffff); + case INAT_SEG_REG_ES: + return (unsigned short)(regs->es & 0xffff); + case INAT_SEG_REG_FS: + return (unsigned short)(regs->fs & 0xffff); + case INAT_SEG_REG_GS: + /* + * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. + * The macro below takes care of both cases. + */ + return get_user_gs(regs); + case INAT_SEG_REG_IGNORE: + /* fall through */ + default: + return -EINVAL; + } +#endif /* CONFIG_X86_64 */ +} + +static int get_reg_offset(struct insn *insn, struct pt_regs *regs, + enum reg_type type) +{ + int regno = 0; + + static const int regoff[] = { + offsetof(struct pt_regs, ax), + offsetof(struct pt_regs, cx), + offsetof(struct pt_regs, dx), + offsetof(struct pt_regs, bx), + offsetof(struct pt_regs, sp), + offsetof(struct pt_regs, bp), + offsetof(struct pt_regs, si), + offsetof(struct pt_regs, di), +#ifdef CONFIG_X86_64 + offsetof(struct pt_regs, r8), + offsetof(struct pt_regs, r9), + offsetof(struct pt_regs, r10), + offsetof(struct pt_regs, r11), + offsetof(struct pt_regs, r12), + offsetof(struct pt_regs, r13), + offsetof(struct pt_regs, r14), + offsetof(struct pt_regs, r15), +#endif + }; + int nr_registers = ARRAY_SIZE(regoff); + /* + * Don't possibly decode a 32-bit instructions as + * reading a 64-bit-only register. + */ + if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) + nr_registers -= 8; + + switch (type) { + case REG_TYPE_RM: + regno = X86_MODRM_RM(insn->modrm.value); + + /* + * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement + * follows the ModRM byte. + */ + if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) + return -EDOM; + + if (X86_REX_B(insn->rex_prefix.value)) + regno += 8; + break; + + case REG_TYPE_INDEX: + regno = X86_SIB_INDEX(insn->sib.value); + if (X86_REX_X(insn->rex_prefix.value)) + regno += 8; + + /* + * If ModRM.mod != 3 and SIB.index = 4 the scale*index + * portion of the address computation is null. This is + * true only if REX.X is 0. In such a case, the SIB index + * is used in the address computation. 
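+		 * (With REX.X set, an index of 4 has already been bumped to 12
+		 * above, so the check below does not trigger and the index
+		 * register is used normally.)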
+ */ + if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) + return -EDOM; + break; + + case REG_TYPE_BASE: + regno = X86_SIB_BASE(insn->sib.value); + /* + * If ModRM.mod is 0 and SIB.base == 5, the base of the + * register-indirect addressing is 0. In this case, a + * 32-bit displacement follows the SIB byte. + */ + if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) + return -EDOM; + + if (X86_REX_B(insn->rex_prefix.value)) + regno += 8; + break; + + default: + pr_err_ratelimited("invalid register type: %d\n", type); + return -EINVAL; + } + + if (regno >= nr_registers) { + WARN_ONCE(1, "decoded an instruction with an invalid register"); + return -EINVAL; + } + return regoff[regno]; +} + +/** + * get_reg_offset_16() - Obtain offset of register indicated by instruction + * @insn: Instruction structure containing ModRM and SIB bytes + * @regs: Structure with register values as seen when entering kernel mode + * @offs1: Offset of the first operand register + * @offs2: Offset of the second opeand register, if applicable + * + * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte + * within insn. This function is to be used with 16-bit address encodings. The + * offs1 and offs2 will be written with the offset of the two registers + * indicated by the instruction. In cases where any of the registers is not + * referenced by the instruction, the value will be set to -EDOM. + * + * Return: 0 on success, -EINVAL on failure. + */ +static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, + int *offs1, int *offs2) +{ + /* + * 16-bit addressing can use one or two registers. Specifics of + * encodings are given in Table 2-1. "16-Bit Addressing Forms with the + * ModR/M Byte" of the Intel Software Development Manual. + */ + static const int regoff1[] = { + offsetof(struct pt_regs, bx), + offsetof(struct pt_regs, bx), + offsetof(struct pt_regs, bp), + offsetof(struct pt_regs, bp), + offsetof(struct pt_regs, si), + offsetof(struct pt_regs, di), + offsetof(struct pt_regs, bp), + offsetof(struct pt_regs, bx), + }; + + static const int regoff2[] = { + offsetof(struct pt_regs, si), + offsetof(struct pt_regs, di), + offsetof(struct pt_regs, si), + offsetof(struct pt_regs, di), + -EDOM, + -EDOM, + -EDOM, + -EDOM, + }; + + if (!offs1 || !offs2) + return -EINVAL; + + /* Operand is a register, use the generic function. */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + *offs1 = insn_get_modrm_rm_off(insn, regs); + *offs2 = -EDOM; + return 0; + } + + *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)]; + *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)]; + + /* + * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement- + * only addressing. This means that no registers are involved in + * computing the effective address. Thus, ensure that the first + * register offset is invalild. The second register offset is already + * invalid under the aforementioned conditions. + */ + if ((X86_MODRM_MOD(insn->modrm.value) == 0) && + (X86_MODRM_RM(insn->modrm.value) == 6)) + *offs1 = -EDOM; + + return 0; +} + +/** + * get_desc() - Obtain address of segment descriptor + * @sel: Segment selector + * + * Given a segment selector, obtain a pointer to the segment descriptor. + * Both global and local descriptor tables are supported. + * + * Return: pointer to segment descriptor on success. NULL on error. 
+ */ +static struct desc_struct *get_desc(unsigned short sel) +{ + struct desc_ptr gdt_desc = {0, 0}; + struct desc_struct *desc = NULL; + unsigned long desc_base; + +#ifdef CONFIG_MODIFY_LDT_SYSCALL + if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { + /* Bits [15:3] contain the index of the desired entry. */ + sel >>= 3; + + mutex_lock(¤t->active_mm->context.lock); + /* The size of the LDT refers to the number of entries. */ + if (!current->active_mm->context.ldt || + sel >= current->active_mm->context.ldt->nr_entries) { + mutex_unlock(¤t->active_mm->context.lock); + return NULL; + } + + desc = ¤t->active_mm->context.ldt->entries[sel]; + mutex_unlock(¤t->active_mm->context.lock); + return desc; + } +#endif + native_store_gdt(&gdt_desc); + + /* + * Segment descriptors have a size of 8 bytes. Thus, the index is + * multiplied by 8 to obtain the memory offset of the desired descriptor + * from the base of the GDT. As bits [15:3] of the segment selector + * contain the index, it can be regarded as multiplied by 8 already. + * All that remains is to clear bits [2:0]. + */ + desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); + + if (desc_base > gdt_desc.size) + return NULL; + + desc = (struct desc_struct *)(gdt_desc.address + desc_base); + return desc; +} + +/** + * insn_get_seg_base() - Obtain base address of segment descriptor. + * @regs: Structure with register values as seen when entering kernel mode + * @insn: Instruction structure with selector override prefixes + * @regoff: Operand offset, in pt_regs, of which the selector is needed + * + * Obtain the base address of the segment descriptor as indicated by either + * any segment override prefixes contained in insn or the default segment + * applicable to the register indicated by regoff. regoff is specified as the + * offset in bytes from the base of pt_regs. + * + * Return: In protected mode, base address of the segment. Zero in long mode, + * except when FS or GS are used. In virtual-8086 mode, the segment + * selector shifted 4 positions to the right. -1L in case of + * error. + */ +unsigned long insn_get_seg_base(struct pt_regs *regs, struct insn *insn, + int regoff) +{ + struct desc_struct *desc; + int seg_reg; + short sel; + + seg_reg = resolve_seg_register(insn, regs, regoff); + if (seg_reg < 0) + return -1L; + + sel = get_segment_selector(regs, seg_reg); + if (sel < 0) + return -1L; + + if (v8086_mode(regs)) + /* + * Base is simply the segment selector shifted 4 + * positions to the right. + */ + return (unsigned long)(sel << 4); + + if (user_64bit_mode(regs)) { + /* + * Only FS or GS will have a base address, the rest of + * the segments' bases are forced to 0. + */ + unsigned long base; + + if (seg_reg == INAT_SEG_REG_FS) + rdmsrl(MSR_FS_BASE, base); + else if (seg_reg == INAT_SEG_REG_GS) + /* + * swapgs was called at the kernel entry point. Thus, + * MSR_KERNEL_GS_BASE will have the user-space GS base. + */ + rdmsrl(MSR_KERNEL_GS_BASE, base); + else if (seg_reg != INAT_SEG_REG_IGNORE) + /* We should ignore the rest of segment registers. */ + base = -1L; + else + base = 0; + return base; + } + + /* In protected mode the segment selector cannot be null. 
*/ + if (!sel) + return -1L; + + desc = get_desc(sel); + if (!desc) + return -1L; + + return get_desc_base(desc); +} + +/** + * get_seg_limit() - Obtain the limit of a segment descriptor + * @regs: Structure with register values as seen when entering kernel mode + * @insn: Instruction structure with selector override prefixes + * @regoff: Operand offset, in pt_regs, of which the selector is needed + * + * Obtain the limit of the segment descriptor. The segment selector is obtained + * from the relevant segment register determined by inspecting any segment + * override prefixes or the default segment register associated with regoff. + * regoff is specified as the offset in bytes from the base * of pt_regs. + * + * Return: In protected mode, the limit of the segment descriptor in bytes. + * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, + * limit is returned as -1L to imply a limit-less segment. Zero is returned on + * error. + */ +static unsigned long get_seg_limit(struct pt_regs *regs, struct insn *insn, + int regoff) +{ + struct desc_struct *desc; + unsigned long limit; + int seg_reg; + short sel; + + seg_reg = resolve_seg_register(insn, regs, regoff); + if (seg_reg < 0) + return 0; + + sel = get_segment_selector(regs, seg_reg); + if (sel < 0) + return 0; + + if (user_64bit_mode(regs) || v8086_mode(regs)) + return -1L; + + if (!sel) + return 0; + + desc = get_desc(sel); + if (!desc) + return 0; + + /* + * If the granularity bit is set, the limit is given in multiples + * of 4096. This also means that the 12 least significant bits are + * not tested when checking the segment limits. In practice, + * this means that the segment ends in (limit << 12) + 0xfff. + */ + limit = get_desc_limit(desc); + if (desc->g) + limit = (limit << 12) + 0xfff; + + return limit; +} + +/** + * insn_get_code_seg_defaults() - Obtain code segment default parameters + * @regs: Structure with register values as seen when entering kernel mode + * + * Obtain the default parameters of the code segment: address and operand sizes. + * The code segment is obtained from the selector contained in the CS register + * in regs. In protected mode, the default address is determined by inspecting + * the L and D bits of the segment descriptor. In virtual-8086 mode, the default + * is always two bytes for both address and operand sizes. + * + * Return: A signed 8-bit value containing the default parameters on success and + * -EINVAL on error. + */ +char insn_get_code_seg_defaults(struct pt_regs *regs) +{ + struct desc_struct *desc; + unsigned short sel; + + if (v8086_mode(regs)) + /* Address and operand size are both 16-bit. */ + return INSN_CODE_SEG_PARAMS(2, 2); + + sel = (unsigned short)regs->cs; + + desc = get_desc(sel); + if (!desc) + return -EINVAL; + + /* + * The most significant byte of the Type field of the segment descriptor + * determines whether a segment contains data or code. If this is a data + * segment, return error. + */ + if (!(desc->type & BIT(3))) + return -EINVAL; + + switch ((desc->l << 1) | desc->d) { + case 0: /* + * Legacy mode. CS.L=0, CS.D=0. Address and operand size are + * both 16-bit. + */ + return INSN_CODE_SEG_PARAMS(2, 2); + case 1: /* + * Legacy mode. CS.L=0, CS.D=1. Address and operand size are + * both 32-bit. + */ + return INSN_CODE_SEG_PARAMS(4, 4); + case 2: /* + * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; + * operand size is 32-bit. + */ + return INSN_CODE_SEG_PARAMS(4, 8); + case 3: /* Invalid setting. 
CS.L=1, CS.D=1 */ + /* fall through */ + default: + return -EINVAL; + } +} + +/** + * insn_get_modrm_rm_off() - Obtain register in r/m part of ModRM byte + * @insn: Instruction structure containing the ModRM byte + * @regs: Structure with register values as seen when entering kernel mode + * + * Return: The register indicated by the r/m part of the ModRM byte. The + * register is obtained as an offset from the base of pt_regs. In specific + * cases, the returned value can be -EDOM to indicate that the particular value + * of ModRM does not refer to a register and shall be ignored. + */ +int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) +{ + return get_reg_offset(insn, regs, REG_TYPE_RM); +} + +/** + * get_addr_ref_16() - Obtain the 16-bit address referred by instruction + * @insn: Instruction structure containing ModRM byte and displacement + * @regs: Structure with register values as seen when entering kernel mode + * + * This function is to be used with 16-bit address encodings. Obtain the memory + * address referred by the instruction's ModRM and displacement bytes. Also, the + * segment used as base is determined by either any segment override prefixes in + * insn or the default segment of the registers involved in the address + * computation. In protected mode, segment limits are enforced. + * + * Return: linear address referenced by instruction and registers on success. + * -1L on error. + */ +static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) +{ + unsigned long linear_addr = -1L, seg_base_addr, seg_limit; + short eff_addr, addr1 = 0, addr2 = 0; + int addr_offset1, addr_offset2; + int ret; + + insn_get_modrm(insn); + insn_get_displacement(insn); + + if (insn->addr_bytes != 2) + goto out; + + /* + * If operand is a register, the layout is the same as in + * 32-bit and 64-bit addressing. + */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + addr_offset1 = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset1 < 0) + goto out; + + eff_addr = regs_get_register(regs, addr_offset1); + + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset1); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset1); + } else { + ret = get_reg_offset_16(insn, regs, &addr_offset1, + &addr_offset2); + if (ret < 0) + goto out; + + /* + * Don't fail on invalid offset values. They might be invalid + * because they cannot be used for this particular value of + * the ModRM. Instead, use them in the computation only if + * they contain a valid value. + */ + if (addr_offset1 != -EDOM) + addr1 = 0xffff & regs_get_register(regs, addr_offset1); + if (addr_offset2 != -EDOM) + addr2 = 0xffff & regs_get_register(regs, addr_offset2); + + eff_addr = addr1 + addr2; + + /* + * The first operand register could indicate to use of either SS + * or DS registers to obtain the segment selector. The second + * operand register can only indicate the use of DS. Thus, use + * the first register to obtain the segment selector. + */ + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset1); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset1); + + eff_addr += (insn->displacement.value & 0xffff); + } + + /* + * Before computing the linear address, make sure the effective address + * is within the limits of the segment. In virtual-8086 mode, segment + * limits are not enforced. In such a case, the segment limit is -1L to + * reflect this fact. 
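+	 *
+	 * Since a 16-bit effective address can never exceed an (unsigned long)
+	 * limit of -1L, the check below effectively never fails in that case.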
+ */ + if ((unsigned long)(eff_addr & 0xffff) > seg_limit) + goto out; + + linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base_addr; + + /* Limit linear address to 20 bits */ + if (v8086_mode(regs)) + linear_addr &= 0xfffff; + +out: + return (void __user *)linear_addr; +} + +/** + * get_addr_ref_32() - Obtain a 32-bit linear address + * @insn: Instruction struct with ModRM and SIB bytes and displacement + * @regs: Structure with register values as seen when entering kernel mode + * + * This function is to be used with 32-bit address encodings to obtain the + * linear memory address referred by the instruction's ModRM, SIB, + * displacement bytes and segment base address, as applicable. If in protected + * mode, segment limits are enforced. + * + * Return: linear address referenced by instruction and registers on success. + * -1L on error. + */ +static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) +{ + int eff_addr, base, indx, addr_offset, base_offset, indx_offset; + unsigned long linear_addr = -1L, seg_base_addr, seg_limit, tmp; + insn_byte_t sib; + + insn_get_modrm(insn); + insn_get_sib(insn); + sib = insn->sib.value; + + if (insn->addr_bytes != 4) + goto out; + + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset < 0) + goto out; + + tmp = regs_get_register(regs, addr_offset); + /* The 4 most significant bytes must be zero. */ + if (tmp & ~0xffffffffL) + goto out; + + eff_addr = (int)(tmp & 0xffffffff); + + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset); + } else { + if (insn->sib.nbytes) { + /* + * Negative values in the base and index offset means + * an error when decoding the SIB byte. Except -EDOM, + * which means that the registers should not be used + * in the address computation. + */ + base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); + if (base_offset == -EDOM) { + base = 0; + } else if (base_offset < 0) { + goto out; + } else { + tmp = regs_get_register(regs, base_offset); + /* The 4 most significant bytes must be zero. */ + if (tmp & ~0xffffffffL) + goto out; + + base = (int)(tmp & 0xffffffff); + } + + indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); + if (indx_offset == -EDOM) { + indx = 0; + } else if (indx_offset < 0) { + goto out; + } else { + tmp = regs_get_register(regs, indx_offset); + /* The 4 most significant bytes must be zero. */ + if (tmp & ~0xffffffffL) + goto out; + + indx = (int)(tmp & 0xffffffff); + } + + eff_addr = base + indx * (1 << X86_SIB_SCALE(sib)); + + seg_base_addr = insn_get_seg_base(regs, insn, + base_offset); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, base_offset); + } else { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + + /* + * -EDOM means that we must ignore the address_offset. + * In such a case, in 64-bit mode the effective address + * relative to the RIP of the following instruction. + */ + if (addr_offset == -EDOM) { + if (user_64bit_mode(regs)) + eff_addr = (long)regs->ip + insn->length; + else + eff_addr = 0; + } else if (addr_offset < 0) { + goto out; + } else { + tmp = regs_get_register(regs, addr_offset); + /* The 4 most significant bytes must be zero. 
*/ + if (tmp & ~0xffffffffL) + goto out; + + eff_addr = (int)(tmp & 0xffffffff); + } + + seg_base_addr = insn_get_seg_base(regs, insn, + addr_offset); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset); + } + eff_addr += insn->displacement.value; + } + + /* + * In protected mode, before computing the linear address, make sure + * the effective address is within the limits of the segment. + * 32-bit addresses can be used in long and virtual-8086 modes if an + * address override prefix is used. In such cases, segment limits are + * not enforced. When in virtual-8086 mode, the segment limit is -1L + * to reflect this situation. + * + * After computed, the effective address is treated as an unsigned + * quantity. + */ + if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) + goto out; + + /* + * Even though 32-bit address encodings are allowed in virtual-8086 + * mode, the address range is still limited to [0x-0xffff]. + */ + if (v8086_mode(regs) && (eff_addr & ~0xffff)) + goto out; + + /* + * Data type long could be 64 bits in size. Ensure that our 32-bit + * effective address is not sign-extended when computing the linear + * address. + */ + linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base_addr; + + /* Limit linear address to 20 bits */ + if (v8086_mode(regs)) + linear_addr &= 0xfffff; + +out: + return (void __user *)linear_addr; +} + +/** + * get_addr_ref_64() - Obtain a 64-bit linear address + * @insn: Instruction struct with ModRM and SIB bytes and displacement + * @regs: Structure with register values as seen when entering kernel mode + * + * This function is to be used with 64-bit address encodings to obtain the + * linear memory address referred by the instruction's ModRM, SIB, + * displacement bytes and segment base address, as applicable. + * + * Return: linear address referenced by instruction and registers on success. + * -1L on error. + */ +#ifndef CONFIG_X86_64 +static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) +{ + return (void __user *)-1L; +} +#else +static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs) +{ + unsigned long linear_addr = -1L, seg_base_addr; + int addr_offset, base_offset, indx_offset; + long eff_addr, base, indx; + insn_byte_t sib; + + insn_get_modrm(insn); + insn_get_sib(insn); + sib = insn->sib.value; + + if (insn->addr_bytes != 8) + goto out; + + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset < 0) + goto out; + + eff_addr = regs_get_register(regs, addr_offset); + + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset); + if (seg_base_addr == -1L) + goto out; + } else { + if (insn->sib.nbytes) { + /* + * Negative values in the base and index offset means + * an error when decoding the SIB byte. Except -EDOM, + * which means that the registers should not be used + * in the address computation. 
+ */ + base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); + if (base_offset == -EDOM) + base = 0; + else if (base_offset < 0) + goto out; + else + base = regs_get_register(regs, base_offset); + + indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); + + if (indx_offset == -EDOM) + indx = 0; + else if (indx_offset < 0) + goto out; + else + indx = regs_get_register(regs, indx_offset); + + eff_addr = base + indx * (1 << X86_SIB_SCALE(sib)); + + seg_base_addr = insn_get_seg_base(regs, insn, + base_offset); + if (seg_base_addr == -1L) + goto out; + } else { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + /* + * -EDOM means that we must ignore the address_offset. + * In such a case, in 64-bit mode the effective address + * relative to the RIP of the following instruction. + */ + if (addr_offset == -EDOM) { + if (user_64bit_mode(regs)) + eff_addr = (long)regs->ip + insn->length; + else + eff_addr = 0; + } else if (addr_offset < 0) { + goto out; + } else { + eff_addr = regs_get_register(regs, addr_offset); + } + + seg_base_addr = insn_get_seg_base(regs, insn, + addr_offset); + if (seg_base_addr == -1L) + goto out; + } + + eff_addr += insn->displacement.value; + } + + linear_addr = (unsigned long)eff_addr + seg_base_addr; + +out: + return (void __user *)linear_addr; +} +#endif /* CONFIG_X86_64 */ + +/** + * insn_get_addr_ref() - Obtain the linear address referred by instruction + * @insn: Instruction structure containing ModRM byte and displacement + * @regs: Structure with register values as seen when entering kernel mode + * + * Obtain the linear address referred by the instruction's ModRM, SIB and + * displacement bytes, and segment base, as applicable. In protected mode, + * segment limits are enforced. + * + * Return: linear address referenced by instruction and registers on success. + * -1L on error. + */ +void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) +{ + switch (insn->addr_bytes) { + case 2: + return get_addr_ref_16(insn, regs); + case 4: + return get_addr_ref_32(insn, regs); + case 8: + return get_addr_ref_64(insn, regs); + default: + return (void __user *)-1L; + } +} diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S new file mode 100644 index 000000000000..c909961e678a --- /dev/null +++ b/arch/x86/lib/retpoline.S @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include +#include +#include +#include + +.macro THUNK reg + .section .text.__x86.indirect_thunk + +ENTRY(__x86_indirect_thunk_\reg) + CFI_STARTPROC + JMP_NOSPEC %\reg + CFI_ENDPROC +ENDPROC(__x86_indirect_thunk_\reg) +.endm + +/* + * Despite being an assembler file we can't just use .irp here + * because __KSYM_DEPS__ only uses the C preprocessor and would + * only see one instance of "__x86_indirect_thunk_\reg" rather + * than one per register with the correct names. So we do it + * the simple and nasty way... 
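+ *
+ * GENERATE_THUNK() below expands the THUNK macro and exports the symbol
+ * once per register, giving __KSYM_DEPS__ one explicit instance per name.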
+ */ +#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) +#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) +#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) + +GENERATE_THUNK(_ASM_AX) +GENERATE_THUNK(_ASM_BX) +GENERATE_THUNK(_ASM_CX) +GENERATE_THUNK(_ASM_DX) +GENERATE_THUNK(_ASM_SI) +GENERATE_THUNK(_ASM_DI) +GENERATE_THUNK(_ASM_BP) +#ifdef CONFIG_64BIT +GENERATE_THUNK(r8) +GENERATE_THUNK(r9) +GENERATE_THUNK(r10) +GENERATE_THUNK(r11) +GENERATE_THUNK(r12) +GENERATE_THUNK(r13) +GENERATE_THUNK(r14) +GENERATE_THUNK(r15) +#endif diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1b377f734e64..7add8ba06887 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -331,12 +331,12 @@ do { \ unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) { - stac(); + __uaccess_begin_nospec(); if (movsl_is_ok(to, from, n)) __copy_user(to, from, n); else n = __copy_user_intel(to, from, n); - clac(); + __uaccess_end(); return n; } EXPORT_SYMBOL(__copy_user_ll); @@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll); unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) { - stac(); + __uaccess_begin_nospec(); #ifdef CONFIG_X86_INTEL_USERCOPY if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) n = __copy_user_intel_nocache(to, from, n); @@ -353,7 +353,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr #else __copy_user(to, from, n); #endif - clac(); + __uaccess_end(); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 12e377184ee4..e0b85930dd77 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0 EndTable Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66) 82: INVPCID Gy,Mdq (66) 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) 88: vexpandps/d Vpd,Wpd (66),(ev) @@ -896,7 +896,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb @@ -970,6 +970,15 @@ GrpTable: Grp9 EndTable GrpTable: Grp10 +# all are UD1 +0: UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1 EndTable # Grp11A and Grp11B are expressed as Grp11 in Intel SDM diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 7ba7f3d7f477..27e9e90a8d35 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) @@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP) += debug_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o -obj-$(CONFIG_KMEMCHECK) += kmemcheck/ - KASAN_SANITIZE_kasan_init_$(BITS).o := n obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o @@ -43,9 +41,10 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o -obj-$(CONFIG_X86_INTEL_MPX) += mpx.o 
-obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o -obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_X86_INTEL_MPX) += mpx.o +obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o +obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c new file mode 100644 index 000000000000..476d810639a8 --- /dev/null +++ b/arch/x86/mm/cpu_entry_area.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage); + +#ifdef CONFIG_X86_64 +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks + [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); +#endif + +struct cpu_entry_area *get_cpu_entry_area(int cpu) +{ + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); + + return (struct cpu_entry_area *) va; +} +EXPORT_SYMBOL(get_cpu_entry_area); + +void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) +{ + unsigned long va = (unsigned long) cea_vaddr; + + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); +} + +static void __init +cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) +{ + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); +} + +static void percpu_setup_debug_store(int cpu) +{ +#ifdef CONFIG_CPU_SUP_INTEL + int npages; + void *cea; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + cea = &get_cpu_entry_area(cpu)->cpu_debug_store; + npages = sizeof(struct debug_store) / PAGE_SIZE; + BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0); + cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, + PAGE_KERNEL); + + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers; + /* + * Force the population of PMDs for not yet allocated per cpu + * memory like debug store buffers. + */ + npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; + for (; npages; npages--, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); +#endif +} + +/* Setup the fixmap mappings only once per-processor */ +static void __init setup_cpu_entry_area(int cpu) +{ +#ifdef CONFIG_X86_64 + extern char _entry_trampoline[]; + + /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ + pgprot_t gdt_prot = PAGE_KERNEL_RO; + pgprot_t tss_prot = PAGE_KERNEL_RO; +#else + /* + * On native 32-bit systems, the GDT cannot be read-only because + * our double fault handler uses a task gate, and entering through + * a task gate needs to change an available TSS to busy. If the + * GDT is read-only, that will triple fault. The TSS cannot be + * read-only because the CPU writes to it on task switches. + * + * On Xen PV, the GDT must be read-only because the hypervisor + * requires it. + */ + pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? 
+ PAGE_KERNEL_RO : PAGE_KERNEL; + pgprot_t tss_prot = PAGE_KERNEL; +#endif + + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), + gdt_prot); + + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, + per_cpu_ptr(&entry_stack_storage, cpu), 1, + PAGE_KERNEL); + + /* + * The Intel SDM says (Volume 3, 7.2.1): + * + * Avoid placing a page boundary in the part of the TSS that the + * processor reads during a task switch (the first 104 bytes). The + * processor may not correctly perform address translations if a + * boundary occurs in this area. During a task switch, the processor + * reads and writes into the first 104 bytes of each TSS (using + * contiguous physical addresses beginning with the physical address + * of the first byte of the TSS). So, after TSS access begins, if + * part of the 104 bytes is not physically contiguous, the processor + * will access incorrect information without generating a page-fault + * exception. + * + * There are also a lot of errata involving the TSS spanning a page + * boundary. Assert that we're not doing that. + */ + BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ + offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); + BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, + &per_cpu(cpu_tss_rw, cpu), + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); + +#ifdef CONFIG_X86_32 + per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); +#endif + +#ifdef CONFIG_X86_64 + BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); + BUILD_BUG_ON(sizeof(exception_stacks) != + sizeof(((struct cpu_entry_area *)0)->exception_stacks)); + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, + &per_cpu(exception_stacks, cpu), + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); + + cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, + __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); +#endif + percpu_setup_debug_store(cpu); +} + +static __init void setup_cpu_entry_area_ptes(void) +{ +#ifdef CONFIG_X86_32 + unsigned long start, end; + + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); + + start = CPU_ENTRY_AREA_BASE; + end = start + CPU_ENTRY_AREA_MAP_SIZE; + + /* Careful here: start + PMD_SIZE might wrap around */ + for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE) + populate_extra_pte(start); +#endif +} + +void __init setup_cpu_entry_areas(void) +{ + unsigned int cpu; + + setup_cpu_entry_area_ptes(); + + for_each_possible_cpu(cpu) + setup_cpu_entry_area(cpu); + + /* + * This is the last essential update to swapper_pgdir which needs + * to be synchronized to initial_page_table on 32bit. 
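setup_cpu_entry_area() above asserts at build time that the hardware-visible part of the TSS never straddles a page: XOR-ing the offset of the first byte of x86_tss with the offset of the byte just past it, then masking with PAGE_MASK, is non-zero exactly when the two offsets fall in different pages. A small userspace sketch of that check, with a stand-in structure and a hard-coded 4 KiB page size (the structure and names are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* offsetofend() as in the kernel: offset of the byte after a member. */
#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

struct hw_tss { char bytes[104]; };	/* the part the CPU reads on a task switch */

struct tss_like {
	char pad[PAGE_SIZE - 64];	/* deliberately pushes hw across a page boundary */
	struct hw_tss hw;
};

int main(void)
{
	size_t start = offsetof(struct tss_like, hw);
	size_t end   = offsetofend(struct tss_like, hw);

	/* Non-zero iff the first and one-past-last offsets land in different pages. */
	if ((start ^ end) & PAGE_MASK)
		printf("hw part crosses a page boundary (%#zx..%#zx)\n", start, end);
	else
		printf("hw part fits within one page\n");
	return 0;
}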
+ */ + sync_initial_page_table(); +} diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index bfcffdf6c577..421f2664ffa0 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -5,7 +5,7 @@ static int ptdump_show(struct seq_file *m, void *v) { - ptdump_walk_pgd_level(m, NULL); + ptdump_walk_pgd_level_debugfs(m, NULL, false); return 0; } @@ -22,21 +22,89 @@ static const struct file_operations ptdump_fops = { .release = single_release, }; -static struct dentry *pe; +static int ptdump_show_curknl(struct seq_file *m, void *v) +{ + if (current->mm->pgd) { + down_read(¤t->mm->mmap_sem); + ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false); + up_read(¤t->mm->mmap_sem); + } + return 0; +} + +static int ptdump_open_curknl(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_curknl, NULL); +} + +static const struct file_operations ptdump_curknl_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_curknl, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +static struct dentry *pe_curusr; + +static int ptdump_show_curusr(struct seq_file *m, void *v) +{ + if (current->mm->pgd) { + down_read(¤t->mm->mmap_sem); + ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true); + up_read(¤t->mm->mmap_sem); + } + return 0; +} + +static int ptdump_open_curusr(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_curusr, NULL); +} + +static const struct file_operations ptdump_curusr_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_curusr, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static struct dentry *dir, *pe_knl, *pe_curknl; static int __init pt_dump_debug_init(void) { - pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL, - &ptdump_fops); - if (!pe) + dir = debugfs_create_dir("page_tables", NULL); + if (!dir) return -ENOMEM; + pe_knl = debugfs_create_file("kernel", 0400, dir, NULL, + &ptdump_fops); + if (!pe_knl) + goto err; + + pe_curknl = debugfs_create_file("current_kernel", 0400, + dir, NULL, &ptdump_curknl_fops); + if (!pe_curknl) + goto err; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pe_curusr = debugfs_create_file("current_user", 0400, + dir, NULL, &ptdump_curusr_fops); + if (!pe_curusr) + goto err; +#endif return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; } static void __exit pt_dump_debug_exit(void) { - debugfs_remove_recursive(pe); + debugfs_remove_recursive(dir); } module_init(pt_dump_debug_init); diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 5e3ac6fe6c9e..2a4849e92831 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -44,68 +44,97 @@ struct addr_marker { unsigned long max_lines; }; -/* indices for address_markers; keep sync'd w/ address_markers below */ +/* Address space markers hints */ + +#ifdef CONFIG_X86_64 + enum address_markers_idx { USER_SPACE_NR = 0, -#ifdef CONFIG_X86_64 KERNEL_SPACE_NR, LOW_KERNEL_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif -# ifdef CONFIG_X86_ESPFIX64 + CPU_ENTRY_AREA_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif +#ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, -# endif +#endif +#ifdef CONFIG_EFI + EFI_END_NR, +#endif HIGH_KERNEL_NR, MODULES_VADDR_NR, 
MODULES_END_NR, -#else + FIXADDR_START_NR, + END_OF_SPACE_NR, +}; + +static struct addr_marker address_markers[] = { + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" }, + [LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMEMMAP_START_NR] = { 0UL, "Vmemmap" }, +#ifdef CONFIG_KASAN + [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, + [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, +#endif +#ifdef CONFIG_MODIFY_LDT_SYSCALL + [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" }, +#endif + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, +#ifdef CONFIG_X86_ESPFIX64 + [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, +#endif +#ifdef CONFIG_EFI + [EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" }, +#endif + [HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" }, + [MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" }, + [MODULES_END_NR] = { MODULES_END, "End Modules" }, + [FIXADDR_START_NR] = { FIXADDR_START, "Fixmap Area" }, + [END_OF_SPACE_NR] = { -1, NULL } +}; + +#else /* CONFIG_X86_64 */ + +enum address_markers_idx { + USER_SPACE_NR = 0, KERNEL_SPACE_NR, VMALLOC_START_NR, VMALLOC_END_NR, -# ifdef CONFIG_HIGHMEM +#ifdef CONFIG_HIGHMEM PKMAP_BASE_NR, -# endif - FIXADDR_START_NR, #endif + CPU_ENTRY_AREA_NR, + FIXADDR_START_NR, + END_OF_SPACE_NR, }; -/* Address space markers hints */ static struct addr_marker address_markers[] = { - { 0, "User Space" }, -#ifdef CONFIG_X86_64 - { 0x8000000000000000UL, "Kernel Space" }, - { 0/* PAGE_OFFSET */, "Low Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/* VMEMMAP_START */, "Vmemmap" }, -#ifdef CONFIG_KASAN - { KASAN_SHADOW_START, "KASAN shadow" }, - { KASAN_SHADOW_END, "KASAN shadow end" }, + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { PAGE_OFFSET, "Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMALLOC_END_NR] = { 0UL, "vmalloc() End" }, +#ifdef CONFIG_HIGHMEM + [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, #endif -# ifdef CONFIG_X86_ESPFIX64 - { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, -# endif -# ifdef CONFIG_EFI - { EFI_VA_END, "EFI Runtime Services" }, -# endif - { __START_KERNEL_map, "High Kernel Mapping" }, - { MODULES_VADDR, "Modules" }, - { MODULES_END, "End Modules" }, -#else - { PAGE_OFFSET, "Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/*VMALLOC_END*/, "vmalloc() End" }, -# ifdef CONFIG_HIGHMEM - { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, -# endif - { 0/*FIXADDR_START*/, "Fixmap Area" }, -#endif - { -1, NULL } /* End of list */ + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, + [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, + [END_OF_SPACE_NR] = { -1, NULL } }; +#endif /* !CONFIG_X86_64 */ + /* Multipliers for offsets within the PTEs */ #define PTE_LEVEL_MULT (PAGE_SIZE) #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) @@ -140,7 +169,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg) static const char * const level_name[] = { "cr3", "pgd", "p4d", "pud", "pmd", "pte" }; - if (!pgprot_val(prot)) { + if (!(pr & _PAGE_PRESENT)) { /* Not present */ pt_dump_cont_printf(m, dmsg, " "); } else { @@ -447,7 +476,7 @@ static inline bool is_hypervisor_range(int idx) } static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, - bool checkwx) + bool checkwx, bool dmesg) { #ifdef CONFIG_X86_64 pgd_t *start = (pgd_t *) &init_top_pgt; @@ -460,7 +489,7 @@ 
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, if (pgd) { start = pgd; - st.to_dmesg = true; + st.to_dmesg = dmesg; } st.check_wx = checkwx; @@ -498,13 +527,37 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd) { - ptdump_walk_pgd_level_core(m, pgd, false); + ptdump_walk_pgd_level_core(m, pgd, false, true); +} + +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (user && static_cpu_has(X86_FEATURE_PTI)) + pgd = kernel_to_user_pgdp(pgd); +#endif + ptdump_walk_pgd_level_core(m, pgd, false, false); +} +EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs); + +static void ptdump_walk_user_pgd_level_checkwx(void) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pgd_t *pgd = (pgd_t *) &init_top_pgt; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("x86/mm: Checking user space page tables\n"); + pgd = kernel_to_user_pgdp(pgd); + ptdump_walk_pgd_level_core(NULL, pgd, true, false); +#endif } -EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level); void ptdump_walk_pgd_level_checkwx(void) { - ptdump_walk_pgd_level_core(NULL, NULL, true); + ptdump_walk_pgd_level_core(NULL, NULL, true, false); + ptdump_walk_user_pgd_level_checkwx(); } static int __init pt_dump_init(void) @@ -525,8 +578,8 @@ static int __init pt_dump_init(void) address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; # endif address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; #endif - return 0; } __initcall(pt_dump_init); diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c3521e2be396..9fe656c42aa5 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -67,17 +68,22 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, * wrapped around) will be set. Additionally, seeing the refcount * reach 0 will set ZF (Zero Flag: result was zero). In each of * these cases we want a report, since it's a boundary condition. - * + * The SF case is not reported since it indicates post-boundary + * manipulations below zero or above INT_MAX. And if none of the + * flags are set, something has gone very wrong, so report it. */ if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) { bool zero = regs->flags & X86_EFLAGS_ZF; refcount_error_report(regs, zero ? "hit zero" : "overflow"); + } else if ((regs->flags & X86_EFLAGS_SF) == 0) { + /* Report if none of OF, ZF, nor SF are set. */ + refcount_error_report(regs, "unexpected saturation"); } return true; } -EXPORT_SYMBOL_GPL(ex_handler_refcount); +EXPORT_SYMBOL(ex_handler_refcount); /* * Handler for when we fail to restore a task's FPU state. We should never get @@ -207,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) * Old CPUs leave the high bits of CS on the stack * undefined. I'm not sure which CPUs do this, but at least * the 486 DX works this way. + * Xen pv domains are not using the default __KERNEL_CS. */ - if (regs->cs != __KERNEL_CS) + if (!xen_pv_domain() && regs->cs != __KERNEL_CS) goto fail; /* diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b0ff378650a9..0133d26f16be 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,7 +20,6 @@ #include /* boot_cpu_has, ... */ #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ -#include /* kmemcheck_*(), ... 
*/ #include /* VSYSCALL_ADDR */ #include /* emulate_vsyscall */ #include /* struct vm86 */ @@ -29,26 +28,6 @@ #define CREATE_TRACE_POINTS #include -/* - * Page fault error code bits: - * - * bit 0 == 0: no page found 1: protection fault - * bit 1 == 0: read access 1: write access - * bit 2 == 0: kernel-mode access 1: user-mode access - * bit 3 == 1: use of reserved bit detected - * bit 4 == 1: fault was an instruction fetch - * bit 5 == 1: protection keys block access - */ -enum x86_pf_error_code { - - PF_PROT = 1 << 0, - PF_WRITE = 1 << 1, - PF_USER = 1 << 2, - PF_RSVD = 1 << 3, - PF_INSTR = 1 << 4, - PF_PK = 1 << 5, -}; - /* * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: @@ -150,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * If it was a exec (instruction fetch) fault on NX page, then * do not ignore the fault: */ - if (error_code & PF_INSTR) + if (error_code & X86_PF_INSTR) return 0; instr = (void *)convert_ip_to_linear(current, regs); @@ -180,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * siginfo so userspace can discover which protection key was set * on the PTE. * - * If we get here, we know that the hardware signaled a PF_PK + * If we get here, we know that the hardware signaled a X86_PF_PK * fault and that there was a VMA once we got in the fault * handler. It does *not* guarantee that the VMA we find here * was the one that we faulted on. @@ -193,19 +172,20 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really * faulted on a pte with its pkey=4. */ -static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) +static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info, + u32 *pkey) { /* This is effectively an #ifdef */ if (!boot_cpu_has(X86_FEATURE_OSPKE)) return; /* Fault not from Protection Keys: nothing to do */ - if (si_code != SEGV_PKUERR) + if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV)) return; /* * force_sig_info_fault() is called from a number of * contexts, some of which have a VMA and some of which - * do not. The PF_PK handing happens after we have a + * do not. The X86_PF_PK handing happens after we have a * valid VMA, so we should never reach this without a * valid VMA. 
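The fault.c changes above drop the file-local PF_* enum in favor of the shared X86_PF_* constants; the removed enum spells out what each error-code bit means. A short sketch that decodes an error code using that same bit layout (the constants are restated locally here for illustration rather than taken from a kernel header):

#include <stdio.h>

#define X86_PF_PROT	(1 << 0)	/* 0: no page found, 1: protection fault */
#define X86_PF_WRITE	(1 << 1)	/* 0: read access,   1: write access     */
#define X86_PF_USER	(1 << 2)	/* 0: kernel mode,   1: user mode        */
#define X86_PF_RSVD	(1 << 3)	/* reserved bit set in a paging entry    */
#define X86_PF_INSTR	(1 << 4)	/* fault was an instruction fetch        */
#define X86_PF_PK	(1 << 5)	/* protection keys blocked the access    */

static void decode(unsigned long error_code)
{
	printf("%#lx: %s %s access from %s mode%s%s%s\n", error_code,
	       error_code & X86_PF_PROT  ? "protection fault on" : "non-present page,",
	       error_code & X86_PF_WRITE ? "write" : "read",
	       error_code & X86_PF_USER  ? "user" : "kernel",
	       error_code & X86_PF_INSTR ? ", instruction fetch" : "",
	       error_code & X86_PF_RSVD  ? ", reserved bit set" : "",
	       error_code & X86_PF_PK    ? ", blocked by a protection key" : "");
}

int main(void)
{
	decode(0x7);	/* user-mode write that hit a protection fault */
	decode(0x10);	/* kernel instruction fetch from a non-present page */
	return 0;
}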
*/ @@ -239,7 +219,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; - fill_sig_info_pkey(si_code, &info, pkey); + fill_sig_info_pkey(si_signo, si_code, &info, pkey); force_sig_info(si_signo, &info, tsk); } @@ -350,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address) if (!pmd_k) return -1; - if (pmd_huge(*pmd_k)) + if (pmd_large(*pmd_k)) return 0; pte_k = pte_offset_kernel(pmd_k, address); @@ -499,7 +479,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) BUG(); - if (pud_huge(*pud)) + if (pud_large(*pud)) return 0; pmd = pmd_offset(pud, address); @@ -510,7 +490,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) BUG(); - if (pmd_huge(*pmd)) + if (pmd_large(*pmd)) return 0; pte_ref = pte_offset_kernel(pmd_ref, address); @@ -698,7 +678,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, if (!oops_may_print()) return; - if (error_code & PF_INSTR) { + if (error_code & X86_PF_INSTR) { unsigned int level; pgd_t *pgd; pte_t *pte; @@ -780,7 +760,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, */ if (current->thread.sig_on_uaccess_err && signal) { tsk->thread.trap_nr = X86_TRAP_PF; - tsk->thread.error_code = error_code | PF_USER; + tsk->thread.error_code = error_code | X86_PF_USER; tsk->thread.cr2 = address; /* XXX: hwpoison faults will set the wrong code. */ @@ -898,7 +878,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; /* User mode accesses just cause a SIGSEGV */ - if (error_code & PF_USER) { + if (error_code & X86_PF_USER) { /* * It's possible to have interrupts off here: */ @@ -919,7 +899,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, * Instruction fetch faults in the vsyscall page might need * emulation. */ - if (unlikely((error_code & PF_INSTR) && + if (unlikely((error_code & X86_PF_INSTR) && ((address & ~0xfff) == VSYSCALL_ADDR))) { if (emulate_vsyscall(regs, address)) return; @@ -932,7 +912,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, * are always protection faults. */ if (address >= TASK_SIZE_MAX) - error_code |= PF_PROT; + error_code |= X86_PF_PROT; if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); @@ -993,11 +973,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return false; - if (error_code & PF_PK) + if (error_code & X86_PF_PK) return true; /* this checks permission keys on the VMA: */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), - (error_code & PF_INSTR), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), + (error_code & X86_PF_INSTR), foreign)) return true; return false; } @@ -1025,7 +1005,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, int code = BUS_ADRERR; /* Kernel mode? 
Handle exceptions or die: */ - if (!(error_code & PF_USER)) { + if (!(error_code & X86_PF_USER)) { no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); return; } @@ -1053,14 +1033,14 @@ static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, u32 *pkey, unsigned int fault) { - if (fatal_signal_pending(current) && !(error_code & PF_USER)) { + if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { no_context(regs, error_code, address, 0, 0); return; } if (fault & VM_FAULT_OOM) { /* Kernel mode? Handle exceptions or die: */ - if (!(error_code & PF_USER)) { + if (!(error_code & X86_PF_USER)) { no_context(regs, error_code, address, SIGSEGV, SEGV_MAPERR); return; @@ -1085,16 +1065,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, static int spurious_fault_check(unsigned long error_code, pte_t *pte) { - if ((error_code & PF_WRITE) && !pte_write(*pte)) + if ((error_code & X86_PF_WRITE) && !pte_write(*pte)) return 0; - if ((error_code & PF_INSTR) && !pte_exec(*pte)) + if ((error_code & X86_PF_INSTR) && !pte_exec(*pte)) return 0; /* * Note: We do not do lazy flushing on protection key - * changes, so no spurious fault will ever set PF_PK. + * changes, so no spurious fault will ever set X86_PF_PK. */ - if ((error_code & PF_PK)) + if ((error_code & X86_PF_PK)) return 1; return 1; @@ -1140,8 +1120,8 @@ spurious_fault(unsigned long error_code, unsigned long address) * change, so user accesses are not expected to cause spurious * faults. */ - if (error_code != (PF_WRITE | PF_PROT) - && error_code != (PF_INSTR | PF_PROT)) + if (error_code != (X86_PF_WRITE | X86_PF_PROT) && + error_code != (X86_PF_INSTR | X86_PF_PROT)) return 0; pgd = init_mm.pgd + pgd_index(address); @@ -1201,19 +1181,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) * always an unconditional error and can never result in * a follow-up action to resolve the fault, like a COW. */ - if (error_code & PF_PK) + if (error_code & X86_PF_PK) return 1; /* * Make sure to check the VMA so that we do not perform - * faults just to hit a PF_PK as soon as we fill in a + * faults just to hit a X86_PF_PK as soon as we fill in a * page. */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), - (error_code & PF_INSTR), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), + (error_code & X86_PF_INSTR), foreign)) return 1; - if (error_code & PF_WRITE) { + if (error_code & X86_PF_WRITE) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) return 1; @@ -1221,7 +1201,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) } /* read, present: */ - if (unlikely(error_code & PF_PROT)) + if (unlikely(error_code & X86_PF_PROT)) return 1; /* read, not present: */ @@ -1244,7 +1224,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) if (!static_cpu_has(X86_FEATURE_SMAP)) return false; - if (error_code & PF_USER) + if (error_code & X86_PF_USER) return false; if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) @@ -1272,12 +1252,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, tsk = current; mm = tsk->mm; - /* - * Detect and handle instructions that would cause a page fault for - * both a tracked kernel page and a userspace page. 
- */ - if (kmemcheck_active(regs)) - kmemcheck_hide(regs); prefetchw(&mm->mmap_sem); if (unlikely(kmmio_fault(regs, address))) @@ -1297,12 +1271,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * protection error (error_code & 9) == 0. */ if (unlikely(fault_in_kernel_space(address))) { - if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { + if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { if (vmalloc_fault(address) >= 0) return; - - if (kmemcheck_fault(regs, address, error_code)) - return; } /* Can handle a stale RO->RW TLB: */ @@ -1325,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (unlikely(kprobes_fault(regs))) return; - if (unlikely(error_code & PF_RSVD)) + if (unlikely(error_code & X86_PF_RSVD)) pgtable_bad(regs, error_code, address); if (unlikely(smap_violation(error_code, regs))) { @@ -1351,7 +1322,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, */ if (user_mode(regs)) { local_irq_enable(); - error_code |= PF_USER; + error_code |= X86_PF_USER; flags |= FAULT_FLAG_USER; } else { if (regs->flags & X86_EFLAGS_IF) @@ -1360,9 +1331,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - if (error_code & PF_WRITE) + if (error_code & X86_PF_WRITE) flags |= FAULT_FLAG_WRITE; - if (error_code & PF_INSTR) + if (error_code & X86_PF_INSTR) flags |= FAULT_FLAG_INSTRUCTION; /* @@ -1382,7 +1353,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * space check, thus avoiding the deadlock: */ if (unlikely(!down_read_trylock(&mm->mmap_sem))) { - if ((error_code & PF_USER) == 0 && + if (!(error_code & X86_PF_USER) && !search_exception_tables(regs->ip)) { bad_area_nosemaphore(regs, error_code, address, NULL); return; @@ -1409,7 +1380,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, bad_area(regs, error_code, address); return; } - if (error_code & PF_USER) { + if (error_code & X86_PF_USER) { /* * Accessing the stack below %sp is always a bug. * The large cushion allows instructions like enter diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index af5c1ed21d43..82f5252c723a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -92,8 +93,7 @@ __ref void *alloc_low_pages(unsigned int num) unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); - return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK | - __GFP_ZERO, order); + return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { @@ -161,15 +161,20 @@ struct map_range { static int page_size_mask; +static void enable_global_pages(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + __supported_pte_mask |= _PAGE_GLOBAL; +} + static void __init probe_page_size_mask(void) { /* - * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will - * use small pages. + * For pagealloc debugging, identity mapping will use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. 
*/ - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) page_size_mask |= 1 << PG_LEVEL_2M; else direct_gbpages = 0; @@ -179,11 +184,11 @@ static void __init probe_page_size_mask(void) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ + __supported_pte_mask &= ~_PAGE_GLOBAL; if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); - __supported_pte_mask |= _PAGE_GLOBAL; - } else - __supported_pte_mask &= ~_PAGE_GLOBAL; + enable_global_pages(); + } /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { @@ -196,34 +201,44 @@ static void __init probe_page_size_mask(void) static void setup_pcid(void) { -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_PCID)) { - if (boot_cpu_has(X86_FEATURE_PGE)) { - /* - * This can't be cr4_set_bits_and_update_boot() -- - * the trampoline code can't handle CR4.PCIDE and - * it wouldn't do any good anyway. Despite the name, - * cr4_set_bits_and_update_boot() doesn't actually - * cause the bits in question to remain set all the - * way through the secondary boot asm. - * - * Instead, we brute-force it and set CR4.PCIDE - * manually in start_secondary(). - */ - cr4_set_bits(X86_CR4_PCIDE); - } else { - /* - * flush_tlb_all(), as currently implemented, won't - * work if PCID is on but PGE is not. Since that - * combination doesn't exist on real hardware, there's - * no reason to try to fully support it, but it's - * polite to avoid corrupting data if we're on - * an improperly configured VM. - */ - setup_clear_cpu_cap(X86_FEATURE_PCID); - } + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + if (!boot_cpu_has(X86_FEATURE_PCID)) + return; + + if (boot_cpu_has(X86_FEATURE_PGE)) { + /* + * This can't be cr4_set_bits_and_update_boot() -- the + * trampoline code can't handle CR4.PCIDE and it wouldn't + * do any good anyway. Despite the name, + * cr4_set_bits_and_update_boot() doesn't actually cause + * the bits in question to remain set all the way through + * the secondary boot asm. + * + * Instead, we brute-force it and set CR4.PCIDE manually in + * start_secondary(). + */ + cr4_set_bits(X86_CR4_PCIDE); + + /* + * INVPCID's single-context modes (2/3) only work if we set + * X86_CR4_PCIDE, *and* we INVPCID support. It's unusable + * on systems that have X86_CR4_PCIDE clear, or that have + * no INVPCID support at all. + */ + if (boot_cpu_has(X86_FEATURE_INVPCID)) + setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE); + } else { + /* + * flush_tlb_all(), as currently implemented, won't work if + * PCID is on but PGE is not. Since that combination + * doesn't exist on real hardware, there's no reason to try + * to fully support it, but it's polite to avoid corrupting + * data if we're on an improperly configured VM. 
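setup_pcid() above is restructured into early returns: a 32-bit kernel or a CPU without PCID does nothing, PCID without PGE is explicitly cleared so that flush_tlb_all() keeps working, and INVPCID single-context use is advertised only when CR4.PCIDE is set and the CPU actually has INVPCID. A toy decision-table sketch of that gating, with plain booleans standing in for the cpufeature bits:

#include <stdbool.h>
#include <stdio.h>

enum pcid_setup {
	PCID_UNSUPPORTED,		/* 32-bit kernel or no PCID at all   */
	PCID_CLEARED,			/* PCID present but PGE missing      */
	PCIDE_ONLY,			/* CR4.PCIDE set, no INVPCID         */
	PCIDE_AND_INVPCID_SINGLE,	/* CR4.PCIDE set, INVPCID usable too */
};

/* Mirrors the order of the checks in setup_pcid(). */
static enum pcid_setup pcid_policy(bool is_64bit, bool has_pcid,
				   bool has_pge, bool has_invpcid)
{
	if (!is_64bit || !has_pcid)
		return PCID_UNSUPPORTED;
	if (!has_pge)
		return PCID_CLEARED;
	return has_invpcid ? PCIDE_AND_INVPCID_SINGLE : PCIDE_ONLY;
}

int main(void)
{
	printf("%d\n", pcid_policy(true, true, true, true));	/* 3 */
	printf("%d\n", pcid_policy(true, true, false, true));	/* 1 */
	return 0;
}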
+ */ + setup_clear_cpu_cap(X86_FEATURE_PCID); } -#endif } #ifdef CONFIG_X86_32 @@ -624,6 +639,7 @@ void __init init_mem_mapping(void) { unsigned long end; + pti_check_boottime_disable(); probe_page_size_mask(); setup_pcid(); @@ -671,7 +687,7 @@ void __init init_mem_mapping(void) load_cr3(swapper_pg_dir); __flush_tlb_all(); - hypervisor_init_mem_mapping(); + x86_init.hyper.init_mem_mapping(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } @@ -847,12 +863,12 @@ void __init zone_sizes_init(void) free_area_init_nodes(max_zone_pfns); } -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { +__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { .loaded_mm = &init_mm, .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; -EXPORT_SYMBOL_GPL(cpu_tlbstate); +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate); void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8a64a6f2848d..3141e67ec24c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include "mm_internal.h" @@ -452,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base) } #endif /* CONFIG_HIGHMEM */ +void __init sync_initial_page_table(void) +{ + clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + + /* + * sync back low identity map too. It is used for example + * in the 32-bit EFI stub. + */ + clone_pgd_range(initial_page_table, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); +} + void __init native_pagetable_init(void) { unsigned long pfn, va; @@ -766,6 +782,7 @@ void __init mem_init(void) mem_init_print_info(NULL); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif @@ -777,6 +794,10 @@ void __init mem_init(void) FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, + CPU_ENTRY_AREA_BASE, + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, + CPU_ENTRY_AREA_MAP_SIZE >> 10, + #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 048fbe8fc274..fe85d1204db8 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -184,7 +184,7 @@ static __ref void *spp_getpage(void) void *ptr; if (after_bootmem) - ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); + ptr = (void *) get_zeroed_page(GFP_ATOMIC); else ptr = alloc_bootmem_pages(PAGE_SIZE); @@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) * It's enough to flush this one mapping. 
* (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) @@ -1426,16 +1426,16 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) void register_page_bootmem_memmap(unsigned long section_nr, - struct page *start_page, unsigned long size) + struct page *start_page, unsigned long nr_pages) { unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; - unsigned int nr_pages; + unsigned int nr_pmd_pages; struct page *page; for (; addr < end; addr = next) { @@ -1482,9 +1482,9 @@ void register_page_bootmem_memmap(unsigned long section_nr, if (pmd_none(*pmd)) continue; - nr_pages = 1 << (get_order(PMD_SIZE)); + nr_pmd_pages = 1 << get_order(PMD_SIZE); page = pmd_page(*pmd); - while (nr_pages--) + while (nr_pmd_pages--) get_page_bootmem(section_nr, page++, SECTION_INFO); } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 34f0e1847dd6..7bebdd0273d3 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -349,11 +349,11 @@ void iounmap(volatile void __iomem *addr) return; } + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by @@ -749,5 +749,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx, set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); else pte_clear(&init_mm, addr, pte); - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 8f5be3eb40dd..af6f2f9c6a26 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -4,19 +4,155 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include +#include extern struct range pfn_mapped[E820_MAX_ENTRIES]; -static int __init map_range(struct range *range) +static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); + +static __init void *early_alloc(size_t size, int nid, bool panic) +{ + if (panic) + return memblock_virt_alloc_try_nid(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); + else + return memblock_virt_alloc_try_nid_nopanic(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); +} + +static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, + unsigned long end, int nid) +{ + pte_t *pte; + + if (pmd_none(*pmd)) { + void *p; + + if (boot_cpu_has(X86_FEATURE_PSE) && + ((end - addr) == PMD_SIZE) && + IS_ALIGNED(addr, PMD_SIZE)) { + p = early_alloc(PMD_SIZE, nid, false); + if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) + return; + else if (p) + memblock_free(__pa(p), PMD_SIZE); + } + + p = early_alloc(PAGE_SIZE, nid, true); + pmd_populate_kernel(&init_mm, pmd, p); + } + + pte = pte_offset_kernel(pmd, addr); + do { + pte_t entry; + void *p; + + if (!pte_none(*pte)) + continue; + + p = early_alloc(PAGE_SIZE, nid, true); + entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); + set_pte_at(&init_mm, addr, pte, entry); + } while (pte++, addr += PAGE_SIZE, addr != 
end); +} + +static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, + unsigned long end, int nid) +{ + pmd_t *pmd; + unsigned long next; + + if (pud_none(*pud)) { + void *p; + + if (boot_cpu_has(X86_FEATURE_GBPAGES) && + ((end - addr) == PUD_SIZE) && + IS_ALIGNED(addr, PUD_SIZE)) { + p = early_alloc(PUD_SIZE, nid, false); + if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) + return; + else if (p) + memblock_free(__pa(p), PUD_SIZE); + } + + p = early_alloc(PAGE_SIZE, nid, true); + pud_populate(&init_mm, pud, p); + } + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (!pmd_large(*pmd)) + kasan_populate_pmd(pmd, addr, next, nid); + } while (pmd++, addr = next, addr != end); +} + +static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, + unsigned long end, int nid) +{ + pud_t *pud; + unsigned long next; + + if (p4d_none(*p4d)) { + void *p = early_alloc(PAGE_SIZE, nid, true); + + p4d_populate(&init_mm, p4d, p); + } + + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_large(*pud)) + kasan_populate_pud(pud, addr, next, nid); + } while (pud++, addr = next, addr != end); +} + +static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, + unsigned long end, int nid) +{ + void *p; + p4d_t *p4d; + unsigned long next; + + if (pgd_none(*pgd)) { + p = early_alloc(PAGE_SIZE, nid, true); + pgd_populate(&init_mm, pgd, p); + } + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + kasan_populate_p4d(p4d, addr, next, nid); + } while (p4d++, addr = next, addr != end); +} + +static void __init kasan_populate_shadow(unsigned long addr, unsigned long end, + int nid) +{ + pgd_t *pgd; + unsigned long next; + + addr = addr & PAGE_MASK; + end = round_up(end, PAGE_SIZE); + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + kasan_populate_pgd(pgd, addr, next, nid); + } while (pgd++, addr = next, addr != end); +} + +static void __init map_range(struct range *range) { unsigned long start; unsigned long end; @@ -24,15 +160,17 @@ static int __init map_range(struct range *range) start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); - return vmemmap_populate(start, end, NUMA_NO_NODE); + kasan_populate_shadow(start, end, early_pfn_to_nid(range->start)); } static void __init clear_pgds(unsigned long start, unsigned long end) { pgd_t *pgd; + /* See comment in kasan_init() */ + unsigned long pgd_end = end & PGDIR_MASK; - for (; start < end; start += PGDIR_SIZE) { + for (; start < pgd_end; start += PGDIR_SIZE) { pgd = pgd_offset_k(start); /* * With folded p4d, pgd_clear() is nop, use p4d_clear() @@ -43,29 +181,61 @@ static void __init clear_pgds(unsigned long start, else pgd_clear(pgd); } + + pgd = pgd_offset_k(start); + for (; start < end; start += P4D_SIZE) + p4d_clear(p4d_offset(pgd, start)); +} + +static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) +{ + unsigned long p4d; + + if (!IS_ENABLED(CONFIG_X86_5LEVEL)) + return (p4d_t *)pgd; + + p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; + p4d += __START_KERNEL_map - phys_base; + return (p4d_t *)p4d + p4d_index(addr); +} + +static void __init kasan_early_p4d_populate(pgd_t *pgd, + unsigned long addr, + unsigned long end) +{ + pgd_t pgd_entry; + p4d_t *p4d, p4d_entry; + unsigned long next; + + if (pgd_none(*pgd)) { + pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d)); + set_pgd(pgd, pgd_entry); + } + + p4d = 
early_p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + + if (!p4d_none(*p4d)) + continue; + + p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud)); + set_p4d(p4d, p4d_entry); + } while (p4d++, addr = next, addr != end && p4d_none(*p4d)); } static void __init kasan_map_early_shadow(pgd_t *pgd) { - int i; - unsigned long start = KASAN_SHADOW_START; + /* See comment in kasan_init() */ + unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK; unsigned long end = KASAN_SHADOW_END; + unsigned long next; - for (i = pgd_index(start); start < end; i++) { - switch (CONFIG_PGTABLE_LEVELS) { - case 4: - pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) | - _KERNPG_TABLE); - break; - case 5: - pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) | - _KERNPG_TABLE); - break; - default: - BUILD_BUG(); - } - start += PGDIR_SIZE; - } + pgd += pgd_index(addr); + do { + next = pgd_addr_end(addr, end); + kasan_early_p4d_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); } #ifdef CONFIG_KASAN_INLINE @@ -102,7 +272,7 @@ void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PUD; i++) kasan_zero_pud[i] = __pud(pud_val); - for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++) + for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++) kasan_zero_p4d[i] = __p4d(p4d_val); kasan_map_early_shadow(early_top_pgt); @@ -112,37 +282,78 @@ void __init kasan_early_init(void) void __init kasan_init(void) { int i; + void *shadow_cpu_entry_begin, *shadow_cpu_entry_end; #ifdef CONFIG_KASAN_INLINE register_die_notifier(&kasan_die_notifier); #endif memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt)); + + /* + * We use the same shadow offset for 4- and 5-level paging to + * facilitate boot-time switching between paging modes. + * As result in 5-level paging mode KASAN_SHADOW_START and + * KASAN_SHADOW_END are not aligned to PGD boundary. + * + * KASAN_SHADOW_START doesn't share PGD with anything else. + * We claim whole PGD entry to make things easier. + * + * KASAN_SHADOW_END lands in the last PGD entry and it collides with + * bunch of things like kernel code, modules, EFI mapping, etc. + * We need to take extra steps to not overwrite them. 
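kasan_init() above manipulates shadow addresses, which map every 8 bytes of address space to one shadow byte by shifting the address right by 3 and adding a fixed offset; as a consequence the shadow of a PGD-aligned region is generally not PGD-aligned itself, which is what the comment about KASAN_SHADOW_START/END alignment is getting at. A minimal sketch of the mapping, with the offset value picked for illustration rather than taken from the kernel configuration:

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3		/* 8 bytes of memory per shadow byte */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL /* illustrative value only */

static uint64_t kasan_mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t start = 0xffff880000000000UL;	/* e.g. start of a direct mapping */

	/* The shift by 3 strips the low alignment bits, so a PGD-aligned
	 * address rarely produces a PGD-aligned shadow address. */
	printf("shadow(%#llx) = %#llx\n",
	       (unsigned long long)start,
	       (unsigned long long)kasan_mem_to_shadow(start));
	return 0;
}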
+ */ + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + void *ptr; + + ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); + memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table)); + set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)], + __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE)); + } + load_cr3(early_top_pgt); __flush_tlb_all(); - clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END); - kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK), kasan_mem_to_shadow((void *)PAGE_OFFSET)); for (i = 0; i < E820_MAX_ENTRIES; i++) { if (pfn_mapped[i].end == 0) break; - if (map_range(&pfn_mapped[i])) - panic("kasan: unable to allocate shadow!"); + map_range(&pfn_mapped[i]); } + + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; + shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); + shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, + PAGE_SIZE); + + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + + CPU_ENTRY_AREA_MAP_SIZE); + shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); + shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, + PAGE_SIZE); + kasan_populate_zero_shadow( kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); + shadow_cpu_entry_begin); + + kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, + (unsigned long)shadow_cpu_entry_end, 0); + + kasan_populate_zero_shadow(shadow_cpu_entry_end, + kasan_mem_to_shadow((void *)__START_KERNEL_map)); - vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), - (unsigned long)kasan_mem_to_shadow(_end), - NUMA_NO_NODE); + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), + (unsigned long)kasan_mem_to_shadow(_end), + early_pfn_to_nid(__pa(_stext))); kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), - (void *)KASAN_SHADOW_END); + (void *)KASAN_SHADOW_END); load_cr3(init_top_pgt); __flush_tlb_all(); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 879ef930e2c2..aedebd2ebf1e 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -34,25 +34,14 @@ #define TB_SHIFT 40 /* - * Virtual address start and end range for randomization. The end changes base - * on configuration to have the highest amount of space for randomization. - * It increases the possible random position for each randomized region. + * Virtual address start and end range for randomization. * - * You need to add an if/def entry if you introduce a new memory region - * compatible with KASLR. Your entry must be in logical order with memory - * layout. For example, ESPFIX is before EFI because its virtual address is - * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to - * ensure that this order is correct and won't be changed. + * The end address could depend on more configuration options to make the + * highest amount of space for randomization available, but that's too hard + * to keep straight and caused issues already. 
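The kasan_init() hunk above also rounds the CPU entry area's shadow start down and its shadow end up to page boundaries before populating the shadow, so that only whole pages are mapped. A tiny sketch of those power-of-two rounding helpers (re-implemented locally; the kernel's round_up() is written differently but is equivalent for power-of-two alignments):

#include <stdio.h>

#define round_down(x, a)	((x) & ~((a) - 1))
#define round_up(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long page  = 4096;
	unsigned long begin = 0xfffff7fffe001234UL;	/* arbitrary shadow begin */
	unsigned long end   = 0xfffff7fffe005678UL;	/* arbitrary shadow end   */

	/* Extend the range outwards so it covers whole pages. */
	printf("begin %#lx -> %#lx\n", begin, round_down(begin, page));
	printf("end   %#lx -> %#lx\n", end,   round_up(end, page));
	return 0;
}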
*/ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; - -#if defined(CONFIG_X86_ESPFIX64) -static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; -#elif defined(CONFIG_EFI) -static const unsigned long vaddr_end = EFI_VA_END; -#else -static const unsigned long vaddr_end = __START_KERNEL_map; -#endif +static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; /* Default values */ unsigned long page_offset_base = __PAGE_OFFSET_BASE; @@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void) unsigned long remain_entropy; /* - * All these BUILD_BUG_ON checks ensures the memory layout is - * consistent with the vaddr_start/vaddr_end variables. + * These BUILD_BUG_ON checks ensure the memory layout is consistent + * with the vaddr_start/vaddr_end variables. These checks are very + * limited.... */ BUILD_BUG_ON(vaddr_start >= vaddr_end); - BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && - vaddr_end >= EFI_VA_END); - BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || - IS_ENABLED(CONFIG_EFI)) && - vaddr_end >= __START_KERNEL_map); + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); if (!kaslr_memory_enabled()) diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile deleted file mode 100644 index 520b3bce4095..000000000000 --- a/arch/x86/mm/kmemcheck/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c deleted file mode 100644 index 872ec4159a68..000000000000 --- a/arch/x86/mm/kmemcheck/error.c +++ /dev/null @@ -1,228 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include - -#include "error.h" -#include "shadow.h" - -enum kmemcheck_error_type { - KMEMCHECK_ERROR_INVALID_ACCESS, - KMEMCHECK_ERROR_BUG, -}; - -#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT) - -struct kmemcheck_error { - enum kmemcheck_error_type type; - - union { - /* KMEMCHECK_ERROR_INVALID_ACCESS */ - struct { - /* Kind of access that caused the error */ - enum kmemcheck_shadow state; - /* Address and size of the erroneous read */ - unsigned long address; - unsigned int size; - }; - }; - - struct pt_regs regs; - struct stack_trace trace; - unsigned long trace_entries[32]; - - /* We compress it to a char. */ - unsigned char shadow_copy[SHADOW_COPY_SIZE]; - unsigned char memory_copy[SHADOW_COPY_SIZE]; -}; - -/* - * Create a ring queue of errors to output. We can't call printk() directly - * from the kmemcheck traps, since this may call the console drivers and - * result in a recursive fault. 
- */ -static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE]; -static unsigned int error_count; -static unsigned int error_rd; -static unsigned int error_wr; -static unsigned int error_missed_count; - -static struct kmemcheck_error *error_next_wr(void) -{ - struct kmemcheck_error *e; - - if (error_count == ARRAY_SIZE(error_fifo)) { - ++error_missed_count; - return NULL; - } - - e = &error_fifo[error_wr]; - if (++error_wr == ARRAY_SIZE(error_fifo)) - error_wr = 0; - ++error_count; - return e; -} - -static struct kmemcheck_error *error_next_rd(void) -{ - struct kmemcheck_error *e; - - if (error_count == 0) - return NULL; - - e = &error_fifo[error_rd]; - if (++error_rd == ARRAY_SIZE(error_fifo)) - error_rd = 0; - --error_count; - return e; -} - -void kmemcheck_error_recall(void) -{ - static const char *desc[] = { - [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated", - [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized", - [KMEMCHECK_SHADOW_INITIALIZED] = "initialized", - [KMEMCHECK_SHADOW_FREED] = "freed", - }; - - static const char short_desc[] = { - [KMEMCHECK_SHADOW_UNALLOCATED] = 'a', - [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u', - [KMEMCHECK_SHADOW_INITIALIZED] = 'i', - [KMEMCHECK_SHADOW_FREED] = 'f', - }; - - struct kmemcheck_error *e; - unsigned int i; - - e = error_next_rd(); - if (!e) - return; - - switch (e->type) { - case KMEMCHECK_ERROR_INVALID_ACCESS: - printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n", - 8 * e->size, e->state < ARRAY_SIZE(desc) ? - desc[e->state] : "(invalid shadow state)", - (void *) e->address); - - printk(KERN_WARNING); - for (i = 0; i < SHADOW_COPY_SIZE; ++i) - printk(KERN_CONT "%02x", e->memory_copy[i]); - printk(KERN_CONT "\n"); - - printk(KERN_WARNING); - for (i = 0; i < SHADOW_COPY_SIZE; ++i) { - if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) - printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]); - else - printk(KERN_CONT " ?"); - } - printk(KERN_CONT "\n"); - printk(KERN_WARNING "%*c\n", 2 + 2 - * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); - break; - case KMEMCHECK_ERROR_BUG: - printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n"); - break; - } - - __show_regs(&e->regs, 1); - print_stack_trace(&e->trace, 0); -} - -static void do_wakeup(unsigned long data) -{ - while (error_count > 0) - kmemcheck_error_recall(); - - if (error_missed_count > 0) { - printk(KERN_WARNING "kmemcheck: Lost %d error reports because " - "the queue was too small\n", error_missed_count); - error_missed_count = 0; - } -} - -static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0); - -/* - * Save the context of an error report. - */ -void kmemcheck_error_save(enum kmemcheck_shadow state, - unsigned long address, unsigned int size, struct pt_regs *regs) -{ - static unsigned long prev_ip; - - struct kmemcheck_error *e; - void *shadow_copy; - void *memory_copy; - - /* Don't report several adjacent errors from the same EIP. 
*/ - if (regs->ip == prev_ip) - return; - prev_ip = regs->ip; - - e = error_next_wr(); - if (!e) - return; - - e->type = KMEMCHECK_ERROR_INVALID_ACCESS; - - e->state = state; - e->address = address; - e->size = size; - - /* Save regs */ - memcpy(&e->regs, regs, sizeof(*regs)); - - /* Save stack trace */ - e->trace.nr_entries = 0; - e->trace.entries = e->trace_entries; - e->trace.max_entries = ARRAY_SIZE(e->trace_entries); - e->trace.skip = 0; - save_stack_trace_regs(regs, &e->trace); - - /* Round address down to nearest 16 bytes */ - shadow_copy = kmemcheck_shadow_lookup(address - & ~(SHADOW_COPY_SIZE - 1)); - BUG_ON(!shadow_copy); - - memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); - - kmemcheck_show_addr(address); - memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1)); - memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE); - kmemcheck_hide_addr(address); - - tasklet_hi_schedule_first(&kmemcheck_tasklet); -} - -/* - * Save the context of a kmemcheck bug. - */ -void kmemcheck_error_save_bug(struct pt_regs *regs) -{ - struct kmemcheck_error *e; - - e = error_next_wr(); - if (!e) - return; - - e->type = KMEMCHECK_ERROR_BUG; - - memcpy(&e->regs, regs, sizeof(*regs)); - - e->trace.nr_entries = 0; - e->trace.entries = e->trace_entries; - e->trace.max_entries = ARRAY_SIZE(e->trace_entries); - e->trace.skip = 1; - save_stack_trace(&e->trace); - - tasklet_hi_schedule_first(&kmemcheck_tasklet); -} diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h deleted file mode 100644 index 39f80d7a874d..000000000000 --- a/arch/x86/mm/kmemcheck/error.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H -#define ARCH__X86__MM__KMEMCHECK__ERROR_H - -#include - -#include "shadow.h" - -void kmemcheck_error_save(enum kmemcheck_shadow state, - unsigned long address, unsigned int size, struct pt_regs *regs); - -void kmemcheck_error_save_bug(struct pt_regs *regs); - -void kmemcheck_error_recall(void); - -#endif diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c deleted file mode 100644 index 4515bae36bbe..000000000000 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ /dev/null @@ -1,658 +0,0 @@ -/** - * kmemcheck - a heavyweight memory checker for the linux kernel - * Copyright (C) 2007, 2008 Vegard Nossum - * (With a lot of help from Ingo Molnar and Pekka Enberg.) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2) as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "error.h" -#include "opcode.h" -#include "pte.h" -#include "selftest.h" -#include "shadow.h" - - -#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT -# define KMEMCHECK_ENABLED 0 -#endif - -#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT -# define KMEMCHECK_ENABLED 1 -#endif - -#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT -# define KMEMCHECK_ENABLED 2 -#endif - -int kmemcheck_enabled = KMEMCHECK_ENABLED; - -int __init kmemcheck_init(void) -{ -#ifdef CONFIG_SMP - /* - * Limit SMP to use a single CPU. We rely on the fact that this code - * runs before SMP is set up. 
- */ - if (setup_max_cpus > 1) { - printk(KERN_INFO - "kmemcheck: Limiting number of CPUs to 1.\n"); - setup_max_cpus = 1; - } -#endif - - if (!kmemcheck_selftest()) { - printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n"); - kmemcheck_enabled = 0; - return -EINVAL; - } - - printk(KERN_INFO "kmemcheck: Initialized\n"); - return 0; -} - -early_initcall(kmemcheck_init); - -/* - * We need to parse the kmemcheck= option before any memory is allocated. - */ -static int __init param_kmemcheck(char *str) -{ - int val; - int ret; - - if (!str) - return -EINVAL; - - ret = kstrtoint(str, 0, &val); - if (ret) - return ret; - kmemcheck_enabled = val; - return 0; -} - -early_param("kmemcheck", param_kmemcheck); - -int kmemcheck_show_addr(unsigned long address) -{ - pte_t *pte; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return 0; - - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - __flush_tlb_one(address); - return 1; -} - -int kmemcheck_hide_addr(unsigned long address) -{ - pte_t *pte; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return 0; - - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); - __flush_tlb_one(address); - return 1; -} - -struct kmemcheck_context { - bool busy; - int balance; - - /* - * There can be at most two memory operands to an instruction, but - * each address can cross a page boundary -- so we may need up to - * four addresses that must be hidden/revealed for each fault. - */ - unsigned long addr[4]; - unsigned long n_addrs; - unsigned long flags; - - /* Data size of the instruction that caused a fault. */ - unsigned int size; -}; - -static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); - -bool kmemcheck_active(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - return data->balance > 0; -} - -/* Save an address that needs to be shown/hidden */ -static void kmemcheck_save_addr(unsigned long addr) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); - data->addr[data->n_addrs++] = addr; -} - -static unsigned int kmemcheck_show_all(void) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - unsigned int i; - unsigned int n; - - n = 0; - for (i = 0; i < data->n_addrs; ++i) - n += kmemcheck_show_addr(data->addr[i]); - - return n; -} - -static unsigned int kmemcheck_hide_all(void) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - unsigned int i; - unsigned int n; - - n = 0; - for (i = 0; i < data->n_addrs; ++i) - n += kmemcheck_hide_addr(data->addr[i]); - - return n; -} - -/* - * Called from the #PF handler. - */ -void kmemcheck_show(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - BUG_ON(!irqs_disabled()); - - if (unlikely(data->balance != 0)) { - kmemcheck_show_all(); - kmemcheck_error_save_bug(regs); - data->balance = 0; - return; - } - - /* - * None of the addresses actually belonged to kmemcheck. Note that - * this is not an error. - */ - if (kmemcheck_show_all() == 0) - return; - - ++data->balance; - - /* - * The IF needs to be cleared as well, so that the faulting - * instruction can run "uninterrupted". Otherwise, we might take - * an interrupt and start executing that before we've had a chance - * to hide the page again. 
- * - * NOTE: In the rare case of multiple faults, we must not override - * the original flags: - */ - if (!(regs->flags & X86_EFLAGS_TF)) - data->flags = regs->flags; - - regs->flags |= X86_EFLAGS_TF; - regs->flags &= ~X86_EFLAGS_IF; -} - -/* - * Called from the #DB handler. - */ -void kmemcheck_hide(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - int n; - - BUG_ON(!irqs_disabled()); - - if (unlikely(data->balance != 1)) { - kmemcheck_show_all(); - kmemcheck_error_save_bug(regs); - data->n_addrs = 0; - data->balance = 0; - - if (!(data->flags & X86_EFLAGS_TF)) - regs->flags &= ~X86_EFLAGS_TF; - if (data->flags & X86_EFLAGS_IF) - regs->flags |= X86_EFLAGS_IF; - return; - } - - if (kmemcheck_enabled) - n = kmemcheck_hide_all(); - else - n = kmemcheck_show_all(); - - if (n == 0) - return; - - --data->balance; - - data->n_addrs = 0; - - if (!(data->flags & X86_EFLAGS_TF)) - regs->flags &= ~X86_EFLAGS_TF; - if (data->flags & X86_EFLAGS_IF) - regs->flags |= X86_EFLAGS_IF; -} - -void kmemcheck_show_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) { - unsigned long address; - pte_t *pte; - unsigned int level; - - address = (unsigned long) page_address(&p[i]); - pte = lookup_address(address, &level); - BUG_ON(!pte); - BUG_ON(level != PG_LEVEL_4K); - - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); - __flush_tlb_one(address); - } -} - -bool kmemcheck_page_is_tracked(struct page *p) -{ - /* This will also check the "hidden" flag of the PTE. */ - return kmemcheck_pte_lookup((unsigned long) page_address(p)); -} - -void kmemcheck_hide_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) { - unsigned long address; - pte_t *pte; - unsigned int level; - - address = (unsigned long) page_address(&p[i]); - pte = lookup_address(address, &level); - BUG_ON(!pte); - BUG_ON(level != PG_LEVEL_4K); - - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); - set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); - __flush_tlb_one(address); - } -} - -/* Access may NOT cross page boundary */ -static void kmemcheck_read_strict(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - void *shadow; - enum kmemcheck_shadow status; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return; - - kmemcheck_save_addr(addr); - status = kmemcheck_shadow_test(shadow, size); - if (status == KMEMCHECK_SHADOW_INITIALIZED) - return; - - if (kmemcheck_enabled) - kmemcheck_error_save(status, addr, size, regs); - - if (kmemcheck_enabled == 2) - kmemcheck_enabled = 0; - - /* Don't warn about it again. */ - kmemcheck_shadow_set(shadow, size); -} - -bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) -{ - enum kmemcheck_shadow status; - void *shadow; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return true; - - status = kmemcheck_shadow_test_all(shadow, size); - - return status == KMEMCHECK_SHADOW_INITIALIZED; -} - -/* Access may cross page boundary */ -static void kmemcheck_read(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - unsigned long page = addr & PAGE_MASK; - unsigned long next_addr = addr + size - 1; - unsigned long next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - kmemcheck_read_strict(regs, addr, size); - return; - } - - /* - * What we do is basically to split the access across the - * two pages and handle each part separately. 
Yes, this means - * that we may now see reads that are 3 + 5 bytes, for - * example (and if both are uninitialized, there will be two - * reports), but it makes the code a lot simpler. - */ - kmemcheck_read_strict(regs, addr, next_page - addr); - kmemcheck_read_strict(regs, next_page, next_addr - next_page); -} - -static void kmemcheck_write_strict(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - void *shadow; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return; - - kmemcheck_save_addr(addr); - kmemcheck_shadow_set(shadow, size); -} - -static void kmemcheck_write(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - unsigned long page = addr & PAGE_MASK; - unsigned long next_addr = addr + size - 1; - unsigned long next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - kmemcheck_write_strict(regs, addr, size); - return; - } - - /* See comment in kmemcheck_read(). */ - kmemcheck_write_strict(regs, addr, next_page - addr); - kmemcheck_write_strict(regs, next_page, next_addr - next_page); -} - -/* - * Copying is hard. We have two addresses, each of which may be split across - * a page (and each page will have different shadow addresses). - */ -static void kmemcheck_copy(struct pt_regs *regs, - unsigned long src_addr, unsigned long dst_addr, unsigned int size) -{ - uint8_t shadow[8]; - enum kmemcheck_shadow status; - - unsigned long page; - unsigned long next_addr; - unsigned long next_page; - - uint8_t *x; - unsigned int i; - unsigned int n; - - BUG_ON(size > sizeof(shadow)); - - page = src_addr & PAGE_MASK; - next_addr = src_addr + size - 1; - next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - /* Same page */ - x = kmemcheck_shadow_lookup(src_addr); - if (x) { - kmemcheck_save_addr(src_addr); - for (i = 0; i < size; ++i) - shadow[i] = x[i]; - } else { - for (i = 0; i < size; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } else { - n = next_page - src_addr; - BUG_ON(n > sizeof(shadow)); - - /* First page */ - x = kmemcheck_shadow_lookup(src_addr); - if (x) { - kmemcheck_save_addr(src_addr); - for (i = 0; i < n; ++i) - shadow[i] = x[i]; - } else { - /* Not tracked */ - for (i = 0; i < n; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - - /* Second page */ - x = kmemcheck_shadow_lookup(next_page); - if (x) { - kmemcheck_save_addr(next_page); - for (i = n; i < size; ++i) - shadow[i] = x[i - n]; - } else { - /* Not tracked */ - for (i = n; i < size; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - - page = dst_addr & PAGE_MASK; - next_addr = dst_addr + size - 1; - next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - /* Same page */ - x = kmemcheck_shadow_lookup(dst_addr); - if (x) { - kmemcheck_save_addr(dst_addr); - for (i = 0; i < size; ++i) { - x[i] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - } else { - n = next_page - dst_addr; - BUG_ON(n > sizeof(shadow)); - - /* First page */ - x = kmemcheck_shadow_lookup(dst_addr); - if (x) { - kmemcheck_save_addr(dst_addr); - for (i = 0; i < n; ++i) { - x[i] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - - /* Second page */ - x = kmemcheck_shadow_lookup(next_page); - if (x) { - kmemcheck_save_addr(next_page); - for (i = n; i < size; ++i) { - x[i - n] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - } - - status = kmemcheck_shadow_test(shadow, size); - if (status == KMEMCHECK_SHADOW_INITIALIZED) - return; - - if (kmemcheck_enabled) - kmemcheck_error_save(status, 
src_addr, size, regs); - - if (kmemcheck_enabled == 2) - kmemcheck_enabled = 0; -} - -enum kmemcheck_method { - KMEMCHECK_READ, - KMEMCHECK_WRITE, -}; - -static void kmemcheck_access(struct pt_regs *regs, - unsigned long fallback_address, enum kmemcheck_method fallback_method) -{ - const uint8_t *insn; - const uint8_t *insn_primary; - unsigned int size; - - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - /* Recursive fault -- ouch. */ - if (data->busy) { - kmemcheck_show_addr(fallback_address); - kmemcheck_error_save_bug(regs); - return; - } - - data->busy = true; - - insn = (const uint8_t *) regs->ip; - insn_primary = kmemcheck_opcode_get_primary(insn); - - kmemcheck_opcode_decode(insn, &size); - - switch (insn_primary[0]) { -#ifdef CONFIG_KMEMCHECK_BITOPS_OK - /* AND, OR, XOR */ - /* - * Unfortunately, these instructions have to be excluded from - * our regular checking since they access only some (and not - * all) bits. This clears out "bogus" bitfield-access warnings. - */ - case 0x80: - case 0x81: - case 0x82: - case 0x83: - switch ((insn_primary[1] >> 3) & 7) { - /* OR */ - case 1: - /* AND */ - case 4: - /* XOR */ - case 6: - kmemcheck_write(regs, fallback_address, size); - goto out; - - /* ADD */ - case 0: - /* ADC */ - case 2: - /* SBB */ - case 3: - /* SUB */ - case 5: - /* CMP */ - case 7: - break; - } - break; -#endif - - /* MOVS, MOVSB, MOVSW, MOVSD */ - case 0xa4: - case 0xa5: - /* - * These instructions are special because they take two - * addresses, but we only get one page fault. - */ - kmemcheck_copy(regs, regs->si, regs->di, size); - goto out; - - /* CMPS, CMPSB, CMPSW, CMPSD */ - case 0xa6: - case 0xa7: - kmemcheck_read(regs, regs->si, size); - kmemcheck_read(regs, regs->di, size); - goto out; - } - - /* - * If the opcode isn't special in any way, we use the data from the - * page fault handler to determine the address and type of memory - * access. - */ - switch (fallback_method) { - case KMEMCHECK_READ: - kmemcheck_read(regs, fallback_address, size); - goto out; - case KMEMCHECK_WRITE: - kmemcheck_write(regs, fallback_address, size); - goto out; - } - -out: - data->busy = false; -} - -bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - pte_t *pte; - - /* - * XXX: Is it safe to assume that memory accesses from virtual 86 - * mode or non-kernel code segments will _never_ access kernel - * memory (e.g. tracked pages)? For now, we need this to avoid - * invoking kmemcheck for PnP BIOS calls. - */ - if (regs->flags & X86_VM_MASK) - return false; - if (regs->cs != __KERNEL_CS) - return false; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return false; - - WARN_ON_ONCE(in_nmi()); - - if (error_code & 2) - kmemcheck_access(regs, address, KMEMCHECK_WRITE); - else - kmemcheck_access(regs, address, KMEMCHECK_READ); - - kmemcheck_show(regs); - return true; -} - -bool kmemcheck_trap(struct pt_regs *regs) -{ - if (!kmemcheck_active(regs)) - return false; - - /* We're done. 
*/ - kmemcheck_hide(regs); - return true; -} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c deleted file mode 100644 index df8109ddf7fe..000000000000 --- a/arch/x86/mm/kmemcheck/opcode.c +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "opcode.h" - -static bool opcode_is_prefix(uint8_t b) -{ - return - /* Group 1 */ - b == 0xf0 || b == 0xf2 || b == 0xf3 - /* Group 2 */ - || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 - || b == 0x64 || b == 0x65 - /* Group 3 */ - || b == 0x66 - /* Group 4 */ - || b == 0x67; -} - -#ifdef CONFIG_X86_64 -static bool opcode_is_rex_prefix(uint8_t b) -{ - return (b & 0xf0) == 0x40; -} -#else -static bool opcode_is_rex_prefix(uint8_t b) -{ - return false; -} -#endif - -#define REX_W (1 << 3) - -/* - * This is a VERY crude opcode decoder. We only need to find the size of the - * load/store that caused our #PF and this should work for all the opcodes - * that we care about. Moreover, the ones who invented this instruction set - * should be shot. - */ -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size) -{ - /* Default operand size */ - int operand_size_override = 4; - - /* prefixes */ - for (; opcode_is_prefix(*op); ++op) { - if (*op == 0x66) - operand_size_override = 2; - } - - /* REX prefix */ - if (opcode_is_rex_prefix(*op)) { - uint8_t rex = *op; - - ++op; - if (rex & REX_W) { - switch (*op) { - case 0x63: - *size = 4; - return; - case 0x0f: - ++op; - - switch (*op) { - case 0xb6: - case 0xbe: - *size = 1; - return; - case 0xb7: - case 0xbf: - *size = 2; - return; - } - - break; - } - - *size = 8; - return; - } - } - - /* escape opcode */ - if (*op == 0x0f) { - ++op; - - /* - * This is move with zero-extend and sign-extend, respectively; - * we don't have to think about 0xb6/0xbe, because this is - * already handled in the conditional below. - */ - if (*op == 0xb7 || *op == 0xbf) - operand_size_override = 2; - } - - *size = (*op & 1) ? 
operand_size_override : 1; -} - -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) -{ - /* skip prefixes */ - while (opcode_is_prefix(*op)) - ++op; - if (opcode_is_rex_prefix(*op)) - ++op; - return op; -} diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h deleted file mode 100644 index 51a1ce94c24a..000000000000 --- a/arch/x86/mm/kmemcheck/opcode.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H -#define ARCH__X86__MM__KMEMCHECK__OPCODE_H - -#include - -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size); -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); - -#endif diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c deleted file mode 100644 index 8a03be90272a..000000000000 --- a/arch/x86/mm/kmemcheck/pte.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include - -#include "pte.h" - -pte_t *kmemcheck_pte_lookup(unsigned long address) -{ - pte_t *pte; - unsigned int level; - - pte = lookup_address(address, &level); - if (!pte) - return NULL; - if (level != PG_LEVEL_4K) - return NULL; - if (!pte_hidden(*pte)) - return NULL; - - return pte; -} - diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h deleted file mode 100644 index b595612382c2..000000000000 --- a/arch/x86/mm/kmemcheck/pte.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H -#define ARCH__X86__MM__KMEMCHECK__PTE_H - -#include - -#include - -pte_t *kmemcheck_pte_lookup(unsigned long address); - -#endif diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c deleted file mode 100644 index 7ce0be1f99eb..000000000000 --- a/arch/x86/mm/kmemcheck/selftest.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include - -#include "opcode.h" -#include "selftest.h" - -struct selftest_opcode { - unsigned int expected_size; - const uint8_t *insn; - const char *desc; -}; - -static const struct selftest_opcode selftest_opcodes[] = { - /* REP MOVS */ - {1, "\xf3\xa4", "rep movsb , "}, - {4, "\xf3\xa5", "rep movsl , "}, - - /* MOVZX / MOVZXD */ - {1, "\x66\x0f\xb6\x51\xf8", "movzwq , "}, - {1, "\x0f\xb6\x51\xf8", "movzwq , "}, - - /* MOVSX / MOVSXD */ - {1, "\x66\x0f\xbe\x51\xf8", "movswq , "}, - {1, "\x0f\xbe\x51\xf8", "movswq , "}, - -#ifdef CONFIG_X86_64 - /* MOVZX / MOVZXD */ - {1, "\x49\x0f\xb6\x51\xf8", "movzbq , "}, - {2, "\x49\x0f\xb7\x51\xf8", "movzbq , "}, - - /* MOVSX / MOVSXD */ - {1, "\x49\x0f\xbe\x51\xf8", "movsbq , "}, - {2, "\x49\x0f\xbf\x51\xf8", "movsbq , "}, - {4, "\x49\x63\x51\xf8", "movslq , "}, -#endif -}; - -static bool selftest_opcode_one(const struct selftest_opcode *op) -{ - unsigned size; - - kmemcheck_opcode_decode(op->insn, &size); - - if (size == op->expected_size) - return true; - - printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n", - op->desc, op->expected_size, size); - return false; -} - -static bool selftest_opcodes_all(void) -{ - bool pass = true; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i) - pass = pass && selftest_opcode_one(&selftest_opcodes[i]); - - return pass; -} - -bool kmemcheck_selftest(void) -{ - bool pass = true; - - pass = pass && selftest_opcodes_all(); - - return pass; -} diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h deleted file mode 100644 index 8d759aae453d..000000000000 --- 
a/arch/x86/mm/kmemcheck/selftest.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H -#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H - -bool kmemcheck_selftest(void); - -#endif diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c deleted file mode 100644 index c2638a7d2c10..000000000000 --- a/arch/x86/mm/kmemcheck/shadow.c +++ /dev/null @@ -1,173 +0,0 @@ -#include -#include -#include - -#include -#include - -#include "pte.h" -#include "shadow.h" - -/* - * Return the shadow address for the given address. Returns NULL if the - * address is not tracked. - * - * We need to be extremely careful not to follow any invalid pointers, - * because this function can be called for *any* possible address. - */ -void *kmemcheck_shadow_lookup(unsigned long address) -{ - pte_t *pte; - struct page *page; - - if (!virt_addr_valid(address)) - return NULL; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return NULL; - - page = virt_to_page(address); - if (!page->shadow) - return NULL; - return page->shadow + (address & (PAGE_SIZE - 1)); -} - -static void mark_shadow(void *address, unsigned int n, - enum kmemcheck_shadow status) -{ - unsigned long addr = (unsigned long) address; - unsigned long last_addr = addr + n - 1; - unsigned long page = addr & PAGE_MASK; - unsigned long last_page = last_addr & PAGE_MASK; - unsigned int first_n; - void *shadow; - - /* If the memory range crosses a page boundary, stop there. */ - if (page == last_page) - first_n = n; - else - first_n = page + PAGE_SIZE - addr; - - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, first_n); - - addr += first_n; - n -= first_n; - - /* Do full-page memset()s. */ - while (n >= PAGE_SIZE) { - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, PAGE_SIZE); - - addr += PAGE_SIZE; - n -= PAGE_SIZE; - } - - /* Do the remaining page, if any. */ - if (n > 0) { - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, n); - } -} - -void kmemcheck_mark_unallocated(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); -} - -void kmemcheck_mark_uninitialized(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); -} - -/* - * Fill the shadow memory of the given address such that the memory at that - * address is marked as being initialized. - */ -void kmemcheck_mark_initialized(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); -} -EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); - -void kmemcheck_mark_freed(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); -} - -void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); -} - -void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); -} - -void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE); -} - -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) -{ -#ifdef CONFIG_KMEMCHECK_PARTIAL_OK - uint8_t *x; - unsigned int i; - - x = shadow; - - /* - * Make sure _some_ bytes are initialized. 
Gcc frequently generates - * code to access neighboring bytes. - */ - for (i = 0; i < size; ++i) { - if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) - return x[i]; - } - - return x[0]; -#else - return kmemcheck_shadow_test_all(shadow, size); -#endif -} - -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size) -{ - uint8_t *x; - unsigned int i; - - x = shadow; - - /* All bytes must be initialized. */ - for (i = 0; i < size; ++i) { - if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) - return x[i]; - } - - return x[0]; -} - -void kmemcheck_shadow_set(void *shadow, unsigned int size) -{ - uint8_t *x; - unsigned int i; - - x = shadow; - for (i = 0; i < size; ++i) - x[i] = KMEMCHECK_SHADOW_INITIALIZED; -} diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h deleted file mode 100644 index 49768dc18664..000000000000 --- a/arch/x86/mm/kmemcheck/shadow.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H -#define ARCH__X86__MM__KMEMCHECK__SHADOW_H - -enum kmemcheck_shadow { - KMEMCHECK_SHADOW_UNALLOCATED, - KMEMCHECK_SHADOW_UNINITIALIZED, - KMEMCHECK_SHADOW_INITIALIZED, - KMEMCHECK_SHADOW_FREED, -}; - -void *kmemcheck_shadow_lookup(unsigned long address); - -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size); -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, - unsigned int size); -void kmemcheck_shadow_set(void *shadow, unsigned int size); - -#endif diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index c21c2ed04612..7c8686709636 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->addr); + __flush_tlb_one_kernel(f->addr); return 0; } @@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p) unsigned long flags; int ret = 0; unsigned long size = 0; + unsigned long addr = p->addr & PAGE_MASK; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); unsigned int l; pte_t *pte; spin_lock_irqsave(&kmmio_lock, flags); - if (get_kmmio_probe(p->addr)) { + if (get_kmmio_probe(addr)) { ret = -EEXIST; goto out; } - pte = lookup_address(p->addr, &l); + pte = lookup_address(addr, &l); if (!pte) { ret = -EINVAL; goto out; @@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p) kmmio_count++; list_add_rcu(&p->list, &kmmio_probes); while (size < size_lim) { - if (add_kmmio_fault_page(p->addr + size)) + if (add_kmmio_fault_page(addr + size)) pr_err("Unable to set page fault.\n"); size += page_level_size(l); } @@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p) { unsigned long flags; unsigned long size = 0; + unsigned long addr = p->addr & PAGE_MASK; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); struct kmmio_fault_page *release_list = NULL; struct kmmio_delayed_release *drelease; unsigned int l; pte_t *pte; - pte = lookup_address(p->addr, &l); + pte = lookup_address(addr, &l); if (!pte) return; spin_lock_irqsave(&kmmio_lock, flags); while (size < size_lim) { - release_kmmio_fault_page(p->addr + size, &release_list); + release_kmmio_fault_page(addr + size, &release_list); size += page_level_size(l); } list_del_rcu(&p->list); diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 0286327e65fa..48c03c74c7f4 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -213,37 +213,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) 
set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); } -static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, - unsigned long end) +struct sme_populate_pgd_data { + void *pgtable_area; + pgd_t *pgd; + + pmdval_t pmd_flags; + pteval_t pte_flags; + unsigned long paddr; + + unsigned long vaddr; + unsigned long vaddr_end; +}; + +static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) { unsigned long pgd_start, pgd_end, pgd_size; pgd_t *pgd_p; - pgd_start = start & PGDIR_MASK; - pgd_end = end & PGDIR_MASK; + pgd_start = ppd->vaddr & PGDIR_MASK; + pgd_end = ppd->vaddr_end & PGDIR_MASK; - pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1); - pgd_size *= sizeof(pgd_t); + pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t); - pgd_p = pgd_base + pgd_index(start); + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); memset(pgd_p, 0, pgd_size); } -#define PGD_FLAGS _KERNPG_TABLE_NOENC -#define P4D_FLAGS _KERNPG_TABLE_NOENC -#define PUD_FLAGS _KERNPG_TABLE_NOENC -#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) +#define PGD_FLAGS _KERNPG_TABLE_NOENC +#define P4D_FLAGS _KERNPG_TABLE_NOENC +#define PUD_FLAGS _KERNPG_TABLE_NOENC +#define PMD_FLAGS _KERNPG_TABLE_NOENC + +#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) + +#define PMD_FLAGS_DEC PMD_FLAGS_LARGE +#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) + +#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL) + +#define PTE_FLAGS_DEC PTE_FLAGS +#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC) -static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, - unsigned long vaddr, pmdval_t pmd_val) +static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) { pgd_t *pgd_p; p4d_t *p4d_p; pud_t *pud_p; pmd_t *pmd_p; - pgd_p = pgd_base + pgd_index(vaddr); + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); if (native_pgd_val(*pgd_p)) { if (IS_ENABLED(CONFIG_X86_5LEVEL)) p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); @@ -253,15 +278,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, pgd_t pgd; if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p = pgtable_area; + p4d_p = ppd->pgtable_area; memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); - pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; + ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); } else { - pud_p = pgtable_area; + pud_p = ppd->pgtable_area; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); } @@ -269,58 +294,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, } if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p += p4d_index(vaddr); + p4d_p += p4d_index(ppd->vaddr); if (native_p4d_val(*p4d_p)) { pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); } else { p4d_t p4d; - pud_p = pgtable_area; + pud_p = ppd->pgtable_area; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); native_set_p4d(p4d_p, p4d); } } - pud_p += pud_index(vaddr); + pud_p += pud_index(ppd->vaddr); if (native_pud_val(*pud_p)) { if 
(native_pud_val(*pud_p) & _PAGE_PSE) - goto out; + return NULL; pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); } else { pud_t pud; - pmd_p = pgtable_area; + pmd_p = ppd->pgtable_area; memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); - pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; + ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); native_set_pud(pud_p, pud); } - pmd_p += pmd_index(vaddr); + return pmd_p; +} + +static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) - native_set_pmd(pmd_p, native_make_pmd(pmd_val)); + native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); +} + +static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + pte_t *pte_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); + if (native_pmd_val(*pmd_p)) { + if (native_pmd_val(*pmd_p) & _PAGE_PSE) + return; + + pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK); + } else { + pmd_t pmd; + + pte_p = ppd->pgtable_area; + memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE); + ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE; + + pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS); + native_set_pmd(pmd_p, pmd); + } -out: - return pgtable_area; + pte_p += pte_index(ppd->vaddr); + if (!native_pte_val(*pte_p)) + native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags)); +} + +static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd_large(ppd); + + ppd->vaddr += PMD_PAGE_SIZE; + ppd->paddr += PMD_PAGE_SIZE; + } +} + +static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd(ppd); + + ppd->vaddr += PAGE_SIZE; + ppd->paddr += PAGE_SIZE; + } +} + +static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, + pmdval_t pmd_flags, pteval_t pte_flags) +{ + unsigned long vaddr_end; + + ppd->pmd_flags = pmd_flags; + ppd->pte_flags = pte_flags; + + /* Save original end value since we modify the struct value */ + vaddr_end = ppd->vaddr_end; + + /* If start is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE); + __sme_map_range_pte(ppd); + + /* Create PMD entries */ + ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK; + __sme_map_range_pmd(ppd); + + /* If end is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = vaddr_end; + __sme_map_range_pte(ppd); +} + +static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); +} + +static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); +} + +static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); } static unsigned long __init sme_pgtable_calc(unsigned long len) { - unsigned long p4d_size, pud_size, pmd_size; + unsigned long p4d_size, pud_size, pmd_size, pte_size; unsigned long total; /* * Perform a relatively simplistic calculation of the pagetable - * entries that are needed. 
That mappings will be covered by 2MB - * PMD entries so we can conservatively calculate the required + * entries that are needed. Those mappings will be covered mostly + * by 2MB PMD entries so we can conservatively calculate the required * number of P4D, PUD and PMD structures needed to perform the - * mappings. Incrementing the count for each covers the case where - * the addresses cross entries. + * mappings. For mappings that are not 2MB aligned, PTE mappings + * would be needed for the start and end portion of the address range + * that fall outside of the 2MB alignment. This results in, at most, + * two extra pages to hold PTE entries for each range that is mapped. + * Incrementing the count for each covers the case where the addresses + * cross entries. */ if (IS_ENABLED(CONFIG_X86_5LEVEL)) { p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; @@ -334,8 +461,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) } pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; + pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE; - total = p4d_size + pud_size + pmd_size; + total = p4d_size + pud_size + pmd_size + pte_size; /* * Now calculate the added pagetable structures needed to populate @@ -359,29 +487,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) return total; } -void __init sme_encrypt_kernel(void) +void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) { unsigned long workarea_start, workarea_end, workarea_len; unsigned long execute_start, execute_end, execute_len; unsigned long kernel_start, kernel_end, kernel_len; + unsigned long initrd_start, initrd_end, initrd_len; + struct sme_populate_pgd_data ppd; unsigned long pgtable_area_len; - unsigned long paddr, pmd_flags; unsigned long decrypted_base; - void *pgtable_area; - pgd_t *pgd; if (!sme_active()) return; /* - * Prepare for encrypting the kernel by building new pagetables with - * the necessary attributes needed to encrypt the kernel in place. + * Prepare for encrypting the kernel and initrd by building new + * pagetables with the necessary attributes needed to encrypt the + * kernel in place. * * One range of virtual addresses will map the memory occupied - * by the kernel as encrypted. + * by the kernel and initrd as encrypted. * * Another range of virtual addresses will map the memory occupied - * by the kernel as decrypted and write-protected. + * by the kernel and initrd as decrypted and write-protected. * * The use of write-protect attribute will prevent any of the * memory from being cached. 
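
For reference, the new __sme_map_range() helper introduced above splits an arbitrary virtual range into a PTE-mapped head (up to the first 2MB boundary), a PMD-mapped middle, and a PTE-mapped tail. Below is a minimal user-space sketch of just that split, assuming a 2MB large-page size; the EX_* macros and the example addresses are invented for illustration, and the clamping stands in for alignment guarantees that the kernel callers provide.

#include <stdio.h>

#define EX_PMD_SIZE         (2UL * 1024 * 1024)
#define EX_ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define EX_ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Mirror the head/body/tail split done by __sme_map_range(). */
static void split_range(unsigned long start, unsigned long end)
{
	unsigned long head_end   = EX_ALIGN_UP(start, EX_PMD_SIZE);
	unsigned long tail_start = EX_ALIGN_DOWN(end, EX_PMD_SIZE);

	if (head_end > end)
		head_end = end;		/* whole range below one 2MB boundary */
	if (tail_start < head_end)
		tail_start = head_end;

	printf("PTE head: [%#lx, %#lx)\n", start, head_end);
	printf("PMD body: [%#lx, %#lx)\n", head_end, tail_start);
	printf("PTE tail: [%#lx, %#lx)\n", tail_start, end);
}

int main(void)
{
	split_range(0x1ff000UL, 0x601000UL);	/* unaligned start and end */
	return 0;
}

The extra two pages accounted for in sme_pgtable_calc() correspond to the PTE pages needed for exactly these head and tail portions of each mapped range.
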
@@ -392,6 +520,20 @@ void __init sme_encrypt_kernel(void) kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); kernel_len = kernel_end - kernel_start; + initrd_start = 0; + initrd_end = 0; + initrd_len = 0; +#ifdef CONFIG_BLK_DEV_INITRD + initrd_len = (unsigned long)bp->hdr.ramdisk_size | + ((unsigned long)bp->ext_ramdisk_size << 32); + if (initrd_len) { + initrd_start = (unsigned long)bp->hdr.ramdisk_image | + ((unsigned long)bp->ext_ramdisk_image << 32); + initrd_end = PAGE_ALIGN(initrd_start + initrd_len); + initrd_len = initrd_end - initrd_start; + } +#endif + /* Set the encryption workarea to be immediately after the kernel */ workarea_start = kernel_end; @@ -414,16 +556,21 @@ void __init sme_encrypt_kernel(void) */ pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; + if (initrd_len) + pgtable_area_len += sme_pgtable_calc(initrd_len) * 2; /* PUDs and PMDs needed in the current pagetables for the workarea */ pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); /* * The total workarea includes the executable encryption area and - * the pagetable area. + * the pagetable area. The start of the workarea is already 2MB + * aligned, align the end of the workarea on a 2MB boundary so that + * we don't try to create/allocate PTE entries from the workarea + * before it is mapped. */ workarea_len = execute_len + pgtable_area_len; - workarea_end = workarea_start + workarea_len; + workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE); /* * Set the address to the start of where newly created pagetable @@ -432,45 +579,30 @@ void __init sme_encrypt_kernel(void) * pagetables and when the new encrypted and decrypted kernel * mappings are populated. */ - pgtable_area = (void *)execute_end; + ppd.pgtable_area = (void *)execute_end; /* * Make sure the current pagetable structure has entries for * addressing the workarea. */ - pgd = (pgd_t *)native_read_cr3_pa(); - paddr = workarea_start; - while (paddr < workarea_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + PMD_FLAGS); - - paddr += PMD_PAGE_SIZE; - } + ppd.pgd = (pgd_t *)native_read_cr3_pa(); + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); /* Flush the TLB - no globals so cr3 is enough */ native_write_cr3(__native_read_cr3()); /* * A new pagetable structure is being built to allow for the kernel - * to be encrypted. It starts with an empty PGD that will then be - * populated with new PUDs and PMDs as the encrypted and decrypted - * kernel mappings are created. + * and initrd to be encrypted. It starts with an empty PGD that will + * then be populated with new PUDs and PMDs as the encrypted and + * decrypted kernel mappings are created. */ - pgd = pgtable_area; - memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD); - pgtable_area += sizeof(*pgd) * PTRS_PER_PGD; - - /* Add encrypted kernel (identity) mappings */ - pmd_flags = PMD_FLAGS | _PAGE_ENC; - paddr = kernel_start; - while (paddr < kernel_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + pmd_flags); - - paddr += PMD_PAGE_SIZE; - } + ppd.pgd = ppd.pgtable_area; + memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); + ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; /* * A different PGD index/entry must be used to get different @@ -479,47 +611,79 @@ void __init sme_encrypt_kernel(void) * the base of the mapping. 
*/ decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); + if (initrd_len) { + unsigned long check_base; + + check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1); + decrypted_base = max(decrypted_base, check_base); + } decrypted_base <<= PGDIR_SHIFT; + /* Add encrypted kernel (identity) mappings */ + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start; + ppd.vaddr_end = kernel_end; + sme_map_range_encrypted(&ppd); + /* Add decrypted, write-protected kernel (non-identity) mappings */ - pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); - paddr = kernel_start; - while (paddr < kernel_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr + decrypted_base, - paddr + pmd_flags); - - paddr += PMD_PAGE_SIZE; + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); + + if (initrd_len) { + /* Add encrypted initrd (identity) mappings */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start; + ppd.vaddr_end = initrd_end; + sme_map_range_encrypted(&ppd); + /* + * Add decrypted, write-protected initrd (non-identity) mappings + */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); } /* Add decrypted workarea mappings to both kernel mappings */ - paddr = workarea_start; - while (paddr < workarea_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + PMD_FLAGS); + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr + decrypted_base, - paddr + PMD_FLAGS); - - paddr += PMD_PAGE_SIZE; - } + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_map_range_decrypted(&ppd); /* Perform the encryption */ sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, - kernel_len, workarea_start, (unsigned long)pgd); + kernel_len, workarea_start, (unsigned long)ppd.pgd); + + if (initrd_len) + sme_encrypt_execute(initrd_start, initrd_start + decrypted_base, + initrd_len, workarea_start, + (unsigned long)ppd.pgd); /* * At this point we are running encrypted. Remove the mappings for * the decrypted areas - all that is needed for this is to remove * the PGD entry/entries. 
*/ - sme_clear_pgd(pgd, kernel_start + decrypted_base, - kernel_end + decrypted_base); + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_clear_pgd(&ppd); + + if (initrd_len) { + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_clear_pgd(&ppd); + } - sme_clear_pgd(pgd, workarea_start + decrypted_base, - workarea_end + decrypted_base); + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_clear_pgd(&ppd); /* Flush the TLB - no globals so cr3 is enough */ native_write_cr3(__native_read_cr3()); diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 730e6d541df1..40a6085063d6 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -15,6 +15,7 @@ #include #include #include +#include .text .code64 @@ -22,9 +23,9 @@ ENTRY(sme_encrypt_execute) /* * Entry parameters: - * RDI - virtual address for the encrypted kernel mapping - * RSI - virtual address for the decrypted kernel mapping - * RDX - length of kernel + * RDI - virtual address for the encrypted mapping + * RSI - virtual address for the decrypted mapping + * RDX - length to encrypt * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) @@ -41,9 +42,9 @@ ENTRY(sme_encrypt_execute) addq $PAGE_SIZE, %rax /* Workarea encryption routine */ push %r12 - movq %rdi, %r10 /* Encrypted kernel */ - movq %rsi, %r11 /* Decrypted kernel */ - movq %rdx, %r12 /* Kernel length */ + movq %rdi, %r10 /* Encrypted area */ + movq %rsi, %r11 /* Decrypted area */ + movq %rdx, %r12 /* Area length */ /* Copy encryption routine into the workarea */ movq %rax, %rdi /* Workarea encryption routine */ @@ -52,13 +53,14 @@ ENTRY(sme_encrypt_execute) rep movsb /* Setup registers for call */ - movq %r10, %rdi /* Encrypted kernel */ - movq %r11, %rsi /* Decrypted kernel */ + movq %r10, %rdi /* Encrypted area */ + movq %r11, %rsi /* Decrypted area */ movq %r8, %rdx /* Pagetables used for encryption */ - movq %r12, %rcx /* Kernel length */ + movq %r12, %rcx /* Area length */ movq %rax, %r8 /* Workarea encryption routine */ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ + ANNOTATE_RETPOLINE_SAFE call *%rax /* Call the encryption routine */ pop %r12 @@ -71,7 +73,7 @@ ENDPROC(sme_encrypt_execute) ENTRY(__enc_copy) /* - * Routine used to encrypt kernel. + * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since * the kernel will be encrypted during the process. So this * routine is defined here and then copied to an area outside @@ -79,19 +81,19 @@ ENTRY(__enc_copy) * during execution. * * On entry the registers must be: - * RDI - virtual address for the encrypted kernel mapping - * RSI - virtual address for the decrypted kernel mapping + * RDI - virtual address for the encrypted mapping + * RSI - virtual address for the decrypted mapping * RDX - address of the pagetables to use for encryption - * RCX - length of kernel + * RCX - length of area * R8 - intermediate copy buffer * * RAX - points to this routine * - * The kernel will be encrypted by copying from the non-encrypted - * kernel space to an intermediate buffer and then copying from the - * intermediate buffer back to the encrypted kernel space. The physical - * addresses of the two kernel space mappings are the same which - * results in the kernel being encrypted "in place". 
+ * The area will be encrypted by copying from the non-encrypted + * memory space to an intermediate buffer and then copying from the + * intermediate buffer back to the encrypted memory space. The physical + * addresses of the two mappings are the same which results in the area + * being encrypted "in place". */ /* Enable the new page tables */ mov %rdx, %cr3 @@ -103,47 +105,55 @@ ENTRY(__enc_copy) orq $X86_CR4_PGE, %rdx mov %rdx, %cr4 + push %r15 + push %r12 + + movq %rcx, %r9 /* Save area length */ + movq %rdi, %r10 /* Save encrypted area address */ + movq %rsi, %r11 /* Save decrypted area address */ + /* Set the PAT register PA5 entry to write-protect */ - push %rcx movl $MSR_IA32_CR_PAT, %ecx rdmsr - push %rdx /* Save original PAT value */ + mov %rdx, %r15 /* Save original PAT value */ andl $0xffff00ff, %edx /* Clear PA5 */ orl $0x00000500, %edx /* Set PA5 to WP */ wrmsr - pop %rdx /* RDX contains original PAT value */ - pop %rcx - - movq %rcx, %r9 /* Save kernel length */ - movq %rdi, %r10 /* Save encrypted kernel address */ - movq %rsi, %r11 /* Save decrypted kernel address */ wbinvd /* Invalidate any cache entries */ - /* Copy/encrypt 2MB at a time */ + /* Copy/encrypt up to 2MB at a time */ + movq $PMD_PAGE_SIZE, %r12 1: - movq %r11, %rsi /* Source - decrypted kernel */ + cmpq %r12, %r9 + jnb 2f + movq %r9, %r12 + +2: + movq %r11, %rsi /* Source - decrypted area */ movq %r8, %rdi /* Dest - intermediate copy buffer */ - movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ + movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ - movq %r10, %rdi /* Dest - encrypted kernel */ - movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ + movq %r10, %rdi /* Dest - encrypted area */ + movq %r12, %rcx rep movsb - addq $PMD_PAGE_SIZE, %r11 - addq $PMD_PAGE_SIZE, %r10 - subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */ + addq %r12, %r11 + addq %r12, %r10 + subq %r12, %r9 /* Kernel length decrement */ jnz 1b /* Kernel length not zero? */ /* Restore PAT register */ - push %rdx /* Save original PAT value */ movl $MSR_IA32_CR_PAT, %ecx rdmsr - pop %rdx /* Restore original PAT value */ + mov %r15, %rdx /* Restore original PAT value */ wrmsr + pop %r12 + pop %r15 + ret .L__enc_copy_end: ENDPROC(__enc_copy) diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 7eb06701a935..e500949bae24 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -61,123 +62,6 @@ static unsigned long mpx_mmap(unsigned long len) return addr; } -enum reg_type { - REG_TYPE_RM = 0, - REG_TYPE_INDEX, - REG_TYPE_BASE, -}; - -static int get_reg_offset(struct insn *insn, struct pt_regs *regs, - enum reg_type type) -{ - int regno = 0; - - static const int regoff[] = { - offsetof(struct pt_regs, ax), - offsetof(struct pt_regs, cx), - offsetof(struct pt_regs, dx), - offsetof(struct pt_regs, bx), - offsetof(struct pt_regs, sp), - offsetof(struct pt_regs, bp), - offsetof(struct pt_regs, si), - offsetof(struct pt_regs, di), -#ifdef CONFIG_X86_64 - offsetof(struct pt_regs, r8), - offsetof(struct pt_regs, r9), - offsetof(struct pt_regs, r10), - offsetof(struct pt_regs, r11), - offsetof(struct pt_regs, r12), - offsetof(struct pt_regs, r13), - offsetof(struct pt_regs, r14), - offsetof(struct pt_regs, r15), -#endif - }; - int nr_registers = ARRAY_SIZE(regoff); - /* - * Don't possibly decode a 32-bit instructions as - * reading a 64-bit-only register. 
- */ - if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) - nr_registers -= 8; - - switch (type) { - case REG_TYPE_RM: - regno = X86_MODRM_RM(insn->modrm.value); - if (X86_REX_B(insn->rex_prefix.value)) - regno += 8; - break; - - case REG_TYPE_INDEX: - regno = X86_SIB_INDEX(insn->sib.value); - if (X86_REX_X(insn->rex_prefix.value)) - regno += 8; - break; - - case REG_TYPE_BASE: - regno = X86_SIB_BASE(insn->sib.value); - if (X86_REX_B(insn->rex_prefix.value)) - regno += 8; - break; - - default: - pr_err("invalid register type"); - BUG(); - break; - } - - if (regno >= nr_registers) { - WARN_ONCE(1, "decoded an instruction with an invalid register"); - return -EINVAL; - } - return regoff[regno]; -} - -/* - * return the address being referenced be instruction - * for rm=3 returning the content of the rm reg - * for rm!=3 calculates the address using SIB and Disp - */ -static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs) -{ - unsigned long addr, base, indx; - int addr_offset, base_offset, indx_offset; - insn_byte_t sib; - - insn_get_modrm(insn); - insn_get_sib(insn); - sib = insn->sib.value; - - if (X86_MODRM_MOD(insn->modrm.value) == 3) { - addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); - if (addr_offset < 0) - goto out_err; - addr = regs_get_register(regs, addr_offset); - } else { - if (insn->sib.nbytes) { - base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); - if (base_offset < 0) - goto out_err; - - indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); - if (indx_offset < 0) - goto out_err; - - base = regs_get_register(regs, base_offset); - indx = regs_get_register(regs, indx_offset); - addr = base + indx * (1 << X86_SIB_SCALE(sib)); - } else { - addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); - if (addr_offset < 0) - goto out_err; - addr = regs_get_register(regs, addr_offset); - } - addr += insn->displacement.value; - } - return (void __user *)addr; -out_err: - return (void __user *)-1; -} - static int mpx_insn_decode(struct insn *insn, struct pt_regs *regs) { @@ -290,7 +174,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) info->si_signo = SIGSEGV; info->si_errno = 0; info->si_code = SEGV_BNDERR; - info->si_addr = mpx_get_addr_ref(&insn, regs); + info->si_addr = insn_get_addr_ref(&insn, regs); /* * We were not able to extract an address from the instruction, * probably because there was something invalid in it. 
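
The removed mpx_get_addr_ref() above reduces to the usual x86 effective-address formula, base + index * 2^scale + displacement, which the shared insn_get_addr_ref() helper now computes instead. A self-contained toy sketch of just that arithmetic follows; register fetching and ModRM/SIB decoding are omitted, and the operands in main() are invented purely for illustration.

#include <stdio.h>

/* effective address = base + index * 2^scale + displacement */
static unsigned long effective_address(unsigned long base, unsigned long index,
				       unsigned int scale, long displacement)
{
	return base + (index << scale) + displacement;
}

int main(void)
{
	/* e.g. an access like mov 0x8(%rax,%rbx,4) with rax=0x1000, rbx=3 */
	printf("%#lx\n", effective_address(0x1000UL, 3UL, 2, 8));
	return 0;
}
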
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index dfb7d657cf43..3ed9a08885c5 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte, if (!debug_pagealloc_enabled()) spin_unlock(&cpa_lock); - base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0); + base = alloc_pages(GFP_KERNEL, 0); if (!debug_pagealloc_enabled()) spin_lock(&cpa_lock); if (!base) @@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end) static int alloc_pte_page(pmd_t *pmd) { - pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL); if (!pte) return -1; @@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd) static int alloc_pmd_page(pud_t *pud) { - pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); if (!pmd) return -1; @@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) pgd_entry = cpa->pgd + pgd_index(addr); if (pgd_none(*pgd_entry)) { - p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL); if (!p4d) return -1; @@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) */ p4d = p4d_offset(pgd_entry, addr); if (p4d_none(*p4d)) { - pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pud = (pud_t *)get_zeroed_page(GFP_KERNEL); if (!pud) return -1; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 17ebc5a978cc..34cda7e0551b 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -7,7 +7,7 @@ #include #include -#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) #ifdef CONFIG_HIGHPTE #define PGALLOC_USER_GFP __GFP_HIGHMEM @@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd) kmem_cache_free(pgd_cache, pgd); } #else + static inline pgd_t *_pgd_alloc(void) { - return (pgd_t *)__get_free_page(PGALLOC_GFP); + return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER); } static inline void _pgd_free(pgd_t *pgd) { - free_page((unsigned long)pgd); + free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); } #endif /* CONFIG_X86_PAE */ @@ -701,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } + +/** + * pud_free_pmd_page - Clear pud entry and free pmd page. + * @pud: Pointer to a PUD. + * + * Context: The pud range has been unmaped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. + */ +int pud_free_pmd_page(pud_t *pud) +{ + pmd_t *pmd; + int i; + + if (pud_none(*pud)) + return 1; + + pmd = (pmd_t *)pud_page_vaddr(*pud); + + for (i = 0; i < PTRS_PER_PMD; i++) + if (!pmd_free_pte_page(&pmd[i])) + return 0; + + pud_clear(pud); + free_page((unsigned long)pmd); + + return 1; +} + +/** + * pmd_free_pte_page - Clear pmd entry and free pte page. + * @pmd: Pointer to a PMD. + * + * Context: The pmd range has been unmaped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. 
+ */ +int pmd_free_pte_page(pmd_t *pmd) +{ + pte_t *pte; + + if (pmd_none(*pmd)) + return 1; + + pte = (pte_t *)pmd_page_vaddr(*pmd); + pmd_clear(pmd); + free_page((unsigned long)pte); + + return 1; +} #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 6b9bf023a700..9bb7f0ab9fe6 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -62,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } unsigned long __FIXADDR_TOP = 0xfffff000; diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c new file mode 100644 index 000000000000..ce38f165489b --- /dev/null +++ b/arch/x86/mm/pti.c @@ -0,0 +1,368 @@ +/* + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * This code is based in part on work published here: + * + * https://github.com/IAIK/KAISER + * + * The original work was written by and and signed off by for the Linux + * kernel by: + * + * Signed-off-by: Richard Fellner + * Signed-off-by: Moritz Lipp + * Signed-off-by: Daniel Gruss + * Signed-off-by: Michael Schwarz + * + * Major changes to the original code by: Dave Hansen + * Mostly rewritten by Thomas Gleixner and + * Andy Lutomirsky + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt + +/* Backporting helper */ +#ifndef __GFP_NOTRACK +#define __GFP_NOTRACK 0 +#endif + +static void __init pti_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + pr_info("%s\n", reason); +} + +static void __init pti_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + pr_info("%s\n", reason); +} + +void __init pti_check_boottime_disable(void) +{ + char arg[5]; + int ret; + + if (hypervisor_is_type(X86_HYPER_XEN_PV)) { + pti_print_if_insecure("disabled on XEN PV."); + return; + } + + ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg)); + if (ret > 0) { + if (ret == 3 && !strncmp(arg, "off", 3)) { + pti_print_if_insecure("disabled on command line."); + return; + } + if (ret == 2 && !strncmp(arg, "on", 2)) { + pti_print_if_secure("force enabled on command line."); + goto enable; + } + if (ret == 4 && !strncmp(arg, "auto", 4)) + goto autosel; + } + + if (cmdline_find_option_bool(boot_command_line, "nopti")) { + pti_print_if_insecure("disabled on command line."); + return; + } + +autosel: + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + return; +enable: + setup_force_cpu_cap(X86_FEATURE_PTI); +} + +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + /* + * Changes to the high (kernel) portion of the kernelmode page + * tables are not automatically propagated to the usermode tables. 
+ * + * Users should keep in mind that, unlike the kernelmode tables, + * there is no vmalloc_fault equivalent for the usermode tables. + * Top-level entries added to init_mm's usermode pgd after boot + * will not be automatically propagated to other mms. + */ + if (!pgdp_maps_userspace(pgdp)) + return pgd; + + /* + * The user page tables get the full PGD, accessible from + * userspace: + */ + kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd; + + /* + * If this is normal user memory, make it NX in the kernel + * pagetables so that, if we somehow screw up and return to + * usermode with the kernel CR3 loaded, we'll get a page fault + * instead of allowing user code to execute with the wrong CR3. + * + * As exceptions, we don't set NX if: + * - _PAGE_USER is not set. This could be an executable + * EFI runtime mapping or something similar, and the kernel + * may execute from it + * - we don't have NX support + * - we're clearing the PGD (i.e. the new pgd is not present). + */ + if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) && + (__supported_pte_mask & _PAGE_NX)) + pgd.pgd |= _PAGE_NX; + + /* return the copy of the PGD we want the kernel to use: */ + return pgd; +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a P4D on success, or NULL on failure. + */ +static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) +{ + pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + + if (address < PAGE_OFFSET) { + WARN_ONCE(1, "attempt to walk user address\n"); + return NULL; + } + + if (pgd_none(*pgd)) { + unsigned long new_p4d_page = __get_free_page(gfp); + if (!new_p4d_page) + return NULL; + + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); + } + BUILD_BUG_ON(pgd_large(*pgd) != 0); + + return p4d_offset(pgd, address); +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a PMD on success, or NULL on failure. + */ +static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + p4d_t *p4d = pti_user_pagetable_walk_p4d(address); + pud_t *pud; + + BUILD_BUG_ON(p4d_large(*p4d) != 0); + if (p4d_none(*p4d)) { + unsigned long new_pud_page = __get_free_page(gfp); + if (!new_pud_page) + return NULL; + + set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); + } + + pud = pud_offset(p4d, address); + /* The user page tables do not use large mappings: */ + if (pud_large(*pud)) { + WARN_ON(1); + return NULL; + } + if (pud_none(*pud)) { + unsigned long new_pmd_page = __get_free_page(gfp); + if (!new_pmd_page) + return NULL; + + set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); + } + + return pmd_offset(pud, address); +} + +#ifdef CONFIG_X86_VSYSCALL_EMULATION +/* + * Walk the shadow copy of the page tables (optionally) trying to allocate + * page table pages on the way down. Does not support large pages. + * + * Note: this is only used when mapping *new* kernel data into the + * user/shadow page tables. It is never used for userspace data. + * + * Returns a pointer to a PTE on success, or NULL on failure. 
+ */ +static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + pmd_t *pmd = pti_user_pagetable_walk_pmd(address); + pte_t *pte; + + /* We can't do anything sensible if we hit a large mapping. */ + if (pmd_large(*pmd)) { + WARN_ON(1); + return NULL; + } + + if (pmd_none(*pmd)) { + unsigned long new_pte_page = __get_free_page(gfp); + if (!new_pte_page) + return NULL; + + set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); + } + + pte = pte_offset_kernel(pmd, address); + if (pte_flags(*pte) & _PAGE_USER) { + WARN_ONCE(1, "attempt to walk to user pte\n"); + return NULL; + } + return pte; +} + +static void __init pti_setup_vsyscall(void) +{ + pte_t *pte, *target_pte; + unsigned int level; + + pte = lookup_address(VSYSCALL_ADDR, &level); + if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) + return; + + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); + if (WARN_ON(!target_pte)) + return; + + *target_pte = *pte; + set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir)); +} +#else +static void __init pti_setup_vsyscall(void) { } +#endif + +static void __init +pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) +{ + unsigned long addr; + + /* + * Clone the populated PMDs which cover start to end. These PMD areas + * can have holes. + */ + for (addr = start; addr < end; addr += PMD_SIZE) { + pmd_t *pmd, *target_pmd; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + + pgd = pgd_offset_k(addr); + if (WARN_ON(pgd_none(*pgd))) + return; + p4d = p4d_offset(pgd, addr); + if (WARN_ON(p4d_none(*p4d))) + return; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + continue; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + continue; + + target_pmd = pti_user_pagetable_walk_pmd(addr); + if (WARN_ON(!target_pmd)) + return; + + /* + * Copy the PMD. That is, the kernelmode and usermode + * tables will share the last-level page tables of this + * address range + */ + *target_pmd = pmd_clear_flags(*pmd, clear); + } +} + +/* + * Clone a single p4d (i.e. a top-level entry on 4-level systems and a + * next-level entry on 5-level systems. + */ +static void __init pti_clone_p4d(unsigned long addr) +{ + p4d_t *kernel_p4d, *user_p4d; + pgd_t *kernel_pgd; + + user_p4d = pti_user_pagetable_walk_p4d(addr); + kernel_pgd = pgd_offset_k(addr); + kernel_p4d = p4d_offset(kernel_pgd, addr); + *user_p4d = *kernel_p4d; +} + +/* + * Clone the CPU_ENTRY_AREA into the user space visible page table. + */ +static void __init pti_clone_user_shared(void) +{ + pti_clone_p4d(CPU_ENTRY_AREA_BASE); +} + +/* + * Clone the ESPFIX P4D into the user space visinble page table + */ +static void __init pti_setup_espfix64(void) +{ +#ifdef CONFIG_X86_ESPFIX64 + pti_clone_p4d(ESPFIX_BASE_ADDR); +#endif +} + +/* + * Clone the populated PMDs of the entry and irqentry text and force it RO. 
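The pti_user_pagetable_walk_p4d/pmd/pte() helpers above descend the user copy one level at a time, recomputing the table index for the target address at each level, and pti_clone_pmds() walks kernel ranges in PMD_SIZE steps so whole last-level tables are shared instead of copied. The sketch below only prints how a 4-level x86-64 walk slices an address into per-level indices; the shift constants are the usual 4-level values and the addresses in main() are arbitrary examples.

/* Illustration only: per-level index extraction for a 4-level walk, in the
 * spirit of the pti_user_pagetable_walk_*() helpers above.  This is not a
 * substitute for the pgtable.h accessors. */
#include <stdint.h>
#include <stdio.h>

#define PT_INDEX_MASK  0x1ffULL   /* 9 bits of index per level */
#define PAGE_SHIFT_4L  12
#define PMD_SHIFT_4L   21
#define PUD_SHIFT_4L   30
#define PGDIR_SHIFT_4L 39

static void dump_indices(uint64_t addr)
{
        printf("%#018llx -> pgd %3llu  pud %3llu  pmd %3llu  pte %3llu\n",
               (unsigned long long)addr,
               (unsigned long long)((addr >> PGDIR_SHIFT_4L) & PT_INDEX_MASK),
               (unsigned long long)((addr >> PUD_SHIFT_4L) & PT_INDEX_MASK),
               (unsigned long long)((addr >> PMD_SHIFT_4L) & PT_INDEX_MASK),
               (unsigned long long)((addr >> PAGE_SHIFT_4L) & PT_INDEX_MASK));
}

int main(void)
{
        /* Stepping by 2 MiB, as pti_clone_pmds() does, only moves the pmd
         * index and above; the pte index never changes because last-level
         * tables are shared, not cloned. */
        for (uint64_t addr = 0xffffffff81000000ULL;
             addr < 0xffffffff81600000ULL; addr += (1ULL << PMD_SHIFT_4L))
                dump_indices(addr);
        return 0;
}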
+ */ +static void __init pti_clone_entry_text(void) +{ + pti_clone_pmds((unsigned long) __entry_text_start, + (unsigned long) __irqentry_text_end, + _PAGE_RW | _PAGE_GLOBAL); +} + +/* + * Initialize kernel page table isolation + */ +void __init pti_init(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("enabled\n"); + + pti_clone_user_shared(); + pti_clone_entry_text(); + pti_setup_espfix64(); + pti_setup_vsyscall(); +} diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3118392cdf75..0c936435ea93 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -6,13 +6,14 @@ #include #include #include +#include #include #include +#include #include #include #include -#include /* * TLB flushing, formerly SMP-only @@ -28,6 +29,38 @@ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ +/* + * We get here when we do something requiring a TLB invalidation + * but could not go invalidate all of the contexts. We do the + * necessary invalidation by clearing out the 'ctx_id' which + * forces a TLB flush when the context is loaded. + */ +void clear_asid_other(void) +{ + u16 asid; + + /* + * This is only expected to be set if we have disabled + * kernel _PAGE_GLOBAL pages. + */ + if (!static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON_ONCE(1); + return; + } + + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { + /* Do not need to flush the current asid */ + if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) + continue; + /* + * Make sure the next time we go to switch to + * this asid, we do a flush: + */ + this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); + } + this_cpu_write(cpu_tlbstate.invalidate_other, false); +} + atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); @@ -42,6 +75,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, return; } + if (this_cpu_read(cpu_tlbstate.invalidate_other)) + clear_asid_other(); + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != next->context.ctx_id) @@ -65,6 +101,25 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, *need_flush = true; } +static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush) +{ + unsigned long new_mm_cr3; + + if (need_flush) { + invalidate_user_asid(new_asid); + new_mm_cr3 = build_cr3(pgdir, new_asid); + } else { + new_mm_cr3 = build_cr3_noflush(pgdir, new_asid); + } + + /* + * Caution: many callers of this function expect + * that load_cr3() is serializing and orders TLB + * fills with respect to the mm_cpumask writes. + */ + write_cr3(new_mm_cr3); +} + void leave_mm(int cpu) { struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); @@ -97,6 +152,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, local_irq_restore(flags); } +static void sync_current_stack_to_mm(struct mm_struct *mm) +{ + unsigned long sp = current_stack_pointer; + pgd_t *pgd = pgd_offset(mm, sp); + + if (CONFIG_PGTABLE_LEVELS > 4) { + if (unlikely(pgd_none(*pgd))) { + pgd_t *pgd_ref = pgd_offset_k(sp); + + set_pgd(pgd, *pgd_ref); + } + } else { + /* + * "pgd" is faked. The top level entries are "p4d"s, so sync + * the p4d. This compiles to approximately the same code as + * the 5-level case. 
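choose_new_asid() and the new clear_asid_other() above maintain a small per-CPU cache of (ctx_id, tlb_gen) pairs: a slot that still remembers the incoming mm can be reused, sometimes without a flush, and zeroing a slot's ctx_id is the cheap way to force a flush the next time it is picked. The toy model below mirrors that bookkeeping in userspace; the slot count, field names and structure layout are assumptions made for this sketch.

/* Illustration only: a userspace model of the ASID slot reuse and of the
 * "invalidate everyone else" trick used by clear_asid_other() above.
 * Slot count and layout are invented for the sketch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_DYN_ASIDS 6

struct asid_slot {
        uint64_t ctx_id;   /* mm that last owned the slot; 0 forces a flush */
        uint64_t tlb_gen;  /* how current that slot's TLB contents are */
};

static struct asid_slot slots[NR_DYN_ASIDS];
static unsigned int next_asid;

/* Pick an ASID for @ctx_id that must reach @want_gen; report whether the
 * caller has to flush when loading CR3 with that ASID. */
static unsigned int choose_asid(uint64_t ctx_id, uint64_t want_gen,
                                bool *need_flush)
{
        for (unsigned int asid = 0; asid < NR_DYN_ASIDS; asid++) {
                if (slots[asid].ctx_id != ctx_id)
                        continue;
                *need_flush = slots[asid].tlb_gen < want_gen;
                return asid;            /* reuse, possibly without flushing */
        }
        /* No slot remembers this mm: recycle one round-robin and flush. */
        *need_flush = true;
        next_asid = (next_asid + 1) % NR_DYN_ASIDS;
        slots[next_asid].ctx_id = ctx_id;
        slots[next_asid].tlb_gen = want_gen;
        return next_asid;
}

/* Analogue of clear_asid_other(): wiping the remembered ctx_id makes the
 * next choose_asid() for that mm take the flushing path. */
static void invalidate_other_asids(unsigned int current_asid)
{
        for (unsigned int asid = 0; asid < NR_DYN_ASIDS; asid++)
                if (asid != current_asid)
                        slots[asid].ctx_id = 0;
}

int main(void)
{
        bool flush;
        unsigned int a;

        a = choose_asid(42, 1, &flush);
        printf("first use:              asid %u flush %d\n", a, flush);
        a = choose_asid(42, 1, &flush);
        printf("reuse, nothing changed: asid %u flush %d\n", a, flush);
        invalidate_other_asids((a + 1) % NR_DYN_ASIDS);
        a = choose_asid(42, 1, &flush);
        printf("reuse after invalidate: asid %u flush %d\n", a, flush);
        return 0;
}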
+ */ + p4d_t *p4d = p4d_offset(pgd, sp); + + if (unlikely(p4d_none(*p4d))) { + pgd_t *pgd_ref = pgd_offset_k(sp); + p4d_t *p4d_ref = p4d_offset(pgd_ref, sp); + + set_p4d(p4d, *p4d_ref); + } + } +} + void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -128,7 +211,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * isn't free. */ #ifdef CONFIG_DEBUG_VM - if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { + if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) { /* * If we were to BUG here, we'd be very likely to kill * the system so hard that we don't see the call trace. @@ -165,6 +248,27 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, } else { u16 new_asid; bool need_flush; + u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); + + /* + * Avoid user/user BTB poisoning by flushing the branch + * predictor when switching between processes. This stops + * one process from doing Spectre-v2 attacks on another. + * + * As an optimization, flush indirect branches only when + * switching into processes that disable dumping. This + * protects high value processes like gpg, without having + * too high performance overhead. IBPB is *expensive*! + * + * This will not flush branches when switching into kernel + * threads. It will also not flush if we switch to idle + * thread and back to the same process. It will flush if we + * switch to a different non-dumpable process. + */ + if (tsk && tsk->mm && + tsk->mm->context.ctx_id != last_ctx_id && + get_dumpable(tsk->mm) != SUID_DUMP_USER) + indirect_branch_prediction_barrier(); if (IS_ENABLED(CONFIG_VMAP_STACK)) { /* @@ -172,11 +276,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * mapped in the new pgd, we'll double-fault. Forcibly * map it. */ - unsigned int index = pgd_index(current_stack_pointer); - pgd_t *pgd = next->pgd + index; - - if (unlikely(pgd_none(*pgd))) - set_pgd(pgd, init_mm.pgd[index]); + sync_current_stack_to_mm(next); } /* Stop remote flushes for the previous mm */ @@ -195,7 +295,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - write_cr3(build_cr3(next, new_asid)); + load_new_mm_cr3(next->pgd, new_asid, true); /* * NB: This gets called via leave_mm() in the idle path @@ -208,12 +308,20 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - write_cr3(build_cr3_noflush(next, new_asid)); + load_new_mm_cr3(next->pgd, new_asid, false); /* See above wrt _rcuidle. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); } + /* + * Record last user mm's context id, so we can avoid + * flushing branch buffer with IBPB if we switch back + * to the same user. + */ + if (next != &init_mm) + this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + this_cpu_write(cpu_tlbstate.loaded_mm, next); this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); } @@ -288,9 +396,10 @@ void initialize_tlbstate_and_flush(void) !(cr4_read_shadow() & X86_CR4_PCIDE)); /* Force ASID 0 and force a TLB flush. */ - write_cr3(build_cr3(mm, 0)); + write_cr3(build_cr3(mm->pgd, 0)); /* Reinitialize tlbstate. 
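The comment block added above spells out when the branch-predictor barrier is worth its cost: only when genuinely switching to a different user mm, and only when that mm has opted out of dumping. Restated with stand-in types in place of mm_struct, get_dumpable() and SUID_DUMP_USER, the decision looks roughly like this:

/* Illustration only: the shape of the IBPB decision described above.
 * toy_mm and its dumpable flag are stand-ins, not kernel structures. */
#include <stdbool.h>
#include <stdint.h>

struct toy_mm {
        uint64_t ctx_id;
        bool     dumpable;   /* stand-in for get_dumpable() == SUID_DUMP_USER */
};

static bool needs_ibpb(const struct toy_mm *next, uint64_t last_user_ctx_id)
{
        if (!next)                              /* kernel thread: nothing new to protect */
                return false;
        if (next->ctx_id == last_user_ctx_id)   /* same user mm as last time */
                return false;
        return !next->dumpable;                 /* only pay the cost for non-dumpable tasks */
}

int main(void)
{
        struct toy_mm gpg  = { .ctx_id = 7, .dumpable = false };
        struct toy_mm bash = { .ctx_id = 9, .dumpable = true  };

        /* switching away from bash into gpg: different mm and non-dumpable,
         * so the barrier would be issued */
        return needs_ibpb(&gpg, bash.ctx_id) ? 0 : 1;
}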
*/ + this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); this_cpu_write(cpu_tlbstate.next_asid, 1); this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); @@ -383,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, * flush that changes context.tlb_gen from 2 to 3. If they get * processed on this CPU in reverse order, we'll see * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. - * If we were to use __flush_tlb_single() and set local_tlb_gen to + * If we were to use __flush_tlb_one_user() and set local_tlb_gen to * 3, we'd be break the invariant: we'd update local_tlb_gen above * 1 without the full flush that's needed for tlb_gen 2. * @@ -404,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, addr = f->start; while (addr < f->end) { - __flush_tlb_single(addr); + __flush_tlb_one_user(addr); addr += PAGE_SIZE; } if (local) @@ -551,7 +660,7 @@ static void do_kernel_range_flush(void *info) /* flush range by one by one 'invlpg' */ for (addr = f->start; addr < f->end; addr += PAGE_SIZE) - __flush_tlb_single(addr); + __flush_tlb_one_kernel(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 0554e8aef4d5..bb77606d04e0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -13,6 +13,7 @@ #include #include #include +#include #include int bpf_jit_enable __read_mostly; @@ -287,7 +288,7 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 43 /* number of bytes to jump */ +#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -296,7 +297,7 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 32 +#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ @@ -310,7 +311,7 @@ static void emit_bpf_tail_call(u8 **pprog) * goto out; */ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ -#define OFFSET3 10 +#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JE, OFFSET3); /* je out */ label3 = cnt; @@ -323,7 +324,7 @@ static void emit_bpf_tail_call(u8 **pprog) * rdi == ctx (1st arg) * rax == prog->bpf_func + prologue_size */ - EMIT2(0xFF, 0xE0); /* jmp rax */ + RETPOLINE_RAX_BPF_JIT(); /* out: */ BUILD_BUG_ON(cnt - label1 != OFFSET1); @@ -1155,7 +1156,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) * may converge on the last pass. 
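RETPOLINE_RAX_BPF_JIT() above replaces the JITed indirect tail call (jmp *%rax) with a retpoline, which is why OFFSET1/2/3 now account for RETPOLINE_RAX_BPF_JIT_SIZE. The standalone thunk below shows the generic shape of that construct for an x86-64 GNU toolchain; it is a sketch of the technique, not the byte-exact sequence or size the kernel macro emits.

/* Illustration only: the classic retpoline replacement for "jmp *%rax",
 * written as a standalone thunk (x86-64, AT&T syntax, gcc/clang).  Defined
 * here for inspection; it is not called from the toy main(). */
__asm__(
        ".text\n"
        ".globl toy_retpoline_jmp_rax\n"
        "toy_retpoline_jmp_rax:\n"
        "        call    1f\n"             /* push a safe return address       */
        "0:      pause\n"                  /* speculation lands here and spins */
        "        lfence\n"
        "        jmp     0b\n"
        "1:      mov     %rax, (%rsp)\n"   /* overwrite it with the real target */
        "        ret\n"                    /* the 'return' now jumps to *%rax   */
);

int main(void)
{
        return 0;
}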
In such case do one more * pass to emit the final image */ - for (pass = 0; pass < 10 || image; pass++) { + for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { image = NULL; @@ -1182,6 +1183,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } } oldproglen = proglen; + cond_resched(); } if (bpf_jit_enable > 1) diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index ffdbc4836b4f..abff76bda9e6 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -460,7 +460,7 @@ static int nmi_setup(void) goto fail; for_each_possible_cpu(cpu) { - if (!cpu) + if (!IS_ENABLED(CONFIG_SMP) || !cpu) continue; memcpy(per_cpu(cpu_msrs, cpu).counters, diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index bb461cfd01ab..526536c81ddc 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void) * We should get host bridge information from ACPI unless the BIOS * doesn't support it. */ - if (acpi_os_get_root_pointer()) + if (!acpi_disabled && acpi_os_get_root_pointer()) return 0; #endif diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 20fb31579b69..f7af598c4f55 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -134,7 +134,9 @@ pgd_t * __init efi_call_phys_prolog(void) pud[j] = *pud_offset(p4d_k, vaddr); } } + pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; } + out: __flush_tlb_all(); @@ -195,6 +197,9 @@ static pgd_t *efi_pgd; * because we want to avoid inserting EFI region mappings (EFI_VA_END * to EFI_VA_START) into the standard kernel page tables. Everything * else can be shared, see efi_sync_low_kernel_mappings(). + * + * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the + * allocation. */ int __init efi_alloc_page_tables(void) { @@ -206,8 +211,8 @@ int __init efi_alloc_page_tables(void) if (efi_enabled(EFI_OLD_MEMMAP)) return 0; - gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO; - efi_pgd = (pgd_t *)__get_free_page(gfp_mask); + gfp_mask = GFP_KERNEL | __GFP_ZERO; + efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER); if (!efi_pgd) return -ENOMEM; @@ -222,7 +227,7 @@ int __init efi_alloc_page_tables(void) if (!pud) { if (CONFIG_PGTABLE_LEVELS > 4) free_page((unsigned long) pgd_page_vaddr(*pgd)); - free_page((unsigned long)efi_pgd); + free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); return -ENOMEM; } diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 8a99a2e96537..5b513ccffde4 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff, /* * Update the first page pointer to skip over the CSH header. */ - cap_info->pages[0] += csh->headersize; + cap_info->phys[0] += csh->headersize; + + /* + * cap_info->capsule should point at a virtual mapping of the entire + * capsule, starting at the capsule header. Our image has the Quark + * security header prepended, so we cannot rely on the default vmap() + * mapping created by the generic capsule code. + * Given that the Quark firmware does not appear to care about the + * virtual mapping, let's just point cap_info->capsule at our copy + * of the capsule header. 
+ */ + cap_info->capsule = &cap_info->header; return 1; } diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c index dc036e511f48..5a0483e7bf66 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c @@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) return 0; } -static const struct bt_sfi_data tng_bt_sfi_data __initdata = { +static struct bt_sfi_data tng_bt_sfi_data __initdata = { .setup = tng_bt_sfi_setup, }; diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 86676cec99a1..09dd7f3cf621 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -79,7 +79,7 @@ static void intel_mid_power_off(void) static void intel_mid_reboot(void) { - intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); } static unsigned long __init intel_mid_calibrate_tsc(void) diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index f44c0bc95aa2..0b530c53de1f 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, local_flush_tlb(); stat->d_alltlb++; } else { - __flush_tlb_one(msg->address); + __flush_tlb_one_user(msg->address); stat->d_onetlb++; } stat->d_requestee++; @@ -2254,8 +2254,6 @@ static int __init uv_bau_init(void) init_uvhub(uvhub, vector, uv_base_pnode); } - alloc_intr_gate(vector, uv_bau_message_intr1); - for_each_possible_blade(uvhub) { if (uv_blade_nr_possible_cpus(uvhub)) { unsigned long val; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 84fcfde53f8f..04d5157fe7f8 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -160,17 +160,19 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(cpu_tss, cpu); #ifdef CONFIG_X86_64 struct desc_struct *desc = get_cpu_gdt_rw(cpu); tss_desc tss; #endif - set_tss_desc(cpu, t); /* - * This just modifies memory; should not be - * necessary. But... This is necessary, because - * 386 hardware has concept of busy TSS or some - * similar stupidity. - */ + + /* + * We need to reload TR, which requires that we change the + * GDT entry to indicate "available" first. + * + * XXX: This could probably all be replaced by a call to + * force_reload_TR(). + */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); #ifdef CONFIG_X86_64 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index de53bd15df5a..24bb7598774e 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -102,7 +102,7 @@ ENTRY(startup_32) * don't we'll eventually crash trying to execute encrypted * instructions. 
*/ - bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags + btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags jnc .Ldone movl $MSR_K8_SYSCFG, %ecx rdmsr diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 5d73c443e778..220e97841e49 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, break; case R_X86_64_PC32: + case R_X86_64_PLT32: /* * PC relative relocations don't need to be adjusted unless * referencing a percpu symbol. + * + * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32. */ if (is_percpu_sym(sym, symname)) add_reloc(&relocs32neg, offset); diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 836a1eb5df43..3ee234b6234d 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm) mm->arch.ldt.entry_count = 0; } -int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { - return do_modify_ldt_skas(func, ptr, bytecount); + /* See non-um modify_ldt() for why we do this cast */ + return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount); } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index d669e9d89001..c9081c6671f0 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1,8 +1,12 @@ +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +#include +#endif #include #include #include #include +#include #include #include @@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num) } EXPORT_SYMBOL(xen_arch_unregister_cpu); #endif + +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +void __init arch_xen_balloon_init(struct resource *hostmem_resource) +{ + struct xen_memory_map memmap; + int rc; + unsigned int i, last_guest_ram; + phys_addr_t max_addr = PFN_PHYS(max_pfn); + struct e820_table *xen_e820_table; + const struct e820_entry *entry; + struct resource *res; + + if (!xen_initial_domain()) + return; + + xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); + if (!xen_e820_table) + return; + + memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); + set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); + rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); + if (rc) { + pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); + goto out; + } + + last_guest_ram = 0; + for (i = 0; i < memmap.nr_entries; i++) { + if (xen_e820_table->entries[i].addr >= max_addr) + break; + if (xen_e820_table->entries[i].type == E820_TYPE_RAM) + last_guest_ram = i; + } + + entry = &xen_e820_table->entries[last_guest_ram]; + if (max_addr >= entry->addr + entry->size) + goto out; /* No unallocated host RAM. */ + + hostmem_resource->start = max_addr; + hostmem_resource->end = entry->addr + entry->size; + + /* + * Mark non-RAM regions between the end of dom0 RAM and end of host RAM + * as unavailable. The rest of that region can be used for hotplug-based + * ballooning. + */ + for (; i < memmap.nr_entries; i++) { + entry = &xen_e820_table->entries[i]; + + if (entry->type == E820_TYPE_RAM) + continue; + + if (entry->addr >= hostmem_resource->end) + break; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + goto out; + + res->name = "Unavailable host RAM"; + res->start = entry->addr; + res->end = (entry->addr + entry->size < hostmem_resource->end) ? 
+ entry->addr + entry->size : hostmem_resource->end; + rc = insert_resource(hostmem_resource, res); + if (rc) { + pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", + __func__, res->start, res->end, rc); + kfree(res); + goto out; + } + } + + out: + kfree(xen_e820_table); +} +#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index de503c225ae1..754d5391d9fa 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -226,12 +226,12 @@ static uint32_t __init xen_platform_hvm(void) return xen_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_xen_hvm = { +const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = { .name = "Xen HVM", .detect = xen_platform_hvm, - .init_platform = xen_hvm_guest_init, - .pin_vcpu = xen_pin_vcpu, - .x2apic_available = xen_x2apic_para_available, - .init_mem_mapping = xen_hvm_init_mem_mapping, + .type = X86_HYPER_XEN_HVM, + .init.init_platform = xen_hvm_guest_init, + .init.x2apic_available = xen_x2apic_para_available, + .init.init_mem_mapping = xen_hvm_init_mem_mapping, + .runtime.pin_vcpu = xen_pin_vcpu, }; -EXPORT_SYMBOL(x86_hyper_xen_hvm); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index d4396e27b1fb..f896c2975545 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -88,6 +88,8 @@ #include "multicalls.h" #include "pmu.h" +#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */ + void *xen_initial_gdt; static int xen_cpu_up_prepare_pv(unsigned int cpu); @@ -601,7 +603,7 @@ static struct trap_array_entry trap_array[] = { #ifdef CONFIG_X86_MCE { machine_check, xen_machine_check, true }, #endif - { nmi, xen_nmi, true }, + { nmi, xen_xennmi, true }, { overflow, xen_overflow, false }, #ifdef CONFIG_IA32_EMULATION { entry_INT80_compat, xen_entry_INT80_compat, false }, @@ -622,7 +624,7 @@ static struct trap_array_entry trap_array[] = { { simd_coprocessor_error, xen_simd_coprocessor_error, false }, }; -static bool get_trap_addr(void **addr, unsigned int ist) +static bool __ref get_trap_addr(void **addr, unsigned int ist) { unsigned int nr; bool ist_okay = false; @@ -644,6 +646,14 @@ static bool get_trap_addr(void **addr, unsigned int ist) } } + if (nr == ARRAY_SIZE(trap_array) && + *addr >= (void *)early_idt_handler_array[0] && + *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) { + nr = (*addr - (void *)early_idt_handler_array[0]) / + EARLY_IDT_HANDLER_SIZE; + *addr = (void *)xen_early_idt_handler_array[nr]; + } + if (WARN_ON(ist != 0 && !ist_okay)) return false; @@ -811,15 +821,14 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, } } -static void xen_load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static void xen_load_sp0(unsigned long sp0) { struct multicall_space mcs; mcs = xen_mc_entry(0); - MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); + MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); xen_mc_issue(PARAVIRT_LAZY_CPU); - tss->x86_tss.sp0 = thread->sp0; + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } void xen_set_iopl_mask(unsigned mask) @@ -1250,6 +1259,7 @@ asmlinkage __visible void __init xen_start_kernel(void) __userpte_alloc_gfp &= ~__GFP_HIGHMEM; /* Work out if we support NX */ + get_cpu_cap(&boot_cpu_data); x86_configure_nx(); /* Get mfn list */ @@ -1262,6 +1272,21 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_setup_gdt(0); xen_init_irq_ops(); + + /* Let's presume PV guests always boot on vCPU with id 0. 
*/ + per_cpu(xen_vcpu_id, 0) = 0; + + /* + * Setup xen_vcpu early because idt_setup_early_handler needs it for + * local_irq_disable(), irqs_disabled(). + * + * Don't do the full vcpu_info placement stuff until we have + * the cpu_possible_mask and a non-dummy shared_info. + */ + xen_vcpu_info_reset(0); + + idt_setup_early_handler(); + xen_init_capabilities(); #ifdef CONFIG_X86_LOCAL_APIC @@ -1295,18 +1320,6 @@ asmlinkage __visible void __init xen_start_kernel(void) */ acpi_numa = -1; #endif - /* Let's presume PV guests always boot on vCPU with id 0. */ - per_cpu(xen_vcpu_id, 0) = 0; - - /* - * Setup xen_vcpu early because start_kernel needs it for - * local_irq_disable(), irqs_disabled(). - * - * Don't do the full vcpu_info placement stuff until we have - * the cpu_possible_mask and a non-dummy shared_info. - */ - xen_vcpu_info_reset(0); - WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); local_irq_disable(); @@ -1460,9 +1473,9 @@ static uint32_t __init xen_platform_pv(void) return 0; } -const struct hypervisor_x86 x86_hyper_xen_pv = { +const __initconst struct hypervisor_x86 x86_hyper_xen_pv = { .name = "Xen PV", .detect = xen_platform_pv, - .pin_vcpu = xen_pin_vcpu, + .type = X86_HYPER_XEN_PV, + .runtime.pin_vcpu = xen_pin_vcpu, }; -EXPORT_SYMBOL(x86_hyper_xen_pv); diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c index 2cfcfe4f6b2a..dd2ad82eee80 100644 --- a/arch/x86/xen/mmu_hvm.c +++ b/arch/x86/xen/mmu_hvm.c @@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void) if (is_pagetable_dying_supported()) pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; #ifdef CONFIG_PROC_VMCORE - register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram); + WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram)); #endif } diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 71495f1a86d7..042e9c422b21 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -449,7 +449,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); -#if CONFIG_PGTABLE_LEVELS == 4 +#ifdef CONFIG_X86_64 __visible pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); @@ -538,7 +538,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val) xen_mc_issue(PARAVIRT_LAZY_MMU); } -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ +#endif /* CONFIG_X86_64 */ static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), @@ -580,21 +580,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), bool last, unsigned long limit) { - int i, nr, flush = 0; + int flush = 0; + pud_t *pud; - nr = last ? 
p4d_index(limit) + 1 : PTRS_PER_P4D; - for (i = 0; i < nr; i++) { - pud_t *pud; - if (p4d_none(p4d[i])) - continue; + if (p4d_none(*p4d)) + return flush; - pud = pud_offset(&p4d[i], 0); - if (PTRS_PER_PUD > 1) - flush |= (*func)(mm, virt_to_page(pud), PT_PUD); - flush |= xen_pud_walk(mm, pud, func, - last && i == nr - 1, limit); - } + pud = pud_offset(p4d, 0); + if (PTRS_PER_PUD > 1) + flush |= (*func)(mm, virt_to_page(pud), PT_PUD); + flush |= xen_pud_walk(mm, pud, func, last, limit); return flush; } @@ -644,8 +640,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, continue; p4d = p4d_offset(&pgd[i], 0); - if (PTRS_PER_P4D > 1) - flush |= (*func)(mm, virt_to_page(p4d), PT_P4D); flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); } @@ -1176,22 +1170,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr) { pgd_t *pgd; p4d_t *p4d; - unsigned int i; bool unpin; unpin = (vaddr == 2 * PGDIR_SIZE); vaddr &= PMD_MASK; pgd = pgd_offset_k(vaddr); p4d = p4d_offset(pgd, 0); - for (i = 0; i < PTRS_PER_P4D; i++) { - if (p4d_none(p4d[i])) - continue; - xen_cleanmfnmap_p4d(p4d + i, unpin); - } - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - set_pgd(pgd, __pgd(0)); - xen_cleanmfnmap_free_pgtbl(p4d, unpin); - } + if (!p4d_none(*p4d)) + xen_cleanmfnmap_p4d(p4d, unpin); } static void __init xen_pagetable_p2m_free(void) @@ -1314,12 +1300,12 @@ static void xen_flush_tlb(void) preempt_enable(); } -static void xen_flush_tlb_single(unsigned long addr) +static void xen_flush_tlb_one_user(unsigned long addr) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb_single(addr); + trace_xen_mmu_flush_tlb_one_user(addr); preempt_disable(); @@ -1692,7 +1678,7 @@ static void xen_release_pmd(unsigned long pfn) xen_release_ptpage(pfn, PT_PMD); } -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) { xen_alloc_ptpage(mm, pfn, PT_PUD); @@ -1916,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* Graft it onto L4[511][510] */ copy_page(level2_kernel_pgt, l2); + /* + * Zap execute permission from the ident map. Due to the sharing of + * L1 entries we need to do this in the L2. + */ + if (__supported_pte_mask & _PAGE_NX) { + for (i = 0; i < PTRS_PER_PMD; ++i) { + if (pmd_none(level2_ident_pgt[i])) + continue; + level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX); + } + } + /* Copy the initial P->M table mappings if necessary. 
*/ i = pgd_index(xen_start_info->mfn_list); if (i && i < pgd_index(__START_KERNEL_map)) @@ -2029,13 +2027,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) */ void __init xen_relocate_p2m(void) { - phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys; + phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys; unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end; - int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d; + int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud; pte_t *pt; pmd_t *pmd; pud_t *pud; - p4d_t *p4d = NULL; pgd_t *pgd; unsigned long *new_p2m; int save_pud; @@ -2045,11 +2042,7 @@ void __init xen_relocate_p2m(void) n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT; n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT; n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT; - if (PTRS_PER_P4D > 1) - n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT; - else - n_p4d = 0; - n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d; + n_frames = n_pte + n_pt + n_pmd + n_pud; new_area = xen_find_free_area(PFN_PHYS(n_frames)); if (!new_area) { @@ -2065,76 +2058,56 @@ void __init xen_relocate_p2m(void) * To avoid any possible virtual address collision, just use * 2 * PUD_SIZE for the new area. */ - p4d_phys = new_area; - pud_phys = p4d_phys + PFN_PHYS(n_p4d); + pud_phys = new_area; pmd_phys = pud_phys + PFN_PHYS(n_pud); pt_phys = pmd_phys + PFN_PHYS(n_pmd); p2m_pfn = PFN_DOWN(pt_phys) + n_pt; pgd = __va(read_cr3_pa()); new_p2m = (unsigned long *)(2 * PGDIR_SIZE); - idx_p4d = 0; save_pud = n_pud; - do { - if (n_p4d > 0) { - p4d = early_memremap(p4d_phys, PAGE_SIZE); - clear_page(p4d); - n_pud = min(save_pud, PTRS_PER_P4D); - } - for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { - pud = early_memremap(pud_phys, PAGE_SIZE); - clear_page(pud); - for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); - idx_pmd++) { - pmd = early_memremap(pmd_phys, PAGE_SIZE); - clear_page(pmd); - for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); - idx_pt++) { - pt = early_memremap(pt_phys, PAGE_SIZE); - clear_page(pt); - for (idx_pte = 0; - idx_pte < min(n_pte, PTRS_PER_PTE); - idx_pte++) { - set_pte(pt + idx_pte, - pfn_pte(p2m_pfn, PAGE_KERNEL)); - p2m_pfn++; - } - n_pte -= PTRS_PER_PTE; - early_memunmap(pt, PAGE_SIZE); - make_lowmem_page_readonly(__va(pt_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, - PFN_DOWN(pt_phys)); - set_pmd(pmd + idx_pt, - __pmd(_PAGE_TABLE | pt_phys)); - pt_phys += PAGE_SIZE; + for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { + pud = early_memremap(pud_phys, PAGE_SIZE); + clear_page(pud); + for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); + idx_pmd++) { + pmd = early_memremap(pmd_phys, PAGE_SIZE); + clear_page(pmd); + for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); + idx_pt++) { + pt = early_memremap(pt_phys, PAGE_SIZE); + clear_page(pt); + for (idx_pte = 0; + idx_pte < min(n_pte, PTRS_PER_PTE); + idx_pte++) { + set_pte(pt + idx_pte, + pfn_pte(p2m_pfn, PAGE_KERNEL)); + p2m_pfn++; } - n_pt -= PTRS_PER_PMD; - early_memunmap(pmd, PAGE_SIZE); - make_lowmem_page_readonly(__va(pmd_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, - PFN_DOWN(pmd_phys)); - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); - pmd_phys += PAGE_SIZE; + n_pte -= PTRS_PER_PTE; + early_memunmap(pt, PAGE_SIZE); + make_lowmem_page_readonly(__va(pt_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, + PFN_DOWN(pt_phys)); + set_pmd(pmd + idx_pt, + __pmd(_PAGE_TABLE | pt_phys)); + pt_phys += PAGE_SIZE; } - n_pmd -= PTRS_PER_PUD; - early_memunmap(pud, 
PAGE_SIZE); - make_lowmem_page_readonly(__va(pud_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); - if (n_p4d > 0) - set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys)); - else - set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); - pud_phys += PAGE_SIZE; + n_pt -= PTRS_PER_PMD; + early_memunmap(pmd, PAGE_SIZE); + make_lowmem_page_readonly(__va(pmd_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, + PFN_DOWN(pmd_phys)); + set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); + pmd_phys += PAGE_SIZE; } - if (n_p4d > 0) { - save_pud -= PTRS_PER_P4D; - early_memunmap(p4d, PAGE_SIZE); - make_lowmem_page_readonly(__va(p4d_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys)); - set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys)); - p4d_phys += PAGE_SIZE; - } - } while (++idx_p4d < n_p4d); + n_pmd -= PTRS_PER_PUD; + early_memunmap(pud, PAGE_SIZE); + make_lowmem_page_readonly(__va(pud_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); + set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); + pud_phys += PAGE_SIZE; + } /* Now copy the old p2m info to the new area. */ memcpy(new_p2m, xen_p2m_addr, size); @@ -2300,7 +2273,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) switch (idx) { case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: - case FIX_RO_IDT: #ifdef CONFIG_X86_32 case FIX_WP_TEST: # ifdef CONFIG_HIGHMEM @@ -2311,7 +2283,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: - case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END: /* All local page mappings */ pte = pfn_pte(phys, prot); break; @@ -2361,7 +2332,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 pv_mmu_ops.set_p4d = xen_set_p4d; #endif @@ -2371,7 +2342,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.alloc_pmd = xen_alloc_pmd; pv_mmu_ops.release_pte = xen_release_pte; pv_mmu_ops.release_pmd = xen_release_pmd; -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 pv_mmu_ops.alloc_pud = xen_alloc_pud; pv_mmu_ops.release_pud = xen_release_pud; #endif @@ -2401,7 +2372,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_user = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb, - .flush_tlb_single = xen_flush_tlb_single, + .flush_tlb_one_user = xen_flush_tlb_one_user, .flush_tlb_others = xen_flush_tlb_others, .pgd_alloc = xen_pgd_alloc, @@ -2435,14 +2406,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 .pud_val = PV_CALLEE_SAVE(xen_pud_val), .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_p4d = xen_set_p4d_hyper, .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ +#endif /* CONFIG_X86_64 */ .activate_mm = xen_activate_mm, .dup_mmap = xen_dup_mmap, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 6083ba462f35..15812e553b95 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i, ret = 0; pte_t *pte; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + if (kmap_ops) { ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, kmap_ops, count); @@ -736,6 +739,9 @@ 
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, { int i, ret = 0; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index c114ca767b3b..6e0d2086eacb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -808,7 +808,6 @@ char * __init xen_memory_setup(void) addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; while (i < xen_e820_table.nr_entries) { - bool discard = false; chunk_size = size; type = xen_e820_table.entries[i].type; @@ -824,11 +823,10 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(pfn_s, n_pfns); xen_max_p2m_pfn = pfn_s + n_pfns; } else - discard = true; + type = E820_TYPE_UNUSABLE; } - if (!discard) - xen_align_and_add_e820_region(addr, chunk_size, type); + xen_align_and_add_e820_region(addr, chunk_size, type); addr += chunk_size; size -= chunk_size; diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 05f91ce9b55e..c0c756c76afe 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -14,6 +14,7 @@ * single-threaded. */ #include +#include #include #include #include @@ -294,12 +295,19 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) #endif memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); + /* + * Bring up the CPU in cpu_bringup_and_idle() with the stack + * pointing just below where pt_regs would be if it were a normal + * kernel entry. + */ ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt->flags = VGCF_IN_KERNEL; ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ ctxt->user_regs.ds = __USER_DS; ctxt->user_regs.es = __USER_DS; ctxt->user_regs.ss = __KERNEL_DS; + ctxt->user_regs.cs = __KERNEL_CS; + ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle); xen_copy_trap_info(ctxt->trap_ctxt); @@ -314,8 +322,13 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->gdt_frames[0] = gdt_mfn; ctxt->gdt_ents = GDT_ENTRIES; + /* + * Set SS:SP that Xen will use when entering guest kernel mode + * from guest user mode. Subsequent calls to load_sp0() can + * change this value. 
+ */ ctxt->kernel_ss = __KERNEL_DS; - ctxt->kernel_sp = idle->thread.sp0; + ctxt->kernel_sp = task_top_of_stack(idle); #ifdef CONFIG_X86_32 ctxt->event_callback_cs = __KERNEL_CS; @@ -327,10 +340,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) (unsigned long)xen_hypervisor_callback; ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback; - ctxt->user_regs.cs = __KERNEL_CS; per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); - ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt)) BUG(); diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 92bf5ecb6baf..3e3a58ea669e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -1,12 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include #include +#include +#include #include #include #include @@ -15,6 +18,8 @@ #include "mmu.h" #include "pmu.h" +static DEFINE_PER_CPU(u64, spec_ctrl); + void xen_arch_pre_suspend(void) { if (xen_pv_domain()) @@ -31,6 +36,9 @@ void xen_arch_post_suspend(int cancelled) static void xen_vcpu_notify_restore(void *data) { + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) + wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); + /* Boot processor notified via generic timekeeping_resume() */ if (smp_processor_id() == 0) return; @@ -40,7 +48,15 @@ static void xen_vcpu_notify_restore(void *data) static void xen_vcpu_notify_suspend(void *data) { + u64 tmp; + tick_suspend_local(); + + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { + rdmsrl(MSR_IA32_SPEC_CTRL, tmp); + this_cpu_write(spec_ctrl, tmp); + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + } } void xen_arch_resume(void) diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index c98a48c861fd..417b339e5c8e 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -15,6 +15,7 @@ #include +#include #include .macro xen_pv_trap name @@ -30,7 +31,7 @@ xen_pv_trap debug xen_pv_trap xendebug xen_pv_trap int3 xen_pv_trap xenint3 -xen_pv_trap nmi +xen_pv_trap xennmi xen_pv_trap overflow xen_pv_trap bounds xen_pv_trap invalid_op @@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat #endif xen_pv_trap hypervisor_callback + __INIT +ENTRY(xen_early_idt_handler_array) + i = 0 + .rept NUM_EXCEPTION_VECTORS + pop %rcx + pop %r11 + jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE + i = i + 1 + .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr +END(xen_early_idt_handler_array) + __FINIT + hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* * Xen64 iret frame: diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index b5b8d7f43557..96f26e026783 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -9,7 +9,10 @@ #include #include +#include #include +#include +#include #include #include @@ -20,6 +23,7 @@ #ifdef CONFIG_XEN_PV __INIT ENTRY(startup_xen) + UNWIND_HINT_EMPTY cld /* Clear .bss */ @@ -33,22 +37,39 @@ ENTRY(startup_xen) mov %_ASM_SI, xen_start_info mov $init_thread_union+THREAD_SIZE, %_ASM_SP - jmp xen_start_kernel +#ifdef CONFIG_X86_64 + /* Set up %gs. + * + * The base of %gs always points to the bottom of the irqstack + * union. If the stack protector canary is enabled, it is + * located at %gs:40. Note that, on SMP, the boot cpu uses + * init data section till per cpu areas are set up. 
+ */ + movl $MSR_GS_BASE,%ecx + movq $INIT_PER_CPU_VAR(irq_stack_union),%rax + cdq + wrmsr +#endif + jmp xen_start_kernel +END(startup_xen) __FINIT #endif .pushsection .text .balign PAGE_SIZE ENTRY(hypercall_page) - .skip PAGE_SIZE + .rept (PAGE_SIZE / 32) + UNWIND_HINT_EMPTY + .skip 32 + .endr #define HYPERCALL(n) \ .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 #include #undef HYPERCALL - +END(hypercall_page) .popsection ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore index be7655998b26..38177c7ebcab 100644 --- a/arch/xtensa/boot/.gitignore +++ b/arch/xtensa/boot/.gitignore @@ -1,3 +1,2 @@ uImage zImage.redboot -*.dtb diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index eaaf1ebcc7a4..5bfbc1c401d4 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { int ret = 0; - u32 prev; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __asm__ __volatile__ ( " # futex_atomic_cmpxchg_inatomic\n" - "1: l32i %1, %3, 0\n" - " mov %0, %5\n" - " wsr %1, scompare1\n" - "2: s32c1i %0, %3, 0\n" - "3:\n" + " wsr %5, scompare1\n" + "1: s32c1i %1, %4, 0\n" + " s32i %1, %6, 0\n" + "2:\n" " .section .fixup,\"ax\"\n" " .align 4\n" - "4: .long 3b\n" - "5: l32r %1, 4b\n" - " movi %0, %6\n" + "3: .long 2b\n" + "4: l32r %1, 3b\n" + " movi %0, %7\n" " jx %1\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .long 1b,5b,2b,5b\n" + " .long 1b,4b\n" " .previous\n" - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) : "memory"); - *uval = prev; return ret; } diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 720fe4e8b497..8dad076661fc 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -79,19 +79,75 @@ void __init zones_init(void) free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); } +#ifdef CONFIG_HIGHMEM +static void __init free_area_high(unsigned long pfn, unsigned long end) +{ + for (; pfn < end; pfn++) + free_highmem_page(pfn_to_page(pfn)); +} + +static void __init free_highpages(void) +{ + unsigned long max_low = max_low_pfn; + struct memblock_region *mem, *res; + + reset_all_zones_managed_pages(); + /* set highmem page free */ + for_each_memblock(memory, mem) { + unsigned long start = memblock_region_memory_base_pfn(mem); + unsigned long end = memblock_region_memory_end_pfn(mem); + + /* Ignore complete lowmem entries */ + if (end <= max_low) + continue; + + if (memblock_is_nomap(mem)) + continue; + + /* Truncate partial highmem entries */ + if (start < max_low) + start = max_low; + + /* Find and exclude any reserved regions */ + for_each_memblock(reserved, res) { + unsigned long res_start, res_end; + + res_start = memblock_region_reserved_base_pfn(res); + res_end = memblock_region_reserved_end_pfn(res); + + if (res_end < start) + continue; + if (res_start < start) + res_start = start; + if (res_start > end) + res_start = end; + if (res_end > end) + res_end = end; + if (res_start != start) + free_area_high(start, res_start); + start = res_end; + if (start == end) + break; + } + + /* And now free anything 
which remains */ + if (start < end) + free_area_high(start, end); + } +} +#else +static void __init free_highpages(void) +{ +} +#endif + /* * Initialize memory pages. */ void __init mem_init(void) { -#ifdef CONFIG_HIGHMEM - unsigned long tmp; - - reset_all_zones_managed_pages(); - for (tmp = max_low_pfn; tmp < max_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); -#endif + free_highpages(); max_mapnr = max_pfn - ARCH_PFN_OFFSET; high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); diff --git a/block/badblocks.c b/block/badblocks.c index 43c71166e1e2..91f7bcf979d3 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, if (bb->shift < 0) /* badblocks are disabled */ - return 0; + return 1; if (bb->shift) { /* round the start down, and the end up */ diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index ceefb9a706d6..5d53e504acae 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -749,10 +749,11 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) unsigned long flags; int i; + spin_lock_irqsave(&bfqd->lock, flags); + if (!entity) /* root group */ - return; + goto put_async_queues; - spin_lock_irqsave(&bfqd->lock, flags); /* * Empty all service_trees belonging to this group before * deactivating the group itself. @@ -783,6 +784,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) } __bfq_deactivate_entity(entity, false); + +put_async_queues: bfq_put_async_queues(bfqd, bfqg); spin_unlock_irqrestore(&bfqd->lock, flags); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index a4783da90ba8..0f860cf0d56d 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -108,6 +108,7 @@ #include "blk-mq-tag.h" #include "blk-mq-sched.h" #include "bfq-iosched.h" +#include "blk-wbt.h" #define BFQ_BFQQ_FNS(name) \ void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ @@ -4775,7 +4776,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfq_init_root_group(bfqd->root_group, bfqd); bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); - + wbt_disable_default(q); return 0; out_free: diff --git a/block/bio.c b/block/bio.c index 101c2a9b5481..dbaa82c967f4 100644 --- a/block/bio.c +++ b/block/bio.c @@ -43,9 +43,9 @@ * break badly! 
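free_highpages() above walks each highmem memblock region and releases only the parts not covered by reserved regions, clamping every reserved range against the current window before freeing the gap below it. The userspace sketch below reproduces just that interval arithmetic, assuming the reserved list is already sorted (as memblock's is) and using made-up page numbers:

/* Illustration only: the clamp-and-subtract loop used by free_highpages()
 * above, applied to plain integers instead of memblock regions. */
#include <stdio.h>

struct range { unsigned long start, end; };

static void release(unsigned long start, unsigned long end)
{
        printf("free [%lu, %lu)\n", start, end);
}

static void free_unreserved(unsigned long start, unsigned long end,
                            const struct range *res, int nr_res)
{
        for (int i = 0; i < nr_res; i++) {
                unsigned long rs = res[i].start, re = res[i].end;

                if (re < start)            /* reserved range entirely below us */
                        continue;
                if (rs < start)
                        rs = start;
                if (rs > end)
                        rs = end;
                if (re > end)
                        re = end;
                if (rs != start)           /* free the gap below the reservation */
                        release(start, rs);
                start = re;                /* resume above the reservation */
                if (start == end)
                        break;
        }
        if (start < end)                   /* and whatever remains at the top */
                release(start, end);
}

int main(void)
{
        const struct range reserved[] = { { 110, 120 }, { 150, 160 } };

        free_unreserved(100, 200, reserved, 2);   /* frees [100,110) [120,150) [160,200) */
        return 0;
}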
cannot be bigger than what you can fit into an * unsigned short */ -#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } +#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n } static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = { - BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), + BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max), }; #undef BV @@ -597,7 +597,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) * so we don't set nor calculate new physical/hw segment counts here */ bio->bi_disk = bio_src->bi_disk; + bio->bi_partno = bio_src->bi_partno; bio_set_flag(bio, BIO_CLONED); + if (bio_flagged(bio_src, BIO_THROTTLED)) + bio_set_flag(bio, BIO_THROTTLED); bio->bi_opf = bio_src->bi_opf; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; diff --git a/block/blk-core.c b/block/blk-core.c index 048be4aa6024..c01f4907dbbc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -333,11 +333,13 @@ EXPORT_SYMBOL(blk_stop_queue); void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); + cancel_work_sync(&q->timeout_work); if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; int i; + cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } else { @@ -529,6 +531,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all) } } +void blk_drain_queue(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + __blk_drain_queue(q, true); + spin_unlock_irq(q->queue_lock); +} + /** * blk_queue_bypass_start - enter queue bypass mode * @q: queue of interest @@ -604,8 +613,8 @@ void blk_set_queue_dying(struct request_queue *q) spin_lock_irq(q->queue_lock); blk_queue_for_each_rl(rl, q) { if (rl->rq_pool) { - wake_up(&rl->wait[BLK_RW_SYNC]); - wake_up(&rl->wait[BLK_RW_ASYNC]); + wake_up_all(&rl->wait[BLK_RW_SYNC]); + wake_up_all(&rl->wait[BLK_RW_ASYNC]); } } spin_unlock_irq(q->queue_lock); @@ -653,11 +662,18 @@ void blk_cleanup_queue(struct request_queue *q) */ blk_freeze_queue(q); spin_lock_irq(lock); - if (!q->mq_ops) - __blk_drain_queue(q, true); queue_flag_set(QUEUE_FLAG_DEAD, q); spin_unlock_irq(lock); + /* + * make sure all in-progress dispatch are completed because + * blk_freeze_queue() can only complete all requests, and + * dispatch may still be in-progress since we dispatch requests + * from more than one contexts + */ + if (q->mq_ops) + blk_mq_quiesce_queue(q); + /* for synchronous bio-based driver finish in-flight integrity i/o */ blk_flush_integrity(); @@ -844,6 +860,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) setup_timer(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); + INIT_WORK(&q->timeout_work, NULL); INIT_LIST_HEAD(&q->queue_head); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->icq_list); @@ -2260,7 +2277,7 @@ blk_qc_t submit_bio(struct bio *bio) unsigned int count; if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) - count = queue_logical_block_size(bio->bi_disk->queue); + count = queue_logical_block_size(bio->bi_disk->queue) >> 9; else count = bio_sectors(bio); diff --git a/block/blk-map.c b/block/blk-map.c index d5251edcc0dd..e31be14da8ea 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -12,22 +12,29 @@ #include "blk.h" /* - * Append a bio to a passthrough request. Only works can be merged into - * the request based on the driver constraints. 
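The BV() change above exists because __stringify() expands its argument before stringizing, so the old single-argument form turned BIO_MAX_PAGES into its numeric value when building the slab name; passing the name as a second parameter keeps the literal token, giving "biovec-max". A minimal preprocessor demonstration, using local STR1/STR2 helpers in place of the kernel's __stringify():

/* Illustration only: direct vs. double-expansion stringification, which is
 * the reason BV() grew a second parameter above. */
#include <stdio.h>

#define STR1(x) #x
#define STR2(x) STR1(x)            /* expands x first, like __stringify() */
#define BIO_MAX_PAGES 256          /* stand-in value for the demo */

int main(void)
{
        printf("literal token:  biovec-%s\n", STR1(BIO_MAX_PAGES)); /* biovec-BIO_MAX_PAGES */
        printf("expanded value: biovec-%s\n", STR2(BIO_MAX_PAGES)); /* biovec-256 */
        return 0;
}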
+ * Append a bio to a passthrough request. Only works if the bio can be merged + * into the request based on the driver constraints. */ -int blk_rq_append_bio(struct request *rq, struct bio *bio) +int blk_rq_append_bio(struct request *rq, struct bio **bio) { - blk_queue_bounce(rq->q, &bio); + struct bio *orig_bio = *bio; + + blk_queue_bounce(rq->q, bio); if (!rq->bio) { - blk_rq_bio_prep(rq->q, rq, bio); + blk_rq_bio_prep(rq->q, rq, *bio); } else { - if (!ll_back_merge_fn(rq->q, rq, bio)) + if (!ll_back_merge_fn(rq->q, rq, *bio)) { + if (orig_bio != *bio) { + bio_put(*bio); + *bio = orig_bio; + } return -EINVAL; + } - rq->biotail->bi_next = bio; - rq->biotail = bio; - rq->__data_len += bio->bi_iter.bi_size; + rq->biotail->bi_next = *bio; + rq->biotail = *bio; + rq->__data_len += (*bio)->bi_iter.bi_size; } return 0; @@ -80,14 +87,12 @@ static int __blk_rq_map_user_iov(struct request *rq, * We link the bounce buffer in and could have to traverse it * later so we have to get a ref to prevent it from being freed */ - ret = blk_rq_append_bio(rq, bio); - bio_get(bio); + ret = blk_rq_append_bio(rq, &bio); if (ret) { - bio_endio(bio); __blk_rq_unmap_user(orig_bio); - bio_put(bio); return ret; } + bio_get(bio); return 0; } @@ -121,7 +126,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); struct bio *bio = NULL; struct iov_iter i; - int ret; + int ret = -EINVAL; if (!iter_is_iovec(iter)) goto fail; @@ -150,7 +155,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, __blk_rq_unmap_user(bio); fail: rq->bio = NULL; - return -EINVAL; + return ret; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -220,7 +225,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; int do_copy = 0; - struct bio *bio; + struct bio *bio, *orig_bio; int ret; if (len > (queue_max_hw_sectors(q) << 9)) @@ -243,10 +248,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (do_copy) rq->rq_flags |= RQF_COPY_USER; - ret = blk_rq_append_bio(rq, bio); + orig_bio = bio; + ret = blk_rq_append_bio(rq, &bio); if (unlikely(ret)) { /* request is too big */ - bio_put(bio); + bio_put(orig_bio); return ret; } diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 9f8cffc8a701..3eb169f15842 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -16,11 +16,6 @@ static int cpu_to_queue_index(unsigned int nr_queues, const int cpu) { - /* - * Non present CPU will be mapped to queue index 0. 
- */ - if (!cpu_present(cpu)) - return 0; return cpu % nr_queues; } diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 4ab69435708c..eca011fdfa0e 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -94,7 +94,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) struct request_queue *q = hctx->queue; struct elevator_queue *e = q->elevator; const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; - bool did_work = false; + bool do_sched_dispatch = true; LIST_HEAD(rq_list); /* RCU or SRCU read lock is needed before checking quiesced flag */ @@ -125,18 +125,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - did_work = blk_mq_dispatch_rq_list(q, &rq_list); + do_sched_dispatch = blk_mq_dispatch_rq_list(q, &rq_list); } else if (!has_sched_dispatch) { blk_mq_flush_busy_ctxs(hctx, &rq_list); blk_mq_dispatch_rq_list(q, &rq_list); } /* - * We want to dispatch from the scheduler if we had no work left - * on the dispatch list, OR if we did have work but weren't able - * to make progress. + * We want to dispatch from the scheduler if there was nothing + * on the dispatch list or we were able to dispatch from the + * dispatch list. */ - if (!did_work && has_sched_dispatch) { + if (do_sched_dispatch && has_sched_dispatch) { do { struct request *rq; diff --git a/block/blk-mq.c b/block/blk-mq.c index 98a18609755e..6f899669cbdd 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -159,6 +159,8 @@ void blk_freeze_queue(struct request_queue *q) * exported to drivers as the only user for unfreeze is blk_mq. */ blk_freeze_queue_start(q); + if (!q->mq_ops) + blk_drain_queue(q); blk_mq_freeze_queue_wait(q); } @@ -636,7 +638,6 @@ static void __blk_mq_requeue_request(struct request *rq) trace_block_rq_requeue(q, rq); wbt_requeue(q->rq_wb, &rq->issue_stat); - blk_mq_sched_requeue_request(rq); if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { if (q->dma_drain_size && blk_rq_bytes(rq)) @@ -648,6 +649,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) { __blk_mq_requeue_request(rq); + /* this request will be re-inserted to io scheduler queue */ + blk_mq_sched_requeue_request(rq); + BUG_ON(blk_queued_rq(rq)); blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); } @@ -1924,7 +1928,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, { blk_mq_debugfs_unregister_hctx(hctx); - blk_mq_tag_idle(hctx); + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_idle(hctx); if (set->ops->exit_request) set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); @@ -2310,6 +2315,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; blk_mq_sysfs_unregister(q); + + /* protect against switching io scheduler */ + mutex_lock(&q->sysfs_lock); for (i = 0; i < set->nr_hw_queues; i++) { int node; @@ -2354,6 +2362,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, } } q->nr_hw_queues = i; + mutex_unlock(&q->sysfs_lock); blk_mq_sysfs_register(q); } @@ -2524,9 +2533,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) { - if (set->ops->map_queues) + if (set->ops->map_queues) { + int cpu; + /* + * transport .map_queues is usually done in the following + * way: + * + * for (queue = 0; queue < set->nr_hw_queues; queue++) { + * mask = get_cpu_mask(queue) + * for_each_cpu(cpu, mask) + * set->mq_map[cpu] = queue; + * } 
+ * + * When we need to remap, the table has to be cleared for + * killing stale mapping since one CPU may not be mapped + * to any hw queue. + */ + for_each_possible_cpu(cpu) + set->mq_map[cpu] = 0; + return set->ops->map_queues(set); - else + } else return blk_mq_map_queues(set); } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8631763866c6..a8cd7b3d9647 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2223,13 +2223,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, out_unlock: spin_unlock_irq(q->queue_lock); out: - /* - * As multiple blk-throtls may stack in the same issue path, we - * don't want bios to leave with the flag set. Clear the flag if - * being issued. - */ - if (!throttled) - bio_clear_flag(bio, BIO_THROTTLED); + bio_set_flag(bio, BIO_THROTTLED); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW if (throttled || !td->track_bio_latency) diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 17ec83bb0900..6427be7ac363 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work) struct request *rq, *tmp; int next_set = 0; - if (blk_queue_enter(q, true)) - return; spin_lock_irqsave(q->queue_lock, flags); list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) @@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work) mod_timer(&q->timeout, round_jiffies_up(next)); spin_unlock_irqrestore(q->queue_lock, flags); - blk_queue_exit(q); } /** diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 6a9a0f03a67b..5c105514bca7 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -654,7 +654,7 @@ void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on) } /* - * Disable wbt, if enabled by default. Only called from CFQ. + * Disable wbt, if enabled by default. */ void wbt_disable_default(struct request_queue *q) { @@ -698,7 +698,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q) static int wbt_data_dir(const struct request *rq) { - return rq_data_dir(rq); + const int op = req_op(rq); + + if (op == REQ_OP_READ) + return READ; + else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH) + return WRITE; + + /* don't account */ + return -1; } int wbt_init(struct request_queue *q) diff --git a/block/blk.h b/block/blk.h index 85be8b232b37..b2c287c2c6a3 100644 --- a/block/blk.h +++ b/block/blk.h @@ -362,4 +362,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) } #endif /* CONFIG_BOUNCE */ +extern void blk_drain_queue(struct request_queue *q); + #endif /* BLK_INTERNAL_H */ diff --git a/block/bounce.c b/block/bounce.c index fceb1a96480b..1d05c422c932 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, unsigned i = 0; bool bounce = false; int sectors = 0; + bool passthrough = bio_is_passthrough(*bio_orig); bio_for_each_segment(from, *bio_orig, iter) { if (i++ < BIO_MAX_PAGES) @@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, if (!bounce) return; - if (sectors < bio_sectors(*bio_orig)) { + if (!passthrough && sectors < bio_sectors(*bio_orig)) { bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); bio_chain(bio, *bio_orig); generic_make_request(*bio_orig); *bio_orig = bio; } - bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set); + bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? 
NULL : + bounce_bio_set); bio_for_each_segment_all(to, bio, i) { struct page *page = to->bv_page; diff --git a/block/genhd.c b/block/genhd.c index dd305c65ffb0..0a6776dac0ae 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1220,6 +1220,7 @@ static void disk_release(struct device *dev) struct class block_class = { .name = "block", }; +EXPORT_SYMBOL_GPL(block_class); static char *block_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index f58cab82105b..09cd5cf2e459 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -814,6 +814,7 @@ static struct elevator_type kyber_sched = { .limit_depth = kyber_limit_depth, .prepare_request = kyber_prepare_request, .finish_request = kyber_finish_request, + .requeue_request = kyber_finish_request, .completed_request = kyber_completed_request, .dispatch_request = kyber_dispatch_request, .has_work = kyber_has_work, diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 0af3a3db6fb0..82c44f7df911 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c @@ -301,7 +301,9 @@ static void parse_bsd(struct parsed_partitions *state, continue; bsd_start = le32_to_cpu(p->p_offset); bsd_size = le32_to_cpu(p->p_size); - if (memcmp(flavour, "bsd\0", 4) == 0) + /* FreeBSD has relative offset if C partition offset is zero */ + if (memcmp(flavour, "bsd\0", 4) == 0 && + le32_to_cpu(l->d_partitions[2].p_offset) == 0) bsd_start += offset; if (offset == bsd_start && size == bsd_size) /* full parent partition, we have it already */ diff --git a/build.config.goldfish.arm b/build.config.goldfish.arm new file mode 100644 index 000000000000..866da9361b71 --- /dev/null +++ b/build.config.goldfish.arm @@ -0,0 +1,12 @@ +ARCH=arm +BRANCH=android-4.4 +CROSS_COMPILE=arm-linux-androidkernel- +DEFCONFIG=ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin +FILES=" +arch/arm/boot/zImage +vmlinux +System.map +" diff --git a/build.config.goldfish.arm64 b/build.config.goldfish.arm64 new file mode 100644 index 000000000000..9c963cf4a3d8 --- /dev/null +++ b/build.config.goldfish.arm64 @@ -0,0 +1,12 @@ +ARCH=arm64 +BRANCH=android-4.4 +CROSS_COMPILE=aarch64-linux-android- +DEFCONFIG=ranchu64_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin +FILES=" +arch/arm64/boot/Image +vmlinux +System.map +" diff --git a/build.config.goldfish.mips b/build.config.goldfish.mips new file mode 100644 index 000000000000..8af53d2c2940 --- /dev/null +++ b/build.config.goldfish.mips @@ -0,0 +1,11 @@ +ARCH=mips +BRANCH=android-4.4 +CROSS_COMPILE=mips64el-linux-android- +DEFCONFIG=ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/bin +FILES=" +vmlinux +System.map +" diff --git a/build.config.goldfish.mips64 b/build.config.goldfish.mips64 new file mode 100644 index 000000000000..2a33d36dc4c8 --- /dev/null +++ b/build.config.goldfish.mips64 @@ -0,0 +1,11 @@ +ARCH=mips +BRANCH=android-4.4 +CROSS_COMPILE=mips64el-linux-android- +DEFCONFIG=ranchu64_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/bin +FILES=" +vmlinux +System.map +" diff --git a/build.config.goldfish.x86 b/build.config.goldfish.x86 new file mode 100644 index 
000000000000..f86253f58d4d --- /dev/null +++ b/build.config.goldfish.x86 @@ -0,0 +1,12 @@ +ARCH=x86 +BRANCH=android-4.4 +CROSS_COMPILE=x86_64-linux-android- +DEFCONFIG=i386_ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" diff --git a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64 new file mode 100644 index 000000000000..e1738861ec5c --- /dev/null +++ b/build.config.goldfish.x86_64 @@ -0,0 +1,12 @@ +ARCH=x86_64 +BRANCH=android-4.4 +CROSS_COMPILE=x86_64-linux-android- +DEFCONFIG=x86_64_ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" diff --git a/crypto/Kconfig b/crypto/Kconfig index ac5fb37e6f4b..d8b96515abe8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -130,7 +130,7 @@ config CRYPTO_DH config CRYPTO_ECDH tristate "ECDH algorithm" - select CRYTPO_KPP + select CRYPTO_KPP select CRYPTO_RNG_DEFAULT help Generic implementation of the ECDH algorithm @@ -1494,6 +1494,20 @@ config CRYPTO_SERPENT_AVX2_X86_64 See also: +config CRYPTO_SPECK + tristate "Speck cipher algorithm" + select CRYPTO_ALGAPI + help + Speck is a lightweight block cipher that is tuned for optimal + performance in software (rather than hardware). + + Speck may not be as secure as AES, and should only be used on systems + where AES is not fast enough. + + See also: + + If unsure, say N. + config CRYPTO_TEA tristate "TEA, XTEA and XETA cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index da190be60ce2..59220c2264e1 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o +CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o @@ -108,6 +109,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o +obj-$(CONFIG_CRYPTO_SPECK) += speck.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 337cf382718e..4e4640bb82b9 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - const u32 forbidden = CRYPTO_ALG_INTERNAL; + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct sockaddr_alg *sa = (void *)uaddr; @@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) void *private; int err; + /* If caller uses non-allowed flag, return error. 
*/ + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) + return -EINVAL; + if (sock->state == SS_CONNECTED) return -EINVAL; @@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (IS_ERR(type)) return PTR_ERR(type); - private = type->bind(sa->salg_name, - sa->salg_feat & ~forbidden, - sa->salg_mask & ~forbidden); + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); if (IS_ERR(private)) { module_put(type->owner); return PTR_ERR(private); @@ -691,7 +693,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) unsigned int i; list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { - ctx->rcvused -= rsgl->sg_num_bytes; + atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused); af_alg_free_sg(&rsgl->sgl); list_del(&rsgl->list); if (rsgl != &areq->first_rsgl) @@ -699,14 +701,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) } tsgl = areq->tsgl; - for_each_sg(tsgl, sg, areq->tsgl_entries, i) { - if (!sg_page(sg)) - continue; - put_page(sg_page(sg)); - } + if (tsgl) { + for_each_sg(tsgl, sg, areq->tsgl_entries, i) { + if (!sg_page(sg)) + continue; + put_page(sg_page(sg)); + } - if (areq->tsgl && areq->tsgl_entries) sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); + } } EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); @@ -1047,6 +1050,18 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, } EXPORT_SYMBOL_GPL(af_alg_sendpage); +/** + * af_alg_free_resources - release resources required for crypto request + */ +void af_alg_free_resources(struct af_alg_async_req *areq) +{ + struct sock *sk = areq->sk; + + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); +} +EXPORT_SYMBOL_GPL(af_alg_free_resources); + /** * af_alg_async_cb - AIO callback handler * @@ -1063,18 +1078,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) struct kiocb *iocb = areq->iocb; unsigned int resultlen; - lock_sock(sk); - /* Buffer size written by crypto operation. */ resultlen = areq->outlen; - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); - __sock_put(sk); + af_alg_free_resources(areq); + sock_put(sk); iocb->ki_complete(iocb, err ? 
err : resultlen, 0); - - release_sock(sk); } EXPORT_SYMBOL_GPL(af_alg_async_cb); @@ -1157,12 +1167,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, if (!af_alg_readable(sk)) break; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); - if (err) - return err; - } - seglen = min_t(size_t, (maxsize - len), msg_data_left(msg)); @@ -1188,7 +1192,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, areq->last_rsgl = rsgl; len += err; - ctx->rcvused += err; + atomic_add(err, &ctx->rcvused); rsgl->sg_num_bytes = err; iov_iter_advance(&msg->msg_iter, err); } diff --git a/crypto/ahash.c b/crypto/ahash.c index 5e8666e6ccae..3980e9e45289 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (nbytes && walk->offset & alignmask && !err) { walk->offset = ALIGN(walk->offset, alignmask + 1); - walk->data += walk->offset; - nbytes = min(nbytes, ((unsigned int)(PAGE_SIZE)) - walk->offset); walk->entrylen -= nbytes; - return nbytes; + if (nbytes) { + walk->data += walk->offset; + return nbytes; + } } if (walk->flags & CRYPTO_ALG_ASYNC) @@ -193,11 +194,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned long alignmask = crypto_ahash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return ahash_setkey_unaligned(tfm, key, keylen); + err = ahash_setkey_unaligned(tfm, key, keylen); + else + err = tfm->setkey(tfm, key, keylen); + + if (err) + return err; - return tfm->setkey(tfm, key, keylen); + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); @@ -370,7 +378,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_ahash_op(req, tfm->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -456,7 +469,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); hash->setkey = ahash_nosetkey; - hash->has_setkey = false; hash->export = ahash_no_export; hash->import = ahash_no_import; @@ -471,7 +483,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (alg->setkey) { hash->setkey = alg->setkey; - hash->has_setkey = true; + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); } if (alg->export) hash->export = alg->export; @@ -655,5 +668,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(ahash_attr_alg); +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +{ + struct crypto_alg *alg = &halg->base; + + if (alg->cra_type != &crypto_ahash_type) + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + + return __crypto_ahash_alg(alg)->setkey != NULL; +} +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index aa699ff6c876..50eb828db767 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, spawn->alg = NULL; spawns = &inst->alg.cra_users; + + /* + * We may encounter an unregistered instance here, since + * an 
instance's spawns are set up prior to the instance + * being registered. An unregistered instance will have + * NULL ->cra_users.next, since ->cra_users isn't + * properly initialized until registration. But an + * unregistered instance cannot have any users, so treat + * it the same as ->cra_users being empty. + */ + if (spawns->next == NULL) + break; } } while ((spawns = crypto_more_spawns(alg, &stack, &top, &secondary_spawns))); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 516b38c3a169..f138af18b500 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -101,16 +101,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, struct aead_tfm *aeadc = pask->private; struct crypto_aead *tfm = aeadc->aead; struct crypto_skcipher *null_tfm = aeadc->null_tfm; - unsigned int as = crypto_aead_authsize(tfm); + unsigned int i, as = crypto_aead_authsize(tfm); struct af_alg_async_req *areq; - struct af_alg_tsgl *tsgl; - struct scatterlist *src; + struct af_alg_tsgl *tsgl, *tmp; + struct scatterlist *rsgl_src, *tsgl_src = NULL; int err = 0; size_t used = 0; /* [in] TX bufs to be en/decrypted */ size_t outlen = 0; /* [out] RX bufs produced by kernel */ size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* * Data length provided by caller via sendmsg/sendpage that has not * yet been processed. @@ -178,7 +184,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, } processed = used + ctx->aead_assoclen; - tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); + list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) { + for (i = 0; i < tsgl->cur; i++) { + struct scatterlist *process_sg = tsgl->sg + i; + + if (!(process_sg->length) || !sg_page(process_sg)) + continue; + tsgl_src = process_sg; + break; + } + if (tsgl_src) + break; + } + if (processed && !tsgl_src) { + err = -EFAULT; + goto free; + } /* * Copy of AAD from source to destination @@ -194,7 +215,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Use the RX SGL as source (and destination) for crypto op. */ - src = areq->first_rsgl.sgl.sg; + rsgl_src = areq->first_rsgl.sgl.sg; if (ctx->enc) { /* @@ -207,7 +228,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * v v * RX SGL: AAD || PT || Tag */ - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + err = crypto_aead_copy_sgl(null_tfm, tsgl_src, areq->first_rsgl.sgl.sg, processed); if (err) goto free; @@ -225,7 +246,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Copy AAD || CT to RX SGL buffer for in-place operation. */ - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + err = crypto_aead_copy_sgl(null_tfm, tsgl_src, areq->first_rsgl.sgl.sg, outlen); if (err) goto free; @@ -257,23 +278,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, areq->tsgl); } else /* no RX SGL present (e.g. 
authentication only) */ - src = areq->tsgl; + rsgl_src = areq->tsgl; } /* Initialize the crypto operation */ - aead_request_set_crypt(&areq->cra_u.aead_req, src, + aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src, areq->first_rsgl.sgl.sg, used, ctx->iv); aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); aead_request_set_tfm(&areq->cra_u.aead_req, tfm); if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ + sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = outlen; + aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_async_cb, areq); err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req); + + /* AIO operation in progress */ + if (err == -EINPROGRESS || err == -EBUSY) + return -EIOCBQUEUED; + + sock_put(sk); } else { /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, @@ -285,19 +317,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, &ctx->completion); } - /* AIO operation in progress */ - if (err == -EINPROGRESS) { - sock_hold(sk); - - /* Remember output size that will be generated. */ - areq->outlen = outlen; - - return -EIOCBQUEUED; - } free: - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); + af_alg_free_resources(areq); return err ? err : outlen; } @@ -487,6 +509,7 @@ static void aead_release(void *private) struct aead_tfm *tfm = private; crypto_free_aead(tfm->aead); + crypto_put_default_null_skcipher2(); kfree(tfm); } @@ -519,7 +542,6 @@ static void aead_sock_destruct(struct sock *sk) unsigned int ivlen = crypto_aead_ivsize(tfm); af_alg_pull_tsgl(sk, ctx->used, NULL, 0); - crypto_put_default_null_skcipher2(); sock_kzfree_s(sk, ctx->iv, ivlen); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); @@ -549,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; - ctx->rcvused = 0; + atomic_set(&ctx->rcvused, 0); ctx->more = 0; ctx->merge = 0; ctx->enc = 0; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5e92bd275ef3..39cebd3256bf 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -34,11 +34,6 @@ struct hash_ctx { struct ahash_request req; }; -struct algif_hash_tfm { - struct crypto_ahash *hash; - bool has_key; -}; - static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -309,7 +304,7 @@ static int hash_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct algif_hash_tfm *tfm; + struct crypto_ahash *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -323,7 +318,7 @@ static int hash_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (!tfm->has_key) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; if (!pask->refcnt++) @@ -414,41 +409,17 @@ static struct proto_ops algif_hash_ops_nokey = { static void *hash_bind(const char *name, u32 type, u32 mask) { - struct algif_hash_tfm *tfm; - struct crypto_ahash *hash; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - hash = crypto_alloc_ahash(name, type, mask); - if (IS_ERR(hash)) { - kfree(tfm); - return ERR_CAST(hash); - } - - tfm->hash = hash; - - return tfm; + return crypto_alloc_ahash(name, type, mask); } static void hash_release(void *private) { - struct algif_hash_tfm *tfm = private; - - 
crypto_free_ahash(tfm->hash); - kfree(tfm); + crypto_free_ahash(private); } static int hash_setkey(void *private, const u8 *key, unsigned int keylen) { - struct algif_hash_tfm *tfm = private; - int err; - - err = crypto_ahash_setkey(tfm->hash, key, keylen); - tfm->has_key = !err; - - return err; + return crypto_ahash_setkey(private, key, keylen); } static void hash_sock_destruct(struct sock *sk) @@ -463,11 +434,10 @@ static void hash_sock_destruct(struct sock *sk) static int hash_accept_parent_nokey(void *private, struct sock *sk) { - struct hash_ctx *ctx; + struct crypto_ahash *tfm = private; struct alg_sock *ask = alg_sk(sk); - struct algif_hash_tfm *tfm = private; - struct crypto_ahash *hash = tfm->hash; - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); + struct hash_ctx *ctx; + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -480,7 +450,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ask->private = ctx; - ahash_request_set_tfm(&ctx->req, hash); + ahash_request_set_tfm(&ctx->req, tfm); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); @@ -491,9 +461,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) static int hash_accept_parent(void *private, struct sock *sk) { - struct algif_hash_tfm *tfm = private; + struct crypto_ahash *tfm = private; - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return hash_accept_parent_nokey(private, sk); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 8ae4170aaeb4..90bc4e0f0785 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* Allocate cipher request for current operation. */ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + crypto_skcipher_reqsize(tfm)); @@ -117,13 +123,24 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ + sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = len; + skcipher_request_set_callback(&areq->cra_u.skcipher_req, CRYPTO_TFM_REQ_MAY_SLEEP, af_alg_async_cb, areq); err = ctx->enc ? crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + + /* AIO operation in progress */ + if (err == -EINPROGRESS || err == -EBUSY) + return -EIOCBQUEUED; + + sock_put(sk); } else { /* Synchronous operation */ skcipher_request_set_callback(&areq->cra_u.skcipher_req, @@ -137,19 +154,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, &ctx->completion); } - /* AIO operation in progress */ - if (err == -EINPROGRESS) { - sock_hold(sk); - - /* Remember output size that will be generated. */ - areq->outlen = len; - - return -EIOCBQUEUED; - } free: - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); + af_alg_free_resources(areq); return err ? 
err : len; } @@ -384,7 +391,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; - ctx->rcvused = 0; + atomic_set(&ctx->rcvused, 0); ctx->more = 0; ctx->merge = 0; ctx->enc = 0; diff --git a/crypto/api.c b/crypto/api.c index 941cd4c6c7ec..2a2479d168aa 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request *req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c index 2d93d9eccb4d..a18295651077 100644 --- a/crypto/asymmetric_keys/pkcs7_verify.c +++ b/crypto/asymmetric_keys/pkcs7_verify.c @@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7, pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); - if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) { + if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", sinfo->index); continue; @@ -273,7 +273,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, sinfo->index); return 0; } - ret = public_key_verify_signature(p->pub, p->sig); + ret = public_key_verify_signature(p->pub, x509->sig); if (ret < 0) return ret; x509->signer = p; @@ -369,8 +369,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7, * * (*) -EBADMSG if some part of the message was invalid, or: * - * (*) 0 if no signature chains were found to be blacklisted or to contain - * unsupported crypto, or: + * (*) 0 if a signature chain passed verification, or: * * (*) -EKEYREJECTED if a blacklisted key was encountered, or: * @@ -426,8 +425,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7, for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { ret = pkcs7_verify_one(pkcs7, sinfo); - if (sinfo->blacklisted && actual_ret == -ENOPKG) - actual_ret = -EKEYREJECTED; + if (sinfo->blacklisted) { + if (actual_ret == -ENOPKG) + actual_ret = -EKEYREJECTED; + continue; + } if (ret < 0) { if (ret == -ENOPKG) { sinfo->unsupported_crypto = true; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 3cd6e12cfc46..d1af69d2ff85 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -93,9 +93,11 @@ int public_key_verify_signature(const struct public_key *pkey, BUG_ON(!pkey); BUG_ON(!sig); - BUG_ON(!sig->digest); BUG_ON(!sig->s); + if (!sig->digest) + return -ENOPKG; + alg_name = sig->pkey_algo; if (strcmp(sig->pkey_algo, "rsa") == 0) { /* The data wangled by the RSA algorithm is typically padded diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c index 86fb68508952..7c93c7728454 100644 --- a/crypto/asymmetric_keys/restrict.c +++ b/crypto/asymmetric_keys/restrict.c @@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup); * * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a * matching parent certificate in the trusted list, -EKEYREJECTED if the - * signature check fails or the key is blacklisted and some 
other error if - * there is a matching certificate but the signature check cannot be performed. + * signature check fails or the key is blacklisted, -ENOPKG if the signature + * uses unsupported crypto, or some other error if there is a matching + * certificate but the signature check cannot be performed. */ int restrict_link_by_signature(struct key *dest_keyring, const struct key_type *type, @@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring, return -EOPNOTSUPP; sig = payload->data[asym_auth]; + if (!sig) + return -ENOPKG; if (!sig->auth_ids[0] && !sig->auth_ids[1]) return -ENOKEY; @@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring, return -EOPNOTSUPP; sig = payload->data[asym_auth]; + if (!sig) + return -ENOPKG; if (!sig->auth_ids[0] && !sig->auth_ids[1]) return -ENOKEY; @@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring, * * Returns 0 if the new certificate was accepted, -ENOKEY if we * couldn't find a matching parent certificate in the trusted list, - * -EKEYREJECTED if the signature check fails, and some other error if - * there is a matching certificate but the signature check cannot be - * performed. + * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses + * unsupported crypto, or some other error if there is a matching certificate + * but the signature check cannot be performed. */ int restrict_link_by_key_or_keyring(struct key *dest_keyring, const struct key_type *type, @@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring, * * Returns 0 if the new certificate was accepted, -ENOKEY if we * couldn't find a matching parent certificate in the trusted list, - * -EKEYREJECTED if the signature check fails, and some other error if - * there is a matching certificate but the signature check cannot be - * performed. + * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses + * unsupported crypto, or some other error if there is a matching certificate + * but the signature check cannot be performed. 
*/ int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring, const struct key_type *type, diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index dd03fead1ca3..ce2df8c9c583 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen, ctx->cert->pub->pkey_algo = "rsa"; /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; ctx->key = value + 1; ctx->key_size = vlen - 1; return 0; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index eea71dc9686c..1bd0cf71a22d 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -135,7 +135,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert) } ret = -EKEYREJECTED; - if (cert->pub->pkey_algo != cert->sig->pkey_algo) + if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0) goto out; ret = public_key_verify_signature(cert->pub, cert->sig); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index db1bc3147bc4..600afa99941f 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, algt->mask)); if (IS_ERR(poly)) return PTR_ERR(poly); + poly_hash = __crypto_hash_alg_common(poly); + + err = -EINVAL; + if (poly_hash->digestsize != POLY1305_DIGEST_SIZE) + goto out_put_poly; err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); @@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, ctx = aead_instance_ctx(inst); ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; - poly_hash = __crypto_hash_alg_common(poly); err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, aead_crypto_instance(inst)); if (err) diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index aa2a25fc7482..718cbce8d169 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 4c0a0e271876..372320399622 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -146,6 +146,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct chksum_ctx), diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0508c48a45c4..248f6ba41688 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -895,10 +895,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = salg->digestsize; inst->alg.halg.statesize = salg->statesize; @@ -913,7 +912,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, 
inst->alg.finup = cryptd_hash_finup_enqueue; inst->alg.export = cryptd_hash_export; inst->alg.import = cryptd_hash_import; - inst->alg.setkey = cryptd_hash_setkey; + if (crypto_shash_alg_has_setkey(salg)) + inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); diff --git a/crypto/dh.c b/crypto/dh.c index b1032a5c1bfa..aadaf36fb56f 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -21,19 +21,12 @@ struct dh_ctx { MPI xa; }; -static inline void dh_clear_params(struct dh_ctx *ctx) +static void dh_clear_ctx(struct dh_ctx *ctx) { mpi_free(ctx->p); mpi_free(ctx->g); - ctx->p = NULL; - ctx->g = NULL; -} - -static void dh_free_ctx(struct dh_ctx *ctx) -{ - dh_clear_params(ctx); mpi_free(ctx->xa); - ctx->xa = NULL; + memset(ctx, 0, sizeof(*ctx)); } /* @@ -71,10 +64,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params) return -EINVAL; ctx->g = mpi_read_raw_data(params->g, params->g_size); - if (!ctx->g) { - mpi_free(ctx->p); + if (!ctx->g) return -EINVAL; - } return 0; } @@ -86,21 +77,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf, struct dh params; /* Free the old MPI key if any */ - dh_free_ctx(ctx); + dh_clear_ctx(ctx); if (crypto_dh_decode_key(buf, len, ¶ms) < 0) - return -EINVAL; + goto err_clear_ctx; if (dh_set_params(ctx, ¶ms) < 0) - return -EINVAL; + goto err_clear_ctx; ctx->xa = mpi_read_raw_data(params.key, params.key_size); - if (!ctx->xa) { - dh_clear_params(ctx); - return -EINVAL; - } + if (!ctx->xa) + goto err_clear_ctx; return 0; + +err_clear_ctx: + dh_clear_ctx(ctx); + return -EINVAL; } static int dh_compute_value(struct kpp_request *req) @@ -158,7 +151,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm) { struct dh_ctx *ctx = dh_get_ctx(tfm); - dh_free_ctx(ctx); + dh_clear_ctx(ctx); } static struct kpp_alg dh = { diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c index 8ba8a3f82620..7f00c771fe8d 100644 --- a/crypto/dh_helper.c +++ b/crypto/dh_helper.c @@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) if (secret.len != crypto_dh_key_len(params)) return -EINVAL; + /* + * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since + * some drivers assume otherwise. + */ + if (params->key_size > params->p_size || + params->g_size > params->p_size) + return -EINVAL; + /* Don't allocate memory. Set pointers to data within * the given buffer */ @@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) params->p = (void *)(ptr + params->key_size); params->g = (void *)(ptr + params->key_size + params->p_size); + /* + * Don't permit 'p' to be 0. It's not a prime number, and it's subject + * to corner cases such as 'mod 0' being undefined or + * crypto_kpp_maxsize() returning 0. + */ + if (memchr_inv(params->p, 0, params->p_size) == NULL) + return -EINVAL; + return 0; } EXPORT_SYMBOL_GPL(crypto_dh_decode_key); diff --git a/crypto/ecc.c b/crypto/ecc.c index 633a9bcdc574..18f32f2a5e1c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) * DRBG with a security strength of 256. 
*/ if (crypto_get_default_rng()) - err = -EFAULT; + return -EFAULT; err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); crypto_put_default_rng(); diff --git a/crypto/hmac.c b/crypto/hmac.c index 92871dc2a63e..e74730224f0a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) salg = shash_attr_alg(tb[1], 0, 0); if (IS_ERR(salg)) return PTR_ERR(salg); + alg = &salg->base; + /* The underlying hash algorithm must be unkeyed */ err = -EINVAL; + if (crypto_shash_alg_has_setkey(salg)) + goto out_put_alg; + ds = salg->digestsize; ss = salg->statesize; - alg = &salg->base; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto out_put_alg; diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4451bf..fdba6dd6db63 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -313,7 +313,7 @@ static void exit_crypt(struct skcipher_request *req) rctx->left = 0; if (rctx->ext) - kfree(rctx->ext); + kzfree(rctx->ext); } static int do_encrypt(struct skcipher_request *req, int err) @@ -610,8 +610,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ecb_name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; + "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { + err = -ENAMETOOLONG; + goto err_drop_spawn; + } } inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 4e6472658852..e0732d979e3b 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue, pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); + spin_lock_init(&cpu_queue->q_lock); } return 0; } @@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue, int cpu, err; struct mcryptd_cpu_queue *cpu_queue; - cpu = get_cpu(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); - rctx->tag.cpu = cpu; + cpu_queue = raw_cpu_ptr(queue->cpu_queue); + spin_lock(&cpu_queue->q_lock); + cpu = smp_processor_id(); + rctx->tag.cpu = smp_processor_id(); err = crypto_enqueue_request(&cpu_queue->queue, request); pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", cpu, cpu_queue, request); + spin_unlock(&cpu_queue->q_lock); queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); - put_cpu(); return err; } @@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); i = 0; while (i < MCRYPTD_BATCH || single_task_running()) { - /* - * preempt_disable/enable is used to prevent - * being preempted by mcryptd_enqueue_request() - */ - local_bh_disable(); - preempt_disable(); + + spin_lock_bh(&cpu_queue->q_lock); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); - local_bh_enable(); + spin_unlock_bh(&cpu_queue->q_lock); if (!req) { mcryptd_opportunistic_flush(); @@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work) ++i; } if (cpu_queue->queue.qlen) - queue_work(kcrypto_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); } void mcryptd_flusher(struct work_struct *__work) @@ -520,10 +517,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - 
type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = halg->digestsize; inst->alg.halg.statesize = halg->statesize; @@ -538,7 +534,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = mcryptd_hash_finup_enqueue; inst->alg.export = mcryptd_hash_export; inst->alg.import = mcryptd_hash_import; - inst->alg.setkey = mcryptd_hash_setkey; + if (crypto_hash_alg_has_setkey(halg)) + inst->alg.setkey = mcryptd_hash_setkey; inst->alg.digest = mcryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index ee9cfb99fe25..f8ec3d4ba4a8 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) crypto_free_aead(ctx->child); } +static void pcrypt_free(struct aead_instance *inst) +{ + struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_aead(&ctx->spawn); + kfree(inst); +} + static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { @@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; + inst->free = pcrypt_free; + err = aead_register_instance(tmpl, inst); if (err) goto out_drop_aead; @@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) return -EINVAL; } -static void pcrypt_free(struct crypto_instance *inst) -{ - struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); - - crypto_drop_aead(&ctx->spawn); - kfree(inst); -} - static int pcrypt_cpumask_change_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, - .free = pcrypt_free, .module = THIS_MODULE, }; diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index b1c2d57dc734..ba39eb308c79 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc) } EXPORT_SYMBOL_GPL(crypto_poly1305_init); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - /* Poly1305 requires a unique key for each tag, which implies that - * we can't set it on the tfm that gets accessed by multiple users - * simultaneously. Instead we expect the key as the first 32 bytes in - * the update() call. */ - return -ENOTSUPP; -} -EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); - static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ @@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) dctx->s[3] = get_unaligned_le32(key + 12); } +/* + * Poly1305 requires a unique key for each tag, which implies that we can't set + * it on the tfm that gets accessed by multiple users simultaneously. Instead we + * expect the key as the first 32 bytes in the update() call. 
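In practice a caller therefore prepends the 32-byte one-time key (r || s) to the data it feeds through update(). A minimal sketch of that calling convention over the generic shash API follows; it is illustrative only, with poly1305_mac(), otk, msg and msglen being hypothetical names and error handling kept to the bare minimum.

#include <crypto/hash.h>
#include <linux/err.h>

/* otk: 32-byte one-time key (r || s); out: 16-byte tag */
static int poly1305_mac(const u8 *otk, const u8 *msg, unsigned int msglen,
			u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("poly1305", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* shash_desc still carries .flags on this kernel generation */

		/* no setkey(): the key travels as the head of the data stream */
		err = crypto_shash_init(desc);
		if (!err)
			err = crypto_shash_update(desc, otk, 32);
		if (!err)
			err = crypto_shash_update(desc, msg, msglen);
		if (!err)
			err = crypto_shash_final(desc, out);
	}

	crypto_free_shash(tfm);
	return err;
}

This mirrors what in-kernel users such as the chacha20poly1305 template do: one update over the one-time key first, then further updates over the data to be authenticated.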
+ */ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -281,7 +275,6 @@ static struct shash_alg poly1305_alg = { .init = crypto_poly1305_init, .update = crypto_poly1305_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_desc_ctx), .base = { .cra_name = "poly1305", diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index 0b66dc824606..cad395d70d78 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c @@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, return -EINVAL; if (fips_enabled) { - while (!*ptr && n_sz) { + while (n_sz && !*ptr) { ptr++; n_sz--; } diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index f550b5d94630..d7da0eea5622 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc, salsa20_ivsetup(ctx, walk.iv); - if (likely(walk.nbytes == nbytes)) - { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, - walk.src.virt.addr, nbytes); - return blkcipher_walk_done(desc, &walk, 0); - } - while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, walk.src.virt.addr, diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 7e8ed96236ce..a68be626017c 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -18,6 +18,7 @@ #include #include #include +#include #define KECCAK_ROUNDS 24 @@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, unsigned int i; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) src)[i]; + sctx->st[i] ^= get_unaligned_le64(src + 8 * i); keccakf(sctx->st); done += sctx->rsiz; @@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out) sctx->buf[sctx->rsiz - 1] |= 0x80; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) sctx->buf)[i]; + sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); keccakf(sctx->st); diff --git a/crypto/shash.c b/crypto/shash.c index 325a14da5827..5d732c6bb4b2 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -25,11 +25,12 @@ static const struct crypto_type crypto_shash_type; -static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) { return -ENOSYS; } +EXPORT_SYMBOL_GPL(shash_no_setkey); static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) @@ -57,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, { struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return shash_setkey_unaligned(tfm, key, keylen); + err = shash_setkey_unaligned(tfm, key, keylen); + else + err = shash->setkey(tfm, key, keylen); + + if (err) + return err; - return shash->setkey(tfm, key, keylen); + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_shash_setkey); @@ -180,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (((unsigned long)data | (unsigned long)out) & alignmask) return shash_digest_unaligned(desc, data, len, out); @@ -359,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm 
*tfm) crt->digest = shash_async_digest; crt->setkey = shash_async_setkey; - crt->has_setkey = alg->setkey != shash_no_setkey; + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); if (alg->export) crt->export = shash_async_export; @@ -374,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); + struct shash_alg *alg = crypto_shash_alg(hash); + + hash->descsize = alg->descsize; + + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); - hash->descsize = crypto_shash_alg(hash)->descsize; return 0; } diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d5692e35fab1..11af5fd6a443 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, walk->total = req->cryptlen; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->iv = req->iv; - walk->oiv = req->iv; - walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? SKCIPHER_WALK_SLEEP : 0; @@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, int err; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -522,8 +523,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); - walk->iv = req->iv; - walk->oiv = req->iv; + scatterwalk_done(&walk->in, 0, walk->total); + scatterwalk_done(&walk->out, 0, walk->total); if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; diff --git a/crypto/speck.c b/crypto/speck.c new file mode 100644 index 000000000000..58aa9f7f91f7 --- /dev/null +++ b/crypto/speck.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Speck: a lightweight block cipher + * + * Copyright (c) 2018 Google, Inc + * + * Speck has 10 variants, including 5 block sizes. For now we only implement + * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and + * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits + * and a key size of K bits. The Speck128 variants are believed to be the most + * secure variants, and they use the same block size and key sizes as AES. The + * Speck64 variants are less secure, but on 32-bit processors are usually + * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less + * secure and/or not as well suited for implementation on either 32-bit or + * 64-bit processors, so are omitted. + * + * Reference: "The Simon and Speck Families of Lightweight Block Ciphers" + * https://eprint.iacr.org/2013/404.pdf + * + * In a correspondence, the Speck designers have also clarified that the words + * should be interpreted in little-endian format, and the words should be + * ordered such that the first word of each block is 'y' rather than 'x', and + * the first key word (rather than the last) becomes the first round key. 
+ */ + +#include +#include +#include +#include +#include +#include + +/* Speck128 */ + +static __always_inline void speck128_round(u64 *x, u64 *y, u64 k) +{ + *x = ror64(*x, 8); + *x += *y; + *x ^= k; + *y = rol64(*y, 3); + *y ^= *x; +} + +static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k) +{ + *y ^= *x; + *y = ror64(*y, 3); + *x ^= k; + *x -= *y; + *x = rol64(*x, 8); +} + +void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u64 y = get_unaligned_le64(in); + u64 x = get_unaligned_le64(in + 8); + int i; + + for (i = 0; i < ctx->nrounds; i++) + speck128_round(&x, &y, ctx->round_keys[i]); + + put_unaligned_le64(y, out); + put_unaligned_le64(x, out + 8); +} +EXPORT_SYMBOL_GPL(crypto_speck128_encrypt); + +static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in); +} + +void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u64 y = get_unaligned_le64(in); + u64 x = get_unaligned_le64(in + 8); + int i; + + for (i = ctx->nrounds - 1; i >= 0; i--) + speck128_unround(&x, &y, ctx->round_keys[i]); + + put_unaligned_le64(y, out); + put_unaligned_le64(x, out + 8); +} +EXPORT_SYMBOL_GPL(crypto_speck128_decrypt); + +static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in); +} + +int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u64 l[3]; + u64 k; + int i; + + switch (keylen) { + case SPECK128_128_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + ctx->nrounds = SPECK128_128_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[0], &k, i); + } + break; + case SPECK128_192_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + l[1] = get_unaligned_le64(key + 16); + ctx->nrounds = SPECK128_192_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[i % 2], &k, i); + } + break; + case SPECK128_256_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + l[1] = get_unaligned_le64(key + 16); + l[2] = get_unaligned_le64(key + 24); + ctx->nrounds = SPECK128_256_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[i % 3], &k, i); + } + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_speck128_setkey); + +static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen); +} + +/* Speck64 */ + +static __always_inline void speck64_round(u32 *x, u32 *y, u32 k) +{ + *x = ror32(*x, 8); + *x += *y; + *x ^= k; + *y = rol32(*y, 3); + *y ^= *x; +} + +static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k) +{ + *y ^= *x; + *y = ror32(*y, 3); + *x ^= k; + *x -= *y; + *x = rol32(*x, 8); +} + +void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u32 y = get_unaligned_le32(in); + u32 x = get_unaligned_le32(in + 4); + int i; + + for (i = 0; i < ctx->nrounds; i++) + speck64_round(&x, &y, ctx->round_keys[i]); + + put_unaligned_le32(y, out); + put_unaligned_le32(x, out + 4); +} +EXPORT_SYMBOL_GPL(crypto_speck64_encrypt); + +static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in); +} + 
+void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u32 y = get_unaligned_le32(in); + u32 x = get_unaligned_le32(in + 4); + int i; + + for (i = ctx->nrounds - 1; i >= 0; i--) + speck64_unround(&x, &y, ctx->round_keys[i]); + + put_unaligned_le32(y, out); + put_unaligned_le32(x, out + 4); +} +EXPORT_SYMBOL_GPL(crypto_speck64_decrypt); + +static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in); +} + +int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u32 l[3]; + u32 k; + int i; + + switch (keylen) { + case SPECK64_96_KEY_SIZE: + k = get_unaligned_le32(key); + l[0] = get_unaligned_le32(key + 4); + l[1] = get_unaligned_le32(key + 8); + ctx->nrounds = SPECK64_96_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck64_round(&l[i % 2], &k, i); + } + break; + case SPECK64_128_KEY_SIZE: + k = get_unaligned_le32(key); + l[0] = get_unaligned_le32(key + 4); + l[1] = get_unaligned_le32(key + 8); + l[2] = get_unaligned_le32(key + 12); + ctx->nrounds = SPECK64_128_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck64_round(&l[i % 3], &k, i); + } + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_speck64_setkey); + +static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen); +} + +/* Algorithm definitions */ + +static struct crypto_alg speck_algs[] = { + { + .cra_name = "speck128", + .cra_driver_name = "speck128-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SPECK128_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct speck128_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = SPECK128_128_KEY_SIZE, + .cia_max_keysize = SPECK128_256_KEY_SIZE, + .cia_setkey = speck128_setkey, + .cia_encrypt = speck128_encrypt, + .cia_decrypt = speck128_decrypt + } + } + }, { + .cra_name = "speck64", + .cra_driver_name = "speck64-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SPECK64_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct speck64_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = SPECK64_96_KEY_SIZE, + .cia_max_keysize = SPECK64_128_KEY_SIZE, + .cia_setkey = speck64_setkey, + .cia_encrypt = speck64_encrypt, + .cia_decrypt = speck64_decrypt + } + } + } +}; + +static int __init speck_module_init(void) +{ + return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_module_exit(void) +{ + crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_module_init); +module_exit(speck_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (generic)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("speck128"); +MODULE_ALIAS_CRYPTO("speck128-generic"); +MODULE_ALIAS_CRYPTO("speck64"); +MODULE_ALIAS_CRYPTO("speck64-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0022a18d36ee..e339960dcac7 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -221,11 +221,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], } sg_init_table(sg, np + 1); - np--; + if (rem) + np--; for (k = 0; k < np; k++) sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); - sg_set_buf(&sg[k + 1], xbuf[k], rem); + if (rem) + sg_set_buf(&sg[k + 1], 
xbuf[k], rem); } static void test_aead_speed(const char *algo, int enc, unsigned int secs, @@ -340,7 +342,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, } sg_init_aead(sg, xbuf, - *b_size + (enc ? authsize : 0)); + *b_size + (enc ? 0 : authsize)); sg_init_aead(sgout, xoutbuf, *b_size + (enc ? authsize : 0)); @@ -348,7 +350,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, sg_set_buf(&sg[0], assoc, aad_size); sg_set_buf(&sgout[0], assoc, aad_size); - aead_request_set_crypt(req, sg, sgout, *b_size, iv); + aead_request_set_crypt(req, sg, sgout, + *b_size + (enc ? 0 : authsize), + iv); aead_request_set_ad(req, aad_size); if (secs) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 7125ba3880af..b5bb45a89ff8 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3033,6 +3033,24 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(serpent_dec_tv_template) } } + }, { + .alg = "ecb(speck128)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck128_enc_tv_template), + .dec = __VECS(speck128_dec_tv_template) + } + } + }, { + .alg = "ecb(speck64)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck64_enc_tv_template), + .dec = __VECS(speck64_dec_tv_template) + } + } }, { .alg = "ecb(tea)", .test = alg_test_skcipher, @@ -3584,6 +3602,24 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(serpent_xts_dec_tv_template) } } + }, { + .alg = "xts(speck128)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck128_xts_enc_tv_template), + .dec = __VECS(speck128_xts_dec_tv_template) + } + } + }, { + .alg = "xts(speck64)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck64_xts_enc_tv_template), + .dec = __VECS(speck64_xts_dec_tv_template) + } + } }, { .alg = "xts(twofish)", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d54971d2d1c8..b0de033a5299 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -548,7 +548,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { { .key = - "\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" + "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28" "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67" "\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d" @@ -597,8 +597,8 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a" "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda" "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46" - "\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30" - "\x02\x01\x30", + "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00" + "\x02\x01\x00", .key_len = 804, /* * m is SHA256 hash of following message: @@ -13706,6 +13706,1492 @@ static const struct cipher_testvec serpent_xts_dec_tv_template[] = { }, }; +/* + * Speck test vectors taken from the original paper: + * "The Simon and Speck Families of Lightweight Block Ciphers" + * https://eprint.iacr.org/2013/404.pdf + * + * Note that the paper does not make byte and word order clear. But it was + * confirmed with the authors that the intended orders are little endian byte + * order and (y, x) word order. 
Equivalently, the printed test vectors, when + * looking at only the bytes (ignoring the whitespace that divides them into + * words), are backwards: the left-most byte is actually the one with the + * highest memory address, while the right-most byte is actually the one with + * the lowest memory address. + */ + +static const struct cipher_testvec speck128_enc_tv_template[] = { + { /* Speck128/128 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .input = "\x20\x6d\x61\x64\x65\x20\x69\x74" + "\x20\x65\x71\x75\x69\x76\x61\x6c", + .ilen = 16, + .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" + "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .rlen = 16, + }, { /* Speck128/192 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43" + "\x68\x69\x65\x66\x20\x48\x61\x72", + .ilen = 16, + .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" + "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .rlen = 16, + }, { /* Speck128/256 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" + "\x49\x6e\x20\x74\x68\x6f\x73\x65", + .ilen = 16, + .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" + "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .rlen = 16, + }, +}; + +static const struct cipher_testvec speck128_dec_tv_template[] = { + { /* Speck128/128 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" + "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .ilen = 16, + .result = "\x20\x6d\x61\x64\x65\x20\x69\x74" + "\x20\x65\x71\x75\x69\x76\x61\x6c", + .rlen = 16, + }, { /* Speck128/192 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" + "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .ilen = 16, + .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43" + "\x68\x69\x65\x66\x20\x48\x61\x72", + .rlen = 16, + }, { /* Speck128/256 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" + "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .ilen = 16, + .result = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" + "\x49\x6e\x20\x74\x68\x6f\x73\x65", + .rlen = 16, + }, +}; + +/* + * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the + * result recomputed with Speck128 as the cipher + */ + +static const struct cipher_testvec speck128_xts_enc_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ilen = 32, + .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" + "\x3b\x99\x4a\x64\x74\x77\xac\xed" + "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" + "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + 
"\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" + "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" + "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" + "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55" + "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" + "\xda\x63\xb2\xf1\x82\xb0\x89\x59" + "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + 
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" + "\x53\xd0\xed\x2d\x30\xc1\x20\xef" + "\x70\x67\x5e\xff\x09\x70\xbb\xc1" + "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" + "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" + "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" + "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" + "\x19\xc5\x58\x84\x63\xb9\x12\x68" + "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" + "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" + "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" + "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" + "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" + "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" + "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" + "\xa7\x49\xa0\x0e\x09\x33\x85\x50" + "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" + "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" + "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" + "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" + "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" + "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" + "\xa5\x20\xac\x24\x1c\x73\x59\x73" + "\x58\x61\x3a\x87\x58\xb3\x20\x56" + "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" + "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" + "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" + "\x09\x35\x71\x50\x65\xac\x92\xe3" + "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" + "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" + "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" + "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" + "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" + "\x2a\x26\xcc\x49\x14\x6d\x55\x01" + "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" + "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" + "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" + "\x24\xa9\x60\xa4\x97\x85\x86\x2a" + "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" + "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" + "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" + "\x87\x30\xac\xd5\xea\x73\x49\x10" + "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" + "\x66\x02\x35\x3d\x60\x06\x36\x4f" + "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" + "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" + "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" + "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" + "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" + "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" + "\xbd\x48\x50\xcd\x75\x70\xc4\x62" + "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" + "\x51\x66\x02\x69\x04\x97\x36\xd4" + "\x75\xae\x0b\xa3\x42\xf8\xca\x79" + "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" + "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" + "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" + "\x03\xe7\x05\x39\xf5\x05\x26\xee" + "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" + "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" + "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" + "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" + "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" + "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 64, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + 
"\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" + "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" + "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" + "\x92\x99\xde\xd3\x76\xed\xcd\x63" + "\x64\x3a\x22\x57\xc1\x43\x49\xd4" + "\x79\x36\x31\x19\x62\xae\x10\x7e" + "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" + "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" + "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" + "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" + "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" + "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" + "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" + "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" + "\x85\x3c\x4f\x26\x64\x85\xbc\x68" + "\xb0\xe0\x86\x5e\x26\x41\xce\x11" + "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" + "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" + "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" + "\x14\x4d\xf0\x74\x37\xfd\x07\x25" + "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" + "\x1b\x83\x4d\x15\x83\xac\x57\xa0" + "\xac\xa5\xd0\x38\xef\x19\x56\x53" + "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" + "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" + "\xed\x22\x34\x1c\x5d\xed\x17\x06" + "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" + "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" + "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" + "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" + "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" + "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" + "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" + "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" + "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" + "\x19\xf5\x94\xf9\xd2\x00\x33\x37" + "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" + "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" + "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" + "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" + "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" + "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" + "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" + "\xad\xb7\x73\xf8\x78\x12\xc8\x59" + "\x17\x80\x4c\x57\x39\xf1\x6d\x80" + 
"\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" + "\xec\xce\xb7\xc8\x02\x8a\xed\x53" + "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" + "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" + "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" + "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" + "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" + "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" + "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" + "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" + "\xcc\x1f\x48\x49\x65\x47\x75\xe9" + "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" + "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" + "\xa1\x51\x89\x3b\xeb\x96\x42\xac" + "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" + "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" + "\x66\x8d\x13\xca\xe0\x59\x2a\x00" + "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" + "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static const struct cipher_testvec speck128_xts_dec_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" + "\x3b\x99\x4a\x64\x74\x77\xac\xed" + "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" + "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", + .ilen = 32, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" + "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" + "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" + "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55" + "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" + "\xda\x63\xb2\xf1\x82\xb0\x89\x59" + "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" + "\x53\xd0\xed\x2d\x30\xc1\x20\xef" + "\x70\x67\x5e\xff\x09\x70\xbb\xc1" + "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" + "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" + "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" + "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" + "\x19\xc5\x58\x84\x63\xb9\x12\x68" + "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" + "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" + "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" + "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" + "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" + "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" + "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" + "\xa7\x49\xa0\x0e\x09\x33\x85\x50" + 
"\xc3\x23\xca\x7a\xdd\x13\x45\x5f" + "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" + "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" + "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" + "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" + "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" + "\xa5\x20\xac\x24\x1c\x73\x59\x73" + "\x58\x61\x3a\x87\x58\xb3\x20\x56" + "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" + "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" + "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" + "\x09\x35\x71\x50\x65\xac\x92\xe3" + "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" + "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" + "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" + "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" + "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" + "\x2a\x26\xcc\x49\x14\x6d\x55\x01" + "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" + "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" + "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" + "\x24\xa9\x60\xa4\x97\x85\x86\x2a" + "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" + "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" + "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" + "\x87\x30\xac\xd5\xea\x73\x49\x10" + "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" + "\x66\x02\x35\x3d\x60\x06\x36\x4f" + "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" + "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" + "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" + "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" + "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" + "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" + "\xbd\x48\x50\xcd\x75\x70\xc4\x62" + "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" + "\x51\x66\x02\x69\x04\x97\x36\xd4" + "\x75\xae\x0b\xa3\x42\xf8\xca\x79" + "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" + "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" + "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" + "\x03\xe7\x05\x39\xf5\x05\x26\xee" + "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" + "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" + "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" + "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" + "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" + "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + 
"\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 64, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" + "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" + "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" + "\x92\x99\xde\xd3\x76\xed\xcd\x63" + "\x64\x3a\x22\x57\xc1\x43\x49\xd4" + "\x79\x36\x31\x19\x62\xae\x10\x7e" + "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" + "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" + "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" + "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" + "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" + "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" + "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" + "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" + "\x85\x3c\x4f\x26\x64\x85\xbc\x68" + "\xb0\xe0\x86\x5e\x26\x41\xce\x11" + "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" + "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" + "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" + "\x14\x4d\xf0\x74\x37\xfd\x07\x25" + "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" + "\x1b\x83\x4d\x15\x83\xac\x57\xa0" + "\xac\xa5\xd0\x38\xef\x19\x56\x53" + "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" + "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" + "\xed\x22\x34\x1c\x5d\xed\x17\x06" + "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" + "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" + "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" + "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" + "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" + "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" + "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" + "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" + "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" + "\x19\xf5\x94\xf9\xd2\x00\x33\x37" + "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" + "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" + "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" + "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" + "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" + "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" + "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" + "\xad\xb7\x73\xf8\x78\x12\xc8\x59" + "\x17\x80\x4c\x57\x39\xf1\x6d\x80" + "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" + "\xec\xce\xb7\xc8\x02\x8a\xed\x53" + "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" + "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" + "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" + "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" + "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" + "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" + "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" + "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" + "\xcc\x1f\x48\x49\x65\x47\x75\xe9" + "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" + "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" + "\xa1\x51\x89\x3b\xeb\x96\x42\xac" + "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" + "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" + "\x66\x8d\x13\xca\xe0\x59\x2a\x00" + "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" + "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + 
"\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static const struct cipher_testvec speck64_enc_tv_template[] = { + { /* Speck64/96 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13", + .klen = 12, + .input = "\x65\x61\x6e\x73\x20\x46\x61\x74", + .ilen = 8, + .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f", + .rlen = 8, + }, { /* Speck64/128 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13\x18\x19\x1a\x1b", + .klen = 16, + .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b", + .ilen = 8, + .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", + .rlen = 8, + }, +}; + +static const struct cipher_testvec speck64_dec_tv_template[] = { + { /* Speck64/96 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13", + .klen = 12, + .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f", + .ilen = 8, + .result = "\x65\x61\x6e\x73\x20\x46\x61\x74", + .rlen = 8, + }, { /* Speck64/128 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13\x18\x19\x1a\x1b", + .klen = 16, + .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", + .ilen = 8, + .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b", + .rlen = 8, + }, +}; + +/* + * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the result + * 
recomputed with Speck64 as the cipher, and key lengths adjusted + */ + +static const struct cipher_testvec speck64_xts_enc_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ilen = 32, + .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" + "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" + "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" + "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59" + "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" + "\xb3\x12\x69\x7e\x36\xeb\x52\xff" + "\x62\xdd\xba\x90\xb3\xe1\xee\x99", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" + "\x27\x36\xc0\xbf\x5d\xea\x36\x37" + "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" + "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + 
"\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" + "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" + "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" + "\x11\xc7\x39\x96\xd0\x95\xf4\x56" + "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" + "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" + "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" + "\xd5\xd2\x13\x86\x94\x34\xe9\x62" + "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" + "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" + "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" + "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" + "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" + "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" + "\x29\xe5\xbe\x54\x30\xcb\x46\x95" + "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" + "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" + "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" + "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" + "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" + "\x82\xc0\x37\x27\xfc\x91\xa7\x05" + "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" + "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" + "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" + "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" + "\x07\xff\xf3\x72\x74\x48\xb5\x40" + "\x50\xb5\xdd\x90\x43\x31\x18\x15" + "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" + "\x29\x93\x90\x8b\xda\x07\xf0\x35" + "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" + "\x94\x12\xbb\x33\x27\x1d\x3f\x23" + "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" + "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" + "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" + "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" + "\xcf\x37\xef\xc9\x11\x01\x5c\x45" + "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" + "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" + "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" + "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" + "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" + "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" + "\x10\xff\xaf\xef\xca\xdd\x4f\x80" + "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" + "\x33\xca\x00\x8b\x8b\x3f\xea\xec" + "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" + "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" + "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" + "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" + "\xc7\x1b\x53\x17\x02\xea\xd1\xad" + "\x13\x51\x73\xc0\xa0\xb2\x05\x32" + "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" + "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" + "\x59\xda\xee\x1a\x22\x18\xda\x0d" + "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" + "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" + "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" + "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" + "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" + "\x7d\x69\x8d\x00\x62\x77\x0d\x14" + "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" + "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" + "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" + "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", 
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" + "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" + "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" + "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" + "\x78\x16\xea\x80\xdb\x33\x75\x94" + "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" + "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" + "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" + "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" + "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" + "\xf5\xec\x32\x74\xa3\xb8\x03\x88" + "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" + "\x84\x5e\x46\xed\x20\x89\xb6\x44" + "\x8d\xd0\xed\x54\x47\x16\xbe\x95" + "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" + "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" + "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" + "\x8f\x1e\x10\x07\x57\x25\x98\x7b" + "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" + "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" + "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" + "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" + "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" + "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" + "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" + "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" + "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" + "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" + "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" + "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" + "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" + 
"\x7b\x7a\x4a\xad\x35\x65\x27\xca" + "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" + "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" + "\xfc\x42\xc8\x31\x59\xbe\x16\x60" + "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" + "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" + "\x52\x7f\x29\x51\x94\x20\x67\x3c" + "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" + "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" + "\x98\x52\x6d\xf3\xac\x3e\xf2\x18" + "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" + "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" + "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" + "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" + "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" + "\x65\x53\x0f\x41\x11\xbd\x98\x99" + "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" + "\x84\x98\xf9\x34\xed\x33\x2a\x1f" + "\x82\xed\xc1\x73\x98\xd3\x02\xdc" + "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" + "\x63\x51\x34\x9d\x96\x12\xae\xce" + "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" + "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" + "\x54\x27\x74\xbb\x10\x86\x57\x46" + "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" + "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" + "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" + "\x7b\x4f\x38\x55\x36\x71\x64\xc1" + "\xfc\x5c\x75\x52\x33\x02\x18\xf8" + "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" + "\x9b\x63\x76\x32\x2f\x19\x72\x10" + "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" + "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static const struct cipher_testvec speck64_xts_dec_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" + "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" + "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" + "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", + .ilen = 32, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59" + "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" + "\xb3\x12\x69\x7e\x36\xeb\x52\xff" + "\x62\xdd\xba\x90\xb3\xe1\xee\x99", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" + "\x27\x36\xc0\xbf\x5d\xea\x36\x37" + "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" + "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" + "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" + "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" + "\x11\xc7\x39\x96\xd0\x95\xf4\x56" + "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" + "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" + 
"\xfe\x3d\xc6\xfb\x01\x23\x51\x43" + "\xd5\xd2\x13\x86\x94\x34\xe9\x62" + "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" + "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" + "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" + "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" + "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" + "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" + "\x29\xe5\xbe\x54\x30\xcb\x46\x95" + "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" + "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" + "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" + "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" + "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" + "\x82\xc0\x37\x27\xfc\x91\xa7\x05" + "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" + "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" + "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" + "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" + "\x07\xff\xf3\x72\x74\x48\xb5\x40" + "\x50\xb5\xdd\x90\x43\x31\x18\x15" + "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" + "\x29\x93\x90\x8b\xda\x07\xf0\x35" + "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" + "\x94\x12\xbb\x33\x27\x1d\x3f\x23" + "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" + "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" + "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" + "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" + "\xcf\x37\xef\xc9\x11\x01\x5c\x45" + "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" + "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" + "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" + "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" + "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" + "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" + "\x10\xff\xaf\xef\xca\xdd\x4f\x80" + "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" + "\x33\xca\x00\x8b\x8b\x3f\xea\xec" + "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" + "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" + "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" + "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" + "\xc7\x1b\x53\x17\x02\xea\xd1\xad" + "\x13\x51\x73\xc0\xa0\xb2\x05\x32" + "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" + "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" + "\x59\xda\xee\x1a\x22\x18\xda\x0d" + "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" + "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" + "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" + "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" + "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" + "\x7d\x69\x8d\x00\x62\x77\x0d\x14" + "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" + "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" + "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" + "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + 
"\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" + "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" + "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" + "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" + "\x78\x16\xea\x80\xdb\x33\x75\x94" + "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" + "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" + "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" + "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" + "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" + "\xf5\xec\x32\x74\xa3\xb8\x03\x88" + "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" + "\x84\x5e\x46\xed\x20\x89\xb6\x44" + "\x8d\xd0\xed\x54\x47\x16\xbe\x95" + "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" + "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" + "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" + "\x8f\x1e\x10\x07\x57\x25\x98\x7b" + "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" + "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" + "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" + "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" + "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" + "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" + "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" + "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" + "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" + "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" + "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" + "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" + "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" + "\x7b\x7a\x4a\xad\x35\x65\x27\xca" + "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" + "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" + "\xfc\x42\xc8\x31\x59\xbe\x16\x60" + "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" + "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" + "\x52\x7f\x29\x51\x94\x20\x67\x3c" + "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" + "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" + "\x98\x52\x6d\xf3\xac\x3e\xf2\x18" + "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" + "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" + "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" + "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" + "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" + "\x65\x53\x0f\x41\x11\xbd\x98\x99" + "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" + "\x84\x98\xf9\x34\xed\x33\x2a\x1f" + "\x82\xed\xc1\x73\x98\xd3\x02\xdc" + "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" + "\x63\x51\x34\x9d\x96\x12\xae\xce" + "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" + "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" + "\x54\x27\x74\xbb\x10\x86\x57\x46" + "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" + "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" + "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" + "\x7b\x4f\x38\x55\x36\x71\x64\xc1" + "\xfc\x5c\x75\x52\x33\x02\x18\xf8" + "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" + 
"\x9b\x63\x76\x32\x2f\x19\x72\x10" + "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" + "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + /* Cast6 test vectors from RFC 2612 */ static const struct cipher_testvec cast6_enc_tv_template[] = { { diff --git a/crypto/xor.c b/crypto/xor.c index 263af9fb45ea..bce9fe7af40a 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -122,12 +122,7 @@ calibrate_xor_blocks(void) goto out; } - /* - * Note: Since the memory is not actually used for _anything_ but to - * test the XOR speed, we don't really want kmemcheck to warn about - * reading uninitialized bytes here. - */ - b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2); + b1 = (void *) __get_free_pages(GFP_KERNEL, 2); if (!b1) { printk(KERN_WARNING "xor: Yikes! 
No memory available.\n"); return -ENOMEM; diff --git a/drivers/Kconfig b/drivers/Kconfig index 1d7af3c2ff27..cb03e487342d 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -77,6 +77,8 @@ source "drivers/hwmon/Kconfig" source "drivers/thermal/Kconfig" +source "drivers/trusty/Kconfig" + source "drivers/watchdog/Kconfig" source "drivers/ssb/Kconfig" @@ -209,4 +211,13 @@ source "drivers/tee/Kconfig" source "drivers/mux/Kconfig" +source "drivers/sdw/Kconfig" + +source "drivers/opp/Kconfig" + +source "drivers/vbs/Kconfig" + +source "drivers/acrn/Kconfig" + +source "drivers/vhm/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index d242d3514d30..30166d10a899 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -105,6 +105,7 @@ obj-$(CONFIG_TC) += tc/ obj-$(CONFIG_UWB) += uwb/ obj-$(CONFIG_USB_PHY) += usb/ obj-$(CONFIG_USB) += usb/ +obj-$(CONFIG_USB_SUPPORT) += usb/ obj-$(CONFIG_PCI) += usb/ obj-$(CONFIG_USB_GADGET) += usb/ obj-$(CONFIG_OF) += usb/ @@ -119,6 +120,7 @@ obj-$(CONFIG_W1) += w1/ obj-y += power/ obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_THERMAL) += thermal/ +obj-$(CONFIG_TRUSTY) += trusty/ obj-$(CONFIG_WATCHDOG) += watchdog/ obj-$(CONFIG_MD) += md/ obj-$(CONFIG_BT) += bluetooth/ @@ -126,6 +128,7 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/ obj-$(CONFIG_ISDN) += isdn/ obj-$(CONFIG_EDAC) += edac/ obj-$(CONFIG_EISA) += eisa/ +obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ obj-y += mmc/ @@ -146,6 +149,7 @@ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ obj-$(CONFIG_BCMA) += bcma/ obj-$(CONFIG_VHOST_RING) += vhost/ +obj-$(CONFIG_VBS) += vbs/ obj-$(CONFIG_VHOST) += vhost/ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_STAGING) += staging/ @@ -182,3 +186,6 @@ obj-$(CONFIG_FPGA) += fpga/ obj-$(CONFIG_FSI) += fsi/ obj-$(CONFIG_TEE) += tee/ obj-$(CONFIG_MULTIPLEXER) += mux/ +obj-$(CONFIG_SDW) += sdw/ +obj-$(CONFIG_ACRN_VHM) += vhm/ +obj-$(CONFIG_ACRN) += acrn/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5b1938f4b626..c9781175e59e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -536,4 +536,20 @@ if ARM64 source "drivers/acpi/arm64/Kconfig" endif +config TPS68470_PMIC_OPREGION + bool "ACPI operation region support for TPS68470 PMIC" + depends on MFD_TPS68470 + help + This config adds ACPI operation region support for TI TPS68470 PMIC. + TPS68470 device is an advanced power management unit that powers + a Compact Camera Module (CCM), generates clocks for image sensors, + drives a dual LED for flash and incorporates two LED drivers for + general purpose indicators. + This driver enables ACPI operation region support control voltage + regulators and clocks. + + This option is a bool as it provides an ACPI operation + region, which must be available before any of the devices + using this, are probed. 
+ endif # ACPI diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index cd1abc9bc325..709c1120f315 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -108,6 +108,8 @@ obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o +obj-$(CONFIG_TPS68470_PMIC_OPREGION) += pmic/tps68470_pmic.o + video-objs += acpi_video.o video_detect.o obj-y += dptf/ diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 032ae44710e5..97b753dd2e6e 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -693,7 +693,7 @@ static int acpi_lpss_activate(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - ret = acpi_dev_runtime_resume(dev); + ret = acpi_dev_resume(dev); if (ret) return ret; @@ -737,7 +737,7 @@ static int acpi_lpss_resume_early(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - ret = acpi_dev_resume_early(dev); + ret = acpi_dev_resume(dev); if (ret) return ret; @@ -872,7 +872,7 @@ static int acpi_lpss_runtime_resume(struct device *dev) if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_exit_d3_state(); - ret = acpi_dev_runtime_resume(dev); + ret = acpi_dev_resume(dev); if (ret) return ret; @@ -894,7 +894,7 @@ static struct dev_pm_domain acpi_lpss_pm_domain = { #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP .prepare = acpi_subsys_prepare, - .complete = pm_complete_with_resume_check, + .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, .suspend_late = acpi_lpss_suspend_late, .resume_early = acpi_lpss_resume_early, diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 0972ec0e2eb8..f53ccc680238 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events, static bool device_id_scheme = false; module_param(device_id_scheme, bool, 0444); -static bool only_lcd = false; -module_param(only_lcd, bool, 0444); +static int only_lcd = -1; +module_param(only_lcd, int, 0444); static int register_count; static DEFINE_MUTEX(register_count_mutex); @@ -2136,6 +2136,16 @@ int acpi_video_register(void) goto leave; } + /* + * We're seeing a lot of bogus backlight interfaces on newer machines + * without a LCD such as desktops, servers and HDMI sticks. Checking + * the lcd flag fixes this, so enable this on any machines which are + * win8 ready (where we also prefer the native backlight driver, so + * normally the acpi_video code should not register there anyways). 
+ */ + if (only_lcd == -1) + only_lcd = acpi_osi_is_win8(); + dmi_check_system(video_dmi_table); ret = acpi_bus_register_driver(&acpi_video_bus); diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 11b113f8e367..ebb626ffb5fa 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void) res.start = gas->address; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { res.flags = IORESOURCE_MEM; - res.end = res.start + ALIGN(gas->access_width, 4); + res.end = res.start + ALIGN(gas->access_width, 4) - 1; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { res.flags = IORESOURCE_IO; - res.end = res.start + gas->access_width; + res.end = res.start + gas->access_width - 1; } else { pr_warn("Unsupported address space: %u\n", gas->space_id); diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index e05232da0588..71f6f2624deb 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -178,6 +178,7 @@ acpi-y += \ utresrc.o \ utstate.o \ utstring.o \ + utstrsuppt.o \ utstrtoul64.o \ utxface.o \ utxfinit.o \ diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index fd4f3cacb356..cd722d8edacb 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -66,9 +66,9 @@ acpi_status acpi_hw_validate_register(struct acpi_generic_address *reg, u8 max_bit_width, u64 *address); -acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg); +acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg); -acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg); +acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg); struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id); diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 29a863c85318..29555c8789a3 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -101,7 +101,8 @@ typedef const struct acpi_exdump_info { */ acpi_status acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, - union acpi_operand_object **result_desc, u32 flags); + union acpi_operand_object **result_desc, + u32 implicit_conversion); acpi_status acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, @@ -424,9 +425,6 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, struct acpi_walk_state *walk_state, u8 implicit_conversion); -#define ACPI_IMPLICIT_CONVERSION TRUE -#define ACPI_NO_IMPLICIT_CONVERSION FALSE - /* * exstoren - resolve/store object */ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 745134ade35f..83b75e9db7ef 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -141,6 +141,11 @@ extern const char *acpi_gbl_ptyp_decode[]; #define ACPI_MSG_SUFFIX \ acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number) +/* Flags to indicate implicit or explicit string-to-integer conversion */ + +#define ACPI_IMPLICIT_CONVERSION TRUE +#define ACPI_NO_IMPLICIT_CONVERSION FALSE + /* Types for Resource descriptor entries */ #define ACPI_INVALID_RESOURCE 0 @@ -197,15 +202,31 @@ void acpi_ut_strlwr(char *src_string); int acpi_ut_stricmp(char *string1, char *string2); -acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *ret_integer); +/* + * utstrsuppt - string-to-integer conversion support functions + */ +acpi_status acpi_ut_convert_octal_string(char *string, u64 
*return_value); + +acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr); + +acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr); + +char acpi_ut_remove_whitespace(char **string); + +char acpi_ut_remove_leading_zeros(char **string); + +u8 acpi_ut_detect_hex_prefix(char **string); + +u8 acpi_ut_detect_octal_prefix(char **string); /* - * Values for Flags above - * Note: LIMIT values correspond to acpi_gbl_integer_byte_width values (4/8) + * utstrtoul64 - string-to-integer conversion functions */ -#define ACPI_STRTOUL_32BIT 0x04 /* 4 bytes */ -#define ACPI_STRTOUL_64BIT 0x08 /* 8 bytes */ -#define ACPI_STRTOUL_BASE16 0x10 /* Default: Base10/16 */ +acpi_status acpi_ut_strtoul64(char *string, u64 *ret_integer); + +u64 acpi_ut_explicit_strtoul64(char *string); + +u64 acpi_ut_implicit_strtoul64(char *string); /* * utglobal - Global data structures and procedures diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 857dbc43a9b1..32d546f0db2f 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -277,10 +277,7 @@ acpi_db_convert_to_object(acpi_object_type type, default: object->type = ACPI_TYPE_INTEGER; - status = acpi_ut_strtoul64(string, - (acpi_gbl_integer_byte_width | - ACPI_STRTOUL_BASE16), - &object->integer.value); + status = acpi_ut_strtoul64(string, &object->integer.value); break; } diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 20d7744b06ae..22f45d090733 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -134,7 +134,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, * object. Implicitly convert the argument if necessary. */ status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); if (ACPI_FAILURE(status)) { goto cleanup; } diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 229382035550..263d8fc4a9e2 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -390,8 +390,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) struct acpi_gpe_handler_info *gpe_handler_info; u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u8 enabled_status_byte; - u32 status_reg; - u32 enable_reg; + u64 status_reg; + u64 enable_reg; acpi_cpu_flags flags; u32 i; u32 j; @@ -472,7 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) gpe_register_info->base_gpe_number, gpe_register_info->base_gpe_number + (ACPI_GPE_REGISTER_WIDTH - 1), - status_reg, enable_reg, + (u32)status_reg, (u32)enable_reg, gpe_register_info->enable_for_run, gpe_register_info->enable_for_wake)); diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index 76bfb7dcae2f..59b8de2f07d3 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -156,7 +156,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, status = acpi_ex_convert_to_integer(local_operand1, &temp_operand1, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_BUFFER: diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index f71028e334ee..23ebadb06a95 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -57,10 +57,10 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length); * * FUNCTION: acpi_ex_convert_to_integer * - * PARAMETERS: obj_desc - Object to be converted. 
Must be an - * Integer, Buffer, or String - * result_desc - Where the new Integer object is returned - * flags - Used for string conversion + * PARAMETERS: obj_desc - Object to be converted. Must be an + * Integer, Buffer, or String + * result_desc - Where the new Integer object is returned + * implicit_conversion - Used for string conversion * * RETURN: Status * @@ -70,14 +70,14 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length); acpi_status acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, - union acpi_operand_object **result_desc, u32 flags) + union acpi_operand_object **result_desc, + u32 implicit_conversion) { union acpi_operand_object *return_desc; u8 *pointer; u64 result; u32 i; u32 count; - acpi_status status; ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc); @@ -123,12 +123,18 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, * hexadecimal as per the ACPI specification. The only exception (as * of ACPI 3.0) is that the to_integer() operator allows both decimal * and hexadecimal strings (hex prefixed with "0x"). + * + * Explicit conversion is used only by to_integer. + * All other string-to-integer conversions are implicit conversions. */ - status = acpi_ut_strtoul64(ACPI_CAST_PTR(char, pointer), - (acpi_gbl_integer_byte_width | - flags), &result); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + if (implicit_conversion) { + result = + acpi_ut_implicit_strtoul64(ACPI_CAST_PTR + (char, pointer)); + } else { + result = + acpi_ut_explicit_strtoul64(ACPI_CAST_PTR + (char, pointer)); } break; @@ -631,7 +637,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type, */ status = acpi_ex_convert_to_integer(source_desc, result_desc, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_STRING: diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 1e7649ce0a7b..dbad3ebd7df5 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -330,7 +330,7 @@ acpi_ex_do_logical_op(u16 opcode, case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(operand1, &local_operand1, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_STRING: diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index c4852429e2ff..1c7c9962b0de 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -415,7 +415,7 @@ acpi_ex_resolve_operands(u16 opcode, * Known as "Implicit Source Operand Conversion" */ status = acpi_ex_convert_to_integer(obj_desc, stack_ptr, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); if (ACPI_FAILURE(status)) { if (status == AE_TYPE) { ACPI_ERROR((AE_INFO, diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 5eb11b30a79e..09b6822aa5cc 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -99,7 +99,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status = AE_OK; - u32 enable_mask; + u64 enable_mask; u32 register_bit; ACPI_FUNCTION_ENTRY(); @@ -214,7 +214,7 @@ acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, acpi_event_status *event_status) { - u32 in_byte; + u64 in_byte; u32 register_bit; struct acpi_gpe_register_info *gpe_register_info; acpi_event_status local_event_status = 0; diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index acb417b58bbb..aa6e00081915 100644 --- 
a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -220,16 +220,15 @@ acpi_hw_validate_register(struct acpi_generic_address *reg, * * RETURN: Status * - * DESCRIPTION: Read from either memory or IO space. This is a 32-bit max - * version of acpi_read, used internally since the overhead of - * 64-bit values is not needed. + * DESCRIPTION: Read from either memory or IO space. This is a 64-bit max + * version of acpi_read. * * LIMITATIONS: * space_ID must be system_memory or system_IO. * ******************************************************************************/ -acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) +acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg) { u64 address; u8 access_width; @@ -244,17 +243,17 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) /* Validate contents of the GAS register */ - status = acpi_hw_validate_register(reg, 32, &address); + status = acpi_hw_validate_register(reg, 64, &address); if (ACPI_FAILURE(status)) { return (status); } /* - * Initialize entire 32-bit return value to zero, convert access_width + * Initialize entire 64-bit return value to zero, convert access_width * into number of bits based */ *value = 0; - access_width = acpi_hw_get_access_bit_width(address, reg, 32); + access_width = acpi_hw_get_access_bit_width(address, reg, 64); bit_width = reg->bit_offset + reg->bit_width; bit_offset = reg->bit_offset; @@ -265,7 +264,7 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) index = 0; while (bit_width) { if (bit_offset >= access_width) { - value32 = 0; + value64 = 0; bit_offset -= access_width; } else { if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { @@ -276,7 +275,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) ACPI_DIV_8 (access_width), &value64, access_width); - value32 = (u32)value64; } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ status = acpi_hw_read_port((acpi_io_address) @@ -286,15 +284,16 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) (access_width), &value32, access_width); + value64 = (u64)value32; } } /* * Use offset style bit writes because "Index * AccessWidth" is - * ensured to be less than 32-bits by acpi_hw_validate_register(). + * ensured to be less than 64-bits by acpi_hw_validate_register(). */ ACPI_SET_BITS(value, index * access_width, - ACPI_MASK_BITS_ABOVE_32(access_width), value32); + ACPI_MASK_BITS_ABOVE_64(access_width), value64); bit_width -= bit_width > access_width ? access_width : bit_width; @@ -302,8 +301,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) } ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", - *value, access_width, ACPI_FORMAT_UINT64(address), + "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n", + ACPI_FORMAT_UINT64(*value), access_width, + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); @@ -318,20 +318,18 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) * * RETURN: Status * - * DESCRIPTION: Write to either memory or IO space. This is a 32-bit max - * version of acpi_write, used internally since the overhead of - * 64-bit values is not needed. + * DESCRIPTION: Write to either memory or IO space. This is a 64-bit max + * version of acpi_write. 
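[Editor's note] The reworked acpi_hw_read loop above assembles the returned register value out of access_width-sized chunks. The following standalone sketch is added here purely for illustration and is not part of the patch: it ignores bit_offset, reads from a plain buffer instead of dispatching to port or memory space, assumes a little-endian host, and uses made-up names.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/*
 * Simplified model of the chunk-assembly loop in acpi_hw_read above:
 * build a bit_width-bit value from access_width-bit pieces.
 */
static uint64_t read_wide(const uint8_t *mem, uint8_t access_width,
			  uint8_t bit_width)
{
	uint64_t value = 0;
	uint8_t index = 0;

	while (bit_width) {
		uint64_t chunk = 0;

		memcpy(&chunk, mem + index * (access_width / 8),
		       access_width / 8);
		/* Offset-style insertion, like ACPI_SET_BITS in the patch */
		value |= chunk << (index * access_width);

		bit_width -= (bit_width > access_width) ? access_width
							: bit_width;
		index++;
	}
	return value;
}

int main(void)
{
	uint8_t pm_timer[4] = { 0x78, 0x56, 0x34, 0x12 };

	/* Yields 0x12345678 whether read as one 32-bit or four 8-bit chunks */
	printf("%#llx %#llx\n",
	       (unsigned long long)read_wide(pm_timer, 32, 32),
	       (unsigned long long)read_wide(pm_timer, 8, 32));
	return 0;
}
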
* ******************************************************************************/ -acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) +acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg) { u64 address; u8 access_width; u32 bit_width; u8 bit_offset; u64 value64; - u32 value32; u8 index; acpi_status status; @@ -339,14 +337,14 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) /* Validate contents of the GAS register */ - status = acpi_hw_validate_register(reg, 32, &address); + status = acpi_hw_validate_register(reg, 64, &address); if (ACPI_FAILURE(status)) { return (status); } /* Convert access_width into number of bits based */ - access_width = acpi_hw_get_access_bit_width(address, reg, 32); + access_width = acpi_hw_get_access_bit_width(address, reg, 64); bit_width = reg->bit_offset + reg->bit_width; bit_offset = reg->bit_offset; @@ -358,16 +356,15 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) while (bit_width) { /* * Use offset style bit reads because "Index * AccessWidth" is - * ensured to be less than 32-bits by acpi_hw_validate_register(). + * ensured to be less than 64-bits by acpi_hw_validate_register(). */ - value32 = ACPI_GET_BITS(&value, index * access_width, - ACPI_MASK_BITS_ABOVE_32(access_width)); + value64 = ACPI_GET_BITS(&value, index * access_width, + ACPI_MASK_BITS_ABOVE_64(access_width)); if (bit_offset >= access_width) { bit_offset -= access_width; } else { if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - value64 = (u64)value32; status = acpi_os_write_memory((acpi_physical_address) address + @@ -382,7 +379,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) index * ACPI_DIV_8 (access_width), - value32, + (u32)value64, access_width); } } @@ -397,8 +394,9 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) } ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", - value, access_width, ACPI_FORMAT_UINT64(address), + "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n", + ACPI_FORMAT_UINT64(value), access_width, + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); @@ -526,6 +524,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control) acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) { u32 value = 0; + u64 value64; acpi_status status; ACPI_FUNCTION_TRACE(hw_register_read); @@ -564,12 +563,14 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ status = - acpi_hw_read(&value, &acpi_gbl_FADT.xpm2_control_block); + acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block); + value = (u32)value64; break; case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - status = acpi_hw_read(&value, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block); + value = (u32)value64; break; case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ @@ -586,7 +587,7 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) } if (ACPI_SUCCESS(status)) { - *return_value = value; + *return_value = (u32)value; } return_ACPI_STATUS(status); @@ -622,6 +623,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) { acpi_status status; u32 read_value; + u64 read_value64; ACPI_FUNCTION_TRACE(hw_register_write); @@ -685,11 +687,12 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) * as per the ACPI spec. 
*/ status = - acpi_hw_read(&read_value, + acpi_hw_read(&read_value64, &acpi_gbl_FADT.xpm2_control_block); if (ACPI_FAILURE(status)) { goto exit; } + read_value = (u32)read_value64; /* Insert the bits to be preserved */ @@ -745,22 +748,25 @@ acpi_hw_read_multiple(u32 *value, { u32 value_a = 0; u32 value_b = 0; + u64 value64; acpi_status status; /* The first register is always required */ - status = acpi_hw_read(&value_a, register_a); + status = acpi_hw_read(&value64, register_a); if (ACPI_FAILURE(status)) { return (status); } + value_a = (u32)value64; /* Second register is optional */ if (register_b->address) { - status = acpi_hw_read(&value_b, register_b); + status = acpi_hw_read(&value64, register_b); if (ACPI_FAILURE(status)) { return (status); } + value_b = (u32)value64; } /* diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b3c5d8c754bb..a2f4e25d45b1 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -94,6 +94,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution) acpi_status acpi_get_timer(u32 * ticks) { acpi_status status; + u64 timer_value; ACPI_FUNCTION_TRACE(acpi_get_timer); @@ -107,7 +108,14 @@ acpi_status acpi_get_timer(u32 * ticks) return_ACPI_STATUS(AE_SUPPORT); } - status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_read(&timer_value, &acpi_gbl_FADT.xpm_timer_block); + if (ACPI_SUCCESS(status)) { + + /* ACPI PM Timer is defined to be 32 bits (PM_TMR_LEN) */ + + *ticks = (u32)timer_value; + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 34684ae89981..b3c6e439933c 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -125,76 +125,12 @@ ACPI_EXPORT_SYMBOL(acpi_reset) ******************************************************************************/ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) { - u32 value_lo; - u32 value_hi; - u32 width; - u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_read); - if (!return_value) { - return (AE_BAD_PARAMETER); - } - - /* Validate contents of the GAS register. Allow 64-bit transfers */ - - status = acpi_hw_validate_register(reg, 64, &address); - if (ACPI_FAILURE(status)) { - return (status); - } - - /* - * Two address spaces supported: Memory or I/O. 
PCI_Config is - * not supported here because the GAS structure is insufficient - */ - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - status = acpi_os_read_memory((acpi_physical_address) - address, return_value, - reg->bit_width); - if (ACPI_FAILURE(status)) { - return (status); - } - } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - - value_lo = 0; - value_hi = 0; - - width = reg->bit_width; - if (width == 64) { - width = 32; /* Break into two 32-bit transfers */ - } - - status = acpi_hw_read_port((acpi_io_address) - address, &value_lo, width); - if (ACPI_FAILURE(status)) { - return (status); - } - - if (reg->bit_width == 64) { - - /* Read the top 32 bits */ - - status = acpi_hw_read_port((acpi_io_address) - (address + 4), &value_hi, - 32); - if (ACPI_FAILURE(status)) { - return (status); - } - } - - /* Set the return value only if status is AE_OK */ - - *return_value = (value_lo | ((u64)value_hi << 32)); - } - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n", - ACPI_FORMAT_UINT64(*return_value), reg->bit_width, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(reg->space_id))); - - return (AE_OK); + status = acpi_hw_read(return_value, reg); + return (status); } ACPI_EXPORT_SYMBOL(acpi_read) @@ -213,59 +149,11 @@ ACPI_EXPORT_SYMBOL(acpi_read) ******************************************************************************/ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg) { - u32 width; - u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_write); - /* Validate contents of the GAS register. Allow 64-bit transfers */ - - status = acpi_hw_validate_register(reg, 64, &address); - if (ACPI_FAILURE(status)) { - return (status); - } - - /* - * Two address spaces supported: Memory or IO. 
PCI_Config is - * not supported here because the GAS structure is insufficient - */ - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - status = acpi_os_write_memory((acpi_physical_address) - address, value, reg->bit_width); - if (ACPI_FAILURE(status)) { - return (status); - } - } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - - width = reg->bit_width; - if (width == 64) { - width = 32; /* Break into two 32-bit transfers */ - } - - status = acpi_hw_write_port((acpi_io_address) - address, ACPI_LODWORD(value), - width); - if (ACPI_FAILURE(status)) { - return (status); - } - - if (reg->bit_width == 64) { - status = acpi_hw_write_port((acpi_io_address) - (address + 4), - ACPI_HIDWORD(value), 32); - if (ACPI_FAILURE(status)) { - return (status); - } - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n", - ACPI_FORMAT_UINT64(value), reg->bit_width, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(reg->space_id))); - + status = acpi_hw_write(value, reg); return (status); } diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index e4a7da8a11f0..539d775bbc92 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -78,8 +78,8 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object, /* String-to-Integer conversion */ - status = acpi_ut_strtoul64(original_object->string.pointer, - acpi_gbl_integer_byte_width, &value); + status = + acpi_ut_strtoul64(original_object->string.pointer, &value); if (ACPI_FAILURE(status)) { return (status); } diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 26ad596c973e..5ecb8d2e6834 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -173,10 +173,13 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) ACPI_FUNCTION_TRACE(acpi_reallocate_root_table); /* - * Only reallocate the root table if the host provided a static buffer - * for the table array in the call to acpi_initialize_tables. + * If there are tables unverified, it is required to reallocate the + * root table list to clean up invalid table entries. Otherwise only + * reallocate the root table list if the host provided a static buffer + * for the table array in the call to acpi_initialize_tables(). */ - if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { + if ((acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) && + acpi_gbl_enable_table_validation) { return_ACPI_STATUS(AE_SUPPORT); } diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c new file mode 100644 index 000000000000..965fb5cec94f --- /dev/null +++ b/drivers/acpi/acpica/utstrsuppt.c @@ -0,0 +1,438 @@ +/******************************************************************************* + * + * Module Name: utstrsuppt - Support functions for string-to-integer conversion + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2017, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include "accommon.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utstrsuppt") + +/* Local prototypes */ +static acpi_status +acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit); + +static acpi_status +acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product); + +static acpi_status +acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum); + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_octal_string + * + * PARAMETERS: string - Null terminated input string + * return_value_ptr - Where the converted value is returned + * + * RETURN: Status and 64-bit converted integer + * + * DESCRIPTION: Performs a base 8 conversion of the input string to an + * integer value, either 32 or 64 bits. 
+ * + * NOTE: Maximum 64-bit unsigned octal value is 01777777777777777777777 + * Maximum 32-bit unsigned octal value is 037777777777 + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr) +{ + u64 accumulated_value = 0; + acpi_status status = AE_OK; + + /* Convert each ASCII byte in the input string */ + + while (*string) { + + /* Character must be ASCII 0-7, otherwise terminate with no error */ + + if (!(ACPI_IS_OCTAL_DIGIT(*string))) { + break; + } + + /* Convert and insert this octal digit into the accumulator */ + + status = acpi_ut_insert_digit(&accumulated_value, 8, *string); + if (ACPI_FAILURE(status)) { + status = AE_OCTAL_OVERFLOW; + break; + } + + string++; + } + + /* Always return the value that has been accumulated */ + + *return_value_ptr = accumulated_value; + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_decimal_string + * + * PARAMETERS: string - Null terminated input string + * return_value_ptr - Where the converted value is returned + * + * RETURN: Status and 64-bit converted integer + * + * DESCRIPTION: Performs a base 10 conversion of the input string to an + * integer value, either 32 or 64 bits. + * + * NOTE: Maximum 64-bit unsigned decimal value is 18446744073709551615 + * Maximum 32-bit unsigned decimal value is 4294967295 + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr) +{ + u64 accumulated_value = 0; + acpi_status status = AE_OK; + + /* Convert each ASCII byte in the input string */ + + while (*string) { + + /* Character must be ASCII 0-9, otherwise terminate with no error */ + + if (!isdigit(*string)) { + break; + } + + /* Convert and insert this decimal digit into the accumulator */ + + status = acpi_ut_insert_digit(&accumulated_value, 10, *string); + if (ACPI_FAILURE(status)) { + status = AE_DECIMAL_OVERFLOW; + break; + } + + string++; + } + + /* Always return the value that has been accumulated */ + + *return_value_ptr = accumulated_value; + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_hex_string + * + * PARAMETERS: string - Null terminated input string + * return_value_ptr - Where the converted value is returned + * + * RETURN: Status and 64-bit converted integer + * + * DESCRIPTION: Performs a base 16 conversion of the input string to an + * integer value, either 32 or 64 bits. 
+ * + * NOTE: Maximum 64-bit unsigned hex value is 0xFFFFFFFFFFFFFFFF + * Maximum 32-bit unsigned hex value is 0xFFFFFFFF + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr) +{ + u64 accumulated_value = 0; + acpi_status status = AE_OK; + + /* Convert each ASCII byte in the input string */ + + while (*string) { + + /* Must be ASCII A-F, a-f, or 0-9, otherwise terminate with no error */ + + if (!isxdigit(*string)) { + break; + } + + /* Convert and insert this hex digit into the accumulator */ + + status = acpi_ut_insert_digit(&accumulated_value, 16, *string); + if (ACPI_FAILURE(status)) { + status = AE_HEX_OVERFLOW; + break; + } + + string++; + } + + /* Always return the value that has been accumulated */ + + *return_value_ptr = accumulated_value; + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_leading_zeros + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: Next character after any leading zeros. This character may be + * used by the caller to detect end-of-string. + * + * DESCRIPTION: Remove any leading zeros in the input string. Return the + * next character after the final ASCII zero to enable the caller + * to check for the end of the string (NULL terminator). + * + ******************************************************************************/ + +char acpi_ut_remove_leading_zeros(char **string) +{ + + while (**string == ACPI_ASCII_ZERO) { + *string += 1; + } + + return (**string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_whitespace + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: Next character after any whitespace. This character may be + * used by the caller to detect end-of-string. + * + * DESCRIPTION: Remove any leading whitespace in the input string. Return the + * next character after the final ASCII zero to enable the caller + * to check for the end of the string (NULL terminator). 
+ * + ******************************************************************************/ + +char acpi_ut_remove_whitespace(char **string) +{ + + while (isspace((u8)**string)) { + *string += 1; + } + + return (**string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_detect_hex_prefix + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: TRUE if a "0x" prefix was found at the start of the string + * + * DESCRIPTION: Detect and remove a hex "0x" prefix + * + ******************************************************************************/ + +u8 acpi_ut_detect_hex_prefix(char **string) +{ + + if ((**string == ACPI_ASCII_ZERO) && + (tolower((int)*(*string + 1)) == 'x')) { + *string += 2; /* Go past the leading 0x */ + return (TRUE); + } + + return (FALSE); /* Not a hex string */ +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_detect_octal_prefix + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: True if an octal "0" prefix was found at the start of the + * string + * + * DESCRIPTION: Detect and remove an octal prefix (zero) + * + ******************************************************************************/ + +u8 acpi_ut_detect_octal_prefix(char **string) +{ + + if (**string == ACPI_ASCII_ZERO) { + *string += 1; /* Go past the leading 0 */ + return (TRUE); + } + + return (FALSE); /* Not an octal string */ +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_insert_digit + * + * PARAMETERS: accumulated_value - Current value of the integer value + * accumulator. The new value is + * returned here. + * base - Radix, either 8/10/16 + * ascii_digit - ASCII single digit to be inserted + * + * RETURN: Status and result of the convert/insert operation. The only + * possible returned exception code is numeric overflow of + * either the multiply or add conversion operations. + * + * DESCRIPTION: Generic conversion and insertion function for all bases: + * + * 1) Multiply the current accumulated/converted value by the + * base in order to make room for the new character. + * + * 2) Convert the new character to binary and add it to the + * current accumulated value. + * + * Note: The only possible exception indicates an integer + * overflow (AE_NUMERIC_OVERFLOW) + * + ******************************************************************************/ + +static acpi_status +acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit) +{ + acpi_status status; + u64 product; + + /* Make room in the accumulated value for the incoming digit */ + + status = acpi_ut_strtoul_multiply64(*accumulated_value, base, &product); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Add in the new digit, and store the sum to the accumulated value */ + + status = + acpi_ut_strtoul_add64(product, + acpi_ut_ascii_char_to_hex(ascii_digit), + accumulated_value); + + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul_multiply64 + * + * PARAMETERS: multiplicand - Current accumulated converted integer + * multiplier - Base/Radix + * out_product - Where the product is returned + * + * RETURN: Status and 64-bit product + * + * DESCRIPTION: Multiply two 64-bit values, with checking for 64-bit overflow as + * well as 32-bit overflow if necessary (if the current global + * integer width is 32). 
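[Editor's note] The overflow check described above works by dividing the 64-bit maximum by the multiplier before multiplying; the function that follows uses acpi_ut_short_divide for exactly that. A minimal standalone sketch of the same pre-check, included only as an editorial illustration (not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/*
 * If multiplicand > UINT64_MAX / multiplier, the product cannot fit in
 * 64 bits, so fail before performing the multiplication.
 */
static bool checked_mul_u64(uint64_t multiplicand, uint64_t multiplier,
			    uint64_t *product)
{
	*product = 0;
	if (multiplicand == 0 || multiplier == 0)
		return true;		/* zero times anything never overflows */

	if (multiplicand > UINT64_MAX / multiplier)
		return false;		/* would overflow 64 bits */

	*product = multiplicand * multiplier;
	return true;
}
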
+ * + ******************************************************************************/ + +static acpi_status +acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product) +{ + u64 val; + + /* Exit if either operand is zero */ + + *out_product = 0; + if (!multiplicand || !multiplier) { + return (AE_OK); + } + + /* Check for 64-bit overflow before the actual multiplication */ + + acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL); + if (multiplicand > val) { + return (AE_NUMERIC_OVERFLOW); + } + + val = multiplicand * multiplier; + + /* Check for 32-bit overflow if necessary */ + + if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) { + return (AE_NUMERIC_OVERFLOW); + } + + *out_product = val; + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul_add64 + * + * PARAMETERS: addend1 - Current accumulated converted integer + * addend2 - New hex value/char + * out_sum - Where sum is returned (Accumulator) + * + * RETURN: Status and 64-bit sum + * + * DESCRIPTION: Add two 64-bit values, with checking for 64-bit overflow as + * well as 32-bit overflow if necessary (if the current global + * integer width is 32). + * + ******************************************************************************/ + +static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum) +{ + u64 sum; + + /* Check for 64-bit overflow before the actual addition */ + + if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) { + return (AE_NUMERIC_OVERFLOW); + } + + sum = addend1 + addend2; + + /* Check for 32-bit overflow if necessary */ + + if ((acpi_gbl_integer_bit_width == 32) && (sum > ACPI_UINT32_MAX)) { + return (AE_NUMERIC_OVERFLOW); + } + + *out_sum = sum; + return (AE_OK); +} diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c index 9633ee142855..e2067dcb9389 100644 --- a/drivers/acpi/acpica/utstrtoul64.c +++ b/drivers/acpi/acpica/utstrtoul64.c @@ -1,6 +1,7 @@ /******************************************************************************* * - * Module Name: utstrtoul64 - string to 64-bit integer support + * Module Name: utstrtoul64 - String-to-integer conversion support for both + * 64-bit and 32-bit integers * ******************************************************************************/ @@ -44,304 +45,319 @@ #include #include "accommon.h" -/******************************************************************************* - * - * The functions in this module satisfy the need for 64-bit string-to-integer - * conversions on both 32-bit and 64-bit platforms. - * - ******************************************************************************/ - #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utstrtoul64") -/* Local prototypes */ -static u64 acpi_ut_strtoul_base10(char *string, u32 flags); - -static u64 acpi_ut_strtoul_base16(char *string, u32 flags); - /******************************************************************************* * - * String conversion rules as written in the ACPI specification. The error - * conditions and behavior are different depending on the type of conversion. - * - * - * Implicit data type conversion: string-to-integer - * -------------------------------------------------- - * - * Base is always 16. This is the ACPI_STRTOUL_BASE16 case. - * - * Example: - * Add ("BA98", Arg0, Local0) - * - * The integer is initialized to the value zero. - * The ASCII string is interpreted as a hexadecimal constant. 
+ * This module contains the top-level string to 64/32-bit unsigned integer + * conversion functions: * - * 1) A "0x" prefix is not allowed. However, ACPICA allows this for - * compatibility with previous ACPICA. (NO ERROR) + * 1) A standard strtoul() function that supports 64-bit integers, base + * 8/10/16, with integer overflow support. This is used mainly by the + * iASL compiler, which implements tighter constraints on integer + * constants than the runtime (interpreter) integer-to-string conversions. + * 2) Runtime "Explicit conversion" as defined in the ACPI specification. + * 3) Runtime "Implicit conversion" as defined in the ACPI specification. * - * 2) Terminates when the size of an integer is reached (32 or 64 bits). - * (NO ERROR) + * Current users of this module: * - * 3) The first non-hex character terminates the conversion without error. - * (NO ERROR) - * - * 4) Conversion of a null (zero-length) string to an integer is not - * allowed. However, ACPICA allows this for compatibility with previous - * ACPICA. This conversion returns the value 0. (NO ERROR) - * - * - * Explicit data type conversion: to_integer() with string operand - * --------------------------------------------------------------- - * - * Base is either 10 (default) or 16 (with 0x prefix) - * - * Examples: - * to_integer ("1000") - * to_integer ("0xABCD") - * - * 1) Can be (must be) either a decimal or hexadecimal numeric string. - * A hex value must be prefixed by "0x" or it is interpreted as a decimal. + * iASL - Preprocessor (constants and math expressions) + * iASL - Main parser, conversion of constants to integers + * iASL - Data Table Compiler parser (constants and math expressions) + * interpreter - Implicit and explicit conversions, GPE method names + * interpreter - Repair code for return values from predefined names + * debugger - Command line input string conversion + * acpi_dump - ACPI table physical addresses + * acpi_exec - Support for namespace overrides * - * 2) The value must not exceed the maximum of an integer value. ACPI spec - * states the behavior is "unpredictable", so ACPICA matches the behavior - * of the implicit conversion case.(NO ERROR) + * Notes concerning users of these interfaces: * - * 3) Behavior on the first non-hex character is not specified by the ACPI - * spec, so ACPICA matches the behavior of the implicit conversion case - * and terminates. (NO ERROR) + * acpi_gbl_integer_byte_width is used to set the 32/64 bit limit for explicit + * and implicit conversions. This global must be set to the proper width. + * For the core ACPICA code, the width depends on the DSDT version. For the + * acpi_ut_strtoul64 interface, all conversions are 64 bits. This interface is + * used primarily for iASL, where the default width is 64 bits for all parsers, + * but error checking is performed later to flag cases where a 64-bit constant + * is wrongly defined in a 32-bit DSDT/SSDT. * - * 4) A null (zero-length) string is illegal. - * However, ACPICA allows this for compatibility with previous ACPICA. - * This conversion returns the value 0. (NO ERROR) + * In ACPI, the only place where octal numbers are supported is within + * the ASL language itself. This is implemented via the main acpi_ut_strtoul64 + * interface. According the ACPI specification, there is no ACPI runtime + * support (explicit/implicit) for octal string conversions. 
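[Editor's note] To make the implicit/explicit distinction described above concrete, here is a toy standalone sketch, added for illustration only. It is not part of the patch and not the ACPICA code: it skips the whitespace, leading-zero and overflow handling documented in this file and uses made-up names; it only shows that implicit conversion treats every string as hexadecimal, while to_integer-style explicit conversion defaults to decimal unless a "0x" prefix is present.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

/* Implicit rule: always hex, "0x" tolerated, stop at first non-hex char. */
static uint64_t toy_implicit(const char *s)
{
	uint64_t v = 0;

	if (s[0] == '0' && tolower((unsigned char)s[1]) == 'x')
		s += 2;
	while (isxdigit((unsigned char)*s)) {
		int c = tolower((unsigned char)*s++);

		v = (v << 4) | (uint64_t)(isdigit(c) ? c - '0' : c - 'a' + 10);
	}
	return v;
}

/* Explicit (to_integer) rule: decimal by default, hex only with "0x". */
static uint64_t toy_explicit(const char *s)
{
	uint64_t v = 0;

	if (s[0] == '0' && tolower((unsigned char)s[1]) == 'x')
		return toy_implicit(s);
	while (isdigit((unsigned char)*s))
		v = v * 10 + (uint64_t)(*s++ - '0');
	return v;
}

int main(void)
{
	/* Same string, different results: 4096 (0x1000) vs 1000 */
	printf("%llu %llu\n",
	       (unsigned long long)toy_implicit("1000"),
	       (unsigned long long)toy_explicit("1000"));
	return 0;
}
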
* ******************************************************************************/ - /******************************************************************************* * * FUNCTION: acpi_ut_strtoul64 * - * PARAMETERS: string - Null terminated input string - * flags - Conversion info, see below + * PARAMETERS: string - Null terminated input string, + * must be a valid pointer * return_value - Where the converted integer is - * returned - * - * RETURN: Status and Converted value + * returned. Must be a valid pointer * - * DESCRIPTION: Convert a string into an unsigned value. Performs either a - * 32-bit or 64-bit conversion, depending on the input integer - * size in Flags (often the current mode of the interpreter). + * RETURN: Status and converted integer. Returns an exception on a + * 64-bit numeric overflow * - * Values for Flags: - * ACPI_STRTOUL_32BIT - Max integer value is 32 bits - * ACPI_STRTOUL_64BIT - Max integer value is 64 bits - * ACPI_STRTOUL_BASE16 - Input string is hexadecimal. Default - * is 10/16 based on string prefix (0x). + * DESCRIPTION: Convert a string into an unsigned integer. Always performs a + * full 64-bit conversion, regardless of the current global + * integer width. Supports Decimal, Hex, and Octal strings. * - * NOTES: - * Negative numbers are not supported, as they are not supported by ACPI. + * Current users of this function: * - * Supports only base 16 or base 10 strings/values. Does not - * support Octal strings, as these are not supported by ACPI. - * - * Current users of this support: - * - * interpreter - Implicit and explicit conversions, GPE method names - * debugger - Command line input string conversion - * iASL - Main parser, conversion of constants to integers - * iASL - Data Table Compiler parser (constant math expressions) - * iASL - Preprocessor (constant math expressions) - * acpi_dump - Input table addresses - * acpi_exec - Testing of the acpi_ut_strtoul64 function - * - * Note concerning callers: - * acpi_gbl_integer_byte_width can be used to set the 32/64 limit. If used, - * this global should be set to the proper width. For the core ACPICA code, - * this width depends on the DSDT version. For iASL, the default byte - * width is always 8 for the parser, but error checking is performed later - * to flag cases where a 64-bit constant is defined in a 32-bit DSDT/SSDT. + * iASL - Preprocessor (constants and math expressions) + * iASL - Main ASL parser, conversion of ASL constants to integers + * iASL - Data Table Compiler parser (constants and math expressions) + * interpreter - Repair code for return values from predefined names + * acpi_dump - ACPI table physical addresses + * acpi_exec - Support for namespace overrides * ******************************************************************************/ - -acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *return_value) +acpi_status acpi_ut_strtoul64(char *string, u64 *return_value) { acpi_status status = AE_OK; - u32 base; + u8 original_bit_width; + u32 base = 10; /* Default is decimal */ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string); - /* Parameter validation */ - - if (!string || !return_value) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - *return_value = 0; - /* Check for zero-length string, returns 0 */ + /* A NULL return string returns a value of zero */ if (*string == 0) { return_ACPI_STATUS(AE_OK); } - /* Skip over any white space at start of string */ - - while (isspace((int)*string)) { - string++; - } - - /* End of string? 
return 0 */ - - if (*string == 0) { + if (!acpi_ut_remove_whitespace(&string)) { return_ACPI_STATUS(AE_OK); } /* - * 1) The "0x" prefix indicates base 16. Per the ACPI specification, - * the "0x" prefix is only allowed for implicit (non-strict) conversions. - * However, we always allow it for compatibility with older ACPICA. + * 1) Check for a hex constant. A "0x" prefix indicates base 16. */ - if ((*string == ACPI_ASCII_ZERO) && - (tolower((int)*(string + 1)) == 'x')) { - string += 2; /* Go past the 0x */ - if (*string == 0) { - return_ACPI_STATUS(AE_OK); /* Return value 0 */ - } - + if (acpi_ut_detect_hex_prefix(&string)) { base = 16; } - /* 2) Force to base 16 (implicit conversion case) */ - - else if (flags & ACPI_STRTOUL_BASE16) { - base = 16; + /* + * 2) Check for an octal constant, defined to be a leading zero + * followed by sequence of octal digits (0-7) + */ + else if (acpi_ut_detect_octal_prefix(&string)) { + base = 8; } - /* 3) Default fallback is to Base 10 */ - - else { - base = 10; + if (!acpi_ut_remove_leading_zeros(&string)) { + return_ACPI_STATUS(AE_OK); /* Return value 0 */ } - /* Skip all leading zeros */ + /* + * Force a full 64-bit conversion. The caller (usually iASL) must + * check for a 32-bit overflow later as necessary (If current mode + * is 32-bit, meaning a 32-bit DSDT). + */ + original_bit_width = acpi_gbl_integer_bit_width; + acpi_gbl_integer_bit_width = 64; - while (*string == ACPI_ASCII_ZERO) { - string++; - if (*string == 0) { - return_ACPI_STATUS(AE_OK); /* Return value 0 */ - } + /* + * Perform the base 8, 10, or 16 conversion. A 64-bit numeric overflow + * will return an exception (to allow iASL to flag the statement). + */ + switch (base) { + case 8: + status = acpi_ut_convert_octal_string(string, return_value); + break; + + case 10: + status = acpi_ut_convert_decimal_string(string, return_value); + break; + + case 16: + default: + status = acpi_ut_convert_hex_string(string, return_value); + break; } - /* Perform the base 16 or 10 conversion */ - - if (base == 16) { - *return_value = acpi_ut_strtoul_base16(string, flags); - } else { - *return_value = acpi_ut_strtoul_base10(string, flags); - } + /* Only possible exception from above is a 64-bit overflow */ + acpi_gbl_integer_bit_width = original_bit_width; return_ACPI_STATUS(status); } /******************************************************************************* * - * FUNCTION: acpi_ut_strtoul_base10 + * FUNCTION: acpi_ut_implicit_strtoul64 + * + * PARAMETERS: string - Null terminated input string, + * must be a valid pointer + * + * RETURN: Converted integer + * + * DESCRIPTION: Perform a 64-bit conversion with restrictions placed upon + * an "implicit conversion" by the ACPI specification. Used by + * many ASL operators that require an integer operand, and support + * an automatic (implicit) conversion from a string operand + * to the final integer operand. The major restriction is that + * only hex strings are supported. + * + * ----------------------------------------------------------------------------- + * + * Base is always 16, either with or without the 0x prefix. Decimal and + * Octal strings are not supported, as per the ACPI specification. + * + * Examples (both are hex values): + * Add ("BA98", Arg0, Local0) + * Subtract ("0x12345678", Arg1, Local1) + * + * Conversion rules as extracted from the ACPI specification: + * + * The converted integer is initialized to the value zero. + * The ASCII string is always interpreted as a hexadecimal constant. 
+ * + * 1) According to the ACPI specification, a "0x" prefix is not allowed. + * However, ACPICA allows this as an ACPI extension on general + * principle. (NO ERROR) + * + * 2) The conversion terminates when the size of an integer is reached + * (32 or 64 bits). There are no numeric overflow conditions. (NO ERROR) + * + * 3) The first non-hex character terminates the conversion and returns + * the current accumulated value of the converted integer (NO ERROR). * - * PARAMETERS: string - Null terminated input string - * flags - Conversion info + * 4) Conversion of a null (zero-length) string to an integer is + * technically not allowed. However, ACPICA allows this as an ACPI + * extension. The conversion returns the value 0. (NO ERROR) * - * RETURN: 64-bit converted integer + * NOTE: There are no error conditions returned by this function. At + * the minimum, a value of zero is returned. * - * DESCRIPTION: Performs a base 10 conversion of the input string to an - * integer value, either 32 or 64 bits. - * Note: String must be valid and non-null. + * Current users of this function: + * + * interpreter - All runtime implicit conversions, as per ACPI specification + * iASL - Data Table Compiler parser (constants and math expressions) * ******************************************************************************/ -static u64 acpi_ut_strtoul_base10(char *string, u32 flags) +u64 acpi_ut_implicit_strtoul64(char *string) { - int ascii_digit; - u64 next_value; - u64 return_value = 0; - - /* Main loop: convert each ASCII byte in the input string */ - - while (*string) { - ascii_digit = *string; - if (!isdigit(ascii_digit)) { - - /* Not ASCII 0-9, terminate */ - - goto exit; - } - - /* Convert and insert (add) the decimal digit */ + u64 converted_integer = 0; - acpi_ut_short_multiply(return_value, 10, &next_value); - next_value += (ascii_digit - ACPI_ASCII_ZERO); + ACPI_FUNCTION_TRACE_STR(ut_implicit_strtoul64, string); - /* Check for overflow (32 or 64 bit) - return current converted value */ + if (!acpi_ut_remove_whitespace(&string)) { + return_VALUE(0); + } - if (((flags & ACPI_STRTOUL_32BIT) && (next_value > ACPI_UINT32_MAX)) || (next_value < return_value)) { /* 64-bit overflow case */ - goto exit; - } + /* + * Per the ACPI specification, only hexadecimal is supported for + * implicit conversions, and the "0x" prefix is "not allowed". + * However, allow a "0x" prefix as an ACPI extension. + */ + acpi_ut_detect_hex_prefix(&string); - return_value = next_value; - string++; + if (!acpi_ut_remove_leading_zeros(&string)) { + return_VALUE(0); } -exit: - return (return_value); + /* + * Ignore overflow as per the ACPI specification. This is implemented by + * ignoring the return status from the conversion function called below. + * On overflow, the input string is simply truncated. + */ + acpi_ut_convert_hex_string(string, &converted_integer); + return_VALUE(converted_integer); } /******************************************************************************* * - * FUNCTION: acpi_ut_strtoul_base16 + * FUNCTION: acpi_ut_explicit_strtoul64 + * + * PARAMETERS: string - Null terminated input string, + * must be a valid pointer * - * PARAMETERS: string - Null terminated input string - * flags - conversion info + * RETURN: Converted integer * - * RETURN: 64-bit converted integer + * DESCRIPTION: Perform a 64-bit conversion with the restrictions placed upon + * an "explicit conversion" by the ACPI specification. The + * main restriction is that only hex and decimal are supported. 
* - * DESCRIPTION: Performs a base 16 conversion of the input string to an - * integer value, either 32 or 64 bits. - * Note: String must be valid and non-null. + * ----------------------------------------------------------------------------- + * + * Base is either 10 (default) or 16 (with 0x prefix). Octal (base 8) strings + * are not supported, as per the ACPI specification. + * + * Examples: + * to_integer ("1000") Decimal + * to_integer ("0xABCD") Hex + * + * Conversion rules as extracted from the ACPI specification: + * + * 1) The input string is either a decimal or hexadecimal numeric string. + * A hex value must be prefixed by "0x" or it is interpreted as decimal. + * + * 2) The value must not exceed the maximum of an integer value + * (32 or 64 bits). The ACPI specification states the behavior is + * "unpredictable", so ACPICA matches the behavior of the implicit + * conversion case. There are no numeric overflow conditions. (NO ERROR) + * + * 3) Behavior on the first non-hex character is not defined by the ACPI + * specification (for the to_integer operator), so ACPICA matches the + * behavior of the implicit conversion case. It terminates the + * conversion and returns the current accumulated value of the converted + * integer. (NO ERROR) + * + * 4) Conversion of a null (zero-length) string to an integer is + * technically not allowed. However, ACPICA allows this as an ACPI + * extension. The conversion returns the value 0. (NO ERROR) + * + * NOTE: There are no error conditions returned by this function. At the + * minimum, a value of zero is returned. + * + * Current users of this function: + * + * interpreter - Runtime ASL to_integer operator, as per the ACPI specification * ******************************************************************************/ -static u64 acpi_ut_strtoul_base16(char *string, u32 flags) +u64 acpi_ut_explicit_strtoul64(char *string) { - int ascii_digit; - u32 valid_digits = 1; - u64 return_value = 0; - - /* Main loop: convert each ASCII byte in the input string */ + u64 converted_integer = 0; + u32 base = 10; /* Default is decimal */ - while (*string) { + ACPI_FUNCTION_TRACE_STR(ut_explicit_strtoul64, string); - /* Check for overflow (32 or 64 bit) - return current converted value */ - - if ((valid_digits > 16) || - ((valid_digits > 8) && (flags & ACPI_STRTOUL_32BIT))) { - goto exit; - } - - ascii_digit = *string; - if (!isxdigit(ascii_digit)) { - - /* Not Hex ASCII A-F, a-f, or 0-9, terminate */ - - goto exit; - } + if (!acpi_ut_remove_whitespace(&string)) { + return_VALUE(0); + } - /* Convert and insert the hex digit */ + /* + * Only Hex and Decimal are supported, as per the ACPI specification. + * A "0x" prefix indicates hex; otherwise decimal is assumed. + */ + if (acpi_ut_detect_hex_prefix(&string)) { + base = 16; + } - acpi_ut_short_shift_left(return_value, 4, &return_value); - return_value |= acpi_ut_ascii_char_to_hex(ascii_digit); + if (!acpi_ut_remove_leading_zeros(&string)) { + return_VALUE(0); + } - string++; - valid_digits++; + /* + * Ignore overflow as per the ACPI specification. This is implemented by + * ignoring the return status from the conversion functions called below. + * On overflow, the input string is simply truncated. 
+ */ + switch (base) { + case 10: + default: + acpi_ut_convert_decimal_string(string, &converted_integer); + break; + + case 16: + acpi_ut_convert_hex_string(string, &converted_integer); + break; } -exit: - return (return_value); + return_VALUE(converted_integer); } diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 2c462beee551..a943cf17faa7 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1007,7 +1007,7 @@ static ssize_t erst_reader(struct pstore_record *record) /* The record may be cleared by others, try read next record */ if (len == -ENOENT) goto skip; - else if (len < sizeof(*rcd)) { + else if (len < 0 || len < sizeof(*rcd)) { rc = -EIO; goto out; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 3c3a37b8503b..572b6c7303ed 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -112,7 +113,7 @@ static DEFINE_MUTEX(ghes_list_mutex); * Because the memory area used to transfer hardware error information * from BIOS to Linux can be determined only in NMI, IRQ or timer * handler, but general ioremap can not be used in atomic context, so - * a special version of atomic ioremap is implemented for that. + * the fixmap is used instead. */ /* @@ -126,8 +127,8 @@ static DEFINE_MUTEX(ghes_list_mutex); /* virtual memory area for atomic ioremap */ static struct vm_struct *ghes_ioremap_area; /* - * These 2 spinlock is used to prevent atomic ioremap virtual memory - * area from being mapped simultaneously. + * These 2 spinlocks are used to prevent the fixmap entries from being used + * simultaneously. */ static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); @@ -159,52 +160,36 @@ static void ghes_ioremap_exit(void) static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) { - unsigned long vaddr; phys_addr_t paddr; pgprot_t prot; - vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); - paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); - ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); + __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot); - return (void __iomem *)vaddr; + return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI); } static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) { - unsigned long vaddr, paddr; + phys_addr_t paddr; pgprot_t prot; - vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); - paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); + __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot); - ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); - - return (void __iomem *)vaddr; + return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ); } -static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) +static void ghes_iounmap_nmi(void) { - unsigned long vaddr = (unsigned long __force)vaddr_ptr; - void *base = ghes_ioremap_area->addr; - - BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); - unmap_kernel_range_noflush(vaddr, PAGE_SIZE); - arch_apei_flush_tlb_one(vaddr); + clear_fixmap(FIX_APEI_GHES_NMI); } -static void ghes_iounmap_irq(void __iomem *vaddr_ptr) +static void ghes_iounmap_irq(void) { - unsigned long vaddr = (unsigned long __force)vaddr_ptr; - void *base = ghes_ioremap_area->addr; - - BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); - unmap_kernel_range_noflush(vaddr, PAGE_SIZE); - arch_apei_flush_tlb_one(vaddr); + clear_fixmap(FIX_APEI_GHES_IRQ); } static int ghes_estatus_pool_init(void) @@ 
-360,10 +345,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, paddr += trunk; buffer += trunk; if (in_nmi) { - ghes_iounmap_nmi(vaddr); + ghes_iounmap_nmi(); raw_spin_unlock(&ghes_ioremap_lock_nmi); } else { - ghes_iounmap_irq(vaddr); + ghes_iounmap_irq(); spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); } } @@ -851,17 +836,8 @@ static void ghes_sea_remove(struct ghes *ghes) synchronize_rcu(); } #else /* CONFIG_ACPI_APEI_SEA */ -static inline void ghes_sea_add(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to add SEA notification which is not supported\n", - ghes->generic->header.source_id); -} - -static inline void ghes_sea_remove(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to remove SEA notification which is not supported\n", - ghes->generic->header.source_id); -} +static inline void ghes_sea_add(struct ghes *ghes) { } +static inline void ghes_sea_remove(struct ghes *ghes) { } #endif /* CONFIG_ACPI_APEI_SEA */ #ifdef CONFIG_HAVE_ACPI_APEI_NMI @@ -1063,23 +1039,9 @@ static void ghes_nmi_init_cxt(void) init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); } #else /* CONFIG_HAVE_ACPI_APEI_NMI */ -static inline void ghes_nmi_add(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n", - ghes->generic->header.source_id); - BUG(); -} - -static inline void ghes_nmi_remove(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n", - ghes->generic->header.source_id); - BUG(); -} - -static inline void ghes_nmi_init_cxt(void) -{ -} +static inline void ghes_nmi_add(struct ghes *ghes) { } +static inline void ghes_nmi_remove(struct ghes *ghes) { } +static inline void ghes_nmi_init_cxt(void) { } #endif /* CONFIG_HAVE_ACPI_APEI_NMI */ static int ghes_probe(struct platform_device *ghes_dev) diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 4d0979e02a28..b6d58cc58f5f 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id) return 0; } #endif +static int set_gbl_term_list(const struct dmi_system_id *id) +{ + acpi_gbl_parse_table_as_term_list = 1; + return 0; +} -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = { + /* + * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C + * mode. + * https://bugzilla.kernel.org/show_bug.cgi?id=198515 + */ + { + .callback = set_gbl_term_list, + .ident = "Dell Precision M5530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"), + }, + }, + { + .callback = set_gbl_term_list, + .ident = "Dell XPS 15 9570", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"), + }, + }, /* * Invoke DSDT corruption work-around on all Toshiba Satellite. + * DSDT will be copied to memory. 
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 */ { @@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #else -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = { {} }; #endif @@ -1001,11 +1028,8 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; - /* - * If the machine falls into the DMI check table, - * DSDT will be copied to memory - */ - dmi_check_system(dsdt_dmi_table); + /* Check machine-specific quirks */ + dmi_check_system(acpi_quirks_dmi_table); status = acpi_reallocate_root_table(); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index fbcc73f7a099..608e9dd565c6 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable); #ifdef CONFIG_PM static DEFINE_MUTEX(acpi_pm_notifier_lock); +static DEFINE_MUTEX(acpi_pm_notifier_install_lock); void acpi_pm_wakeup_event(struct device *dev) { @@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, if (!dev && !func) return AE_BAD_PARAMETER; - mutex_lock(&acpi_pm_notifier_lock); + mutex_lock(&acpi_pm_notifier_install_lock); if (adev->wakeup.flags.notifier_present) goto out; - adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); - adev->wakeup.context.dev = dev; - adev->wakeup.context.func = func; - status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, acpi_pm_notify_handler, NULL); if (ACPI_FAILURE(status)) goto out; + mutex_lock(&acpi_pm_notifier_lock); + adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); + adev->wakeup.context.dev = dev; + adev->wakeup.context.func = func; adev->wakeup.flags.notifier_present = true; + mutex_unlock(&acpi_pm_notifier_lock); out: - mutex_unlock(&acpi_pm_notifier_lock); + mutex_unlock(&acpi_pm_notifier_install_lock); return status; } @@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) { acpi_status status = AE_BAD_PARAMETER; - mutex_lock(&acpi_pm_notifier_lock); + mutex_lock(&acpi_pm_notifier_install_lock); if (!adev->wakeup.flags.notifier_present) goto out; @@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) if (ACPI_FAILURE(status)) goto out; + mutex_lock(&acpi_pm_notifier_lock); adev->wakeup.context.func = NULL; adev->wakeup.context.dev = NULL; wakeup_source_unregister(adev->wakeup.ws); - adev->wakeup.flags.notifier_present = false; + mutex_unlock(&acpi_pm_notifier_lock); out: - mutex_unlock(&acpi_pm_notifier_lock); + mutex_unlock(&acpi_pm_notifier_install_lock); return status; } @@ -882,14 +885,13 @@ int acpi_dev_runtime_suspend(struct device *dev) EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); /** - * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. + * acpi_dev_resume - Put device into the full-power state using ACPI. * @dev: Device to put into the full-power state. * * Put the given device into the full-power state using the standard ACPI - * mechanism at run time. Set the power state of the device to ACPI D0 and - * disable remote wakeup. + * mechanism. Set the power state of the device to ACPI D0 and disable wakeup. 
*/ -int acpi_dev_runtime_resume(struct device *dev) +int acpi_dev_resume(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); int error; @@ -901,7 +903,7 @@ int acpi_dev_runtime_resume(struct device *dev) acpi_device_wakeup_disable(adev); return error; } -EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); +EXPORT_SYMBOL_GPL(acpi_dev_resume); /** * acpi_subsys_runtime_suspend - Suspend device using ACPI. @@ -926,7 +928,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); */ int acpi_subsys_runtime_resume(struct device *dev) { - int ret = acpi_dev_runtime_resume(dev); + int ret = acpi_dev_resume(dev); return ret ? ret : pm_generic_runtime_resume(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); @@ -967,27 +969,26 @@ int acpi_dev_suspend_late(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); -/** - * acpi_dev_resume_early - Put device into the full-power state using ACPI. - * @dev: Device to put into the full-power state. - * - * Put the given device into the full-power state using the standard ACPI - * mechanism during system transition to the working state. Set the power - * state of the device to ACPI D0 and disable remote wakeup. - */ -int acpi_dev_resume_early(struct device *dev) +static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev) { - struct acpi_device *adev = ACPI_COMPANION(dev); - int error; + u32 sys_target = acpi_target_system_state(); + int ret, state; - if (!adev) - return 0; + if (device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + return true; - error = acpi_dev_pm_full_power(adev); - acpi_device_wakeup_disable(adev); - return error; + if (sys_target == ACPI_STATE_S0) + return false; + + if (adev->power.flags.dsw_present) + return true; + + ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); + if (ret) + return true; + + return state != adev->power.state; } -EXPORT_SYMBOL_GPL(acpi_dev_resume_early); /** * acpi_subsys_prepare - Prepare device for system transition to a sleep state. @@ -996,29 +997,36 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early); int acpi_subsys_prepare(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); - u32 sys_target; - int ret, state; + int ret; ret = pm_generic_prepare(dev); if (ret < 0) return ret; - if (!adev || !pm_runtime_suspended(dev) - || device_may_wakeup(dev) != !!adev->wakeup.prepare_count) - return 0; - - sys_target = acpi_target_system_state(); - if (sys_target == ACPI_STATE_S0) - return 1; - - if (adev->power.flags.dsw_present) + if (!adev || !pm_runtime_suspended(dev)) return 0; - ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); - return !ret && state == adev->power.state; + return !acpi_dev_needs_resume(dev, adev); } EXPORT_SYMBOL_GPL(acpi_subsys_prepare); +/** + * acpi_subsys_complete - Finalize device's resume during system resume. + * @dev: Device to handle. + */ +void acpi_subsys_complete(struct device *dev) +{ + pm_generic_complete(dev); + /* + * If the device had been runtime-suspended before the system went into + * the sleep state it is going out of and it has never been resumed till + * now, resume it in case the firmware powered it up. + */ + if (dev->power.direct_complete && pm_resume_via_firmware()) + pm_request_resume(dev); +} +EXPORT_SYMBOL_GPL(acpi_subsys_complete); + /** * acpi_subsys_suspend - Run the device driver's suspend callback. * @dev: Device to handle. 
@@ -1057,7 +1065,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); */ int acpi_subsys_resume_early(struct device *dev) { - int ret = acpi_dev_resume_early(dev); + int ret = acpi_dev_resume(dev); return ret ? ret : pm_generic_resume_early(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); @@ -1087,7 +1095,7 @@ static struct dev_pm_domain acpi_general_pm_domain = { .runtime_resume = acpi_subsys_runtime_resume, #ifdef CONFIG_PM_SLEEP .prepare = acpi_subsys_prepare, - .complete = pm_complete_with_resume_check, + .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, .suspend_late = acpi_subsys_suspend_late, .resume_early = acpi_subsys_resume_early, diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 24418932612e..a041689e5701 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, int count; struct acpi_hardware_id *id; + /* Avoid unnecessarily loading modules for non present devices. */ + if (!acpi_device_is_present(acpi_dev)) + return 0; + /* * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 236b14324780..6adcda057b36 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec) { if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) ec_log_drv("event unblocked"); - if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - advance_transaction(ec); + /* + * Unconditionally invoke this once after enabling the event + * handling mechanism to detect the pending events. + */ + advance_transaction(ec); } static inline void __acpi_ec_disable_event(struct acpi_ec *ec) @@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events) if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) acpi_ec_enable_gpe(ec, true); - - /* EC is fully operational, allow queries */ - acpi_ec_enable_event(ec); } } + /* EC is fully operational, allow queries */ + acpi_ec_enable_event(ec); return 0; } @@ -1514,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events) } acpi_handle_info(ec->handle, - "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", + "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->gpe, ec->command_addr, ec->data_addr); return ret; } @@ -1595,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device) { struct acpi_ec *ec = NULL; int ret; + bool is_ecdt = false; + acpi_status status; strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_EC_CLASS); - ec = acpi_ec_alloc(); - if (!ec) - return -ENOMEM; - if (ec_parse_device(device->handle, 0, ec, NULL) != - AE_CTRL_TERMINATE) { + if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { + is_ecdt = true; + ec = boot_ec; + } else { + ec = acpi_ec_alloc(); + if (!ec) + return -ENOMEM; + status = ec_parse_device(device->handle, 0, ec, NULL); + if (status != AE_CTRL_TERMINATE) { ret = -EINVAL; goto err_alloc; + } } if (acpi_is_boot_ec(ec)) { - boot_ec_is_ecdt = false; - /* - * Trust PNP0C09 namespace location rather than ECDT ID. - * - * But trust ECDT GPE rather than _GPE because of ASUS quirks, - * so do not change boot_ec->gpe to ec->gpe. 
- */ - boot_ec->handle = ec->handle; - acpi_handle_debug(ec->handle, "duplicated.\n"); - acpi_ec_free(ec); - ec = boot_ec; - ret = acpi_config_boot_ec(ec, ec->handle, true, false); + boot_ec_is_ecdt = is_ecdt; + if (!is_ecdt) { + /* + * Trust PNP0C09 namespace location rather than + * ECDT ID. But trust ECDT GPE rather than _GPE + * because of ASUS quirks, so do not change + * boot_ec->gpe to ec->gpe. + */ + boot_ec->handle = ec->handle; + acpi_handle_debug(ec->handle, "duplicated.\n"); + acpi_ec_free(ec); + ec = boot_ec; + } + ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt); } else ret = acpi_ec_setup(ec, true); if (ret) @@ -1633,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - /* Reprobe devices depending on the EC */ - acpi_walk_dep_device_list(ec->handle); + if (!is_ecdt) { + /* Reprobe devices depending on the EC */ + acpi_walk_dep_device_list(ec->handle); + } acpi_handle_debug(ec->handle, "enumerated.\n"); return 0; @@ -1690,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) static const struct acpi_device_id ec_device_ids[] = { {"PNP0C09", 0}, + {ACPI_ECDT_HID, 0}, {"", 0}, }; @@ -1762,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void) * Note: ec->handle can be valid if this function is called after * acpi_ec_add(), hence the fast path. */ - if (boot_ec->handle != ACPI_ROOT_OBJECT) - handle = boot_ec->handle; - else if (!acpi_ec_ecdt_get_handle(&handle)) - return -ENODEV; - return acpi_config_boot_ec(boot_ec, handle, true, true); + if (boot_ec->handle == ACPI_ROOT_OBJECT) { + if (!acpi_ec_ecdt_get_handle(&handle)) + return -ENODEV; + boot_ec->handle = handle; + } + + /* Register to ACPI bus with PM ops attached */ + return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); } #if 0 @@ -2018,6 +2035,12 @@ int __init acpi_ec_init(void) /* Drivers must be started after acpi_ec_query_init() */ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); + /* + * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is + * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT + * settings but invalid DSDT settings. + * https://bugzilla.kernel.org/show_bug.cgi?id=196847 + */ ecdt_fail = acpi_ec_ecdt_start(); return ecdt_fail && dsdt_fail ? 
-ENODEV : 0; } diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index 6c7dd7af789e..dd70d6c2bca0 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c @@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) return -ENOMEM; } - if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe)) + if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe)) goto error; if (!debugfs_create_bool("use_global_lock", 0444, dev_dir, &first_ec->global_lock)) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 4361c4415b4f..2cd2ae152ab7 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, const struct device *dev); +int acpi_bus_register_early_device(int type); /* -------------------------------------------------------------------------- Device Matching and Notification @@ -158,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {} -------------------------------------------------------------------------- */ struct acpi_ec { acpi_handle handle; - unsigned long gpe; + u32 gpe; unsigned long command_addr; unsigned long data_addr; bool global_lock; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 9c2c49b6a240..67a860790560 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1457,6 +1457,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dev_name(&adev_dimm->dev)); return -ENXIO; } + /* + * Record nfit_mem for the notification path to track back to + * the nfit sysfs attributes for this dimm device object. 
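+ * The drvdata reference set here is cleared again in
+ * shutdown_dimm_notify() when the notification handler is removed.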
+ */ + dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* * Until standardization materializes we need to consider 4 @@ -1516,9 +1521,11 @@ static void shutdown_dimm_notify(void *data) sysfs_put(nfit_mem->flags_attr); nfit_mem->flags_attr = NULL; } - if (adev_dimm) + if (adev_dimm) { acpi_remove_notify_handler(adev_dimm->handle, ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); + dev_set_drvdata(&adev_dimm->dev, NULL); + } } mutex_unlock(&acpi_desc->init_mutex); } @@ -1611,6 +1618,9 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) struct kernfs_node *nfit_kernfs; nvdimm = nfit_mem->nvdimm; + if (!nvdimm) + continue; + nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); if (nfit_kernfs) nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, @@ -2738,15 +2748,21 @@ static void acpi_nfit_scrub(struct work_struct *work) static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; - int rc; - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) - if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { - /* BLK regions don't need to wait for ars results */ - rc = acpi_nfit_register_region(acpi_desc, nfit_spa); - if (rc) - return rc; - } + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + int rc, type = nfit_spa_type(nfit_spa->spa); + + /* PMEM and VMEM will be registered by the ARS workqueue */ + if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE) + continue; + /* BLK apertures belong to BLK region registration below */ + if (type == NFIT_SPA_BDW) + continue; + /* BLK regions don't need to wait for ARS results */ + rc = acpi_nfit_register_region(acpi_desc, nfit_spa); + if (rc) + return rc; + } acpi_desc->ars_start_flags = 0; if (!acpi_desc->cancel) diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 917f1cc0fda4..8fb74d9011da 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm) */ int acpi_map_pxm_to_online_node(int pxm) { - int node, n, dist, min_dist; + int node, min_node; node = acpi_map_pxm_to_node(pxm); if (node == NUMA_NO_NODE) node = 0; + min_node = node; if (!node_online(node)) { - min_dist = INT_MAX; + int min_dist = INT_MAX, dist, n; + for_each_online_node(n) { dist = node_distance(node, n); if (dist < min_dist) { min_dist = dist; - node = n; + min_node = n; } } } - return node; + return min_node; } EXPORT_SYMBOL(acpi_map_pxm_to_online_node); diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c new file mode 100644 index 000000000000..7f3c567e8168 --- /dev/null +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -0,0 +1,455 @@ +/* + * TI TPS68470 PMIC operation region driver + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Author: Rajmohan Mani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Based on drivers/acpi/pmic/intel_pmic* drivers + */ + +#include +#include +#include +#include +#include + +struct tps68470_pmic_table { + u32 address; /* operation region address */ + u32 reg; /* corresponding register */ + u32 bitmask; /* bit mask for power, clock */ +}; + +#define TI_PMIC_POWER_OPREGION_ID 0xB0 +#define TI_PMIC_VR_VAL_OPREGION_ID 0xB1 +#define TI_PMIC_CLOCK_OPREGION_ID 0xB2 +#define TI_PMIC_CLKFREQ_OPREGION_ID 0xB3 + +struct tps68470_pmic_opregion { + struct mutex lock; + struct regmap *regmap; +}; + +#define S_IO_I2C_EN (BIT(0) | BIT(1)) + +static const struct tps68470_pmic_table power_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_S_I2C_CTL, + .bitmask = S_IO_I2C_EN, + /* S_I2C_CTL */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_VCMCTL, + .bitmask = BIT(0), + /* VCMCTL */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_VAUX1CTL, + .bitmask = BIT(0), + /* VAUX1_CTL */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_VAUX2CTL, + .bitmask = BIT(0), + /* VAUX2CTL */ + }, + { + .address = 0x10, + .reg = TPS68470_REG_VACTL, + .bitmask = BIT(0), + /* VACTL */ + }, + { + .address = 0x14, + .reg = TPS68470_REG_VDCTL, + .bitmask = BIT(0), + /* VDCTL */ + }, +}; + +/* Table to set voltage regulator value */ +static const struct tps68470_pmic_table vr_val_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_VSIOVAL, + .bitmask = TPS68470_VSIOVAL_IOVOLT_MASK, + /* TPS68470_REG_VSIOVAL */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_VIOVAL, + .bitmask = TPS68470_VIOVAL_IOVOLT_MASK, + /* TPS68470_REG_VIOVAL */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_VCMVAL, + .bitmask = TPS68470_VCMVAL_VCVOLT_MASK, + /* TPS68470_REG_VCMVAL */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_VAUX1VAL, + .bitmask = TPS68470_VAUX1VAL_AUX1VOLT_MASK, + /* TPS68470_REG_VAUX1VAL */ + }, + { + .address = 0x10, + .reg = TPS68470_REG_VAUX2VAL, + .bitmask = TPS68470_VAUX2VAL_AUX2VOLT_MASK, + /* TPS68470_REG_VAUX2VAL */ + }, + { + .address = 0x14, + .reg = TPS68470_REG_VAVAL, + .bitmask = TPS68470_VAVAL_AVOLT_MASK, + /* TPS68470_REG_VAVAL */ + }, + { + .address = 0x18, + .reg = TPS68470_REG_VDVAL, + .bitmask = TPS68470_VDVAL_DVOLT_MASK, + /* TPS68470_REG_VDVAL */ + }, +}; + +/* Table to configure clock frequency */ +static const struct tps68470_pmic_table clk_freq_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_POSTDIV2, + .bitmask = BIT(0) | BIT(1), + /* TPS68470_REG_POSTDIV2 */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_BOOSTDIV, + .bitmask = 0x1F, + /* TPS68470_REG_BOOSTDIV */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_BUCKDIV, + .bitmask = 0x0F, + /* TPS68470_REG_BUCKDIV */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_PLLSWR, + .bitmask = 0x13, + /* TPS68470_REG_PLLSWR */ + }, + { + .address = 0x10, + .reg = TPS68470_REG_XTALDIV, + .bitmask = 0xFF, + /* TPS68470_REG_XTALDIV */ + }, + { + .address = 0x14, + .reg = TPS68470_REG_PLLDIV, + .bitmask = 0xFF, + /* TPS68470_REG_PLLDIV */ + }, + { + .address = 0x18, + .reg = TPS68470_REG_POSTDIV, + .bitmask = 0x83, + /* TPS68470_REG_POSTDIV */ + }, +}; + +/* Table to configure and enable clocks */ +static const struct tps68470_pmic_table clk_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_PLLCTL, + .bitmask = 0xF5, + /* TPS68470_REG_PLLCTL */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_PLLCTL2, + .bitmask = BIT(0), + /* TPS68470_REG_PLLCTL2 */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_CLKCFG1, + .bitmask = TPS68470_CLKCFG1_MODE_A_MASK | + TPS68470_CLKCFG1_MODE_B_MASK, + 
/* TPS68470_REG_CLKCFG1 */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_CLKCFG2, + .bitmask = TPS68470_CLKCFG1_MODE_A_MASK | + TPS68470_CLKCFG1_MODE_B_MASK, + /* TPS68470_REG_CLKCFG2 */ + }, +}; + +static int pmic_get_reg_bit(u64 address, + const struct tps68470_pmic_table *table, + const unsigned int table_size, int *reg, + int *bitmask) +{ + u64 i; + + i = address / 4; + if (i >= table_size) + return -ENOENT; + + if (!reg || !bitmask) + return -EINVAL; + + *reg = table[i].reg; + *bitmask = table[i].bitmask; + + return 0; +} + +static int tps68470_pmic_get_power(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = (data & bitmask) ? 1 : 0; + return 0; +} + +static int tps68470_pmic_get_vr_val(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = data & bitmask; + return 0; +} + +static int tps68470_pmic_get_clk(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = (data & bitmask) ? 1 : 0; + return 0; +} + +static int tps68470_pmic_get_clk_freq(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = data & bitmask; + return 0; +} + +static int ti_tps68470_regmap_update_bits(struct regmap *regmap, int reg, + int bitmask, u64 value) +{ + return regmap_update_bits(regmap, reg, bitmask, value); +} + +static acpi_status tps68470_pmic_common_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *region_context, + int (*get)(struct regmap *, + int, int, u64 *), + int (*update)(struct regmap *, + int, int, u64), + const struct tps68470_pmic_table *tbl, + unsigned int tbl_size) +{ + struct tps68470_pmic_opregion *opregion = region_context; + struct regmap *regmap = opregion->regmap; + int reg, ret, bitmask; + + if (bits != 32) + return AE_BAD_PARAMETER; + + ret = pmic_get_reg_bit(address, tbl, tbl_size, ®, &bitmask); + if (ret < 0) + return AE_BAD_PARAMETER; + + if (function == ACPI_WRITE && *value > bitmask) + return AE_BAD_PARAMETER; + + mutex_lock(&opregion->lock); + + ret = (function == ACPI_READ) ? + get(regmap, reg, bitmask, value) : + update(regmap, reg, bitmask, *value); + + mutex_unlock(&opregion->lock); + + return ret ? 
AE_ERROR : AE_OK; +} + +static acpi_status tps68470_pmic_cfreq_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *handler_context, + void *region_context) +{ + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_clk_freq, + ti_tps68470_regmap_update_bits, + clk_freq_table, + ARRAY_SIZE(clk_freq_table)); +} + +static acpi_status tps68470_pmic_clk_handler(u32 function, + acpi_physical_address address, u32 bits, + u64 *value, void *handler_context, + void *region_context) +{ + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_clk, + ti_tps68470_regmap_update_bits, + clk_table, + ARRAY_SIZE(clk_table)); +} + +static acpi_status tps68470_pmic_vrval_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *handler_context, + void *region_context) +{ + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_vr_val, + ti_tps68470_regmap_update_bits, + vr_val_table, + ARRAY_SIZE(vr_val_table)); +} + +static acpi_status tps68470_pmic_pwr_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *handler_context, + void *region_context) +{ + if (bits != 32) + return AE_BAD_PARAMETER; + + /* set/clear for bit 0, bits 0 and 1 together */ + if (function == ACPI_WRITE && + !(*value == 0 || *value == 1 || *value == 3)) { + return AE_BAD_PARAMETER; + } + + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_power, + ti_tps68470_regmap_update_bits, + power_table, + ARRAY_SIZE(power_table)); +} + +static int tps68470_pmic_opregion_probe(struct platform_device *pdev) +{ + struct regmap *tps68470_regmap = dev_get_drvdata(pdev->dev.parent); + acpi_handle handle = ACPI_HANDLE(pdev->dev.parent); + struct device *dev = &pdev->dev; + struct tps68470_pmic_opregion *opregion; + acpi_status status; + + if (!dev || !tps68470_regmap) { + dev_warn(dev, "dev or regmap is NULL\n"); + return -EINVAL; + } + + if (!handle) { + dev_warn(dev, "acpi handle is NULL\n"); + return -ENODEV; + } + + opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL); + if (!opregion) + return -ENOMEM; + + mutex_init(&opregion->lock); + opregion->regmap = tps68470_regmap; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_POWER_OPREGION_ID, + tps68470_pmic_pwr_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_mutex_destroy; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_VR_VAL_OPREGION_ID, + tps68470_pmic_vrval_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_remove_power_handler; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_CLOCK_OPREGION_ID, + tps68470_pmic_clk_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_remove_vr_val_handler; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_CLKFREQ_OPREGION_ID, + tps68470_pmic_cfreq_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_remove_clk_handler; + + return 0; + +out_remove_clk_handler: + acpi_remove_address_space_handler(handle, TI_PMIC_CLOCK_OPREGION_ID, + tps68470_pmic_clk_handler); +out_remove_vr_val_handler: + acpi_remove_address_space_handler(handle, TI_PMIC_VR_VAL_OPREGION_ID, + tps68470_pmic_vrval_handler); +out_remove_power_handler: + acpi_remove_address_space_handler(handle, TI_PMIC_POWER_OPREGION_ID, + tps68470_pmic_pwr_handler); 
+out_mutex_destroy: + mutex_destroy(&opregion->lock); + return -ENODEV; +} + +static struct platform_driver tps68470_pmic_opregion_driver = { + .probe = tps68470_pmic_opregion_probe, + .driver = { + .name = "tps68470_pmic_opregion", + }, +}; + +builtin_platform_driver(tps68470_pmic_opregion_driver) diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 2fa8304171e0..7a3431018e0a 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device) device->driver_data = hc; acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); - printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", - hc->ec, hc->offset, hc->query_bit); + dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n", + hc->offset, hc->query_bit); return 0; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 602f8ff212f2..2f2f50322ffb 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device) case ACPI_BUS_TYPE_SLEEP_BUTTON: strcpy(device->pnp.bus_id, "SLPF"); break; + case ACPI_BUS_TYPE_ECDT_EC: + strcpy(device->pnp.bus_id, "ECDT"); + break; default: acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); /* Clean up trailing underscores (if any) */ @@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, case ACPI_BUS_TYPE_SLEEP_BUTTON: acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF); break; + case ACPI_BUS_TYPE_ECDT_EC: + acpi_add_id(pnp, ACPI_ECDT_HID); + break; } } @@ -2049,6 +2055,21 @@ void acpi_bus_trim(struct acpi_device *adev) } EXPORT_SYMBOL_GPL(acpi_bus_trim); +int acpi_bus_register_early_device(int type) +{ + struct acpi_device *device = NULL; + int result; + + result = acpi_add_single_object(&device, NULL, + type, ACPI_STA_DEFAULT); + if (result) + return result; + + device->flags.match_driver = true; + return device_attach(&device->dev); +} +EXPORT_SYMBOL_GPL(acpi_bus_register_early_device); + static int acpi_bus_scan_fixed(void) { int result = 0; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 0a9e5979aaa9..93814bd7ae86 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -736,16 +736,17 @@ bool acpi_dev_found(const char *hid) } EXPORT_SYMBOL(acpi_dev_found); -struct acpi_dev_present_info { +struct acpi_dev_match_info { + const char *dev_name; struct acpi_device_id hid[2]; const char *uid; s64 hrv; }; -static int acpi_dev_present_cb(struct device *dev, void *data) +static int acpi_dev_match_cb(struct device *dev, void *data) { struct acpi_device *adev = to_acpi_device(dev); - struct acpi_dev_present_info *match = data; + struct acpi_dev_match_info *match = data; unsigned long long hrv; acpi_status status; @@ -756,6 +757,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data) strcmp(adev->pnp.unique_id, match->uid))) return 0; + match->dev_name = acpi_dev_name(adev); + if (match->hrv == -1) return 1; @@ -788,20 +791,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data) */ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) { - struct acpi_dev_present_info match = {}; + struct acpi_dev_match_info match = {}; struct device *dev; strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); match.uid = uid; match.hrv = hrv; - dev = bus_find_device(&acpi_bus_type, NULL, &match, - acpi_dev_present_cb); - + dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); return !!dev; } 
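/*
 * Illustrative usage (the HID below is a made-up placeholder):
 * acpi_dev_present() keeps its boolean semantics, while the new
 * acpi_dev_get_first_match_name() added below returns the name of the
 * first matching ACPI device instead:
 *
 *	if (acpi_dev_present("ABCD0001", NULL, -1))
 *		pr_info("a device with _HID ABCD0001 is present\n");
 *
 *	const char *name = acpi_dev_get_first_match_name("ABCD0001", NULL, -1);
 */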
EXPORT_SYMBOL(acpi_dev_present); +/** + * acpi_dev_get_first_match_name - Return name of first match of ACPI device + * @hid: Hardware ID of the device. + * @uid: Unique ID of the device, pass NULL to not check _UID + * @hrv: Hardware Revision of the device, pass -1 to not check _HRV + * + * Return device name if a matching device was present + * at the moment of invocation, or NULL otherwise. + * + * See additional information in acpi_dev_present() as well. + */ +const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +{ + struct acpi_dev_match_info match = {}; + struct device *dev; + + strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); + match.uid = uid; + match.hrv = hrv; + + dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); + return dev ? match.dev_name : NULL; +} +EXPORT_SYMBOL(acpi_dev_get_first_match_name); + /* * acpi_backlight= handling, this is done here rather then in video_detect.c * because __setup cannot be used in modules. diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig new file mode 100644 index 000000000000..9fc4cae04a56 --- /dev/null +++ b/drivers/acrn/Kconfig @@ -0,0 +1,21 @@ +config ACRN_SHARED_BUFFER + bool "Intel ACRN SHARED BUFFER" + depends on ACRN_VHM + ---help--- + Ring buffer shared between ACRN Hypervisor and its SOS. + Help ACRN performance profiling. + +config ACRN_TRACE + tristate "Intel ACRN Hypervisor Trace support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel, or m to build + it as a module. + +config ACRN_HVLOG + bool "Intel ACRN Hypervisor Logmsg support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor log. + You can say y to build it into the kernel. diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile new file mode 100644 index 000000000000..0a157712aed6 --- /dev/null +++ b/drivers/acrn/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o +obj-$(CONFIG_ACRN_TRACE) += trace.o +obj-$(CONFIG_ACRN_HVLOG) += hvlog.o diff --git a/drivers/acrn/hvlog.c b/drivers/acrn/hvlog.c new file mode 100644 index 000000000000..ed1ab7919a62 --- /dev/null +++ b/drivers/acrn/hvlog.c @@ -0,0 +1,432 @@ +/* + * ACRN Hypervisor logmsg + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Li Fei + * + */ +#define pr_fmt(fmt) "ACRN HVLog: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sbuf.h" + +#define LOG_ENTRY_SIZE 80 +#define PCPU_NRS 4 + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define foreach_hvlog_type(idx, hvlog_type) \ + for ((idx) = 0; (idx) < (hvlog_type); (idx)++) + +enum sbuf_hvlog_index { + ACRN_CURRNET_HVLOG = 0, + ACRN_LAST_HVLOG, + ACRN_HVLOG_TYPE +}; + +struct acrn_hvlog { + struct miscdevice miscdev; + shared_buf_t *sbuf; + atomic_t open_cnt; + int pcpu_num; +}; + +static unsigned long long hvlog_buf_size; +static unsigned long long hvlog_buf_base; + +static int __init early_hvlog(char *p) +{ + int ret; + + pr_debug("%s(%s)\n", __func__, p); + hvlog_buf_size = memparse(p, &p); + if (*p != '@') + return 0; + hvlog_buf_base = memparse(p + 1, &p); + + if (!!hvlog_buf_base && !!hvlog_buf_size) { + ret = memblock_reserve(hvlog_buf_base, hvlog_buf_size); + if (ret) { + pr_err("%s: Error reserving hvlog memblock\n", + __func__); + hvlog_buf_base = 0; + hvlog_buf_size = 0; + return ret; + } + } + + return 0; +} +early_param("hvlog", early_hvlog); + + +static inline shared_buf_t *hvlog_mark_unread(shared_buf_t *sbuf) +{ + /* sbuf must point to valid data. + * clear the lowest bit in the magic to indicate that + * the sbuf point to the last boot valid data, we should + * read it later. 
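+ * Since sbuf_check_valid() requires an exact SBUF_MAGIC match, the
+ * cleared bit also keeps the same region from being picked up as a
+ * valid "last boot" buffer once more on the following boot.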
+ */ + if (sbuf != NULL) + sbuf->magic &= ~1; + + return sbuf; +} + +static int hvlog_open(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = container_of(filp->private_data, + struct acrn_hvlog, miscdev); + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + /* More than one reader at the same time could get data messed up */ + if (atomic_cmpxchg(&acrn_hvlog->open_cnt, 0, 1) != 0) + return -EBUSY; + + filp->private_data = acrn_hvlog; + + return 0; +} + +static int hvlog_release(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + atomic_dec(&acrn_hvlog->open_cnt); + filp->private_data = NULL; + + return 0; +} + +static ssize_t hvlog_read(struct file *filp, char __user *buf, + size_t count, loff_t *offset) +{ + char data[LOG_ENTRY_SIZE]; + struct acrn_hvlog *acrn_hvlog; + int ret; + + acrn_hvlog = (struct acrn_hvlog *)filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + if (acrn_hvlog->sbuf != NULL) { + ret = sbuf_get(acrn_hvlog->sbuf, (uint8_t *)&data); + if (ret > 0) { + if (copy_to_user(buf, &data, ret)) + return -EFAULT; + } + + return ret; + } + + return 0; +} + +static const struct file_operations hvlog_fops = { + .owner = THIS_MODULE, + .open = hvlog_open, + .release = hvlog_release, + .read = hvlog_read, +}; + +static struct acrn_hvlog hvlog_devs[ACRN_HVLOG_TYPE][PCPU_NRS] = { + [ACRN_CURRNET_HVLOG] = { + { + .miscdev = { + .name = "acrn_hvlog_cur_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 0, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 1, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 2, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 3, + }, + }, + [ACRN_LAST_HVLOG] = { + { + .miscdev = { + .name = "acrn_hvlog_last_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 0, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 1, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 2, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 3, + }, + } +}; + +static int __init acrn_hvlog_init(void) +{ + int ret = 0; + int i, j, idx; + uint32_t pcpu_id; + uint64_t logbuf_base0; + uint64_t logbuf_base1; + uint64_t logbuf_size; + uint32_t ele_size; + uint32_t ele_num; + uint32_t size; + bool sbuf_constructed = false; + + shared_buf_t *sbuf0[PCPU_NRS]; + shared_buf_t *sbuf1[PCPU_NRS]; + + pr_info("%s\n", __func__); + if (!hvlog_buf_base || !hvlog_buf_size) { + pr_warn("no fixed memory reserve for hvlog.\n"); + return 0; + } + + logbuf_base0 = hvlog_buf_base; + 
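+	/*
+	 * The reserved region comes from the "hvlog=" early parameter, e.g.
+	 * booting with "hvlog=2M@0x6de00000" (values purely illustrative)
+	 * reserves 2 MiB at that physical address. The region is split in
+	 * half below: whichever half still holds valid buffers from the
+	 * previous boot is exposed through the acrn_hvlog_last_* devices,
+	 * while the other half is reconstructed for the current boot
+	 * (acrn_hvlog_cur_*), each half being divided into per-CPU slices
+	 * of logbuf_size / PCPU_NRS bytes.
+	 */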
logbuf_size = (hvlog_buf_size >> 1); + logbuf_base1 = hvlog_buf_base + logbuf_size; + + size = (logbuf_size / PCPU_NRS); + ele_size = LOG_ENTRY_SIZE; + ele_num = (size - SBUF_HEAD_SIZE) / ele_size; + + foreach_cpu(pcpu_id, PCPU_NRS) { + sbuf0[pcpu_id] = sbuf_check_valid(ele_num, ele_size, + logbuf_base0 + size * pcpu_id); + sbuf1[pcpu_id] = sbuf_check_valid(ele_num, ele_size, + logbuf_base1 + size * pcpu_id); + } + + foreach_cpu(pcpu_id, PCPU_NRS) { + if (sbuf0[pcpu_id] == NULL) + continue; + + foreach_cpu(pcpu_id, PCPU_NRS) { + hvlog_devs[ACRN_LAST_HVLOG][pcpu_id].sbuf = + hvlog_mark_unread(sbuf0[pcpu_id]); + hvlog_devs[ACRN_CURRNET_HVLOG][pcpu_id].sbuf = + sbuf_construct(ele_num, ele_size, + logbuf_base1 + size * pcpu_id); + } + sbuf_constructed = true; + } + + if (sbuf_constructed == false) { + foreach_cpu(pcpu_id, PCPU_NRS) { + if (sbuf1[pcpu_id] == NULL) + continue; + + foreach_cpu(pcpu_id, PCPU_NRS) { + hvlog_devs[ACRN_LAST_HVLOG][pcpu_id].sbuf = + hvlog_mark_unread(sbuf1[pcpu_id]); + } + } + foreach_cpu(pcpu_id, PCPU_NRS) { + hvlog_devs[ACRN_CURRNET_HVLOG][pcpu_id].sbuf = + sbuf_construct(ele_num, ele_size, + logbuf_base0 + size * pcpu_id); + } + sbuf_constructed = true; + } + + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(pcpu_id, PCPU_NRS) { + ret = sbuf_share_setup(pcpu_id, ACRN_HVLOG, + hvlog_devs[idx][pcpu_id].sbuf); + if (ret < 0) { + pr_err("Failed to setup %s, errno %d\n", + hvlog_devs[idx][pcpu_id].miscdev.name, ret); + goto setup_err; + } + } + } + + foreach_hvlog_type(idx, ACRN_HVLOG_TYPE) { + foreach_cpu(pcpu_id, PCPU_NRS) { + atomic_set(&hvlog_devs[idx][pcpu_id].open_cnt, 0); + + ret = misc_register( + &hvlog_devs[idx][pcpu_id].miscdev); + if (ret < 0) { + pr_err("Failed to register %s, errno %d\n", + hvlog_devs[idx][pcpu_id].miscdev.name, ret); + goto reg_err; + } + } + } + + return 0; + +reg_err: + foreach_hvlog_type(i, idx) { + foreach_cpu(j, PCPU_NRS) { + misc_deregister(&hvlog_devs[i][j].miscdev); + } + } + + foreach_cpu(j, pcpu_id) { + misc_deregister(&hvlog_devs[idx][j].miscdev); + } + + pcpu_id = PCPU_NRS; +setup_err: + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(j, pcpu_id) { + sbuf_share_setup(j, ACRN_HVLOG, 0); + sbuf_deconstruct(hvlog_devs[idx][j].sbuf); + } + } + + return ret; +} + +static void __exit acrn_hvlog_exit(void) +{ + int idx; + uint32_t pcpu_id; + + pr_info("%s\n", __func__); + + foreach_hvlog_type(idx, ACRN_HVLOG_TYPE) { + foreach_cpu(pcpu_id, PCPU_NRS) { + misc_deregister(&hvlog_devs[idx][pcpu_id].miscdev); + } + } + + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(pcpu_id, PCPU_NRS) { + sbuf_share_setup(pcpu_id, ACRN_HVLOG, 0); + sbuf_deconstruct(hvlog_devs[idx][pcpu_id].sbuf); + } + } +} + +module_init(acrn_hvlog_init); +module_exit(acrn_hvlog_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Logmsg"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c new file mode 100644 index 000000000000..b51ee04e12fa --- /dev/null +++ b/drivers/acrn/sbuf.c @@ -0,0 +1,241 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Li Fei + * + */ + +#define pr_fmt(fmt) "SBuf: " fmt + +#include +#include +#include +#include +#include +#include "sbuf.h" + +static inline bool sbuf_is_empty(shared_buf_t *sbuf) +{ + return (sbuf->head == sbuf->tail); +} + +static inline uint32_t sbuf_next_ptr(uint32_t pos, + uint32_t span, uint32_t scope) +{ + pos += span; + pos = (pos >= scope) ? 
(pos - scope) : pos; + return pos; +} + +static inline uint32_t sbuf_calculate_allocate_size(uint32_t ele_num, + uint32_t ele_size) +{ + uint64_t sbuf_allocate_size; + + sbuf_allocate_size = ele_num * ele_size; + sbuf_allocate_size += SBUF_HEAD_SIZE; + if (sbuf_allocate_size > SBUF_MAX_SIZE) { + pr_err("num=0x%x, size=0x%x exceed 0x%llx!\n", + ele_num, ele_size, SBUF_MAX_SIZE); + return 0; + } + + /* align to PAGE_SIZE */ + return (sbuf_allocate_size + PAGE_SIZE - 1) & PAGE_MASK; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size) +{ + shared_buf_t *sbuf; + struct page *page; + uint32_t sbuf_allocate_size; + + if (!ele_num || !ele_size) { + pr_err("invalid parameter %s!\n", __func__); + return NULL; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(ele_num, ele_size); + if (!sbuf_allocate_size) + return NULL; + + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(sbuf_allocate_size)); + if (page == NULL) { + pr_err("failed to alloc pages!\n"); + return NULL; + } + + sbuf = phys_to_virt(page_to_phys(page)); + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + sbuf->magic = SBUF_MAGIC; + pr_info("ele_num=0x%x, ele_size=0x%x allocated!\n", + ele_num, ele_size); + return sbuf; +} +EXPORT_SYMBOL(sbuf_allocate); + +void sbuf_free(shared_buf_t *sbuf) +{ + uint32_t sbuf_allocate_size; + + if ((sbuf == NULL) || sbuf->magic != SBUF_MAGIC) { + pr_err("invalid parameter %s\n", __func__); + return; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(sbuf->ele_num, + sbuf->ele_size); + if (!sbuf_allocate_size) + return; + + sbuf->magic = 0; + __free_pages((struct page *)virt_to_page(sbuf), + get_order(sbuf_allocate_size)); +} +EXPORT_SYMBOL(sbuf_free); + +int sbuf_get(shared_buf_t *sbuf, uint8_t *data) +{ + const void *from; + + if ((sbuf == NULL) || (data == NULL)) + return -EINVAL; + + if (sbuf_is_empty(sbuf)) { + /* no data available */ + return 0; + } + + from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; + + memcpy(data, from, sbuf->ele_size); + + sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size); + + return sbuf->ele_size; +} +EXPORT_SYMBOL(sbuf_get); + +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) +{ + struct sbuf_setup_param ssp; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + ssp.pcpu_id = pcpu_id; + ssp.sbuf_id = sbuf_id; + + if (!sbuf) { + ssp.gpa = 0; + } else { + BUG_ON(!virt_addr_valid(sbuf)); + ssp.gpa = virt_to_phys(sbuf); + } + pr_info("setup phys add = 0x%llx\n", ssp.gpa); + + return hcall_setup_sbuf(virt_to_phys(&ssp)); +} +EXPORT_SYMBOL(sbuf_share_setup); + +shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + if ((sbuf->magic == SBUF_MAGIC) && + (sbuf->ele_num == ele_num) && + (sbuf->ele_size == ele_size)) { + return sbuf; + } + + return NULL; +} +EXPORT_SYMBOL(sbuf_check_valid); + +shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + memset(sbuf, 0, SBUF_HEAD_SIZE); + sbuf->magic = SBUF_MAGIC; + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + pr_info("construct sbuf at 0x%llx.\n", paddr); + return sbuf; +} 
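+
+/*
+ * Consumer-side usage as sketched from the hvlog.c caller (illustrative):
+ * reuse a buffer left over from the previous boot if its header still
+ * matches, otherwise construct a fresh one, then hand its physical address
+ * to the hypervisor and drain it one element at a time:
+ *
+ *	sbuf = sbuf_check_valid(ele_num, ele_size, paddr);
+ *	if (!sbuf)
+ *		sbuf = sbuf_construct(ele_num, ele_size, paddr);
+ *	sbuf_share_setup(pcpu_id, ACRN_HVLOG, sbuf);
+ *	ret = sbuf_get(sbuf, data);    (returns ele_size, or 0 when empty)
+ */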
+EXPORT_SYMBOL(sbuf_construct); + +void sbuf_deconstruct(shared_buf_t *sbuf) +{ + if (sbuf == NULL) + return; + + sbuf->magic = 0; +} +EXPORT_SYMBOL(sbuf_deconstruct); diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h new file mode 100644 index 000000000000..4fae7a258bce --- /dev/null +++ b/drivers/acrn/sbuf.h @@ -0,0 +1,129 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Li Fei + * + */ + +#ifndef SHARED_BUF_H +#define SHARED_BUF_H + +#include + + +#define SBUF_MAGIC 0x5aa57aa71aa13aa3 +#define SBUF_MAX_SIZE (1ULL << 22) +#define SBUF_HEAD_SIZE 64 + +/* sbuf flags */ +#define OVERRUN_CNT_EN (1ULL << 0) /* whether overrun counting is enabled */ +#define OVERWRITE_EN (1ULL << 1) /* whether overwrite is enabled */ + +enum sbuf_type { + ACRN_TRACE, + ACRN_HVLOG, + ACRN_SBUF_TYPE_MAX, +}; +/** + * (sbuf) head + buf (store (ele_num - 1) elements at most) + * buffer empty: tail == head + * buffer full: (tail + ele_size) % size == head + * + * Base of memory for elements + * | + * | + * --------------------------------------------------------------------------------------- + * | shared_buf_t | raw data (ele_size)| raw date (ele_size) | ... 
| raw data (ele_size) | + * --------------------------------------------------------------------------------------- + * | + * | + * shared_buf_t *buf + */ + +/* Make sure sizeof(shared_buf_t) == SBUF_HEAD_SIZE */ +typedef struct shared_buf { + uint64_t magic; + uint32_t ele_num; /* number of elements */ + uint32_t ele_size; /* sizeof of elements */ + uint32_t head; /* offset from base, to read */ + uint32_t tail; /* offset from base, to write */ + uint64_t flags; + uint32_t overrun_cnt; /* count of overrun */ + uint32_t size; /* ele_num * ele_size */ + uint32_t padding[6]; +} ____cacheline_aligned shared_buf_t; + +static inline void sbuf_clear_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags &= ~flags; +} + +static inline void sbuf_set_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags = flags; +} + +static inline void sbuf_add_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags |= flags; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); +void sbuf_free(shared_buf_t *sbuf); +int sbuf_get(shared_buf_t *sbuf, uint8_t *data); +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); +shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t gpa); +shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t gpa); +void sbuf_deconstruct(shared_buf_t *sbuf); + +#endif /* SHARED_BUF_H */ diff --git a/drivers/acrn/trace.c b/drivers/acrn/trace.c new file mode 100644 index 000000000000..2a9d87e39c5e --- /dev/null +++ b/drivers/acrn/trace.c @@ -0,0 +1,299 @@ +/* +* +* ACRN Trace module +* +* This file is provided under a dual BSD/GPLv2 license.  When using or +* redistributing this file, you may do so under either license. +* +* GPL LICENSE SUMMARY +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +* General Public License for more details. +* +* Contact Information: Yan, Like +* +* BSD LICENSE +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +*   * Redistributions of source code must retain the above copyright +*     notice, this list of conditions and the following disclaimer. +*   * Redistributions in binary form must reproduce the above copyright +*     notice, this list of conditions and the following disclaimer in +*     the documentation and/or other materials provided with the +*     distribution. +*   * Neither the name of Intel Corporation nor the names of its +*     contributors may be used to endorse or promote products derived +*     from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* Like Yan +* +*/ + +#define pr_fmt(fmt) "ACRNTrace: " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#include "sbuf.h" + + +#define TRACE_SBUF_SIZE (4 * 1024 * 1024) +#define TRACE_ELEMENT_SIZE 32 /* byte */ +#define TRACE_ELEMENT_NUM ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / \ + TRACE_ELEMENT_SIZE) + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define MAX_NR_CPUS 4 +/* actual physical cpu number, initialized by module init */ +static int pcpu_num; + +static int nr_cpus = MAX_NR_CPUS; +module_param(nr_cpus, int, S_IRUSR | S_IWUSR); + +static atomic_t open_cnt[MAX_NR_CPUS]; +static shared_buf_t *sbuf_per_cpu[MAX_NR_CPUS]; + +static inline int get_id_from_devname(struct file *filep) +{ + uint32_t cpuid; + int err; + char id_str[16]; + struct miscdevice *dev = filep->private_data; + + strncpy(id_str, (void *)dev->name + sizeof("acrn_trace_") - 1, 16); + id_str[15] = '\0'; + err = kstrtoul(&id_str[0], 10, (unsigned long *)&cpuid); + + if (err) + return err; + + if (cpuid >= pcpu_num) { + pr_err("%s, failed to get cpuid, cpuid %d\n", + __func__, cpuid); + return -1; + } + + return cpuid; +} + +/************************************************************************ + * + * file_operations functions + * + ***********************************************************************/ +static int trace_open(struct inode *inode, struct file *filep) +{ + int cpuid = get_id_from_devname(filep); + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + /* More than one reader at the same time could get data messed up */ + if (atomic_read(&open_cnt[cpuid])) + return -EBUSY; + + atomic_inc(&open_cnt[cpuid]); + + return 0; +} + +static int trace_release(struct inode *inode, struct file *filep) +{ + int cpuid = get_id_from_devname(filep); + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + atomic_dec(&open_cnt[cpuid]); + + return 0; +} + +static int trace_mmap(struct file *filep, struct vm_area_struct *vma) +{ + int cpuid = get_id_from_devname(filep); + phys_addr_t paddr; + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + BUG_ON(!virt_addr_valid(sbuf_per_cpu[cpuid])); + paddr = virt_to_phys(sbuf_per_cpu[cpuid]); + + if (remap_pfn_range(vma, vma->vm_start, + paddr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + pr_err("Failed to mmap sbuf for cpu%d\n", cpuid); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations trace_fops = { + .owner = THIS_MODULE, + .open = trace_open, + .release = trace_release, + .mmap = trace_mmap, +}; + +static struct miscdevice trace_dev0 = { + .name = "acrn_trace_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice trace_dev1 = { + .name = "acrn_trace_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice trace_dev2 = { + .name = "acrn_trace_2", + .minor = 
MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice trace_dev3 = { + .name = "acrn_trace_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice *trace_devs[4] = { + &trace_dev0, + &trace_dev1, + &trace_dev2, + &trace_dev3, +}; + +/* + * acrn_trace_init() + */ +static int __init acrn_trace_init(void) +{ + int ret = 0; + int i, cpu; + + /* TBD: we could get the native cpu number by hypercall later */ + pr_info("%s, cpu_num %d\n", __func__, nr_cpus); + if (nr_cpus > MAX_NR_CPUS) { + pr_err("nr_cpus %d exceed MAX_NR_CPUS %d !\n", + nr_cpus, MAX_NR_CPUS); + return -EINVAL; + } + pcpu_num = nr_cpus; + + foreach_cpu(cpu, pcpu_num) { + /* allocate shared_buf */ + sbuf_per_cpu[cpu] = sbuf_allocate(TRACE_ELEMENT_NUM, + TRACE_ELEMENT_SIZE); + if (!sbuf_per_cpu[cpu]) { + pr_err("Failed alloc SBuf, cpuid %d\n", cpu); + ret = -ENOMEM; + goto out_free; + } + } + + foreach_cpu(cpu, pcpu_num) { + ret = sbuf_share_setup(cpu, ACRN_TRACE, sbuf_per_cpu[cpu]); + if (ret < 0) { + pr_err("Failed to setup SBuf, cpuid %d\n", cpu); + goto out_sbuf; + } + } + + foreach_cpu(cpu, pcpu_num) { + ret = misc_register(trace_devs[cpu]); + if (ret < 0) { + pr_err("Failed to register acrn_trace_%d, errno %d\n", + cpu, ret); + goto out_dereg; + } + } + + return ret; + +out_dereg: + for (i = --cpu; i >= 0; i--) + misc_deregister(trace_devs[i]); + cpu = pcpu_num; + +out_sbuf: + for (i = --cpu; i >= 0; i--) + sbuf_share_setup(i, ACRN_TRACE, NULL); + cpu = pcpu_num; + +out_free: + for (i = --cpu; i >= 0; i--) + sbuf_free(sbuf_per_cpu[i]); + + return ret; +} + +/* + * acrn_trace_exit() + */ +static void __exit acrn_trace_exit(void) +{ + int cpu; + + pr_info("%s, cpu_num %d\n", __func__, pcpu_num); + + foreach_cpu(cpu, pcpu_num) { + /* deregister devices */ + misc_deregister(trace_devs[cpu]); + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_TRACE, NULL); + + /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ + sbuf_free(sbuf_per_cpu[cpu]); + } +} + +module_init(acrn_trace_init); +module_exit(acrn_trace_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Trace"); +MODULE_VERSION("0.1"); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index e0f74ddc22b7..88313da1407e 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -83,7 +83,8 @@ static ssize_t driver_override_store(struct device *_dev, struct amba_device *dev = to_amba_device(_dev); char *driver_override, *old = dev->driver_override, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index fddf76ef5bd6..ac976864b3d1 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -77,6 +77,7 @@ #endif #include +#include #include "binder_alloc.h" #include "binder_trace.h" @@ -351,10 +352,14 @@ struct binder_error { * and by @lock) * @has_async_transaction: async transaction to node in progress * (protected by @lock) + * @sched_policy: minimum scheduling policy for node + * (invariant after initialized) * @accept_fds: file descriptor operations supported for node * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) + * @inherit_rt: inherit RT scheduling policy from caller + * (invariant after initialized) * @async_todo: list of async work 
items * (protected by @proc->inner_lock) * @@ -390,6 +395,8 @@ struct binder_node { /* * invariant after initialization */ + u8 sched_policy:2; + u8 inherit_rt:1; u8 accept_fds:1; u8 min_priority; }; @@ -463,6 +470,22 @@ enum binder_deferred_state { BINDER_DEFERRED_RELEASE = 0x04, }; +/** + * struct binder_priority - scheduler policy and priority + * @sched_policy scheduler policy + * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT + * + * The binder driver supports inheriting the following scheduler policies: + * SCHED_NORMAL + * SCHED_BATCH + * SCHED_FIFO + * SCHED_RR + */ +struct binder_priority { + unsigned int sched_policy; + int prio; +}; + /** * struct binder_proc - binder process bookkeeping * @proc_node: element for binder_procs list @@ -482,7 +505,8 @@ enum binder_deferred_state { * @tsk task_struct for group_leader of process * (invariant after initialized) * @files files_struct for process - * (invariant after initialized) + * (protected by @files_lock) + * @files_lock mutex to protect @files * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -492,8 +516,6 @@ enum binder_deferred_state { * (protected by @inner_lock) * @todo: list of work for this process * (protected by @inner_lock) - * @wait: wait queue head to wait for proc work - * (invariant after initialized) * @stats: per-process binder statistics * (atomics, no lock needed) * @delivered_death: list of delivered death notification @@ -530,19 +552,19 @@ struct binder_proc { int pid; struct task_struct *tsk; struct files_struct *files; + struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; struct list_head todo; - wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; int requested_threads; int requested_threads_started; int tmp_ref; - long default_priority; + struct binder_priority default_priority; struct dentry *debugfs_entry; struct binder_alloc alloc; struct binder_context *context; @@ -577,6 +599,8 @@ enum { * (protected by @proc->inner_lock) * @todo: list of work to do for this thread * (protected by @proc->inner_lock) + * @process_todo: whether work in @todo should be processed + * (protected by @proc->inner_lock) * @return_error: transaction errors reported by this thread * (only accessed by this thread) * @reply_error: transaction errors reported by target thread @@ -590,6 +614,7 @@ enum { * @is_dead: thread is dead and awaiting free * when outstanding transactions are cleaned up * (protected by @proc->inner_lock) + * @task: struct task_struct for this thread * * Bookkeeping structure for binder threads. 
*/ @@ -602,12 +627,14 @@ struct binder_thread { bool looper_need_return; /* can be written by other thread */ struct binder_transaction *transaction_stack; struct list_head todo; + bool process_todo; struct binder_error return_error; struct binder_error reply_error; wait_queue_head_t wait; struct binder_stats stats; atomic_t tmp_ref; bool is_dead; + struct task_struct *task; }; struct binder_transaction { @@ -624,8 +651,9 @@ struct binder_transaction { struct binder_buffer *buffer; unsigned int code; unsigned int flags; - long priority; - long saved_priority; + struct binder_priority priority; + struct binder_priority saved_priority; + bool set_priority_called; kuid_t sender_euid; /** * @lock: protects @from, @to_proc, and @to_thread @@ -787,6 +815,16 @@ static bool binder_worklist_empty(struct binder_proc *proc, return ret; } +/** + * binder_enqueue_work_ilocked() - Add an item to the work list + * @work: struct binder_work to add to list + * @target_list: list to add work to + * + * Adds the work to the specified list. Asserts that work + * is not already on a list. + * + * Requires the proc->inner_lock to be held. + */ static void binder_enqueue_work_ilocked(struct binder_work *work, struct list_head *target_list) @@ -797,22 +835,56 @@ binder_enqueue_work_ilocked(struct binder_work *work, } /** - * binder_enqueue_work() - Add an item to the work list - * @proc: binder_proc associated with list + * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work + * @thread: thread to queue work to * @work: struct binder_work to add to list - * @target_list: list to add work to * - * Adds the work to the specified list. Asserts that work - * is not already on a list. + * Adds the work to the todo list of the thread. Doesn't set the process_todo + * flag, which means that (if it wasn't already set) the thread will go to + * sleep without handling this work when it calls read. + * + * Requires the proc->inner_lock to be held. */ static void -binder_enqueue_work(struct binder_proc *proc, - struct binder_work *work, - struct list_head *target_list) +binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) { - binder_inner_proc_lock(proc); - binder_enqueue_work_ilocked(work, target_list); - binder_inner_proc_unlock(proc); + binder_enqueue_work_ilocked(work, &thread->todo); +} + +/** + * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list + * @thread: thread to queue work to + * @work: struct binder_work to add to list + * + * Adds the work to the todo list of the thread, and enables processing + * of the todo queue. + * + * Requires the proc->inner_lock to be held. + */ +static void +binder_enqueue_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) +{ + binder_enqueue_work_ilocked(work, &thread->todo); + thread->process_todo = true; +} + +/** + * binder_enqueue_thread_work() - Add an item to the thread work list + * @thread: thread to queue work to + * @work: struct binder_work to add to list + * + * Adds the work to the todo list of the thread, and enables processing + * of the todo queue. 
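 *
 * Contrast this with binder_enqueue_deferred_thread_work_ilocked() above:
 * the deferred variant leaves process_todo untouched, which the synchronous
 * transaction path uses to defer BR_TRANSACTION_COMPLETE until the reply
 * (or an error) arrives; see the comment added in binder_transaction()
 * further down.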
+ */ +static void +binder_enqueue_thread_work(struct binder_thread *thread, + struct binder_work *work) +{ + binder_inner_proc_lock(thread->proc); + binder_enqueue_thread_work_ilocked(thread, work); + binder_inner_proc_unlock(thread->proc); } static void @@ -877,20 +949,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { - struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; + int ret; - if (files == NULL) - return -ESRCH; - - if (!lock_task_sighand(proc->tsk, &irqs)) - return -EMFILE; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + ret = -ESRCH; + goto err; + } + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - return __alloc_fd(files, 0, rlim_cur, flags); + ret = __alloc_fd(proc->files, 0, rlim_cur, flags); +err: + mutex_unlock(&proc->files_lock); + return ret; } /* @@ -899,8 +977,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { + mutex_lock(&proc->files_lock); if (proc->files) __fd_install(proc->files, fd, file); + mutex_unlock(&proc->files_lock); } /* @@ -910,9 +990,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { int retval; - if (proc->files == NULL) - return -ESRCH; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + retval = -ESRCH; + goto err; + } retval = __close_fd(proc->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || @@ -920,14 +1002,15 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; - +err: + mutex_unlock(&proc->files_lock); return retval; } static bool binder_has_work_ilocked(struct binder_thread *thread, bool do_proc_work) { - return !binder_worklist_empty_ilocked(&thread->todo) || + return thread->process_todo || thread->looper_need_return || (do_proc_work && !binder_worklist_empty_ilocked(&thread->proc->todo)); @@ -1051,22 +1134,145 @@ static void binder_wakeup_proc_ilocked(struct binder_proc *proc) binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); } -static void binder_set_nice(long nice) +static bool is_rt_policy(int policy) { - long min_nice; + return policy == SCHED_FIFO || policy == SCHED_RR; +} - if (can_nice(current, nice)) { - set_user_nice(current, nice); +static bool is_fair_policy(int policy) +{ + return policy == SCHED_NORMAL || policy == SCHED_BATCH; +} + +static bool binder_supported_policy(int policy) +{ + return is_fair_policy(policy) || is_rt_policy(policy); +} + +static int to_userspace_prio(int policy, int kernel_priority) +{ + if (is_fair_policy(policy)) + return PRIO_TO_NICE(kernel_priority); + else + return MAX_USER_RT_PRIO - 1 - kernel_priority; +} + +static int to_kernel_prio(int policy, int user_priority) +{ + if (is_fair_policy(policy)) + return NICE_TO_PRIO(user_priority); + else + return MAX_USER_RT_PRIO - 1 - user_priority; +} + +static void binder_do_set_priority(struct task_struct *task, + struct binder_priority desired, + bool verify) +{ + int priority; /* user-space prio value */ + bool has_cap_nice; + unsigned int policy = desired.sched_policy; + + if (task->policy == policy && task->normal_prio == desired.prio) return; + + 
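/*
 * Illustration of the clamping below, which applies on the verify path for
 * tasks lacking CAP_SYS_NICE. Priorities are first converted to their
 * user-space form: a SCHED_NORMAL/SCHED_BATCH kernel prio of 120 is nice 0
 * (100..139 maps to nice -20..19), while a SCHED_FIFO/SCHED_RR kernel prio
 * of 0 is rtprio 99 (MAX_USER_RT_PRIO - 1). For example, an unprivileged
 * request for SCHED_FIFO rtprio 80 with RLIMIT_RTPRIO set to 50 is capped
 * to rtprio 50, and with RLIMIT_RTPRIO set to 0 it falls back to
 * SCHED_NORMAL at MIN_NICE. Because sched_setscheduler_nocheck() is used
 * further down, these rlimit checks have to be re-applied here instead of
 * letting the scheduler fail the request with -EPERM.
 */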
has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE); + + priority = to_userspace_prio(policy, desired.prio); + + if (verify && is_rt_policy(policy) && !has_cap_nice) { + long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO); + + if (max_rtprio == 0) { + policy = SCHED_NORMAL; + priority = MIN_NICE; + } else if (priority > max_rtprio) { + priority = max_rtprio; + } + } + + if (verify && is_fair_policy(policy) && !has_cap_nice) { + long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE)); + + if (min_nice > MAX_NICE) { + binder_user_error("%d RLIMIT_NICE not set\n", + task->pid); + return; + } else if (priority < min_nice) { + priority = min_nice; + } + } + + if (policy != desired.sched_policy || + to_kernel_prio(policy, priority) != desired.prio) + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "%d: priority %d not allowed, using %d instead\n", + task->pid, desired.prio, + to_kernel_prio(policy, priority)); + + trace_binder_set_priority(task->tgid, task->pid, task->normal_prio, + to_kernel_prio(policy, priority), + desired.prio); + + /* Set the actual priority */ + if (task->policy != policy || is_rt_policy(policy)) { + struct sched_param params; + + params.sched_priority = is_rt_policy(policy) ? priority : 0; + + sched_setscheduler_nocheck(task, + policy | SCHED_RESET_ON_FORK, + ¶ms); } - min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); - binder_debug(BINDER_DEBUG_PRIORITY_CAP, - "%d: nice value %ld not allowed use %ld instead\n", - current->pid, nice, min_nice); - set_user_nice(current, min_nice); - if (min_nice <= MAX_NICE) + if (is_fair_policy(policy)) + set_user_nice(task, priority); +} + +static void binder_set_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ true); +} + +static void binder_restore_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ false); +} + +static void binder_transaction_priority(struct task_struct *task, + struct binder_transaction *t, + struct binder_priority node_prio, + bool inherit_rt) +{ + struct binder_priority desired_prio = t->priority; + + if (t->set_priority_called) return; - binder_user_error("%d RLIMIT_NICE not set\n", current->pid); + + t->set_priority_called = true; + t->saved_priority.sched_policy = task->policy; + t->saved_priority.prio = task->normal_prio; + + if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) { + desired_prio.prio = NICE_TO_PRIO(0); + desired_prio.sched_policy = SCHED_NORMAL; + } + + if (node_prio.prio < t->priority.prio || + (node_prio.prio == t->priority.prio && + node_prio.sched_policy == SCHED_FIFO)) { + /* + * In case the minimum priority on the node is + * higher (lower value), use that priority. If + * the priority is the same, but the node uses + * SCHED_FIFO, prefer SCHED_FIFO, since it can + * run unbounded, unlike SCHED_RR. + */ + desired_prio = node_prio; + } + + binder_set_priority(task, desired_prio); } static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, @@ -1119,6 +1325,7 @@ static struct binder_node *binder_init_node_ilocked( binder_uintptr_t ptr = fp ? fp->binder : 0; binder_uintptr_t cookie = fp ? fp->cookie : 0; __u32 flags = fp ? 
fp->flags : 0; + s8 priority; assert_spin_locked(&proc->inner_lock); @@ -1151,8 +1358,12 @@ static struct binder_node *binder_init_node_ilocked( node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; - node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >> + FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT; + node->min_priority = to_kernel_prio(node->sched_policy, priority); node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -1215,6 +1426,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, node->local_strong_refs++; if (!node->has_strong_ref && target_list) { binder_dequeue_work_ilocked(&node->work); + /* + * Note: this function is the only place where we queue + * directly to a thread->todo without using the + * corresponding binder_enqueue_thread_work() helper + * functions; in this case it's ok to not set the + * process_todo flag, since we know this node work will + * always be followed by other work that starts queue + * processing: in case of synchronous transactions, a + * BR_REPLY or BR_ERROR; in case of oneway + * transactions, a BR_TRANSACTION_COMPLETE. + */ binder_enqueue_work_ilocked(&node->work, target_list); } } else { @@ -1226,6 +1448,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, node->debug_id); return -EINVAL; } + /* + * See comment above + */ binder_enqueue_work_ilocked(&node->work, target_list); } } @@ -1915,13 +2140,19 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_pop_transaction_ilocked(target_thread, t); if (target_thread->reply_error.cmd == BR_OK) { target_thread->reply_error.cmd = error_code; - binder_enqueue_work_ilocked( - &target_thread->reply_error.work, - &target_thread->todo); + binder_enqueue_thread_work_ilocked( + target_thread, + &target_thread->reply_error.work); wake_up_interruptible(&target_thread->wait); } else { - WARN(1, "Unexpected reply error: %u\n", - target_thread->reply_error.cmd); + /* + * Cannot get here for normal operation, but + * we can if multiple synchronous transactions + * are sent without blocking for responses. + * Just ignore the 2nd error in this case. + */ + pr_warn("Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); @@ -1947,6 +2178,26 @@ static void binder_send_failed_reply(struct binder_transaction *t, } } +/** + * binder_cleanup_transaction() - cleans up undelivered transaction + * @t: transaction that needs to be cleaned up + * @reason: reason the transaction wasn't delivered + * @error_code: error to return to caller (if synchronous call) + */ +static void binder_cleanup_transaction(struct binder_transaction *t, + const char *reason, + uint32_t error_code) +{ + if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { + binder_send_failed_reply(t, error_code); + } else { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered transaction %d, %s\n", + t->debug_id, reason); + binder_free_transaction(t); + } +} + /** * binder_validate_object() - checks for a valid metadata object in a buffer. * @buffer: binder_buffer that we're parsing. 
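/*
 * Sketch of the intended binder_cleanup_transaction() call pattern, based on
 * the call sites added later in this patch (binder_thread_read() and
 * binder_release_work()): failed delivery and dead processes both funnel
 * through the helper rather than open-coding the choice between
 * binder_send_failed_reply() and binder_free_transaction(), e.g.:
 *
 *	if (copy_to_user(ptr, &tr, sizeof(tr))) {
 *		binder_cleanup_transaction(t, "copy_to_user failed",
 *					   BR_FAILED_REPLY);
 *		return -EFAULT;
 *	}
 */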
@@ -2102,7 +2353,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %p\n", + "%d buffer release %d, size %zd-%zd, failed at %pK\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); @@ -2536,18 +2787,20 @@ static bool binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, struct binder_thread *thread) { - struct list_head *target_list = NULL; struct binder_node *node = t->buffer->target_node; + struct binder_priority node_prio; bool oneway = !!(t->flags & TF_ONE_WAY); - bool wakeup = true; + bool pending_async = false; BUG_ON(!node); binder_node_lock(node); + node_prio.prio = node->min_priority; + node_prio.sched_policy = node->sched_policy; + if (oneway) { BUG_ON(thread); if (node->has_async_transaction) { - target_list = &node->async_todo; - wakeup = false; + pending_async = true; } else { node->has_async_transaction = 1; } @@ -2561,19 +2814,20 @@ static bool binder_proc_transaction(struct binder_transaction *t, return false; } - if (!thread && !target_list) + if (!thread && !pending_async) thread = binder_select_thread_ilocked(proc); - if (thread) - target_list = &thread->todo; - else if (!target_list) - target_list = &proc->todo; - else - BUG_ON(target_list != &node->async_todo); - - binder_enqueue_work_ilocked(&t->work, target_list); + if (thread) { + binder_transaction_priority(thread->task, t, node_prio, + node->inherit_rt); + binder_enqueue_thread_work_ilocked(thread, &t->work); + } else if (!pending_async) { + binder_enqueue_work_ilocked(&t->work, &proc->todo); + } else { + binder_enqueue_work_ilocked(&t->work, &node->async_todo); + } - if (wakeup) + if (!pending_async) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); binder_inner_proc_unlock(proc); @@ -2688,7 +2942,6 @@ static void binder_transaction(struct binder_proc *proc, } thread->transaction_stack = in_reply_to->to_parent; binder_inner_proc_unlock(proc); - binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { return_error = BR_DEAD_REPLY; @@ -2853,7 +3106,15 @@ static void binder_transaction(struct binder_proc *proc, t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; - t->priority = task_nice(current); + if (!(t->flags & TF_ONE_WAY) && + binder_supported_policy(current->policy)) { + /* Inherit supported policies for synchronous transactions */ + t->priority.sched_policy = current->policy; + t->priority.prio = current->normal_prio; + } else { + /* Otherwise, fall back to the default priority */ + t->priority = target_proc->default_priority; + } trace_binder_transaction(reply, t, target_node); @@ -3068,10 +3329,10 @@ static void binder_transaction(struct binder_proc *proc, } } tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - binder_enqueue_work(proc, tcomplete, &thread->todo); t->work.type = BINDER_WORK_TRANSACTION; if (reply) { + binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); if (target_thread->is_dead) { binder_inner_proc_unlock(target_proc); @@ -3079,13 +3340,22 @@ static void binder_transaction(struct binder_proc *proc, } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); - binder_enqueue_work_ilocked(&t->work, &target_thread->todo); + binder_enqueue_thread_work_ilocked(target_thread, &t->work); 
binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); + binder_restore_priority(current, in_reply_to->saved_priority); binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); + /* + * Defer the TRANSACTION_COMPLETE, so we don't return to + * userspace immediately; this allows the target process to + * immediately start processing this transaction, reducing + * latency. We will then return the TRANSACTION_COMPLETE when + * the target replies (or there is an error). + */ + binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; @@ -3099,6 +3369,7 @@ static void binder_transaction(struct binder_proc *proc, } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); + binder_enqueue_thread_work(thread, tcomplete); if (!binder_proc_transaction(t, target_proc, NULL)) goto err_dead_proc_or_thread; } @@ -3176,16 +3447,13 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_restore_priority(current, in_reply_to->saved_priority); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; - binder_enqueue_work(thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { thread->return_error.cmd = return_error; - binder_enqueue_work(thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work(thread, &thread->return_error.work); } } @@ -3489,10 +3757,9 @@ static int binder_thread_write(struct binder_proc *proc, WARN_ON(thread->return_error.cmd != BR_OK); thread->return_error.cmd = BR_ERROR; - binder_enqueue_work( - thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work( + thread, + &thread->return_error.work); binder_debug( BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", @@ -3572,9 +3839,9 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work_ilocked( - &death->work, - &thread->todo); + binder_enqueue_thread_work_ilocked( + thread, + &death->work); else { binder_enqueue_work_ilocked( &death->work, @@ -3614,7 +3881,7 @@ static int binder_thread_write(struct binder_proc *proc, } } binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", + "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", proc->pid, thread->pid, (u64)cookie, death); if (death == NULL) { @@ -3629,8 +3896,8 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work_ilocked( - &death->work, &thread->todo); + binder_enqueue_thread_work_ilocked( + thread, &death->work); else { binder_enqueue_work_ilocked( &death->work, @@ -3761,7 +4028,7 @@ static int binder_thread_read(struct binder_proc *proc, wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } - binder_set_nice(proc->default_priority); + binder_restore_priority(current, proc->default_priority); } if (non_block) { @@ -3804,6 +4071,8 @@ static int binder_thread_read(struct binder_proc *proc, break; } w = binder_dequeue_work_head_ilocked(list); + if 
(binder_worklist_empty_ilocked(&thread->todo)) + thread->process_todo = false; switch (w->type) { case BINDER_WORK_TRANSACTION: { @@ -3973,16 +4242,14 @@ static int binder_thread_read(struct binder_proc *proc, BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; + struct binder_priority node_prio; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; - t->saved_priority = task_nice(current); - if (t->priority < target_node->min_priority && - !(t->flags & TF_ONE_WAY)) - binder_set_nice(t->priority); - else if (!(t->flags & TF_ONE_WAY) || - t->saved_priority > target_node->min_priority) - binder_set_nice(target_node->min_priority); + node_prio.sched_policy = target_node->sched_policy; + node_prio.prio = target_node->min_priority; + binder_transaction_priority(current, t, node_prio, + target_node->inherit_rt); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; @@ -4015,12 +4282,20 @@ static int binder_thread_read(struct binder_proc *proc, if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "put_user failed", + BR_FAILED_REPLY); + return -EFAULT; } ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) { if (t_from) binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "copy_to_user failed", + BR_FAILED_REPLY); + return -EFAULT; } ptr += sizeof(tr); @@ -4090,15 +4365,9 @@ static void binder_release_work(struct binder_proc *proc, struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); - if (t->buffer->target_node && - !(t->flags & TF_ONE_WAY)) { - binder_send_failed_reply(t, BR_DEAD_REPLY); - } else { - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "undelivered transaction %d\n", - t->debug_id); - binder_free_transaction(t); - } + + binder_cleanup_transaction(t, "process died.", + BR_DEAD_REPLY); } break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( @@ -4158,6 +4427,8 @@ static struct binder_thread *binder_get_thread_ilocked( binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; + get_task_struct(current); + thread->task = current; atomic_set(&thread->tmp_ref, 0); init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); @@ -4208,6 +4479,7 @@ static void binder_free_thread(struct binder_thread *thread) BUG_ON(!list_empty(&thread->todo)); binder_stats_deleted(BINDER_STAT_THREAD); binder_proc_dec_tmpref(thread->proc); + put_task_struct(thread->task); kfree(thread); } @@ -4267,8 +4539,29 @@ static int binder_thread_release(struct binder_proc *proc, if (t) spin_lock(&t->lock); } + + /* + * If this thread used poll, make sure we remove the waitqueue + * from any epoll data structures holding it with POLLFREE. + * waitqueue_active() is safe to use here because we're holding + * the inner lock. + */ + if ((thread->looper & BINDER_LOOPER_STATE_POLL) && + waitqueue_active(&thread->wait)) { + wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + } + binder_inner_proc_unlock(thread->proc); + /* + * This is needed to avoid races between wake_up_poll() above and + * and ep_remove_waitqueue() called for other reasons (eg the epoll file + * descriptor being closed); ep_remove_waitqueue() holds an RCU read + * lock, so we can be sure it's done after calling synchronize_rcu(). 
+ */ + if (thread->looper & BINDER_LOOPER_STATE_POLL) + synchronize_rcu(); + if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(proc, &thread->todo); @@ -4284,6 +4577,8 @@ static unsigned int binder_poll(struct file *filp, bool wait_for_proc_work; thread = binder_get_thread(proc); + if (!thread) + return POLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -4605,7 +4900,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; + mutex_lock(&proc->files_lock); proc->files = get_files_struct(current); + mutex_unlock(&proc->files_lock); return 0; err_bad_arg: @@ -4629,8 +4926,16 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; + mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); - proc->default_priority = task_nice(current); + if (binder_supported_policy(current->policy)) { + proc->default_priority.sched_policy = current->policy; + proc->default_priority.prio = current->normal_prio; + } else { + proc->default_priority.sched_policy = SCHED_NORMAL; + proc->default_priority.prio = NICE_TO_PRIO(0); + } + binder_dev = container_of(filp->private_data, struct binder_device, miscdev); proc->context = &binder_dev->context; @@ -4881,9 +5186,11 @@ static void binder_deferred_func(struct work_struct *work) files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { + mutex_lock(&proc->files_lock); files = proc->files; if (files) proc->files = NULL; + mutex_unlock(&proc->files_lock); } if (defer & BINDER_DEFERRED_FLUSH) @@ -4922,13 +5229,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m, spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, to_proc ? to_proc->pid : 0, t->to_thread ? 
t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); + t->code, t->flags, t->priority.sched_policy, + t->priority.prio, t->need_reply); spin_unlock(&t->lock); if (proc != to_proc) { @@ -4946,7 +5254,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %p\n", + seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, buffer->data); } @@ -5046,8 +5354,9 @@ static void print_binder_node_nilocked(struct seq_file *m, hlist_for_each_entry(ref, &node->refs, node_entry) count++; - seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", + seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, + node->sched_policy, node->min_priority, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count, node->tmp_refs); diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index c2819a3d58a6..ba6d8d23f206 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end, - struct vm_area_struct *vma) + void *start, void *end) { void *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; + struct vm_area_struct *vma = NULL; struct mm_struct *mm = NULL; bool need_mm = false; @@ -215,7 +215,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } } - if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) + if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) mm = alloc->vma_vm_mm; if (mm) { @@ -281,6 +281,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, goto err_vm_insert_page_failed; } + if (index + 1 > alloc->pages_high) + alloc->pages_high = index + 1; + trace_binder_alloc_page_end(alloc, index); /* vm_insert_page does not seem to increment the refcount */ } @@ -437,7 +440,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); if (ret) return ERR_PTR(ret); @@ -478,7 +481,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, err_alloc_buf_struct_failed: binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - end_page_addr, NULL); + end_page_addr); return ERR_PTR(-ENOMEM); } @@ -562,8 +565,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, alloc->pid, buffer->data, prev->data, next ? 
next->data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE, - NULL); + buffer_start_page(buffer) + PAGE_SIZE); } list_del(&buffer->entry); kfree(buffer); @@ -600,8 +602,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), - NULL); + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -668,7 +669,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_already_mapped; } - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); + area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; @@ -855,6 +856,7 @@ void binder_alloc_print_pages(struct seq_file *m, } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); + seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } /** @@ -984,7 +986,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) return ret; } -struct shrinker binder_shrinker = { +static struct shrinker binder_shrinker = { .count_objects = binder_shrink_count, .scan_objects = binder_shrink_scan, .seeks = DEFAULT_SEEKS, diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 2dd33b6df104..0b145307f1fd 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -92,6 +92,7 @@ struct binder_lru_page { * @pages: array of binder_lru_page * @buffer_size: size of address space specified via mmap * @pid: pid for associated binder_proc (invariant after init) + * @pages_high: high watermark of offset in @pages * * Bookkeeping structure for per-proc address space management for binder * buffers. 
It is normally initialized during binder_init() and binder_mmap() @@ -112,6 +113,7 @@ struct binder_alloc { size_t buffer_size; uint32_t buffer_free; int pid; + size_t pages_high; }; #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 76e3b9c8a8a2..b11dffc521e8 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -85,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); +TRACE_EVENT(binder_set_priority, + TP_PROTO(int proc, int thread, unsigned int old_prio, + unsigned int desired_prio, unsigned int new_prio), + TP_ARGS(proc, thread, old_prio, new_prio, desired_prio), + + TP_STRUCT__entry( + __field(int, proc) + __field(int, thread) + __field(unsigned int, old_prio) + __field(unsigned int, new_prio) + __field(unsigned int, desired_prio) + ), + TP_fast_assign( + __entry->proc = proc; + __entry->thread = thread; + __entry->old_prio = old_prio; + __entry->new_prio = new_prio; + __entry->desired_prio = desired_prio; + ), + TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d", + __entry->proc, __entry->thread, __entry->old_prio, + __entry->new_prio, __entry->desired_prio) +); + TRACE_EVENT(binder_wait_for_work, TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), TP_ARGS(proc_work, transaction_stack, thread_todo), diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 9f78bb03bb76..18391d0c0cd7 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -267,9 +267,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ @@ -292,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ - { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ - { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ @@ -303,20 +303,20 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ - { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther 
Point RAID */ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ - { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ - { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ @@ -357,21 +357,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ - { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ - { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ @@ -385,6 +385,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa256), 
board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -536,7 +541,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */ .driver_data = board_ahci_yes_fbs }, /* Promise */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ee4c1ec9dca0..c6fe2974b336 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4439,6 +4439,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { * https://bugzilla.kernel.org/show_bug.cgi?id=121671 */ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, + { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 }, /* Devices we expect to fail diagnostics */ @@ -4519,6 +4520,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* Crucial BX100 SSD 500GB has broken LPM support */ + { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, + + /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ + { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* 512GB MX100 with newer firmware has only LPM issues */ + { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + + /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ + { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* devices that don't properly handle queued TRIM commands */ { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -4530,7 +4550,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -5390,8 +5412,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) * We guarantee to LLDs that they will have at least one * non-zero sg if the command is a data command. 
*/ - if (WARN_ON_ONCE(ata_is_data(prot) && - (!qc->sg || !qc->n_elem || !qc->nbytes))) + if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) goto sys_err; if (ata_is_dma(prot) || (ata_is_pio(prot) && diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index e4effef0c83f..ea20e0eb4d5a 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) eflags |= ATA_EFLAG_DUBIOUS_XFER; ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); } - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); DPRINTK("EXIT\n"); } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 44ba292f2cd7..4ff69f508e95 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3315,6 +3315,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) goto invalid_fld; } + /* We may not issue NCQ commands to devices not supporting NCQ */ + if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { + fp = 1; + goto invalid_fld; + } + /* sanity check for pio multi commands */ if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { fp = 1; @@ -4308,7 +4314,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { /* relay SCSI command to ATAPI device */ int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) goto bad_cdb_len; xlat_func = atapi_xlat; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 7e76b35f422c..e121b8485731 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2803,7 +2803,7 @@ static int hrz_probe(struct pci_dev *pci_dev, return err; out_free_irq: - free_irq(dev->irq, dev); + free_irq(irq, dev); out_free: kfree(dev); out_release: diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index d7d21118d3e0..2c2ed9cf8796 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -136,6 +136,7 @@ config CFAG12864B_RATE config IMG_ASCII_LCD tristate "Imagination Technologies ASCII LCD Display" + depends on HAS_IOMEM default y if MIPS_MALTA || MIPS_SEAD3 select SYSCON help diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index a9020f82eea7..58403052514f 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -443,3 +443,7 @@ static struct platform_driver img_ascii_lcd_driver = { .remove = img_ascii_lcd_remove, }; module_platform_driver(img_ascii_lcd_driver); + +MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display"); +MODULE_AUTHOR("Paul Burton "); +MODULE_LICENSE("GPL"); diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 2f6614c9a229..49fd50fccd48 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL depends on FW_LOADER default y help - The kernel source tree includes a number of firmware 'blobs' - that are used by various drivers. The recommended way to - use these is to run "make firmware_install", which, after - converting ihex files to binary, copies all of the needed - binary files in firmware/ to /lib/firmware/ on your system so - that they can be loaded by userspace helpers on request. 
+ Various drivers in the kernel source tree may require firmware, + which is generally available in your distribution's linux-firmware + package. + + The linux-firmware package should install firmware into + /lib/firmware/ on your system, so they can be loaded by userspace + helpers on request. Enabling this option will build each required firmware blob - into the kernel directly, where request_firmware() will find - them without having to call out to userspace. This may be - useful if your root file system requires a device that uses - such firmware and do not wish to use an initrd. + specified by EXTRA_FIRMWARE into the kernel directly, where + request_firmware() will find them without having to call out to + userspace. This may be useful if your root file system requires a + device that uses such firmware and you do not wish to use an + initrd. This single option controls the inclusion of firmware for - every driver that uses request_firmware() and ships its - firmware in the kernel source tree, which avoids a + every driver that uses request_firmware(), which avoids a proliferation of 'Include firmware for xxx device' options. Say 'N' and let firmware be loaded from userspace. @@ -235,6 +236,9 @@ config GENERIC_CPU_DEVICES config GENERIC_CPU_AUTOPROBE bool +config GENERIC_CPU_VULNERABILITIES + bool + config SOC_BUS bool select GLOB @@ -245,6 +249,7 @@ config DMA_SHARED_BUFFER bool default n select ANON_INODES + select IRQ_WORK help This option enables the framework for buffer-sharing between multiple drivers. A buffer is associated with a file using driver diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 6df7d6676a48..d711b85b55d6 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -21,15 +21,26 @@ #include #include #include +#include +#include -static DEFINE_MUTEX(cpu_scale_mutex); -static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; +DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; -unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) +void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, + unsigned long max_freq) { - return per_cpu(cpu_scale, cpu); + unsigned long scale; + int i; + + scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; + + for_each_cpu(i, cpus) + per_cpu(freq_scale, i) = scale; } +static DEFINE_MUTEX(cpu_scale_mutex); +DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; + void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) { per_cpu(cpu_scale, cpu) = capacity; @@ -44,6 +55,9 @@ static ssize_t cpu_capacity_show(struct device *dev, return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id)); } +static void update_topology_flags_workfn(struct work_struct *work); +static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn); + static ssize_t cpu_capacity_store(struct device *dev, struct device_attribute *attr, const char *buf, @@ -54,10 +68,15 @@ static ssize_t cpu_capacity_store(struct device *dev, int i; unsigned long new_capacity; ssize_t ret; + cpumask_var_t mask; if (!count) return 0; + /* don't allow changes if sched-group-energy is installed */ + if(sched_energy_installed(this_cpu)) + return -EINVAL; + ret = kstrtoul(buf, 0, &new_capacity); if (ret) return ret; @@ -65,10 +84,41 @@ static ssize_t cpu_capacity_store(struct device *dev, return -EINVAL; mutex_lock(&cpu_scale_mutex); - for_each_cpu(i, &cpu_topology[this_cpu].core_sibling) + + if (new_capacity < 
SCHED_CAPACITY_SCALE) { + int highest_score_cpu = 0; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + mutex_unlock(&cpu_scale_mutex); + return -ENOMEM; + } + + cpumask_andnot(mask, cpu_online_mask, + topology_core_cpumask(this_cpu)); + + for_each_cpu(i, mask) { + if (topology_get_cpu_scale(NULL, i) == + SCHED_CAPACITY_SCALE) { + highest_score_cpu = 1; + break; + } + } + + free_cpumask_var(mask); + + if (!highest_score_cpu) { + mutex_unlock(&cpu_scale_mutex); + return -EINVAL; + } + } + + for_each_cpu(i, topology_core_cpumask(this_cpu)) topology_set_cpu_scale(i, new_capacity); mutex_unlock(&cpu_scale_mutex); + if (topology_detect_flags()) + schedule_work(&update_topology_flags_work); + return count; } @@ -93,6 +143,185 @@ static int register_cpu_capacity_sysctl(void) } subsys_initcall(register_cpu_capacity_sysctl); +enum asym_cpucap_type { no_asym, asym_thread, asym_core, asym_die }; +static enum asym_cpucap_type asym_cpucap = no_asym; +enum share_cap_type { no_share_cap, share_cap_thread, share_cap_core, share_cap_die}; +static enum share_cap_type share_cap = no_share_cap; + +#ifdef CONFIG_CPU_FREQ +int detect_share_cap_flag(void) +{ + int cpu; + enum share_cap_type share_cap_level = no_share_cap; + struct cpufreq_policy *policy; + + for_each_possible_cpu(cpu) { + policy = cpufreq_cpu_get(cpu); + + if (!policy) + return 0; + + if (cpumask_equal(topology_sibling_cpumask(cpu), + policy->related_cpus)) { + share_cap_level = share_cap_thread; + continue; + } + + if (cpumask_equal(topology_core_cpumask(cpu), + policy->related_cpus)) { + share_cap_level = share_cap_core; + continue; + } + + if (cpumask_equal(cpu_cpu_mask(cpu), + policy->related_cpus)) { + share_cap_level = share_cap_die; + continue; + } + } + + if (share_cap != share_cap_level) { + share_cap = share_cap_level; + return 1; + } + + return 0; +} +#else +int detect_share_cap_flag(void) { return 0; } +#endif + +/* + * Walk cpu topology to determine sched_domain flags. + * + * SD_ASYM_CPUCAPACITY: Indicates the lowest level that spans all cpu + * capacities found in the system for all cpus, i.e. the flag is set + * at the same level for all systems. The current algorithm implements + * this by looking for higher capacities, which doesn't work for all + * conceivable topology, but don't complicate things until it is + * necessary. 
+ */ +int topology_detect_flags(void) +{ + unsigned long max_capacity, capacity; + enum asym_cpucap_type asym_level = no_asym; + int cpu, die_cpu, core, thread, flags_changed = 0; + + for_each_possible_cpu(cpu) { + max_capacity = 0; + + if (asym_level >= asym_thread) + goto check_core; + + for_each_cpu(thread, topology_sibling_cpumask(cpu)) { + capacity = topology_get_cpu_scale(NULL, thread); + + if (capacity > max_capacity) { + if (max_capacity != 0) + asym_level = asym_thread; + + max_capacity = capacity; + } + } + +check_core: + if (asym_level >= asym_core) + goto check_die; + + for_each_cpu(core, topology_core_cpumask(cpu)) { + capacity = topology_get_cpu_scale(NULL, core); + + if (capacity > max_capacity) { + if (max_capacity != 0) + asym_level = asym_core; + + max_capacity = capacity; + } + } +check_die: + for_each_possible_cpu(die_cpu) { + capacity = topology_get_cpu_scale(NULL, die_cpu); + + if (capacity > max_capacity) { + if (max_capacity != 0) { + asym_level = asym_die; + goto done; + } + } + } + } + +done: + if (asym_cpucap != asym_level) { + asym_cpucap = asym_level; + flags_changed = 1; + pr_debug("topology flag change detected\n"); + } + + if (detect_share_cap_flag()) + flags_changed = 1; + + return flags_changed; +} + +int topology_smt_flags(void) +{ + int flags = 0; + + if (asym_cpucap == asym_thread) + flags |= SD_ASYM_CPUCAPACITY; + + if (share_cap == share_cap_thread) + flags |= SD_SHARE_CAP_STATES; + + return flags; +} + +int topology_core_flags(void) +{ + int flags = 0; + + if (asym_cpucap == asym_core) + flags |= SD_ASYM_CPUCAPACITY; + + if (share_cap == share_cap_core) + flags |= SD_SHARE_CAP_STATES; + + return flags; +} + +int topology_cpu_flags(void) +{ + int flags = 0; + + if (asym_cpucap == asym_die) + flags |= SD_ASYM_CPUCAPACITY; + + if (share_cap == share_cap_die) + flags |= SD_SHARE_CAP_STATES; + + return flags; +} + +static int update_topology = 0; + +int topology_update_cpu_topology(void) +{ + return update_topology; +} + +/* + * Updating the sched_domains can't be done directly from cpufreq callbacks + * due to locking, so queue the work for later. 
+ */ +static void update_topology_flags_workfn(struct work_struct *work) +{ + update_topology = 1; + rebuild_sched_domains(); + pr_debug("sched_domain hierarchy rebuilt, flags updated\n"); + update_topology = 0; +} + static u32 capacity_scale; static u32 *raw_capacity; @@ -115,13 +344,12 @@ void topology_normalize_cpu_scale(void) pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); mutex_lock(&cpu_scale_mutex); for_each_possible_cpu(cpu) { - pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n", - cpu, raw_capacity[cpu]); capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) / capacity_scale; topology_set_cpu_scale(cpu, capacity); - pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", - cpu, topology_get_cpu_scale(NULL, cpu)); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu raw_capacity=%u\n", + cpu, topology_get_cpu_scale(NULL, cpu), + raw_capacity[cpu]); } mutex_unlock(&cpu_scale_mutex); } @@ -129,14 +357,19 @@ void topology_normalize_cpu_scale(void) bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) { static bool cap_parsing_failed; - int ret; + int ret = 0; u32 cpu_capacity; if (cap_parsing_failed) return false; - ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", + /* override capacity-dmips-mhz if we have sched-energy-costs */ + if (of_find_property(cpu_node, "sched-energy-costs", NULL)) + cpu_capacity = topology_get_cpu_scale(NULL, cpu); + else + ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", &cpu_capacity); + if (!ret) { if (!raw_capacity) { raw_capacity = kcalloc(num_possible_cpus(), @@ -166,11 +399,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) } #ifdef CONFIG_CPU_FREQ -static cpumask_var_t cpus_to_visit __initdata; -static void __init parsing_done_workfn(struct work_struct *work); -static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn); +static cpumask_var_t cpus_to_visit; +static void parsing_done_workfn(struct work_struct *work); +static DECLARE_WORK(parsing_done_work, parsing_done_workfn); -static int __init +static int init_cpu_capacity_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -198,6 +431,8 @@ init_cpu_capacity_callback(struct notifier_block *nb, if (cpumask_empty(cpus_to_visit)) { topology_normalize_cpu_scale(); + if (topology_detect_flags()) + schedule_work(&update_topology_flags_work); free_raw_capacity(); pr_debug("cpu_capacity: parsing done\n"); schedule_work(&parsing_done_work); @@ -206,12 +441,14 @@ init_cpu_capacity_callback(struct notifier_block *nb, return 0; } -static struct notifier_block init_cpu_capacity_notifier __initdata = { +static struct notifier_block init_cpu_capacity_notifier = { .notifier_call = init_cpu_capacity_callback, }; static int __init register_cpufreq_notifier(void) { + int ret; + /* * on ACPI-based systems we need to use the default cpu capacity * until we have the necessary code to parse the cpu capacity, so @@ -227,15 +464,21 @@ static int __init register_cpufreq_notifier(void) cpumask_copy(cpus_to_visit, cpu_possible_mask); - return cpufreq_register_notifier(&init_cpu_capacity_notifier, - CPUFREQ_POLICY_NOTIFIER); + ret = cpufreq_register_notifier(&init_cpu_capacity_notifier, + CPUFREQ_POLICY_NOTIFIER); + + if (ret) + free_cpumask_var(cpus_to_visit); + + return ret; } core_initcall(register_cpufreq_notifier); -static void __init parsing_done_workfn(struct work_struct *work) +static void parsing_done_workfn(struct work_struct *work) { cpufreq_unregister_notifier(&init_cpu_capacity_notifier, 
CPUFREQ_POLICY_NOTIFIER); + free_cpumask_var(cpus_to_visit); } #else diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index eb3af2739537..07532d83be0b 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf) this_leaf->ways_of_associativity = (size / nr_sets) / line_size; } +static bool cache_node_is_unified(struct cacheinfo *this_leaf) +{ + return of_property_read_bool(this_leaf->of_node, "cache-unified"); +} + static void cache_of_override_properties(unsigned int cpu) { int index; @@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu) for (index = 0; index < cache_leaves(cpu); index++) { this_leaf = this_cpu_ci->info_list + index; + /* + * init_cache_level must setup the cache level correctly + * overriding the architecturally specified levels, so + * if type is NONE at this stage, it should be unified + */ + if (this_leaf->type == CACHE_TYPE_NOCACHE && + cache_node_is_unified(this_leaf)) + this_leaf->type = CACHE_TYPE_UNIFIED; cache_size(this_leaf); cache_get_line_size(this_leaf); cache_nr_sets(this_leaf); diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724c..c8501cdb95f4 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -313,6 +313,9 @@ static void __device_link_del(struct device_link *link) dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); + if (link->flags & DL_FLAG_PM_RUNTIME) + pm_runtime_drop_link(link->consumer); + list_del(&link->s_node); list_del(&link->c_node); device_link_free(link); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 321cd7b4d817..825964efda1d 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -501,10 +501,58 @@ static void __init cpu_dev_register_generic(void) #endif } +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES + +ssize_t __weak cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); +static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); + +static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, + &dev_attr_spectre_v1.attr, + &dev_attr_spectre_v2.attr, + NULL +}; + +static const struct attribute_group cpu_root_vulnerabilities_group = { + .name = "vulnerabilities", + .attrs = cpu_root_vulnerabilities_attrs, +}; + +static void __init cpu_register_vulnerabilities(void) +{ + if (sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpu_root_vulnerabilities_group)) + pr_err("Unable to register CPU vulnerabilities\n"); +} + +#else +static inline void cpu_register_vulnerabilities(void) { } +#endif + void __init cpu_dev_init(void) { if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) panic("Failed to register CPU subsystem"); cpu_dev_register_generic(); + cpu_register_vulnerabilities(); } diff --git a/drivers/base/isa.c b/drivers/base/isa.c index cd6ccdcf9df0..372d10af2600 100644 --- a/drivers/base/isa.c +++ b/drivers/base/isa.c @@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev) { struct isa_driver 
*isa_driver = dev->platform_data; - if (isa_driver->probe) + if (isa_driver && isa_driver->probe) return isa_driver->probe(dev, to_isa_dev(dev)->id); return 0; @@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->remove) + if (isa_driver && isa_driver->remove) return isa_driver->remove(dev, to_isa_dev(dev)->id); return 0; @@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->shutdown) + if (isa_driver && isa_driver->shutdown) isa_driver->shutdown(dev, to_isa_dev(dev)->id); } @@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->suspend) + if (isa_driver && isa_driver->suspend) return isa_driver->suspend(dev, to_isa_dev(dev)->id, state); return 0; @@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->resume) + if (isa_driver && isa_driver->resume) return isa_driver->resume(dev, to_isa_dev(dev)->id); return 0; diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 29cd71d8b360..e1bb691cf8f1 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -2,7 +2,6 @@ obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o -obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e8ca5e2cf1e5..70f8904f46a3 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -921,7 +921,7 @@ static int pm_genpd_prepare(struct device *dev) genpd_unlock(genpd); ret = pm_generic_prepare(dev); - if (ret) { + if (ret < 0) { genpd_lock(genpd); genpd->prepared_count--; @@ -929,7 +929,8 @@ static int pm_genpd_prepare(struct device *dev) genpd_unlock(genpd); } - return ret; + /* Never return 1, as genpd don't cope with the direct_complete path. */ + return ret >= 0 ? 0 : ret; } /** diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 07c3c4a9522d..b2ed606265a8 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -9,7 +9,6 @@ #include #include #include -#include #ifdef CONFIG_PM /** @@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev) if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); } - -/** - * pm_complete_with_resume_check - Complete a device power transition. - * @dev: Device to handle. - * - * Complete a device power transition during a system-wide power transition and - * optionally schedule a runtime resume of the device if the system resume in - * progress has been initated by the platform firmware and the device had its - * power.direct_complete flag set. - */ -void pm_complete_with_resume_check(struct device *dev) -{ - pm_generic_complete(dev); - /* - * If the device had been runtime-suspended before the system went into - * the sleep state it is going out of and it has never been resumed till - * now, resume it in case the firmware powered it up. 
- */ - if (dev->power.direct_complete && pm_resume_via_firmware()) - pm_request_resume(dev); -} -EXPORT_SYMBOL_GPL(pm_complete_with_resume_check); #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 770b1539a083..d514291964a2 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "../base.h" #include "power.h" @@ -848,16 +849,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) goto Driver; } - if (dev->class) { - if (dev->class->pm) { - info = "class "; - callback = pm_op(dev->class->pm, state); - goto Driver; - } else if (dev->class->resume) { - info = "legacy class "; - callback = dev->class->resume; - goto End; - } + if (dev->class && dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Driver; } if (dev->bus) { @@ -1455,6 +1450,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; const char *info = NULL; int error = 0; + char suspend_abort[MAX_SUSPEND_ABORT_LEN]; DECLARE_DPM_WATCHDOG_ON_STACK(wd); TRACE_DEVICE(dev); @@ -1475,6 +1471,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { + pm_get_active_wakeup_sources(suspend_abort, + MAX_SUSPEND_ABORT_LEN); + log_suspend_abort_reason(suspend_abort); async_error = -EBUSY; goto Complete; } @@ -1508,17 +1507,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Run; } - if (dev->class) { - if (dev->class->pm) { - info = "class "; - callback = pm_op(dev->class->pm, state); - goto Run; - } else if (dev->class->suspend) { - pm_dev_dbg(dev, state, "legacy class "); - error = legacy_suspend(dev, state, dev->class->suspend, - "legacy class "); - goto End; - } + if (dev->class && dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Run; } if (dev->bus) { @@ -1862,8 +1854,7 @@ void device_pm_check_callbacks(struct device *dev) dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && - (!dev->class || (pm_ops_is_empty(dev->class->pm) && - !dev->class->suspend && !dev->class->resume)) && + (!dev->class || pm_ops_is_empty(dev->class->pm)) && (!dev->type || pm_ops_is_empty(dev->type->pm)) && (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..b2b1eece0db1 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev) continue; retval = pm_runtime_get_sync(link->supplier); - if (retval < 0) { + /* Ignore suppliers with disabled runtime PM. 
*/ + if (retval < 0 && retval != -EACCES) { pm_runtime_put_noidle(link->supplier); return retval; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index cdd6f256da59..b932d7f75504 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "power.h" @@ -805,6 +806,37 @@ void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) } EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); +void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max) +{ + struct wakeup_source *ws, *last_active_ws = NULL; + int len = 0; + bool active = false; + + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->active && len < max) { + if (!active) + len += scnprintf(pending_wakeup_source, max, + "Pending Wakeup Sources: "); + len += scnprintf(pending_wakeup_source + len, max - len, + "%s ", ws->name); + active = true; + } else if (!active && + (!last_active_ws || + ktime_to_ns(ws->last_time) > + ktime_to_ns(last_active_ws->last_time))) { + last_active_ws = ws; + } + } + if (!active && last_active_ws) { + scnprintf(pending_wakeup_source, max, + "Last active Wakeup Source: %s", + last_active_ws->name); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources); + void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; @@ -1018,7 +1050,7 @@ static int print_wakeup_source_stats(struct seq_file *m, active_time = 0; } - seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", + seq_printf(m, "%-32s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", ws->name, active_count, ws->event_count, ws->wakeup_count, ws->expire_count, ktime_to_ms(active_time), ktime_to_ms(total_time), @@ -1039,7 +1071,7 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused) struct wakeup_source *ws; int srcuidx; - seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" + seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 0368fd7b3a41..d8959dc06d83 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -37,3 +37,11 @@ config REGMAP_MMIO config REGMAP_IRQ bool + +config REGMAP_SDW + default n + tristate "Regmap support for soundwire" + depends on SDW + help + Enable this if regmap support is required for + soundwire slave devices. 
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 0d298c446108..9f37b3906c0b 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o obj-$(CONFIG_REGMAP_W1) += regmap-w1.o +obj-$(CONFIG_REGMAP_SDW) += regmap-sdw.o diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c new file mode 100644 index 000000000000..3eacebf26d5f --- /dev/null +++ b/drivers/base/regmap/regmap-sdw.c @@ -0,0 +1,252 @@ +/* + * regmap-sdw.c - Register map access API - SoundWire support + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include + +#include "internal.h" + +#define SDW_SCP_ADDRPAGE1_MASK 0xFF +#define SDW_SCP_ADDRPAGE1_SHIFT 15 + +#define SDW_SCP_ADDRPAGE2_MASK 0xFF +#define SDW_SCP_ADDRPAGE2_SHIFT 22 + +#define SDW_REGADDR_SHIFT 0x0 +#define SDW_REGADDR_MASK 0xFFFF + +#define SDW_MAX_REG_ADDR 65536 + +static int regmap_sdw_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct sdw_slave *sdw = to_sdw_slave(dev); + struct sdw_msg xfer; + int ret, scp_addr1, scp_addr2; + int reg_command; + int reg_addr = *(u32 *)reg; + size_t t_val_size = 0, t_size; + int offset; + u8 *t_val; + + /* All registers are 4 byte on SoundWire bus */ + if (reg_size != 4) + return -ENOTSUPP; + + xfer.slave_addr = sdw->slv_number; + xfer.ssp_tag = 0; + xfer.flag = SDW_MSG_FLAG_READ; + xfer.len = 0; + t_val = val; + + offset = 0; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + if (val_size > SDW_MAX_REG_ADDR) + t_size = SDW_MAX_REG_ADDR - reg_command; + else + t_size = val_size; + while (t_val_size < val_size) { + + scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & + SDW_SCP_ADDRPAGE1_MASK; + scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & + SDW_SCP_ADDRPAGE2_MASK; + xfer.addr_page1 = scp_addr1; + xfer.addr_page2 = scp_addr2; + xfer.addr = reg_command; + xfer.len += t_size; + xfer.buf = &t_val[offset]; + ret = sdw_slave_transfer(sdw->mstr, &xfer, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + + t_val_size += t_size; + offset += t_size; + if (val_size - t_val_size > 65535) + t_size = 65535; + else + t_size = val_size - t_val_size; + reg_addr += t_size; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + } + return 0; +} + +static int regmap_sdw_gather_write(void *context, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + struct device *dev = context; + struct sdw_slave *sdw = to_sdw_slave(dev); + struct sdw_msg xfer; + int ret, scp_addr1, scp_addr2; + int reg_command; + int reg_addr = *(u32 *)reg; + size_t t_val_size = 0, t_size; + int offset; + u8 *t_val; + + /* All registers 
are 4 byte on SoundWire bus */ + if (reg_size != 4) + return -ENOTSUPP; + + if (!sdw) + return 0; + + xfer.slave_addr = sdw->slv_number; + xfer.ssp_tag = 0; + xfer.flag = SDW_MSG_FLAG_WRITE; + xfer.len = 0; + t_val = (u8 *)val; + + offset = 0; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + if (val_size > SDW_MAX_REG_ADDR) + t_size = SDW_MAX_REG_ADDR - reg_command; + else + t_size = val_size; + while (t_val_size < val_size) { + + scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & + SDW_SCP_ADDRPAGE1_MASK; + scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & + SDW_SCP_ADDRPAGE2_MASK; + xfer.addr_page1 = scp_addr1; + xfer.addr_page2 = scp_addr2; + xfer.addr = reg_command; + xfer.len += t_size; + xfer.buf = &t_val[offset]; + ret = sdw_slave_transfer(sdw->mstr, &xfer, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + + t_val_size += t_size; + offset += t_size; + if (val_size - t_val_size > 65535) + t_size = 65535; + else + t_size = val_size - t_val_size; + reg_addr += t_size; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + } + return 0; +} + +static inline void regmap_sdw_count_check(size_t count, u32 offset) +{ + BUG_ON(count <= offset); +} + +static int regmap_sdw_write(void *context, const void *data, size_t count) +{ + /* 4-byte register address for the soundwire */ + unsigned int offset = 4; + + regmap_sdw_count_check(count, offset); + return regmap_sdw_gather_write(context, data, 4, + data + offset, count - offset); +} + +static struct regmap_bus regmap_sdw = { + .write = regmap_sdw_write, + .gather_write = regmap_sdw_gather_write, + .read = regmap_sdw_read, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static int regmap_sdw_config_check(const struct regmap_config *config) +{ + /* All register are 8-bits wide as per MIPI Soundwire 1.0 Spec */ + if (config->val_bits != 8) + return -ENOTSUPP; + /* Registers are 32 bit in size, based on SCP_ADDR1 and SCP_ADDR2 + * implementation address range may vary in slave. + */ + if (config->reg_bits != 32) + return -ENOTSUPP; + /* SoundWire register address are contiguous. */ + if (config->reg_stride != 0) + return -ENOTSUPP; + if (config->pad_bits != 0) + return -ENOTSUPP; + + + return 0; +} + +/** + * regmap_init_sdw(): Initialise register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +struct regmap *regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return regmap_init(&sdw->dev, ®map_sdw, &sdw->dev, config); +} +EXPORT_SYMBOL_GPL(regmap_init_sdw); + + +/** + * devm_regmap_init_sdw(): Initialise managed register map + * + * @sdw Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
+ */ +struct regmap *devm_regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return devm_regmap_init(&sdw->dev, ®map_sdw, &sdw->dev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_sdw); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index 8d98a329f6ea..96c34a95cc62 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c @@ -11,6 +11,7 @@ #include #include #include +#include static LIST_HEAD(syscore_ops_list); static DEFINE_MUTEX(syscore_ops_lock); @@ -75,6 +76,8 @@ int syscore_suspend(void) return 0; err_out: + log_suspend_abort_reason("System core suspend callback %pF failed", + ops->suspend); pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend); list_for_each_entry_continue(ops, &syscore_ops_list, node) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 85de67334695..754852156622 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) struct iov_iter i; ssize_t bw; - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); file_start_write(file); bw = vfs_iter_write(file, &i, ppos, 0); @@ -1098,11 +1098,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; - if (type >= MAX_LO_CRYPT) - return -EINVAL; + if (type >= MAX_LO_CRYPT) { + err = -EINVAL; + goto exit; + } xfer = xfer_funcs[type]; - if (xfer == NULL) - return -EINVAL; + if (xfer == NULL) { + err = -EINVAL; + goto exit; + } } else xfer = NULL; @@ -1576,9 +1580,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode) return err; } -static void lo_release(struct gendisk *disk, fmode_t mode) +static void __lo_release(struct loop_device *lo) { - struct loop_device *lo = disk->private_data; int err; if (atomic_dec_return(&lo->lo_refcnt)) @@ -1605,6 +1608,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode) mutex_unlock(&lo->lo_ctl_mutex); } +static void lo_release(struct gendisk *disk, fmode_t mode) +{ + mutex_lock(&loop_index_mutex); + __lo_release(disk->private_data); + mutex_unlock(&loop_index_mutex); +} + static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9adfb5445f8d..5f2a4240a204 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, cmd->status = BLK_STS_TIMEOUT; return BLK_EH_HANDLED; } - - /* If we are waiting on our dead timer then we could get timeout - * callbacks for our request. For this we just want to reset the timer - * and let the queue side take care of everything. 
- */ - if (!completion_done(&cmd->send_complete)) { - nbd_config_put(nbd); - return BLK_EH_RESET_TIMER; - } config = nbd->config; if (config->num_connections > 1) { @@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd) return 0; if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) return 0; - wait_event_interruptible_timeout(config->conn_wait, - atomic_read(&config->live_connections), - config->dead_conn_timeout); + wait_event_timeout(config->conn_wait, + atomic_read(&config->live_connections), + config->dead_conn_timeout); return atomic_read(&config->live_connections); } @@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) if (!refcount_inc_not_zero(&nbd->config_refs)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Socks array is empty\n"); + blk_mq_start_request(req); return -EINVAL; } config = nbd->config; @@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on invalid socket\n"); nbd_config_put(nbd); + blk_mq_start_request(req); return -EINVAL; } cmd->status = BLK_STS_OK; @@ -771,6 +764,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) */ sock_shutdown(nbd); nbd_config_put(nbd); + blk_mq_start_request(req); return -EIO; } goto again; @@ -781,6 +775,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) * here so that it gets put _after_ the request that is already on the * dispatch list. */ + blk_mq_start_request(req); if (unlikely(nsock->pending && nsock->pending != req)) { blk_mq_requeue_request(req, true); ret = 0; @@ -793,10 +788,10 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) ret = nbd_send_cmd(nbd, cmd, index); if (ret == -EAGAIN) { dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed trying another connection\n"); + "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); - mutex_unlock(&nsock->tx_lock); - goto again; + blk_mq_requeue_request(req, true); + ret = 0; } out: mutex_unlock(&nsock->tx_lock); @@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * done sending everything over the wire. */ init_completion(&cmd->send_complete); - blk_mq_start_request(bd->rq); /* We can be called directly from the user space process, which means we * could possibly have signals pending so our sendmsg will fail. 
In diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8042c26ea9e6..69dfa1d3f453 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -467,7 +467,6 @@ static void nullb_device_release(struct config_item *item) { struct nullb_device *dev = to_nullb_device(item); - badblocks_exit(&dev->badblocks); null_free_device_storage(dev, false); null_free_dev(dev); } @@ -578,6 +577,10 @@ static struct nullb_device *null_alloc_dev(void) static void null_free_dev(struct nullb_device *dev) { + if (!dev) + return; + + badblocks_exit(&dev->badblocks); kfree(dev); } @@ -1985,8 +1988,10 @@ static int __init null_init(void) for (i = 0; i < nr_devices; i++) { dev = null_alloc_dev(); - if (!dev) + if (!dev) { + ret = -ENOMEM; goto err_dev; + } ret = null_add_dev(dev); if (ret) { null_free_dev(dev); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 67974796c350..531a0915066b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) bdev = bdget(dev); if (!bdev) return -ENOMEM; + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); + if (ret) + return ret; if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); - bdput(bdev); + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); return -EINVAL; } - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); - if (ret) - return ret; /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); @@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) pd->pkt_dev = MKDEV(pktdev_major, idx); ret = pkt_new_dev(pd, dev); if (ret) - goto out_new_dev; + goto out_mem2; /* inherit events of the host device */ disk->events = pd->bdev->bd_disk->events; @@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) mutex_unlock(&ctl_mutex); return 0; -out_new_dev: - blk_cleanup_queue(disk->queue); out_mem2: put_disk(disk); out_mem: diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index adc877dfef5c..fe4fd8aee19f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) #define RBD_FEATURE_DATA_POOL (1ULL<<7) +#define RBD_FEATURE_OPERATIONS (1ULL<<8) #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ RBD_FEATURE_EXCLUSIVE_LOCK | \ - RBD_FEATURE_DATA_POOL) + RBD_FEATURE_DATA_POOL | \ + RBD_FEATURE_OPERATIONS) /* Features supported by this (client software) implementation. 
*/ @@ -3074,13 +3076,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) mutex_unlock(&rbd_dev->watch_mutex); } +static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) +{ + struct rbd_client_id cid = rbd_get_cid(rbd_dev); + + strcpy(rbd_dev->lock_cookie, cookie); + rbd_set_owner_cid(rbd_dev, &cid); + queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); +} + /* * lock_rwsem must be held for write */ static int rbd_lock(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct rbd_client_id cid = rbd_get_cid(rbd_dev); char cookie[32]; int ret; @@ -3095,9 +3105,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) return ret; rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; - strcpy(rbd_dev->lock_cookie, cookie); - rbd_set_owner_cid(rbd_dev, &cid); - queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); + __rbd_lock(rbd_dev, cookie); return 0; } @@ -3883,7 +3891,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev) queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); } else { - strcpy(rbd_dev->lock_cookie, cookie); + __rbd_lock(rbd_dev, cookie); } } @@ -4415,7 +4423,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); q->limits.max_sectors = queue_max_hw_sectors(q); - blk_queue_max_segments(q, segment_size / SECTOR_SIZE); + blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); blk_queue_io_opt(q, segment_size); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 98a60db8e5d1..b33c8d6eb8c7 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -66,6 +66,7 @@ config BT_HCIBTSDIO config BT_HCIUART tristate "HCI UART driver" + depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS depends on TTY help Bluetooth HCI UART driver. @@ -80,7 +81,6 @@ config BT_HCIUART config BT_HCIUART_SERDEV bool depends on SERIAL_DEV_BUS && BT_HCIUART - depends on SERIAL_DEV_BUS=y || SERIAL_DEV_BUS=BT_HCIUART default y config BT_HCIUART_H4 diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index d00c4fdae924..093fd096f0c8 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -26,6 +26,7 @@ struct btqcomsmd { struct hci_dev *hdev; + bdaddr_t bdaddr; struct rpmsg_endpoint *acl_channel; struct rpmsg_endpoint *cmd_channel; }; @@ -85,7 +86,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) break; } - kfree_skb(skb); + if (!ret) + kfree_skb(skb); return ret; } @@ -100,6 +102,38 @@ static int btqcomsmd_close(struct hci_dev *hdev) return 0; } +static int btqcomsmd_setup(struct hci_dev *hdev) +{ + struct btqcomsmd *btq = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + /* Devices do not have persistent storage for BD address. If no + * BD address has been retrieved during probe, mark the device + * as having an invalid BD address. + */ + if (!bacmp(&btq->bdaddr, BDADDR_ANY)) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + /* When setting a configured BD address fails, mark the device + * as having an invalid BD address. 
+ */ + err = qca_set_bdaddr_rome(hdev, &btq->bdaddr); + if (err) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + return 0; +} + static int btqcomsmd_probe(struct platform_device *pdev) { struct btqcomsmd *btq; @@ -135,6 +169,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->open = btqcomsmd_open; hdev->close = btqcomsmd_close; hdev->send = btqcomsmd_send; + hdev->setup = btqcomsmd_setup; hdev->set_bdaddr = qca_set_bdaddr_rome; ret = hci_register_dev(hdev); diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index c8e945d19ffe..20142bc77554 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func, tuple = tuple->next; } + /* BCM43341 devices soldered onto the PCB (non-removable) use an + * uart connection for bluetooth, ignore the BT SDIO interface. + */ + if (func->vendor == SDIO_VENDOR_ID_BROADCOM && + func->device == SDIO_DEVICE_ID_BROADCOM_43341 && + !mmc_card_is_removable(func->card->host)) + return -ENODEV; + data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c old mode 100644 new mode 100755 index 7a5c06aaa181..c581549a0bcd --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -21,8 +21,10 @@ * */ +#include #include #include +#include #include #include #include @@ -233,7 +235,6 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, @@ -266,14 +267,17 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, /* QCA ROME chipset */ + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ @@ -379,6 +383,21 @@ static const struct usb_device_id blacklist_table[] = { { } /* Terminating entry */ }; +/* The Bluetooth USB module build into some devices needs to be reset on resume, + * this is a problem with the platform (likely shutting off all power) not with + * the module itself. So we use a DMI list to match known broken platforms. 
+ */ +static const struct dmi_system_id btusb_needs_reset_resume_table[] = { + { + /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), + }, + }, + {} +}; + #define BTUSB_MAX_ISOC_FRAMES 10 #define BTUSB_INTR_RUNNING 0 @@ -391,9 +410,8 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_FIRMWARE_LOADED 7 #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 -#define BTUSB_RESET_RESUME 10 -#define BTUSB_DIAG_RUNNING 11 -#define BTUSB_OOB_WAKE_ENABLED 12 +#define BTUSB_DIAG_RUNNING 10 +#define BTUSB_OOB_WAKE_ENABLED 11 struct btusb_data { struct hci_dev *hdev; @@ -1944,6 +1962,18 @@ static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb) } } } + else if (skb->len >= sizeof(struct hci_event_hdr)) { + struct hci_event_hdr *hdr; + + hdr = (struct hci_event_hdr *) skb->data; + + if (hdr->evt == HCI_EV_CMD_COMPLETE) { + *(__u8 *)(skb->data + 2) = 1; + } else if (hdr->evt == HCI_EV_CMD_STATUS) { + *(__u8 *)(skb->data + 3) = 1; + } + } + return hci_recv_frame(hdev, skb); } @@ -3012,6 +3042,9 @@ static int btusb_probe(struct usb_interface *intf, hdev->send = btusb_send_frame; hdev->notify = btusb_notify; + if (dmi_check_system(btusb_needs_reset_resume_table)) + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; + #ifdef CONFIG_PM err = btusb_config_oob_wake(hdev); if (err) @@ -3098,12 +3131,6 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; - - /* QCA Rome devices lose their updated firmware over suspend, - * but the USB hub doesn't notice any status change. - * Explicitly request a device reset on resume. - */ - set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL @@ -3114,7 +3141,7 @@ static int btusb_probe(struct usb_interface *intf, * but the USB hub doesn't notice any status change. * Explicitly request a device reset on resume. */ - set_bit(BTUSB_RESET_RESUME, &data->flags); + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; } #endif @@ -3279,14 +3306,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) enable_irq(data->oob_wake_irq); } - /* Optionally request a device reset on resume, but only when - * wakeups are disabled. If wakeups are enabled we assume the - * device will stay powered up throughout suspend. - */ - if (test_bit(BTUSB_RESET_RESUME, &data->flags) && - !device_may_wakeup(&data->udev->dev)) - data->udev->reset_resume = 1; - return 0; } diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index e2540113d0da..32527bdf4b50 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -68,7 +68,7 @@ struct bcm_device { u32 init_speed; u32 oper_speed; int irq; - u8 irq_polarity; + bool irq_active_low; #ifdef CONFIG_PM struct hci_uart *hu; @@ -213,7 +213,9 @@ static int bcm_request_irq(struct bcm_data *bcm) } err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake, - IRQF_TRIGGER_RISING, "host_wake", bdev); + bdev->irq_active_low ? 
IRQF_TRIGGER_FALLING : + IRQF_TRIGGER_RISING, + "host_wake", bdev); if (err) goto unlock; @@ -253,7 +255,7 @@ static int bcm_setup_sleep(struct hci_uart *hu) struct sk_buff *skb; struct bcm_set_sleep_mode sleep_params = default_sleep_params; - sleep_params.host_wake_active = !bcm->dev->irq_polarity; + sleep_params.host_wake_active = !bcm->dev->irq_active_low; skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params), &sleep_params, HCI_INIT_TIMEOUT); @@ -690,35 +692,14 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { }; #ifdef CONFIG_ACPI -static u8 acpi_active_low = ACPI_ACTIVE_LOW; - /* IRQ polarity of some chipsets are not defined correctly in ACPI table. */ -static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { - { - .ident = "Asus T100TA", - .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, - "ASUSTeK COMPUTER INC."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), - }, - .driver_data = &acpi_active_low, - }, - { - .ident = "Asus T100CHI", - .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, - "ASUSTeK COMPUTER INC."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"), - }, - .driver_data = &acpi_active_low, - }, +static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = { { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */ .ident = "Lenovo ThinkPad 8", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"), }, - .driver_data = &acpi_active_low, }, { } }; @@ -733,13 +714,15 @@ static int bcm_resource(struct acpi_resource *ares, void *data) switch (ares->type) { case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: irq = &ares->data.extended_irq; - dev->irq_polarity = irq->polarity; + if (irq->polarity != ACPI_ACTIVE_LOW) + dev_info(&dev->pdev->dev, "ACPI Interrupt resource is active-high, this is usually wrong, treating the IRQ as active-low\n"); + dev->irq_active_low = true; break; case ACPI_RESOURCE_TYPE_GPIO: gpio = &ares->data.gpio; if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) - dev->irq_polarity = gpio->polarity; + dev->irq_active_low = gpio->polarity == ACPI_ACTIVE_LOW; break; case ACPI_RESOURCE_TYPE_SERIAL_BUS: @@ -834,11 +817,11 @@ static int bcm_acpi_probe(struct bcm_device *dev) return ret; acpi_dev_free_resource_list(&resources); - dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table); + dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table); if (dmi_id) { bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low", dmi_id->ident); - dev->irq_polarity = *(u8 *)dmi_id->driver_data; + dev->irq_active_low = true; } return 0; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index a746627e784e..6aef3bde10d7 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -298,6 +299,12 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) unsigned int set = 0; unsigned int clear = 0; + if (hu->serdev) { + serdev_device_set_flow_control(hu->serdev, !enable); + serdev_device_set_rts(hu->serdev, !enable); + return; + } + if (enable) { /* Disable hardware flow control */ ktermios = tty->termios; @@ -510,13 +517,13 @@ static void hci_uart_tty_close(struct tty_struct *tty) if (hdev) hci_uart_close(hdev); - cancel_work_sync(&hu->write_work); - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { write_lock_irqsave(&hu->proto_lock, flags); clear_bit(HCI_UART_PROTO_READY, &hu->flags); write_unlock_irqrestore(&hu->proto_lock, flags); + cancel_work_sync(&hu->write_work); + if 
(hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) hci_unregister_dev(hdev); diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index ffb00669346f..d98d846ca4f9 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c @@ -60,6 +60,20 @@ struct hci_mrvl_pkt { } __packed; #define HCI_MRVL_PKT_SIZE 4 +static int get_cts(struct hci_uart *hu) +{ + struct tty_struct *tty = hu->tty; + u32 state = tty->ops->tiocmget(tty); + + if (state & TIOCM_CTS) { + BT_INFO("CTS is low"); + return 1; + } + BT_INFO("CTS is high"); + + return 0; +} + static int mrvl_open(struct hci_uart *hu) { struct mrvl_data *mrvl; @@ -345,6 +359,14 @@ static int mrvl_setup(struct hci_uart *hu) { int err; + if (get_cts(hu)) { + BT_INFO("fw is running"); + hci_uart_set_baudrate(hu, 3000000); + hci_uart_set_flow_control(hu, false); + + return 0; + } + hci_uart_set_flow_control(hu, true); err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin"); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 392f412b4575..c9f0ac083a3e 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -933,6 +933,9 @@ static int qca_setup(struct hci_uart *hu) if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); + } else if (ret == -ENOENT) { + /* No patch/nvm-config found, run with original fw/config */ + ret = 0; } /* Setup bdaddr */ diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 3e66f4cc1a59..886421f09dc0 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -184,4 +184,28 @@ config DA8XX_MSTPRI configuration. Allows to adjust the priorities of all master peripherals. +config DVC_TRACE_BUS + bool "DvC-Trace pseudobus" + default n + depends on USB_GADGET + help + DvC-Trace pseudobus is meant to group devices capable of sending + trace data via a USB DvC-Trace gadget function. + An usb function driver will be able to choose a source device and + provide the means to transfer the data. + + Say Y to enable it. + +config DVC_TRACE_BUS_DEBUG + bool "DvC-Trace pseudobus debug" + default n + depends on DVC_TRACE_BUS + help + DvC-Trace pseudobus is meant to group devices capable of sending + trace data via a USB DvC-Trace gadget function. + An usb function driver will be able to choose a source device and + provide the means to transfer the data. + + Say Y to enable extended debug messages in this driver. 
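For readers unfamiliar with the pseudobus idea behind DVC_TRACE_BUS, here is a generic driver-core sketch (explicitly not the dvctrace implementation added below) of how such a bus is hooked into the kernel: a struct bus_type with a match callback, registered with bus_register(). Every identifier in this snippet is invented purely for illustration.

#include <linux/device.h>
#include <linux/module.h>

/* all names below are illustrative only */
static int example_trace_bus_match(struct device *dev, struct device_driver *drv)
{
	/* a real bus compares device and driver identifiers here */
	return 1;
}

static struct bus_type example_trace_bus = {
	.name	= "example-trace",
	.match	= example_trace_bus_match,
};

static int __init example_trace_bus_init(void)
{
	return bus_register(&example_trace_bus);
}
module_init(example_trace_bus_init);

static void __exit example_trace_bus_exit(void)
{
	bus_unregister(&example_trace_bus);
}
module_exit(example_trace_bus_exit);

MODULE_LICENSE("GPL");

Devices and drivers registered on a bus like this are then grouped under the bus in sysfs, which is how the trace source devices and the USB function driver find and pair with each other.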
+ endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 3ae96cffabd5..cf2051800214 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -25,3 +25,5 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o +obj-$(CONFIG_DVC_TRACE_BUS) += dvctrace.o +subdir-ccflags-$(CONFIG_DVC_TRACE_BUS_DEBUG) += -DDVCT_DEBUG diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 3c29d36702a8..5426c04fe24b 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev) raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); mutex_init(&cci_pmu->reserve_mutex); atomic_set(&cci_pmu->active_events, 0); - cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); + cpumask_set_cpu(get_cpu(), &cci_pmu->cpus); ret = cci_pmu_init(cci_pmu, pdev); - if (ret) + if (ret) { + put_cpu(); return ret; + } cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, &cci_pmu->node); + put_cpu(); pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; } diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index e8c6946fed9d..72fd1750134d 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1271,11 +1271,16 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id); name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL); + if (!name) { + err = -ENOMEM; + goto error_choose_name; + } snprintf(name, len + 1, "ccn_%d", ccn->dt.id); } /* Perf driver registration */ ccn->dt.pmu = (struct pmu) { + .module = THIS_MODULE, .attr_groups = arm_ccn_pmu_attr_groups, .task_ctx_nr = perf_invalid_context, .event_init = arm_ccn_pmu_event_init, @@ -1297,7 +1302,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) } /* Pick one CPU which we will use to collect data from CCN... */ - cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); + cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); /* Also make sure that the overflow interrupt is handled by this CPU */ if (ccn->irq) { @@ -1314,10 +1319,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, &ccn->dt.node); + put_cpu(); return 0; error_pmu_register: error_set_affinity: + put_cpu(); +error_choose_name: ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); @@ -1580,8 +1588,8 @@ static int __init arm_ccn_init(void) static void __exit arm_ccn_exit(void) { - cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); platform_driver_unregister(&arm_ccn_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); } module_init(arm_ccn_init); diff --git a/drivers/bus/dvctrace.c b/drivers/bus/dvctrace.c new file mode 100644 index 000000000000..915c42593edb --- /dev/null +++ b/drivers/bus/dvctrace.c @@ -0,0 +1,737 @@ +/* + * DvC-Trace(dvct) Bus driver + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#include +#include +#include +#include + +#ifdef DVCT_DEBUG +#define DVCT_IN() pr_debug("in\n") +#else +#define DVCT_IN() do {} while (0) +#endif + +/* Count the number of USB descriptors in the given ascii hex string + * What we expect: + * ll tt ss xx xx xx + * | | | +- Fill up the descriptor + * | | +- Descriptor sub-type (1-4) + * | | DC_INPUT_CONNECTION 0x01 + * | | DC_OUTPUT_CONNECTION 0x02 + * | | DC_DEBUG_UNIT 0x03 + * | | DC_DEBUG_ATTRIBUTES 0x04 + * | +- Descriptor type (USB_DT_CS_INTERFACE) + * +- Descriptor length (check > 3 and we have the rest of it) + */ +static int count_descriptors(const char *buf, size_t size) +{ + size_t off = 0; + int i, j, count = 0; + u8 len, tmp; + + DVCT_IN(); + while (off < size) { + /*the length*/ + j = sscanf(buf + off, "%2hhx%n", &len, &i); + if (!j) + break; + if (j < 0 || len < 4) + return -EINVAL; + len--; + off += i; + + /*Type*/ + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0 || tmp != USB_DT_CS_INTERFACE) + return -EINVAL; + len--; + off += i; + + /*Sub Type*/ + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0 || tmp < DC_INPUT_CONNECTION + || tmp > DC_DEBUG_ATTRIBUTES) + return -EINVAL; + len--; + off += i; + + while (len) { + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0) + return -EINVAL; + len--; + off += i; + } + count++; + } + return count; +} + +/* Parse @buf and get a pointer to the descriptor identified + * @idx*/ +static u8 *get_descriptor(const char *buf, size_t size, int idx) +{ + size_t off = 0; + int i, j, k, count = 0; + u8 len, tmp, *ret = NULL; + + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%2hhx%n", &len, &i); + if (j < 0) + return ERR_PTR(-EINVAL); + if (!j) + return ERR_PTR(-ERANGE); + + if (count == idx) { + ret = kmalloc(len, GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + ret[0] = len; + } + off += i; + for (k = 1; k < len; k++) { + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0) { + kfree(ret); + return ERR_PTR(-EINVAL); + } + if (count == idx) + ret[k] = tmp; + off += i; + } + if (count == idx) + break; + count++; + } + return ret; +} + + +static void free_strings(struct dvct_usb_descriptors *desc) +{ + struct usb_string *string; + + DVCT_IN(); + for (string = desc->str.strings; string && string->s; string++) + kfree(string->s); + + kfree(desc->str.strings); + desc->str.strings = NULL; + kfree(desc->lk_tbl); + desc->lk_tbl = NULL; +} + +static void free_descriptors(struct dvct_usb_descriptors *desc) +{ + struct usb_descriptor_header **hdr; + + DVCT_IN(); + if (desc->dvc_spec) { + for (hdr = desc->dvc_spec; *hdr; hdr++) + kfree(*hdr); + kfree(desc->dvc_spec); + desc->dvc_spec = NULL; + } + free_strings(desc); + kfree(desc); +} + +static int alloc_strings(struct dvct_usb_descriptors *desc, int count) +{ + DVCT_IN(); + desc->lk_tbl = kzalloc((count + 1) * sizeof(struct dvct_string_lookup), + GFP_KERNEL); + if (!desc->lk_tbl) + goto err; + + desc->str.strings = kzalloc((count + 1) * sizeof(*desc->str.strings), + GFP_KERNEL); + if (!desc->str.strings) + goto err_str; + + desc->str.language = 0x0409; + + return count; +err_str: + kfree(desc->lk_tbl); + desc->lk_tbl = NULL; +err: + return -ENOMEM; +} + +static struct dvct_usb_descriptors *alloc_descriptors(int count) +{ + struct dvct_usb_descriptors *desc; + + DVCT_IN(); + desc = kzalloc(sizeof(struct dvct_usb_descriptors), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + desc->dvc_spec = + kzalloc((count + 1) * sizeof(struct 
usb_descriptor_header *), + GFP_KERNEL); + + if (!desc->dvc_spec) { + kfree(desc); + return ERR_PTR(-ENOMEM); + } + return desc; +} + +static ssize_t descriptors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + struct usb_descriptor_header **desc; + int ret = 0; + + DVCT_IN(); + if (!ds_dev->desc || !ds_dev->desc->dvc_spec + || !*ds_dev->desc->dvc_spec) + return sprintf(buf, "No Descriptors.\n"); + + for (desc = ds_dev->desc->dvc_spec; *desc; desc++) { + u8 len, *pdesc; + int i; + + len = (*desc)->bLength; + + /* Check if it fits, total output is 3 * len */ + if ((ret + 3 * len) > PAGE_SIZE) { + dev_warn(dev, "Descriptors attribute page overrun\n"); + break; + } + + pdesc = (u8 *)(*desc); + for (i = 0; i < len; i++) + ret += snprintf(buf + ret, PAGE_SIZE - ret, "%02hhX ", + pdesc[i]); + buf[ret - 1] = '\n'; + } + return ret; +} + +static ssize_t descriptors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int desc_count, i; + u8 *hdr; + + DVCT_IN(); + + if (ds_dev->instance_taken) + return -EBUSY; + + /*count the new descriptors, exit if invalid input*/ + desc_count = count_descriptors(buf, size); + if (desc_count <= 0) { + dev_warn(dev, "Invalid descriptor input:[%zu] %s", size, buf); + return -EINVAL; + } + + if (ds_dev->desc && ds_dev->desc != &ds_dev->static_desc) + free_descriptors(ds_dev->desc); + + ds_dev->desc = alloc_descriptors(desc_count); + if (IS_ERR_OR_NULL(ds_dev->desc)) { + ds_dev->desc = NULL; + return -ENOMEM; + } + + for (i = 0; i < desc_count; i++) { + hdr = get_descriptor(buf, size, i); + if (IS_ERR_OR_NULL(hdr)) { + dev_err(dev, "Cannot get descriptor %d, %ld\n", i, + PTR_ERR(hdr)); + free_descriptors(ds_dev->desc); + ds_dev->desc = NULL; + return -EINVAL; + } + ds_dev->desc->dvc_spec[i] = (struct usb_descriptor_header *)hdr; + } + return size; +} + +static DEVICE_ATTR_RW(descriptors); + + +/*find out at which member(offset) of which descriptor the pointer + * points to */ +static int dvctrace_string_ptr_to_offset(struct usb_descriptor_header **first, + u8 *ptr, int *desc_offset, int *offset) +{ + u8 *hdr_start, *hdr_end; + int idx = 0; + + DVCT_IN(); + for (; *first; first++, idx++) { + hdr_start = (u8 *) (*first); + hdr_end = hdr_start + ((*first)->bLength - 1); + if (ptr >= hdr_start && ptr <= hdr_end) { + *desc_offset = idx; + *offset = ptr - hdr_start; + return 0; + } + } + return -EINVAL; +} + +static u8 *dvctrace_offset_to_string_ptr(struct usb_descriptor_header **first, + int desc_offset, int offset) +{ + int idx = 0; + + DVCT_IN(); + for (; *first; first++, idx++) { + if (idx == desc_offset) { + if (offset >= (*first)->bLength) + return ERR_PTR(-ERANGE); + return ((u8 *) (*first)) + offset; + } + } + return ERR_PTR(-ERANGE); +} + +static int count_strings(const char *buf, size_t size) +{ + int count = 0; + size_t off = 0, slen; + int i = 0, j, desc_offset, offset; + + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%d.%d: %n", &desc_offset, &offset, &i); + if (j < 2) + break; + off += i; + slen = 0; + while (off + slen < size) { + if (buf[off + slen] == ';' || buf[off + slen] == '\n') + break; + slen++; + } + off += slen; + if (buf[off] == ';' || buf[off] == '\n') + off++; + count++; + } + return count; +} + +static char *get_string(const char *buf, size_t size, int index, + int *desc_offset, int *offset) +{ + int count = 0; + size_t off = 
0, slen; + int i, j; + char *ret = ERR_PTR(-EINVAL); + + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%d.%d: %n", desc_offset, offset, &i); + if (j < 2) + return ERR_PTR(-EINVAL); + off += i; + slen = 0; + while (off + slen < size) { + if (buf[off + slen] == ';' || buf[off + slen] == '\n') + break; + slen++; + } + + if (count == index) { + ret = kmalloc(slen+1, GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + memcpy(ret, buf + off, slen); + ret[slen] = 0; + return ret; + } + off += slen; + if (buf[off] == ';' || buf[off] == '\n') + off++; + count++; + } + return ERR_PTR(-EINVAL); +} + +static ssize_t strings_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dvct_string_lookup *lk_s; + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int ret = 0; + + DVCT_IN(); + if (!ds_dev->desc || !ds_dev->desc->dvc_spec + || !*ds_dev->desc->dvc_spec) + return sprintf(buf, "No Descriptors.\n"); + + if (!ds_dev->desc->lk_tbl) + return sprintf(buf, "No Strings.\n"); + + for (lk_s = ds_dev->desc->lk_tbl; lk_s->str && lk_s->id; lk_s++) { + int desc_offset, offset; + + /* + * Check if it fits, worst case is "Unknown(%p): %s\n" + * 8 + 16 + 3 + string length + 1 + */ + if ((ret + 28 + strlen(lk_s->str->s)) > PAGE_SIZE) { + dev_warn(dev, "Strings attribute page overrun\n"); + break; + } + + if (dvctrace_string_ptr_to_offset(ds_dev->desc->dvc_spec, + lk_s->id, &desc_offset, + &offset)) + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "Unknown(%p): %s\n", lk_s->id, + lk_s->str->s); + else + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "%d.%d: %s\n", desc_offset, offset, + lk_s->str->s); + } + return ret; +} + +static ssize_t strings_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int count, i, ret; + + DVCT_IN(); + if (ds_dev->instance_taken) + return -EBUSY; + + count = count_strings(buf, size); + if (count <= 0) { + dev_err(dev, "Invalid input string:(%zu) %s\n", size, buf); + return -EINVAL; + } + + if (ds_dev->desc == &ds_dev->static_desc) { + dev_warn(dev, "Cannot set strings in static descriptors\n"); + return -EINVAL; + } + + if (ds_dev->desc->str.strings) + free_strings(ds_dev->desc); + + ret = alloc_strings(ds_dev->desc, count); + if (ret < 0) { + dev_err(dev, "Cannot allocate strings %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + char *tmp; + int d_off, off; + u8 *pid; + + tmp = get_string(buf, size, i, &d_off, &off); + if (IS_ERR_OR_NULL(tmp)) { + free_strings(ds_dev->desc); + return -EINVAL; + } + + pid = dvctrace_offset_to_string_ptr(ds_dev->desc->dvc_spec, + d_off, off); + if (IS_ERR_OR_NULL(pid)) { + dev_warn(dev, "String out of bounds\n"); + free_strings(ds_dev->desc); + return -EINVAL; + } + + ds_dev->desc->lk_tbl[i].id = pid; + ds_dev->desc->lk_tbl[i].str = &ds_dev->desc->str.strings[i]; + ds_dev->desc->str.strings[i].s = tmp; + } + return size; +} + +static DEVICE_ATTR_RW(strings); + +static ssize_t protocol_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + DVCT_IN(); + return sprintf(buf, "%d\n", dev_to_dvct_source_device(dev)->protocol); +} + +static ssize_t protocol_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + if (ds_dev->instance_taken) + return -EBUSY; + + if (!kstrtou8(buf, 10, &ds_dev->protocol)) + return size; + + return 
-EINVAL; +} + +static DEVICE_ATTR_RW(protocol); + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + if (ds_dev->instance_taken) { + if (ds_dev->function_taken) + return sprintf(buf, "In use\n"); + else + return sprintf(buf, "Reserved\n"); + } else { + return sprintf(buf, "Free\n"); + } +} + +static DEVICE_ATTR_RO(status); + +static struct attribute *dvct_source_attrs[] = { + &dev_attr_protocol.attr, + &dev_attr_status.attr, + &dev_attr_strings.attr, + &dev_attr_descriptors.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(dvct_source); + + +static int dvct_match(struct device *dev, struct device_driver *drv) +{ + const char *devname = dev_name(dev); + + DVCT_IN(); + if (strlen(devname) <= strlen(drv->name)) + return -1; + if (strncmp(devname, drv->name, strlen(drv->name))) + return -1; + return devname[strlen(drv->name)] == '-'; +}; + +static struct bus_type dvctrace_bus_type = { + .name = "dvctrace", + .match = dvct_match, + .dev_groups = dvct_source_groups, +}; + +static struct device dvctrace_bus = { + .init_name = "dvctrace-bus", +}; + +static int dvct_match_free(struct device *dev, void *data) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + return !ds_dev->instance_taken; +} + +struct dvct_source_device *dvct_source_find_by_name(const char *name) +{ + struct device *dev; + + DVCT_IN(); + dev = bus_find_device_by_name(&dvctrace_bus_type, NULL, name); + if (IS_ERR_OR_NULL(dev)) + return ERR_PTR(-ENODEV); + return dev_to_dvct_source_device(dev); +} +EXPORT_SYMBOL_GPL(dvct_source_find_by_name); + +struct dvct_source_device *dvct_source_find_free_by_name(const char *name) +{ + struct dvct_source_device *ds_dev = dvct_source_find_by_name(name); + + DVCT_IN(); + if (IS_ERR_OR_NULL(ds_dev)) + return ERR_PTR(-ENODEV); + + if (ds_dev->instance_taken) + return ERR_PTR(-EBUSY); + + return ds_dev; +} +EXPORT_SYMBOL_GPL(dvct_source_find_free_by_name); + +struct dvct_source_device *dvct_source_find_free(void) +{ + struct device *dev = bus_find_device(&dvctrace_bus_type, NULL, + NULL, dvct_match_free); + DVCT_IN(); + if (IS_ERR_OR_NULL(dev)) + return ERR_PTR(-ENODEV); + + return dev_to_dvct_source_device(dev); +} +EXPORT_SYMBOL_GPL(dvct_source_find_free); + +static int fn_count_free(struct device *dev, void *data) +{ + int *count = data; + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + if (!ds_dev->instance_taken) + (*count)++; + return 0; +} + +int dvct_source_count_free(void) +{ + int count = 0; + + DVCT_IN(); + bus_for_each_dev(&dvctrace_bus_type, NULL, &count, fn_count_free); + return count; +} +EXPORT_SYMBOL_GPL(dvct_source_count_free); + +struct dvct_source_driver +*dvct_source_get_drv(struct dvct_source_device *ds_dev) +{ + BUG_ON(ds_dev->device.driver == NULL); + return drv_to_dvct_source_driver(ds_dev->device.driver); +} +EXPORT_SYMBOL_GPL(dvct_source_get_drv); + +int dvct_source_device_add(struct dvct_source_device *ds_dev, + struct dvct_source_driver *ds_drv) +{ + int ret; + + DVCT_IN(); + if (!ds_dev) + return -ENODEV; + if (!ds_drv) + return -EINVAL; + + spin_lock_init(&ds_dev->lock); + spin_lock(&ds_dev->lock); + ds_dev->instance_taken = 0; + ds_dev->function_taken = 0; + spin_unlock(&ds_dev->lock); + + device_initialize(&ds_dev->device); + ds_dev->device.bus = &dvctrace_bus_type; + + if (!ds_dev->device.parent) + ds_dev->device.parent = &dvctrace_bus; + + dev_set_name(&ds_dev->device, 
"%s-%s", ds_drv->driver.name, + ds_dev->name_add); + + ret = device_add(&ds_dev->device); + if (ret) { + dev_err(&dvctrace_bus, "Cannot add device %s %d\n", + ds_dev->name_add, ret); + return ret; + } + + if (ds_dev->static_desc.dvc_spec) + ds_dev->desc = &ds_dev->static_desc; + + dev_notice(&dvctrace_bus, "Adding device %s\n", ds_dev->name_add); + return 0; +}; +EXPORT_SYMBOL_GPL(dvct_source_device_add); + +void dvct_source_device_del(struct dvct_source_device *ds_dev) +{ + DVCT_IN(); + + if (ds_dev->desc && ds_dev->desc != &ds_dev->static_desc) { + free_descriptors(ds_dev->desc); + ds_dev->desc = NULL; + } + + device_del(&ds_dev->device); +}; +EXPORT_SYMBOL_GPL(dvct_source_device_del); + +int __dvct_source_driver_register(struct dvct_source_driver *ds_drv, + struct module *owner) +{ + DVCT_IN(); + if (!ds_drv->activate || + !ds_drv->binded || + !ds_drv->start_transfer || + !ds_drv->stop_transfer || + !ds_drv->unbinded || + !ds_drv->deactivate) + return -EINVAL; + + ds_drv->driver.owner = owner; + ds_drv->driver.bus = &dvctrace_bus_type; + return driver_register(&ds_drv->driver); +} +EXPORT_SYMBOL_GPL(__dvct_source_driver_register); + +void dvct_source_driver_unregister(struct dvct_source_driver *ds_drv) +{ + DVCT_IN(); + driver_unregister(&ds_drv->driver); +} +EXPORT_SYMBOL_GPL(dvct_source_driver_unregister); + +static int __init dtb_init(void) +{ + int ret; + + DVCT_IN(); + ret = device_register(&dvctrace_bus); + if (ret) { + pr_err("Cannot register bus device %d\n", ret); + return ret; + } + + ret = bus_register(&dvctrace_bus_type); + if (ret) { + pr_err("Cannot register bus %d\n", ret); + return ret; + } + return 0; +} + +static void __exit dtb_exit(void) +{ + DVCT_IN(); + bus_unregister(&dvctrace_bus_type); +} + +subsys_initcall(dtb_init); +module_exit(dtb_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DvC-Trace bus implementation"); +MODULE_AUTHOR("Traian Schiau "); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 328ca93781cf..1b76d9585902 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = { .match = sunxi_rsb_device_match, .probe = sunxi_rsb_device_probe, .remove = sunxi_rsb_device_remove, + .uevent = of_device_uevent_modalias, }; static void sunxi_rsb_dev_release(struct device *dev) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index c28dca0c613d..0d85d55fcbc6 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -588,5 +588,7 @@ config TILE_SROM source "drivers/char/xillybus/Kconfig" +source "drivers/char/rpmb/Kconfig" + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7dc3abe66464..522e71cf6b04 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -60,3 +60,5 @@ js-rtc-y = rtc.o obj-$(CONFIG_TILE_SROM) += tile-srom.o obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o +obj-$(CONFIG_RPMB) += rpmb/ +obj-$(CONFIG_ACRN_VHM) += vhm/ diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9b6b6023193b..dde7caac7f9f 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -872,6 +872,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, } } wmb(); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d1f5bb534e0e..6e9df558325b 100644 --- a/drivers/char/hw_random/via-rng.c +++ 
b/drivers/char/hw_random/via-rng.c @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng) /* Enable secondary noise source on CPUs where it is present. */ /* Nehemiah stepping 8 and higher */ - if ((c->x86_model == 9) && (c->x86_mask > 7)) + if ((c->x86_model == 9) && (c->x86_stepping > 7)) lo |= VIA_NOISESRC2; /* Esther */ diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index 2059f79d669a..c3a23ec3e76f 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -81,7 +81,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, pr_err("ipmi:dmi: Error allocation IPMI platform device"); return; } - pdev->driver_override = override; + pdev->driver_override = kasprintf(GFP_KERNEL, "%s", + override); + if (!pdev->driver_override) + goto err; if (type == IPMI_DMI_TYPE_SSIF) goto add_properties; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 810b138f5897..c82d9fd2f05a 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4030,7 +4030,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, } static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, - struct list_head *timeouts, long timeout_period, + struct list_head *timeouts, + unsigned long timeout_period, int slot, unsigned long *flags, unsigned int *waiting_msgs) { @@ -4043,8 +4044,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, if (!ent->inuse) return; - ent->timeout -= timeout_period; - if (ent->timeout > 0) { + if (timeout_period < ent->timeout) { + ent->timeout -= timeout_period; (*waiting_msgs)++; return; } @@ -4110,7 +4111,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, } } -static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 36f47e8d06a3..c04aa11f0e21 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -242,6 +242,9 @@ struct smi_info { /* The timer for this si. 
*/ struct timer_list si_timer; + /* This flag is set, if the timer can be set */ + bool timer_can_start; + /* This flag is set, if the timer is running (timer_pending() isn't enough) */ bool timer_running; @@ -417,6 +420,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { + if (!smi_info->timer_can_start) + return; smi_info->last_timeout_jiffies = jiffies; mod_timer(&smi_info->si_timer, new_val); smi_info->timer_running = true; @@ -436,21 +441,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); } -static void start_check_enables(struct smi_info *smi_info, bool start_timer) +static void start_check_enables(struct smi_info *smi_info) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - if (start_timer) - start_new_msg(smi_info, msg, 2); - else - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_CHECKING_ENABLES; } -static void start_clear_flags(struct smi_info *smi_info, bool start_timer) +static void start_clear_flags(struct smi_info *smi_info) { unsigned char msg[3]; @@ -459,10 +461,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer) msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; - if (start_timer) - start_new_msg(smi_info, msg, 3); - else - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + start_new_msg(smi_info, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } @@ -497,11 +496,11 @@ static void start_getting_events(struct smi_info *smi_info) * Note that we cannot just use disable_irq(), since the interrupt may * be shared. */ -static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) +static inline bool disable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = true; - start_check_enables(smi_info, start_timer); + start_check_enables(smi_info); return true; } return false; @@ -511,7 +510,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = false; - start_check_enables(smi_info, true); + start_check_enables(smi_info); return true; } return false; @@ -529,7 +528,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) msg = ipmi_alloc_smi_msg(); if (!msg) { - if (!disable_si_irq(smi_info, true)) + if (!disable_si_irq(smi_info)) smi_info->si_state = SI_NORMAL; } else if (enable_si_irq(smi_info)) { ipmi_free_smi_msg(msg); @@ -545,7 +544,7 @@ static void handle_flags(struct smi_info *smi_info) /* Watchdog pre-timeout */ smi_inc_stat(smi_info, watchdog_pretimeouts); - start_clear_flags(smi_info, true); + start_clear_flags(smi_info); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; if (smi_info->intf) ipmi_smi_watchdog_pretimeout(smi_info->intf); @@ -928,7 +927,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, * disable and messages disabled. */ if (smi_info->supports_event_msg_buff || smi_info->irq) { - start_check_enables(smi_info, true); + start_check_enables(smi_info); } else { smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) @@ -1235,6 +1234,7 @@ static int smi_start_processing(void *send_info, /* Set up the timer that drives the interface. 
*/ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); + new_smi->timer_can_start = true; smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); /* Try to claim any interrupts. */ @@ -3416,15 +3416,17 @@ static void check_for_broken_irqs(struct smi_info *smi_info) check_set_rcv_irq(smi_info); } -static inline void wait_for_timer_and_thread(struct smi_info *smi_info) +static inline void stop_timer_and_thread(struct smi_info *smi_info) { if (smi_info->thread != NULL) kthread_stop(smi_info->thread); + + smi_info->timer_can_start = false; if (smi_info->timer_running) del_timer_sync(&smi_info->si_timer); } -static int is_new_interface(struct smi_info *info) +static struct smi_info *find_dup_si(struct smi_info *info) { struct smi_info *e; @@ -3439,24 +3441,36 @@ static int is_new_interface(struct smi_info *info) */ if (info->slave_addr && !e->slave_addr) e->slave_addr = info->slave_addr; - return 0; + return e; } } - return 1; + return NULL; } static int add_smi(struct smi_info *new_smi) { int rv = 0; + struct smi_info *dup; mutex_lock(&smi_infos_lock); - if (!is_new_interface(new_smi)) { - pr_info(PFX "%s-specified %s state machine: duplicate\n", - ipmi_addr_src_to_str(new_smi->addr_source), - si_to_str[new_smi->si_type]); - rv = -EBUSY; - goto out_err; + dup = find_dup_si(new_smi); + if (dup) { + if (new_smi->addr_source == SI_ACPI && + dup->addr_source == SI_SMBIOS) { + /* We prefer ACPI over SMBIOS. */ + dev_info(dup->dev, + "Removing SMBIOS-specified %s state machine in favor of ACPI\n", + si_to_str[new_smi->si_type]); + cleanup_one_si(dup); + } else { + dev_info(new_smi->dev, + "%s-specified %s state machine: duplicate\n", + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type]); + rv = -EBUSY; + goto out_err; + } } pr_info(PFX "Adding %s-specified %s state machine\n", @@ -3593,7 +3607,7 @@ static int try_smi_init(struct smi_info *new_smi) * Start clearing the flags before we enable interrupts or the * timer to avoid racing with the timer. */ - start_clear_flags(new_smi, false); + start_clear_flags(new_smi); /* * IRQ is defined to be set when non-zero. 
req_events will @@ -3662,7 +3676,7 @@ static int try_smi_init(struct smi_info *new_smi) return 0; out_err_stop_timer: - wait_for_timer_and_thread(new_smi); + stop_timer_and_thread(new_smi); out_err: new_smi->interrupt_disabled = true; @@ -3854,7 +3868,7 @@ static void cleanup_one_si(struct smi_info *to_clean) */ if (to_clean->irq_cleanup) to_clean->irq_cleanup(to_clean); - wait_for_timer_and_thread(to_clean); + stop_timer_and_thread(to_clean); /* * Timeouts are stopped, now make sure the interrupts are off @@ -3865,7 +3879,8 @@ static void cleanup_one_si(struct smi_info *to_clean) poll(to_clean); schedule_timeout_uninterruptible(1); } - disable_si_irq(to_clean, false); + if (to_clean->handlers) + disable_si_irq(to_clean); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 970e1242a282..f11224a5dc5c 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -107,6 +107,8 @@ static ssize_t read_mem(struct file *file, char __user *buf, phys_addr_t p = *ppos; ssize_t read, sz; void *ptr; + char *bounce; + int err; if (p != *ppos) return 0; @@ -129,15 +131,22 @@ static ssize_t read_mem(struct file *file, char __user *buf, } #endif + bounce = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!bounce) + return -ENOMEM; + while (count > 0) { unsigned long remaining; - int allowed; + int allowed, probe; sz = size_inside_page(p, count); + err = -EPERM; allowed = page_is_allowed(p >> PAGE_SHIFT); if (!allowed) - return -EPERM; + goto failed; + + err = -EFAULT; if (allowed == 2) { /* Show zeros for restricted memory. */ remaining = clear_user(buf, sz); @@ -149,24 +158,32 @@ static ssize_t read_mem(struct file *file, char __user *buf, */ ptr = xlate_dev_mem_ptr(p); if (!ptr) - return -EFAULT; - - remaining = copy_to_user(buf, ptr, sz); + goto failed; + probe = probe_kernel_read(bounce, ptr, sz); unxlate_dev_mem_ptr(p, ptr); + if (probe) + goto failed; + + remaining = copy_to_user(buf, bounce, sz); } if (remaining) - return -EFAULT; + goto failed; buf += sz; p += sz; count -= sz; read += sz; } + kfree(bounce); *ppos += read; return read; + +failed: + kfree(bounce); + return err; } static ssize_t write_mem(struct file *file, const char __user *buf, diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f..ea0115cf5fc0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -259,7 +259,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig new file mode 100644 index 000000000000..084c286d95e0 --- /dev/null +++ b/drivers/char/rpmb/Kconfig @@ -0,0 +1,31 @@ +config RPMB + tristate "RPMB partition interface" + help + Unified RPMB partition interface for eMMC and UFS. + Provides interface for in kernel security controllers to + access RPMB partition. + + If unsure, select N. + +config RPMB_INTF_DEV + bool "RPMB character device interface /dev/rpmbN" + depends on RPMB + help + Say yes here if you want to access RPMB from user space + via character device interface /dev/rpmb%d + +config RPMB_SIM + tristate "RPMB partition device simulator" + default n + select RPMB + select CRYPTO_SHA256 + select CRYPTO_HMAC + help + RPMB partition simulation device is a virtual device that + provides simulation of the RPMB protocol and use kernel memory + as storage. + + Be aware it doesn't promise any real security. 
This driver is + suitable only for testing of the RPMB subsystem or RPMB applications + prior to RPMB key provisioning. + Most people should say N here. diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile new file mode 100644 index 000000000000..81f924fd9a87 --- /dev/null +++ b/drivers/char/rpmb/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_RPMB) += rpmb.o +rpmb-objs += core.o +rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o +obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/char/rpmb/cdev.c b/drivers/char/rpmb/cdev.c new file mode 100644 index 000000000000..6874172315cf --- /dev/null +++ b/drivers/char/rpmb/cdev.c @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2015-2016 Intel Corp. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include + +#include "rpmb-cdev.h" + +static dev_t rpmb_devt; +#define RPMB_MAX_DEVS MINORMASK + +#define RPMB_DEV_OPEN 0 /** single open bit (position) */ +/* from MMC_IOC_MAX_CMDS */ +#define RPMB_MAX_FRAMES 255 + +/** + * rpmb_open - the open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_open(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev; + + rdev = container_of(inode->i_cdev, struct rpmb_dev, cdev); + if (!rdev) + return -ENODEV; + + /* the rpmb is single open! 
*/ + if (test_and_set_bit(RPMB_DEV_OPEN, &rdev->status)) + return -EBUSY; + + mutex_lock(&rdev->lock); + + fp->private_data = rdev; + + mutex_unlock(&rdev->lock); + + return nonseekable_open(inode, fp); +} + +/** + * rpmb_open - the open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_release(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev = fp->private_data; + + clear_bit(RPMB_DEV_OPEN, &rdev->status); + + return 0; +} + +/** + * rpmb_cmd_copy_from_user - copy rpmb command from the user space + * + * @cmd: internal cmd structure + * @ucmd: user space cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_from_user(struct rpmb_cmd *cmd, + struct rpmb_ioc_cmd __user *ucmd) +{ + size_t sz; + struct rpmb_frame *frames; + u64 frames_ptr; + + if (get_user(cmd->flags, &ucmd->flags)) + return -EFAULT; + + if (get_user(cmd->nframes, &ucmd->nframes)) + return -EFAULT; + + if (cmd->nframes > RPMB_MAX_FRAMES) + return -EOVERFLOW; + + if (!cmd->nframes) + return -EINVAL; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + sz = cmd->nframes * sizeof(struct rpmb_frame); + frames = memdup_user(u64_to_user_ptr(frames_ptr), sz); + if (IS_ERR(frames)) + return PTR_ERR(frames); + + cmd->frames = frames; + return 0; +} + +/** + * rpmb_cmd_copy_to_user - copy rpmb command the the user space + * + * @ucmd: user space cmd structure + * @cmd: internal cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_to_user(struct rpmb_ioc_cmd __user *ucmd, + struct rpmb_cmd *cmd) +{ + size_t sz; + u64 frames_ptr; + + sz = cmd->nframes * sizeof(struct rpmb_frame); + + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + /* some archs have issues with 64bit get_user */ + if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, sz)) + return -EFAULT; + + return 0; +} + +/** + * rpmb_ioctl_seq_cmd - issue an rpmb command sequence + * + * @rdev: rpmb device + * @ptr: rpmb cmd sequence + * + * RPMB_IOC_SEQ_CMD handler + * + * Return: 0 on success, <0 on error + */ +static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_seq_cmd __user *ptr) +{ + __u64 ncmds; + struct rpmb_cmd *cmds; + struct rpmb_ioc_cmd __user *ucmds; + + int i; + int ret; + + /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) + return -EFAULT; + + if (ncmds > 3) { + dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", + ncmds); + return -EINVAL; + } + + cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + + ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_from_user(&cmds[i], &ucmds[i]); + if (ret) + goto out; + } + + ret = rpmb_cmd_seq(rdev, cmds, ncmds); + if (ret) + goto out; + + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_to_user(&ucmds[i], &cmds[i]); + if (ret) + goto out; + } +out: + for (i = 0; i < ncmds; i++) + kfree(cmds[i].frames); + kfree(cmds); + return ret; +} + +/** + * rpmb_ioctl_req_cmd - issue an rpmb request command + * + * @rdev: rpmb device + * @ptr: rpmb request command + * + * RPMB_IOC_REQ_CMD handler + * + * Return: 0 on success; < 0 on 
error + */ +static long rpmb_ioctl_req_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_req_cmd __user *ptr) +{ + struct rpmb_data rpmbd; + u64 req_type; + int ret; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&req_type, &ptr->req_type, sizeof(req_type))) + return -EFAULT; + + if (req_type >= U16_MAX) + return -EINVAL; + + memset(&rpmbd, 0, sizeof(rpmbd)); + + rpmbd.req_type = req_type & 0xFFFF; + + ret = rpmb_cmd_copy_from_user(&rpmbd.icmd, &ptr->icmd); + if (ret) + goto out; + + ret = rpmb_cmd_copy_from_user(&rpmbd.ocmd, &ptr->ocmd); + if (ret) + goto out; + + ret = rpmb_cmd_req(rdev, &rpmbd); + if (ret) + goto out; + + ret = rpmb_cmd_copy_to_user(&ptr->ocmd, &rpmbd.ocmd); + +out: + kfree(rpmbd.icmd.frames); + kfree(rpmbd.ocmd.frames); + return ret; +} + +/** + * rpmb_ioctl - rpmb ioctl dispatcher + * + * @fp: a file pointer + * @cmd: ioctl command RPMB_IOC_REQ_CMD or RPMB_IOC_SEQ_CMD + * @arg: ioctl data: rpmb_ioc_req_cmd or rpmb_ioc_seq_cmd + * + * Return: 0 on success; < 0 on error + */ +static long rpmb_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + struct rpmb_dev *rdev = fp->private_data; + void __user *ptr = (void __user *)arg; + + switch (cmd) { + case RPMB_IOC_REQ_CMD: + return rpmb_ioctl_req_cmd(rdev, ptr); + case RPMB_IOC_SEQ_CMD: + return rpmb_ioctl_seq_cmd(rdev, ptr); + default: + dev_err(&rdev->dev, "unsupported ioctl 0x%x.\n", cmd); + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +static long rpmb_compat_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + return rpmb_ioctl(fp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif /* CONFIG_COMPAT */ + +static const struct file_operations rpmb_fops = { + .open = rpmb_open, + .release = rpmb_release, + .unlocked_ioctl = rpmb_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = rpmb_compat_ioctl, +#endif + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +void rpmb_cdev_prepare(struct rpmb_dev *rdev) +{ + rdev->dev.devt = MKDEV(MAJOR(rpmb_devt), rdev->id); + rdev->cdev.owner = THIS_MODULE; + cdev_init(&rdev->cdev, &rpmb_fops); +} + +void rpmb_cdev_add(struct rpmb_dev *rdev) +{ + cdev_add(&rdev->cdev, rdev->dev.devt, 1); +} + +void rpmb_cdev_del(struct rpmb_dev *rdev) +{ + if (rdev->dev.devt) + cdev_del(&rdev->cdev); +} + +int __init rpmb_cdev_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&rpmb_devt, 0, RPMB_MAX_DEVS, "rpmb"); + if (ret < 0) + pr_err("unable to allocate char dev region\n"); + + return ret; +} + +void __exit rpmb_cdev_exit(void) +{ + unregister_chrdev_region(rpmb_devt, RPMB_MAX_DEVS); +} diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c new file mode 100644 index 000000000000..16671396fec8 --- /dev/null +++ b/drivers/char/rpmb/core.c @@ -0,0 +1,560 @@ +/* + * Copyright (C) 2015-2016 Intel Corp. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rpmb-cdev.h" + +static DEFINE_IDA(rpmb_ida); + +/** + * rpmb_dev_get - increase rpmb device ref counter + * + * @rdev: rpmb device + */ +struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) +{ + return get_device(&rdev->dev) ? rdev : NULL; +} +EXPORT_SYMBOL_GPL(rpmb_dev_get); + +/** + * rpmb_dev_put - decrease rpmb device ref counter + * + * @rdev: rpmb device + */ +void rpmb_dev_put(struct rpmb_dev *rdev) +{ + put_device(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_put); + +static int rpmb_request_verify(struct rpmb_dev *rdev, struct rpmb_data *rpmbd) +{ + u16 req_type, block_count; + struct rpmb_cmd *in_cmd = &rpmbd->icmd; + struct rpmb_cmd *out_cmd = &rpmbd->ocmd; + + if (!in_cmd->frames || !in_cmd->nframes || + !out_cmd->frames || !out_cmd->nframes) + return -EINVAL; + + req_type = be16_to_cpu(in_cmd->frames[0].req_resp); + block_count = be16_to_cpu(in_cmd->frames[0].block_count); + + if (rpmbd->req_type != req_type) { + dev_err(&rdev->dev, "rpmb req type doesn't match 0x%04X = 0x%04X\n", + req_type, rpmbd->req_type); + return -EINVAL; + } + + switch (req_type) { + case RPMB_PROGRAM_KEY: + dev_dbg(&rdev->dev, "rpmb program key = 0x%1x blk = %d\n", + req_type, block_count); + break; + case RPMB_GET_WRITE_COUNTER: + dev_dbg(&rdev->dev, "rpmb get write counter = 0x%1x blk = %d\n", + req_type, block_count); + + break; + case RPMB_WRITE_DATA: + dev_dbg(&rdev->dev, "rpmb write data = 0x%1x blk = %d\n", + req_type, block_count); + + if (rdev->ops->reliable_wr_cnt && + block_count > rdev->ops->reliable_wr_cnt) { + dev_err(&rdev->dev, "rpmb write data: block count %u > reliable wr count %u\n", + block_count, rdev->ops->reliable_wr_cnt); + return -EINVAL; + } + + if (block_count > in_cmd->nframes) { + dev_err(&rdev->dev, "rpmb write data: block count %u > in frame count %u\n", + block_count, in_cmd->nframes); + return -EINVAL; + } + break; + case RPMB_READ_DATA: + dev_dbg(&rdev->dev, "rpmb read data = 0x%1x blk = %d\n", + req_type, block_count); + + if (block_count > out_cmd->nframes) { + dev_err(&rdev->dev, "rpmb read data: block count %u > out frame count %u\n", + block_count, out_cmd->nframes); + return -EINVAL; + } + break; + case RPMB_RESULT_READ: + /* Internal command not supported */ + dev_err(&rdev->dev, "NOTSUPPORTED rpmb resut read = 0x%1x blk = %d\n", + req_type, block_count); + return -EOPNOTSUPP; + + default: + dev_err(&rdev->dev, "Error rpmb invalid command = 0x%1x blk = %d\n", + req_type, block_count); + return -EINVAL; + } + + return 0; +} + +/** + * rpmb_cmd_fixup - fixup rpmb command + * + * @rdev: rpmb device + * @cmds: rpmb command list + * @ncmds: number of commands + * + */ +static void rpmb_cmd_fixup(struct rpmb_dev *rdev, + struct rpmb_cmd *cmds, u32 ncmds) +{ + int i; + + if (rdev->ops->type != RPMB_TYPE_EMMC) + return; + + /* Fixup RPMB_READ_DATA specific to eMMC + * The block count of the RPMB read operation is not indicated + * in the original RPMB Data Read Request packet. + * This is different then implementation for other protocol + * standards. 
+ */ + for (i = 0; i < ncmds; i++) + if (cmds->frames->req_resp == cpu_to_be16(RPMB_READ_DATA)) { + dev_dbg(&rdev->dev, "Fixing up READ_DATA frame to block_count=0\n"); + cmds->frames->block_count = 0; + } +} + +/** + * rpmb_cmd_seq - send RPMB command sequence + * + * @rdev: rpmb device + * @cmds: rpmb command list + * @ncmds: number of commands + * + * Return: 0 on success + * -EINVAL on wrong parameters + * -EOPNOTSUPP if device doesn't support the requested operation + * < 0 if the operation fails + */ +int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds) +{ + int err; + + if (!rdev || !cmds || !ncmds) + return -EINVAL; + + mutex_lock(&rdev->lock); + err = -EOPNOTSUPP; + if (rdev->ops && rdev->ops->cmd_seq) { + rpmb_cmd_fixup(rdev, cmds, ncmds); + err = rdev->ops->cmd_seq(rdev->dev.parent, cmds, ncmds); + } + mutex_unlock(&rdev->lock); + return err; +} +EXPORT_SYMBOL_GPL(rpmb_cmd_seq); + +static void rpmb_cmd_set(struct rpmb_cmd *cmd, u32 flags, + struct rpmb_frame *frames, u32 nframes) +{ + cmd->flags = flags; + cmd->frames = frames; + cmd->nframes = nframes; +} + +/** + * rpmb_cmd_req_write - setup cmd request write sequence + * + * @cmd: cmd sequence + * @rpmbd: rpmb request data + * @cnt_in: number of input frames + * @cnt_out: number of output frames + * + * Return: 3 - number of commands in the sequence + */ +static u32 rpmb_cmd_req_write(struct rpmb_cmd *cmd, struct rpmb_data *rpmbd, + u32 cnt_in, u32 cnt_out) +{ + struct rpmb_frame *res_frame; + + rpmb_cmd_set(&cmd[0], RPMB_F_WRITE | RPMB_F_REL_WRITE, + rpmbd->icmd.frames, cnt_in); + res_frame = rpmbd->ocmd.frames; + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = cpu_to_be16(RPMB_RESULT_READ); + rpmb_cmd_set(&cmd[1], RPMB_F_WRITE, res_frame, 1); + + rpmb_cmd_set(&cmd[2], 0, rpmbd->ocmd.frames, cnt_out); + + return 3; +} + +/** + * rpmb_cmd_req_read - setup cmd request read sequence + * + * @cmd: cmd sequence + * @rpmbd: rpmb request data + * @cnt_in: number of input frames + * @cnt_out: number of output frames + * + * Return: 2 - number of commands in the sequence + */ +static u32 rpmb_cmd_req_read(struct rpmb_cmd *cmd, struct rpmb_data *rpmbd, + u32 cnt_in, u32 cnt_out) +{ + rpmb_cmd_set(&cmd[0], RPMB_F_WRITE, rpmbd->icmd.frames, cnt_in); + rpmb_cmd_set(&cmd[1], 0, rpmbd->ocmd.frames, cnt_out); + + return 2; +} + +/** + * rpmb_cmd_req - send rpmb request command + * + * @rdev: rpmb device + * @rpmbd: rpmb request data + * + * Return: 0 on success + * -EINVAL on wrong parameters + * -EOPNOTSUPP if device doesn't support the requested operation + * < 0 if the operation fails + */ +int rpmb_cmd_req(struct rpmb_dev *rdev, struct rpmb_data *rpmbd) +{ + struct rpmb_cmd cmd[3]; + u32 cnt_in, cnt_out; + u32 ncmds; + u16 type; + int ret; + + if (!rdev || !rpmbd) + return -EINVAL; + + ret = rpmb_request_verify(rdev, rpmbd); + if (ret) + return ret; + + if (!rdev->ops || !rdev->ops->cmd_seq) + return -EOPNOTSUPP; + + cnt_in = rpmbd->icmd.nframes; + cnt_out = rpmbd->ocmd.nframes; + type = rpmbd->req_type; + switch (type) { + case RPMB_PROGRAM_KEY: + ncmds = rpmb_cmd_req_write(cmd, rpmbd, 1, 1); + break; + + case RPMB_WRITE_DATA: + ncmds = rpmb_cmd_req_write(cmd, rpmbd, cnt_in, cnt_out); + break; + + case RPMB_GET_WRITE_COUNTER: + ncmds = rpmb_cmd_req_read(cmd, rpmbd, 1, 1); + break; + + case RPMB_READ_DATA: + ncmds = rpmb_cmd_req_write(cmd, rpmbd, cnt_in, cnt_out); + break; + + default: + return -EINVAL; + } + + mutex_lock(&rdev->lock); + ret = rdev->ops->cmd_seq(rdev->dev.parent, cmd, ncmds); + 
mutex_unlock(&rdev->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(rpmb_cmd_req); + +static void rpmb_dev_release(struct device *dev) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + ida_simple_remove(&rpmb_ida, rdev->id); + kfree(rdev); +} + +struct class rpmb_class = { + .name = "rpmb", + .owner = THIS_MODULE, + .dev_release = rpmb_dev_release, +}; +EXPORT_SYMBOL(rpmb_class); + +/** + * rpmb_dev_find_device - return first matching rpmb device + * + * @data: data for the match function + * @match: the matching function + * + * Return: matching rpmb device or NULL on failure + */ +struct rpmb_dev *rpmb_dev_find_device(void *data, + int (*match)(struct device *dev, + const void *data)) +{ + struct device *dev; + + dev = class_find_device(&rpmb_class, NULL, data, match); + + return dev ? to_rpmb_dev(dev) : NULL; +} +EXPORT_SYMBOL_GPL(rpmb_dev_find_device); + +static int match_by_type(struct device *dev, const void *data) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + enum rpmb_type *type = (enum rpmb_type *)data; + + return (*type == RPMB_TYPE_ANY || rdev->ops->type == *type); +} + +/** + * rpmb_dev_get_by_type - return first registered rpmb device + * with matching type. + * If run with RPMB_TYPE_ANY the first an probably only + * device is returned + * + * @type: rpbm underlying device type + * + * Return: matching rpmb device or NULL/ERR_PTR on failure + */ +struct rpmb_dev *rpmb_dev_get_by_type(enum rpmb_type type) +{ + if (type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + return rpmb_dev_find_device(&type, match_by_type); +} +EXPORT_SYMBOL_GPL(rpmb_dev_get_by_type); + +static int match_by_parent(struct device *dev, const void *data) +{ + const struct device *parent = data; + + return (parent && dev->parent == parent); +} + +/** + * rpmb_dev_find_by_device - retrieve rpmb device from the parent device + * + * @parent: parent device of the rpmb device + * + * Return: NULL if there is no rpmb device associated with the parent device + */ +struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent) +{ + if (!parent) + return NULL; + + return rpmb_dev_find_device(parent, match_by_parent); +} +EXPORT_SYMBOL_GPL(rpmb_dev_find_by_device); + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + ssize_t ret; + + switch (rdev->ops->type) { + case RPMB_TYPE_EMMC: + ret = sprintf(buf, "EMMC\n"); + break; + case RPMB_TYPE_UFS: + ret = sprintf(buf, "UFS\n"); + break; + case RPMB_TYPE_SIM: + ret = sprintf(buf, "SIM\n"); + break; + default: + ret = sprintf(buf, "UNKNOWN\n"); + break; + } + + return ret; +} +static DEVICE_ATTR_RO(type); + +static ssize_t id_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + size_t sz = min_t(size_t, rdev->ops->dev_id_len, PAGE_SIZE); + + if (!rdev->ops->dev_id) + return 0; + + return memory_read_from_buffer(buf, count, &off, rdev->ops->dev_id, sz); +} +static BIN_ATTR_RO(id, 0); + +static ssize_t reliable_wr_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return sprintf(buf, "%u\n", rdev->ops->reliable_wr_cnt); +} +static DEVICE_ATTR_RO(reliable_wr_cnt); + +static struct attribute *rpmb_attrs[] = { + &dev_attr_type.attr, + &dev_attr_reliable_wr_cnt.attr, + NULL, +}; + +static struct bin_attribute *rpmb_bin_attributes[] = { + &bin_attr_id, + NULL, +}; + 
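/*
 * Usage sketch (illustration only, not part of this file): an in-kernel
 * client could query the write counter of the first eMMC-backed RPMB
 * device via rpmb_dev_get_by_type() and rpmb_cmd_req() declared above.
 * The function name and error handling below are illustrative; it assumes
 * <linux/random.h> for get_random_bytes().
 */
static int example_rpmb_read_write_counter(u32 *counter)
{
	struct rpmb_frame in = {}, out = {};
	struct rpmb_data data = {
		.req_type = RPMB_GET_WRITE_COUNTER,
		.icmd = { .frames = &in,  .nframes = 1 },
		.ocmd = { .frames = &out, .nframes = 1 },
	};
	struct rpmb_dev *rdev;
	int ret;

	rdev = rpmb_dev_get_by_type(RPMB_TYPE_EMMC);
	if (IS_ERR_OR_NULL(rdev))
		return -ENODEV;

	/* The request type is carried both in rpmb_data and in the frame. */
	in.req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER);
	get_random_bytes(in.nonce, sizeof(in.nonce));

	ret = rpmb_cmd_req(rdev, &data);
	if (!ret)
		*counter = be32_to_cpu(out.write_counter);

	rpmb_dev_put(rdev);
	return ret;
}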
+static struct attribute_group rpmb_attr_group = { + .attrs = rpmb_attrs, + .bin_attrs = rpmb_bin_attributes, +}; + +static const struct attribute_group *rpmb_attr_groups[] = { + &rpmb_attr_group, + NULL +}; + +/** + * rpmb_dev_unregister - unregister RPMB partition from the RPMB subsystem + * + * @dev: parent device of the rpmb device + */ +int rpmb_dev_unregister(struct device *dev) +{ + struct rpmb_dev *rdev; + + if (!dev) + return -EINVAL; + + rdev = rpmb_dev_find_by_device(dev); + if (!rdev) { + dev_warn(dev, "no disk found %s\n", dev_name(dev->parent)); + return -ENODEV; + } + + rpmb_dev_put(rdev); + + mutex_lock(&rdev->lock); + rpmb_cdev_del(rdev); + device_del(&rdev->dev); + mutex_unlock(&rdev->lock); + + rpmb_dev_put(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister); + +/** + * rpmb_dev_register - register RPMB partition with the RPMB subsystem + * + * @dev: storage device of the rpmb device + * @ops: device specific operations + */ +struct rpmb_dev *rpmb_dev_register(struct device *dev, + const struct rpmb_ops *ops) +{ + struct rpmb_dev *rdev; + int id; + int ret; + + if (!dev || !ops) + return ERR_PTR(-EINVAL); + + if (!ops->cmd_seq) + return ERR_PTR(-EINVAL); + + if (ops->type == RPMB_TYPE_ANY || ops->type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + id = ida_simple_get(&rpmb_ida, 0, 0, GFP_KERNEL); + if (id < 0) { + ret = id; + goto exit; + } + + mutex_init(&rdev->lock); + rdev->ops = ops; + rdev->id = id; + + dev_set_name(&rdev->dev, "rpmb%d", id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; + rdev->dev.groups = rpmb_attr_groups; + + rpmb_cdev_prepare(rdev); + + ret = device_register(&rdev->dev); + if (ret) + goto exit; + + rpmb_cdev_add(rdev); + + dev_dbg(&rdev->dev, "registered disk\n"); + + return rdev; + +exit: + if (id >= 0) + ida_simple_remove(&rpmb_ida, id); + kfree(rdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(rpmb_dev_register); + +static int __init rpmb_init(void) +{ + ida_init(&rpmb_ida); + class_register(&rpmb_class); + return rpmb_cdev_init(); +} + +static void __exit rpmb_exit(void) +{ + rpmb_cdev_exit(); + class_unregister(&rpmb_class); + ida_destroy(&rpmb_ida); +} + +subsys_initcall(rpmb_init); +module_exit(rpmb_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("RPMB class"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/rpmb/rpmb-cdev.h b/drivers/char/rpmb/rpmb-cdev.h new file mode 100644 index 000000000000..5fb41e586ba9 --- /dev/null +++ b/drivers/char/rpmb/rpmb-cdev.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2015-2016 Intel Corp. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#ifdef CONFIG_RPMB_INTF_DEV +int __init rpmb_cdev_init(void); +void __exit rpmb_cdev_exit(void); +void rpmb_cdev_prepare(struct rpmb_dev *rdev); +void rpmb_cdev_add(struct rpmb_dev *rdev); +void rpmb_cdev_del(struct rpmb_dev *rdev); +#else +static inline int __init rpmb_cdev_init(void) { return 0; } +static inline void __exit rpmb_cdev_exit(void) {} +static inline void rpmb_cdev_prepare(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_add(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_del(struct rpmb_dev *rdev) {} +#endif /* CONFIG_RPMB_INTF_DEV */ diff --git a/drivers/char/rpmb/rpmb_sim.c b/drivers/char/rpmb/rpmb_sim.c new file mode 100644 index 000000000000..bf0681d9a341 --- /dev/null +++ b/drivers/char/rpmb/rpmb_sim.c @@ -0,0 +1,753 @@ +/****************************************************************************** + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include + +static const char id[] = "RPMB:SIM"; +#define CAPACITY_UNIT SZ_128K +#define CAPACITY_MIN SZ_128K +#define CAPACITY_MAX SZ_16M +#define BLK_UNIT SZ_256 + +static unsigned int max_wr_blks = 2; +module_param(max_wr_blks, uint, 0644); +MODULE_PARM_DESC(max_wr_blks, "max blocks that can be written in a single command (default: 2)"); + +static unsigned int daunits = 1; +module_param(daunits, uint, 0644); +MODULE_PARM_DESC(daunits, "number of data area units of 128K (default: 1)"); + +struct blk { + u8 data[BLK_UNIT]; +}; + +/** + * struct rpmb_sim_dev + * + * @dev: back pointer device + * @rdev: rpmb device + * @auth_key: Authentication key register which is used to authenticate + * accesses when MAC is calculated; + * @auth_key_set: true if authentication key was set + * @write_counter: Counter value for the total amount of successful + * authenticated data write requests made by the host. + * The initial value of this register after production is 00000000h. + * The value will be incremented by one along with each successful + * programming access. The value cannot be reset. After the counter + * has reached the maximum value of FFFFFFFFh, + * it will not be incremented anymore (overflow prevention) + * @hash_desc: hmac(sha256) shash descriptor + * + * @res_frames: frame that holds the result of the last write operation + * @out_frames: next read operation result frames + * @out_frames_cnt: number of the output frames + * + * @capacity: size of the partition in bytes multiple of 128K + * @blkcnt: block count + * @da: data area in blocks + */ +struct rpmb_sim_dev { + struct device *dev; + struct rpmb_dev *rdev; + u8 auth_key[32]; + bool auth_key_set; + u32 write_counter; + struct shash_desc *hash_desc; + + struct rpmb_frame res_frames[1]; + struct rpmb_frame *out_frames; + unsigned int out_frames_cnt; + + size_t capacity; + size_t blkcnt; + struct blk *da; +}; + +static __be16 op_result(struct rpmb_sim_dev *rsdev, u16 result) +{ + if (!rsdev->auth_key_set) + return cpu_to_be16(RPMB_ERR_NO_KEY); + + if (rsdev->write_counter == 0xFFFFFFFF) + result |= RPMB_ERR_COUNTER_EXPIRED; + + return cpu_to_be16(result); +} + +static __be16 req_to_resp(u16 req) +{ + return cpu_to_be16(RPMB_REQ2RESP(req)); +} + +static int rpmb_sim_calc_hmac(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *frames, + unsigned int blks, u8 *mac) +{ + struct shash_desc *desc = rsdev->hash_desc; + int i; + int ret; + + ret = crypto_shash_init(desc); + if (ret) + goto out; + + for (i = 0; i < blks; i++) { + ret = crypto_shash_update(desc, frames[i].data, hmac_data_len); + if (ret) + goto out; + } + ret = crypto_shash_final(desc, mac); +out: + if (ret) + dev_err(rsdev->dev, "digest error = %d", ret); + + return ret; +} + +static int rpmb_op_not_programmed(struct rpmb_sim_dev *rsdev, u16 req) +{ + struct rpmb_frame *res_frame = rsdev->res_frames; + + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, RPMB_ERR_NO_KEY); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + dev_err(rsdev->dev, "not programmed\n"); + + return 0; +} + +static int rpmb_op_program_key(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *in_frame, u32 cnt) +{ + struct rpmb_frame *res_frame = rsdev->res_frames; + struct crypto_shash *tfm = rsdev->hash_desc->tfm; + u16 req; + int ret; + u16 err = RPMB_ERR_OK; + + req = 
be16_to_cpu(in_frame[0].req_resp); + + if (req != RPMB_PROGRAM_KEY) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + if (rsdev->auth_key_set) { + dev_err(rsdev->dev, "key already set\n"); + err = RPMB_ERR_WRITE; + goto out; + } + + ret = crypto_shash_setkey(tfm, in_frame[0].key_mac, 32); + if (ret) { + dev_err(rsdev->dev, "set key failed = %d\n", ret); + err = RPMB_ERR_GENERAL; + goto out; + } + + dev_dbg(rsdev->dev, "digest size %u\n", crypto_shash_digestsize(tfm)); + + memcpy(rsdev->auth_key, in_frame[0].key_mac, 32); + rsdev->auth_key_set = true; +out: + + memset(res_frame, 0, sizeof(struct rpmb_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + + return 0; +} + +static int rpmb_op_get_wr_counter(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *in_frame, u32 cnt) +{ + struct rpmb_frame *frame; + int ret = 0; + u16 req; + u16 err; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_GET_WRITE_COUNTER) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + frame = kcalloc(1, sizeof(struct rpmb_frame), GFP_KERNEL); + if (!frame) { + err = RPMB_ERR_READ; + ret = -ENOMEM; + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = cnt; + goto out; + } + + rsdev->out_frames = frame; + rsdev->out_frames_cnt = cnt; + + frame->req_resp = req_to_resp(req); + frame->write_counter = cpu_to_be32(rsdev->write_counter); + memcpy(frame->nonce, in_frame[0].nonce, 16); + + err = RPMB_ERR_OK; + if (rpmb_sim_calc_hmac(rsdev, frame, cnt, frame->key_mac)) + err = RPMB_ERR_READ; + +out: + rsdev->out_frames[0].req_resp = req_to_resp(req); + rsdev->out_frames[0].result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_op_write_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *in_frame, u32 cnt) +{ + struct rpmb_frame *res_frame = rsdev->res_frames; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret = 0; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_WRITE_DATA) + return -EINVAL; + + if (rsdev->write_counter == 0xFFFFFFFF) { + err = RPMB_ERR_WRITE; + goto out; + } + + blks = be16_to_cpu(in_frame[0].block_count); + if (blks == 0 || blks > cnt) { + dev_err(rsdev->dev, "wrong number of blocks: blks=%u cnt=%u\n", + blks, cnt); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + if (blks > max_wr_blks) { + err = RPMB_ERR_WRITE; + goto out; + } + + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (rpmb_sim_calc_hmac(rsdev, in_frame, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + /* mac is in the last frame */ + if (memcmp(mac, in_frame[blks - 1].key_mac, sizeof(mac)) != 0) { + err = RPMB_ERR_AUTH; + goto out; + } + + if (be32_to_cpu(in_frame[0].write_counter) != rsdev->write_counter) { + err = RPMB_ERR_COUNTER; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_WRITE; + goto out; + } + + dev_dbg(rsdev->dev, "Writing = %u blocks at addr = 0x%X\n", blks, addr); + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + memcpy(rsdev->da[addr + i].data, in_frame[i].data, BLK_UNIT); + + rsdev->write_counter++; + + memset(res_frame, 0, sizeof(struct rpmb_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->write_counter = cpu_to_be32(rsdev->write_counter); + res_frame->addr = cpu_to_be16(addr); + if (rpmb_sim_calc_hmac(rsdev, res_frame, 1, 
res_frame->key_mac)) + err = RPMB_ERR_READ; + +out: + if (err != RPMB_ERR_OK) { + memset(res_frame, 0, sizeof(struct rpmb_frame)); + res_frame->req_resp = req_to_resp(req); + } + res_frame->result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_do_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *in_frame, u32 cnt) +{ + struct rpmb_frame *res_frame = rsdev->res_frames; + struct rpmb_frame *out_frames = NULL; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + /* eMMC intentionally set 0 here */ + blks = be16_to_cpu(in_frame->block_count); + blks = blks ?: cnt; + if (blks > cnt) { + dev_err(rsdev->dev, "wrong number of frames cnt %u\n", blks); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + out_frames = kcalloc(blks, sizeof(struct rpmb_frame), GFP_KERNEL); + if (!out_frames) { + ret = -ENOMEM; + err = RPMB_ERR_READ; + goto out; + } + + ret = 0; + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_READ; + goto out; + } + + dev_dbg(rsdev->dev, "reading = %u blocks at addr = 0x%X\n", blks, addr); + for (i = 0; i < blks; i++) { + memcpy(out_frames[i].data, rsdev->da[addr + i].data, BLK_UNIT); + memcpy(out_frames[i].nonce, in_frame[0].nonce, 16); + out_frames[i].req_resp = req_to_resp(req); + out_frames[i].addr = in_frame[0].addr; + out_frames[i].block_count = cpu_to_be16(blks); + } + + if (rpmb_sim_calc_hmac(rsdev, out_frames, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + memcpy(out_frames[blks - 1].key_mac, mac, sizeof(mac)); + + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + out_frames[i].result = op_result(rsdev, err); + + rsdev->out_frames = out_frames; + rsdev->out_frames_cnt = cnt; + + return 0; + +out: + memset(res_frame, 0, sizeof(struct rpmb_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + kfree(out_frames); + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return ret; +} + +static int rpmb_op_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *in_frame, u32 cnt) +{ + struct rpmb_frame *res_frame = rsdev->res_frames; + u16 req; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + memcpy(res_frame, in_frame, sizeof(*res_frame)); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return 0; +} + +static int rpmb_op_result_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *frames, u32 cnt) +{ + u16 req = be16_to_cpu(frames[0].req_resp); + u16 blks = be16_to_cpu(frames[0].block_count); + + if (req != RPMB_RESULT_READ) + return -EINVAL; + + if (blks != 0) { + dev_err(rsdev->dev, "wrong number of frames %u != 0\n", blks); + return -EINVAL; + } + + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = 1; + return 0; +} + +static int rpmb_sim_write(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *frames, u32 cnt) +{ + u16 req; + int ret; + + if (!frames || !cnt) + return -EINVAL; + + req = be16_to_cpu(frames[0].req_resp); + if (!rsdev->auth_key_set && req != RPMB_PROGRAM_KEY) + return rpmb_op_not_programmed(rsdev, req); + + switch (req) { + case RPMB_PROGRAM_KEY: + dev_dbg(rsdev->dev, "rpmb: program key\n"); + ret = rpmb_op_program_key(rsdev, frames, cnt); + break; + case RPMB_WRITE_DATA: + dev_dbg(rsdev->dev, "rpmb: write data\n"); + ret = rpmb_op_write_data(rsdev, frames, 
cnt); + break; + case RPMB_GET_WRITE_COUNTER: + dev_dbg(rsdev->dev, "rpmb: get write counter\n"); + ret = rpmb_op_get_wr_counter(rsdev, frames, cnt); + break; + case RPMB_READ_DATA: + dev_dbg(rsdev->dev, "rpmb: read data\n"); + ret = rpmb_op_read_data(rsdev, frames, cnt); + break; + case RPMB_RESULT_READ: + dev_dbg(rsdev->dev, "rpmb: result read\n"); + ret = rpmb_op_result_read(rsdev, frames, cnt); + break; + default: + dev_err(rsdev->dev, "unsupported command %u\n", req); + ret = -EINVAL; + break; + } + + dev_dbg(rsdev->dev, "rpmb: ret=%d\n", ret); + + return ret; +} + +static int rpmb_sim_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame *frames, u32 cnt) +{ + int i; + + if (!frames || !cnt) + return -EINVAL; + + if (!rsdev->out_frames || rsdev->out_frames_cnt == 0) { + dev_err(rsdev->dev, "out_frames are not set\n"); + return -EINVAL; + } + + if (rsdev->out_frames->req_resp == cpu_to_be16(RPMB_READ_DATA)) + rpmb_do_read_data(rsdev, rsdev->out_frames, cnt); + + for (i = 0; i < min_t(u32, rsdev->out_frames_cnt, cnt); i++) + memcpy(&frames[i], &rsdev->out_frames[i], sizeof(frames[i])); + + if (rsdev->out_frames != rsdev->res_frames) + kfree(rsdev->out_frames); + + rsdev->out_frames = NULL; + rsdev->out_frames_cnt = 0; + dev_dbg(rsdev->dev, "rpmb: cnt=%d\n", cnt); + + return 0; +} + +static int rpmb_sim_cmd_seq(struct device *dev, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct rpmb_sim_dev *rsdev; + int i; + int ret; + struct rpmb_cmd *cmd; + + if (!dev) + return -EINVAL; + + rsdev = dev_get_drvdata(dev); + + if (!rsdev) + return -EINVAL; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) + ret = rpmb_sim_write(rsdev, cmd->frames, cmd->nframes); + else + ret = rpmb_sim_read(rsdev, cmd->frames, cmd->nframes); + } + return ret; +} + +static struct rpmb_ops rpmb_sim_ops = { + .cmd_seq = rpmb_sim_cmd_seq, + .type = RPMB_TYPE_EMMC, +}; + +static int rpmb_sim_hmac_256_alloc(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + rsdev->hash_desc = desc; + + dev_dbg(rsdev->dev, "hamac(sha256) registered\n"); + return 0; +} + +static void rpmb_sim_hmac_256_free(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc = rsdev->hash_desc; + + if (desc->tfm) + crypto_free_shash(desc->tfm); + kfree(desc); + + rsdev->hash_desc = NULL; +} + +static int rpmb_sim_probe(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + int ret; + + rsdev = kzalloc(sizeof(*rsdev), GFP_KERNEL); + if (!rsdev) + return -ENOMEM; + + rsdev->dev = dev; + + ret = rpmb_sim_hmac_256_alloc(rsdev); + if (ret) + goto err; + + rsdev->capacity = CAPACITY_UNIT * daunits; + rsdev->blkcnt = rsdev->capacity / BLK_UNIT; + rsdev->da = kzalloc(rsdev->capacity, GFP_KERNEL); + if (!rsdev->da) { + ret = -ENOMEM; + goto err; + } + + rpmb_sim_ops.dev_id_len = strlen(id); + rpmb_sim_ops.dev_id = id; + rpmb_sim_ops.reliable_wr_cnt = max_wr_blks; + + rsdev->rdev = rpmb_dev_register(rsdev->dev, &rpmb_sim_ops); + if (IS_ERR(rsdev->rdev)) { + ret = PTR_ERR(rsdev->rdev); + goto err; + } + + dev_info(dev, "registered RPMB capacity = %zu of %zu blocks\n", + rsdev->capacity, rsdev->blkcnt); + + dev_set_drvdata(dev, rsdev); + + return 0; +err: + rpmb_sim_hmac_256_free(rsdev); + if (rsdev) + kfree(rsdev->da); + 
kfree(rsdev); + return ret; +} + +static int rpmb_sim_remove(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + + rsdev = dev_get_drvdata(dev); + + rpmb_dev_unregister(rsdev->dev); + + dev_set_drvdata(dev, NULL); + + rpmb_sim_hmac_256_free(rsdev); + + kfree(rsdev->da); + kfree(rsdev); + return 0; +} + +static void rpmb_sim_shutdown(struct device *dev) +{ + rpmb_sim_remove(dev); +} + +static int rpmb_sim_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static struct bus_type rpmb_sim_bus = { + .name = "rpmb_sim", + .match = rpmb_sim_match, +}; + +static struct device_driver rpmb_sim_drv = { + .name = "rpmb_sim", + .probe = rpmb_sim_probe, + .remove = rpmb_sim_remove, + .shutdown = rpmb_sim_shutdown, +}; + +static void rpmb_sim_dev_release(struct device *dev) +{ +} + +static struct device rpmb_sim_dev; + +static int __init rpmb_sim_init(void) +{ + int ret; + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + ret = bus_register(&rpmb_sim_bus); + if (ret) + return ret; + + dev->bus = &rpmb_sim_bus; + dev->release = rpmb_sim_dev_release; + dev_set_name(dev, "%s", "rpmb_sim"); + ret = device_register(dev); + if (ret) { + pr_err("device register failed %d\n", ret); + goto err_device; + } + + drv->bus = &rpmb_sim_bus; + ret = driver_register(drv); + if (ret) { + pr_err("driver register failed %d\n", ret); + goto err_driver; + } + + return 0; + +err_driver: + device_unregister(dev); +err_device: + bus_unregister(&rpmb_sim_bus); + return ret; +} + +static void __exit rpmb_sim_exit(void) +{ + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + device_unregister(dev); + driver_unregister(drv); + bus_unregister(&rpmb_sim_bus); +} + +module_init(rpmb_sim_init); +module_exit(rpmb_sim_exit); + +MODULE_AUTHOR("Tomas Winkler count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 610638a80383..461bf0b8a094 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return -EFAULT; } + if (in_size < 6 || + in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) { + mutex_unlock(&priv->buffer_mutex); + return -EINVAL; + } + /* atomic tpm command send and result receive. We only hold the ops * lock during this period so that the tpm can be unregistered even if * the char dev is held open. 
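The length check added to tpm_common_write() above leans on the fixed TPM command header layout: a 2-byte tag, a 4-byte big-endian total length, then a 4-byte ordinal. A buffer shorter than 6 bytes cannot even carry the length field, and a buffer shorter than the length its own header claims is truncated and must be rejected before it reaches the transport. A minimal sketch of the same validation, written as a standalone user-space helper for illustration (the helper name and the use of ntohl() in place of the kernel's be32_to_cpu() are assumptions, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohl() for big-endian to host conversion */

/* TPM command header: tag(2) + length(4) + ordinal(4) = 10 bytes */
#define TPM_HEADER_SIZE 10

/* Return 0 if buf plausibly holds a complete TPM command, -1 otherwise. */
static int tpm_cmd_len_ok(const uint8_t *buf, size_t in_size)
{
	uint32_t claimed;

	/* Need at least the tag and the 4-byte length field itself. */
	if (in_size < 6)
		return -1;

	/* Bytes 2..5 are the big-endian total command length. */
	memcpy(&claimed, buf + 2, sizeof(claimed));
	claimed = ntohl(claimed);

	/* Reject buffers shorter than the length the header claims. */
	if (in_size < claimed)
		return -1;

	return 0;
}

The kernel hunk performs the equivalent test on priv->data_buffer under buffer_mutex and returns -EINVAL, so a truncated command never gets handed to tpm_transmit().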
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1d6729be4cd6..0f1dc35e7078 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); -static bool tpm_validate_command(struct tpm_chip *chip, +static int tpm_validate_command(struct tpm_chip *chip, struct tpm_space *space, const u8 *cmd, size_t len) @@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip, unsigned int nr_handles; if (len < TPM_HEADER_SIZE) - return false; + return -EINVAL; if (!space) - return true; + return 0; if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) { cc = be32_to_cpu(header->ordinal); @@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip, if (i < 0) { dev_dbg(&chip->dev, "0x%04X is an invalid command\n", cc); - return false; + return -EOPNOTSUPP; } attrs = chip->cc_attrs_tbl[i]; @@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip, goto err_len; } - return true; + return 0; err_len: dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__, len); - return false; + return -EINVAL; } /** @@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, unsigned long stop; bool need_locality; - if (!tpm_validate_command(chip, space, buf, bufsiz)) - return -EINVAL; + rc = tpm_validate_command(chip, space, buf, bufsiz); + if (rc == -EINVAL) + return rc; + /* + * If the command is not implemented by the TPM, synthesize a + * response with a TPM2_RC_COMMAND_CODE return for user-space. + */ + if (rc == -EOPNOTSUPP) { + header->length = cpu_to_be32(sizeof(*header)); + header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | + TSS2_RESMGR_TPM_RC_LAYER); + return bufsiz; + } if (bufsiz > TPM_BUFSIZE) bufsiz = TPM_BUFSIZE; @@ -413,6 +425,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, if (chip->dev.parent) pm_runtime_get_sync(chip->dev.parent); + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + /* Store the decision as chip->locality will be changed. 
*/ need_locality = chip->locality == -1; @@ -489,6 +504,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, chip->locality = -1; } out_no_locality: + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + if (chip->dev.parent) pm_runtime_put_sync(chip->dev.parent); @@ -1228,6 +1246,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) break; recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); + if (recd > num_bytes) { + total = -EFAULT; + break; + } rlength = be32_to_cpu(tpm_cmd.header.out.length); if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 2d5466a72e40..0b5b499f726a 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -93,12 +93,17 @@ enum tpm2_structures { TPM2_ST_SESSIONS = 0x8002, }; +/* Indicates from what layer of the software stack the error comes from */ +#define TSS2_RC_LAYER_SHIFT 16 +#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT) + enum tpm2_return_codes { TPM2_RC_SUCCESS = 0x0000, TPM2_RC_HASH = 0x0083, /* RC_FMT1 */ TPM2_RC_HANDLE = 0x008B, TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */ TPM2_RC_DISABLED = 0x0120, + TPM2_RC_COMMAND_CODE = 0x0143, TPM2_RC_TESTING = 0x090A, /* RC_WARN */ TPM2_RC_REFERENCE_H0 = 0x0910, }; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index e1a41b788f08..44a3d16231f6 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, if (!rc) { data_len = be16_to_cpup( (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); + if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { + rc = -EFAULT; + goto out; + } rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) ->header.out.length); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 79d6bbb58e39..d5b44cadac56 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if ((size_t) expected > count) { + if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index c6428771841f..caa86b19c76d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; - int expected, status, burst_count, retries, size = 0; + int status; + int burst_count; + int retries; + int size = 0; + u32 expected; if (count < TPM_HEADER_SIZE) { i2c_nuvoton_ready(chip); /* return to idle */ @@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * to machine native */ expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < size) { dev_err(dev, "%s() expected > count\n", __func__); size = -EIO; continue; diff --git a/drivers/char/tpm/tpm_tis.c 
b/drivers/char/tpm/tpm_tis.c index 7e55aa9ce680..50b59a69dc33 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -132,108 +132,25 @@ static int check_acpi_tpm2(struct device *dev) } #endif -#ifdef CONFIG_X86 -#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000 -#define ILB_REMAP_SIZE 0x100 -#define LPC_CNTRL_REG_OFFSET 0x84 -#define LPC_CLKRUN_EN (1 << 2) - -static void __iomem *ilb_base_addr; - -static inline bool is_bsw(void) -{ - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); -} - -/** - * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running - */ -static void tpm_platform_begin_xfer(void) -{ - u32 clkrun_val; - - if (!is_bsw()) - return; - - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* Disable LPC CLKRUN# */ - clkrun_val &= ~LPC_CLKRUN_EN; - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); - -} - -/** - * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off - */ -static void tpm_platform_end_xfer(void) -{ - u32 clkrun_val; - - if (!is_bsw()) - return; - - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* Enable LPC CLKRUN# */ - clkrun_val |= LPC_CLKRUN_EN; - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); - -} -#else -static inline bool is_bsw(void) -{ - return false; -} - -static void tpm_platform_begin_xfer(void) -{ -} - -static void tpm_platform_end_xfer(void) -{ -} -#endif - static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - while (len--) *result++ = ioread8(phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *value) + const u8 *value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - while (len--) iowrite8(*value++, phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -241,12 +158,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - *result = ioread16(phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -254,12 +167,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - *result = ioread32(phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -267,12 +176,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - iowrite32(value, phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -460,11 +365,6 @@ static int __init init_tis(void) if (rc) goto err_force; -#ifdef CONFIG_X86 - if (is_bsw()) - ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, - ILB_REMAP_SIZE); -#endif rc = platform_driver_register(&tis_drv); if (rc) goto err_platform; @@ -483,10 +383,6 @@ static int __init init_tis(void) err_platform: if (force_pdev) platform_device_unregister(force_pdev); -#ifdef CONFIG_X86 - if 
(is_bsw()) - iounmap(ilb_base_addr); -#endif err_force: return rc; } @@ -496,10 +392,6 @@ static void __exit cleanup_tis(void) pnp_unregister_driver(&tis_pnp_driver); platform_driver_unregister(&tis_drv); -#ifdef CONFIG_X86 - if (is_bsw()) - iounmap(ilb_base_addr); -#endif if (force_pdev) platform_device_unregister(force_pdev); } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 63bc6c3b949e..a21e31c2b952 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -31,6 +31,8 @@ #include "tpm.h" #include "tpm_tis_core.h" +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value); + /* Before we attempt to access the TPM we must see that the valid bit is set. * The specification says that this bit is 0 at reset and remains 0 until the * 'TPM has gone through its self test and initialization and has established @@ -202,7 +204,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -217,7 +220,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } @@ -252,7 +255,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc, status, burstcnt; @@ -343,7 +346,7 @@ static void disable_interrupts(struct tpm_chip *chip) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc; @@ -421,19 +424,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip, int i, rc; u32 did_vid; + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); if (rc < 0) - return rc; + goto out; for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { if (vendor_timeout_overrides[i].did_vid != did_vid) continue; memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, sizeof(vendor_timeout_overrides[i].timeout_us)); - return true; + rc = true; } - return false; + rc = false; + +out: + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + + return rc; } /* @@ -653,14 +665,73 @@ void tpm_tis_remove(struct tpm_chip *chip) u32 interrupt; int rc; + tpm_tis_clkrun_enable(chip, true); + rc = tpm_tis_read32(priv, reg, &interrupt); if (rc < 0) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); + + tpm_tis_clkrun_enable(chip, false); + + if (priv->ilb_base_addr) + iounmap(priv->ilb_base_addr); } EXPORT_SYMBOL_GPL(tpm_tis_remove); +/** + * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration + * of a single TPM command + * @chip: TPM chip to use + * @value: 1 - Disable CLKRUN protocol, so that clocks are free running + * 0 - Enable CLKRUN protocol + * Call this function 
directly in tpm_tis_remove() in error or driver removal + * path, since the chip->ops is set to NULL in tpm_chip_unregister(). + */ +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) +{ + struct tpm_tis_data *data = dev_get_drvdata(&chip->dev); + u32 clkrun_val; + + if (!IS_ENABLED(CONFIG_X86) || !is_bsw() || + !data->ilb_base_addr) + return; + + if (value) { + data->clkrun_enabled++; + if (data->clkrun_enabled > 1) + return; + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* Disable LPC CLKRUN# */ + clkrun_val &= ~LPC_CLKRUN_EN; + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. + */ + outb(0xCC, 0x80); + } else { + data->clkrun_enabled--; + if (data->clkrun_enabled) + return; + + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* Enable LPC CLKRUN# */ + clkrun_val |= LPC_CLKRUN_EN; + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. + */ + outb(0xCC, 0x80); + } +} + static const struct tpm_class_ops tpm_tis = { .flags = TPM_OPS_AUTO_STARTUP, .status = tpm_tis_status, @@ -673,6 +744,7 @@ static const struct tpm_class_ops tpm_tis = { .req_canceled = tpm_tis_req_canceled, .request_locality = request_locality, .relinquish_locality = release_locality, + .clk_enable = tpm_tis_clkrun_enable, }; int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, @@ -680,6 +752,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, acpi_handle acpi_dev_handle) { u32 vendor, intfcaps, intmask; + u32 clkrun_val; u8 rid; int rc, probe; struct tpm_chip *chip; @@ -700,6 +773,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->phy_ops = phy_ops; dev_set_drvdata(&chip->dev, priv); + if (is_bsw()) { + priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, + ILB_REMAP_SIZE); + if (!priv->ilb_base_addr) + return -ENOMEM; + + clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET); + /* Check if CLKRUN# is already not enabled in the LPC bus */ + if (!(clkrun_val & LPC_CLKRUN_EN)) { + iounmap(priv->ilb_base_addr); + priv->ilb_base_addr = NULL; + } + } + + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + if (wait_startup(chip, 0) != 0) { rc = -ENODEV; goto out_err; @@ -790,9 +880,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } } - return tpm_chip_register(chip); + rc = tpm_chip_register(chip); + if (rc) + goto out_err; + + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + + return 0; out_err: + if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) + chip->ops->clk_enable(chip, false); + tpm_tis_remove(chip); + return rc; } EXPORT_SYMBOL_GPL(tpm_tis_core_init); @@ -804,22 +905,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + /* reenable interrupts that device may have lost or * BIOS/firmware may have disabled */ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); if (rc < 0) - return; + goto out; rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) - return; + goto out; intmask |= TPM_INTF_CMD_READY_INT | 
TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + +out: + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + + return; } int tpm_tis_resume(struct device *dev) diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index e2212f021a02..d5c6a2e952b3 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -79,6 +79,11 @@ enum tis_defaults { #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) #define TPM_RID(l) (0x0F04 | ((l) << 12)) +#define LPC_CNTRL_OFFSET 0x84 +#define LPC_CLKRUN_EN (1 << 2) +#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000 +#define ILB_REMAP_SIZE 0x100 + enum tpm_tis_flags { TPM_TIS_ITPM_WORKAROUND = BIT(0), }; @@ -89,6 +94,8 @@ struct tpm_tis_data { int irq; bool irq_tested; unsigned int flags; + void __iomem *ilb_base_addr; + u16 clkrun_enabled; wait_queue_head_t int_queue; wait_queue_head_t read_queue; const struct tpm_tis_phy_ops *phy_ops; @@ -98,7 +105,7 @@ struct tpm_tis_phy_ops { int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result); int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *value); + const u8 *value); int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result); int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result); int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src); @@ -128,7 +135,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr, } static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *value) + u16 len, const u8 *value) { return data->phy_ops->write_bytes(data, addr, len, value); } @@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr, return data->phy_ops->write32(data, addr, value); } +static inline bool is_bsw(void) +{ +#ifdef CONFIG_X86 + return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); +#else + return false; +#endif +} + void tpm_tis_remove(struct tpm_chip *chip); int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, const struct tpm_tis_phy_ops *phy_ops, diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c index 88fe72ae967f..8ab0bd8445f6 100644 --- a/drivers/char/tpm/tpm_tis_spi.c +++ b/drivers/char/tpm/tpm_tis_spi.c @@ -46,9 +46,7 @@ struct tpm_tis_spi_phy { struct tpm_tis_data priv; struct spi_device *spi_device; - - u8 tx_buf[4]; - u8 rx_buf[4]; + u8 *iobuf; }; static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data) @@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da } static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *buffer, u8 direction) + u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); int ret = 0; @@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, while (len) { transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); - phy->tx_buf[0] = direction | (transfer_len - 1); - phy->tx_buf[1] = 0xd4; - phy->tx_buf[2] = addr >> 8; - phy->tx_buf[3] = addr; + phy->iobuf[0] = (in ? 
0x80 : 0) | (transfer_len - 1); + phy->iobuf[1] = 0xd4; + phy->iobuf[2] = addr >> 8; + phy->iobuf[3] = addr; memset(&spi_xfer, 0, sizeof(spi_xfer)); - spi_xfer.tx_buf = phy->tx_buf; - spi_xfer.rx_buf = phy->rx_buf; + spi_xfer.tx_buf = phy->iobuf; + spi_xfer.rx_buf = phy->iobuf; spi_xfer.len = 4; spi_xfer.cs_change = 1; @@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; - if ((phy->rx_buf[3] & 0x01) == 0) { + if ((phy->iobuf[3] & 0x01) == 0) { // handle SPI wait states - phy->tx_buf[0] = 0; + phy->iobuf[0] = 0; for (i = 0; i < TPM_RETRY; i++) { spi_xfer.len = 1; @@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) goto exit; - if (phy->rx_buf[0] & 0x01) + if (phy->iobuf[0] & 0x01) break; } @@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, spi_xfer.len = transfer_len; spi_xfer.delay_usecs = 5; - if (direction) { + if (in) { spi_xfer.tx_buf = NULL; - spi_xfer.rx_buf = buffer; - } else { - spi_xfer.tx_buf = buffer; + } else if (out) { spi_xfer.rx_buf = NULL; + memcpy(phy->iobuf, out, transfer_len); + out += transfer_len; } spi_message_init(&m); @@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; + if (in) { + memcpy(in, phy->iobuf, transfer_len); + in += transfer_len; + } + len -= transfer_len; - buffer += transfer_len; } exit: @@ -139,13 +141,13 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { - return tpm_tis_spi_transfer(data, addr, len, result, 0x80); + return tpm_tis_spi_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *value) + u16 len, const u8 *value) { - return tpm_tis_spi_transfer(data, addr, len, value, 0); + return tpm_tis_spi_transfer(data, addr, len, NULL, value); } static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) @@ -194,6 +196,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev) phy->spi_device = dev; + phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + if (!phy->iobuf) + return -ENOMEM; + return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops, NULL); } diff --git a/drivers/char/vhm/Makefile b/drivers/char/vhm/Makefile new file mode 100644 index 000000000000..cb801c70a37e --- /dev/null +++ b/drivers/char/vhm/Makefile @@ -0,0 +1 @@ +obj-y += vhm_dev.o diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c new file mode 100644 index 000000000000..f41bc15c76c1 --- /dev/null +++ b/drivers/char/vhm/vhm_dev.c @@ -0,0 +1,709 @@ +/* + * virtio and hyperviosr service module (VHM): main framework + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Liang Ding + * Jason Zeng + * Xiao Zheng + * Jason Chen CJ + * Jack Ren + * Mingqiang Chi + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#define DEVICE_NAME "acrn_vhm" +#define CLASS_NAME "vhm" + +#define VHM_API_VERSION_MAJOR 1 +#define VHM_API_VERSION_MINOR 0 + +static int major; +static struct class *vhm_class; +static struct device *vhm_device; +static struct tasklet_struct vhm_io_req_tasklet; +static atomic_t ioreq_retry = ATOMIC_INIT(0); + +struct table_iomems { + /* list node for this table_iomems */ + struct list_head list; + /* device's physical BDF */ + unsigned short phys_bdf; + /* virtual base address of MSI-X table in memory space after ioremap */ + unsigned long mmap_addr; +}; +static LIST_HEAD(table_iomems_list); +static DEFINE_MUTEX(table_iomems_lock); + +static int vhm_dev_open(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm; + + vm = kzalloc(sizeof(struct vhm_vm), GFP_KERNEL); + pr_info("vhm_dev_open: opening device node\n"); + + if (!vm) + return -ENOMEM; + vm->vmid = ACRN_INVALID_VMID; + vm->dev = vhm_device; + + INIT_LIST_HEAD(&vm->memseg_list); + mutex_init(&vm->seg_lock); + + INIT_LIST_HEAD(&vm->ioreq_client_list); + spin_lock_init(&vm->ioreq_client_lock); + + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; + vm_list_add(&vm->list); + vm_mutex_unlock(&vhm_vm_list_lock); + filep->private_data = vm; + return 0; +} + +static ssize_t vhm_dev_read(struct file *filep, char *buffer, size_t len, + loff_t *offset) +{ + /* Does Nothing */ + pr_info("vhm_dev_read: reading device node\n"); + return 0; +} + +static ssize_t vhm_dev_write(struct file *filep, const char *buffer, + 
size_t len, loff_t *offset) +{ + /* Does Nothing */ + pr_info("vhm_dev_read: writing device node\n"); + return 0; +} + +static long vhm_dev_ioctl(struct file *filep, + unsigned int ioctl_num, unsigned long ioctl_param) +{ + long ret = 0; + struct vhm_vm *vm; + + trace_printk("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); + + if (ioctl_num == IC_GET_API_VERSION) { + struct api_version api_version; + + api_version.major_version = VHM_API_VERSION_MAJOR; + api_version.minor_version = VHM_API_VERSION_MINOR; + + if (copy_to_user((void *)ioctl_param, &api_version, + sizeof(struct api_version))) + return -EFAULT; + + return 0; + } + + vm = (struct vhm_vm *)filep->private_data; + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + if ((vm->vmid == ACRN_INVALID_VMID) && (ioctl_num != IC_CREATE_VM)) { + pr_err("vhm: invalid VM ID !\n"); + return -EFAULT; + } + + switch (ioctl_num) { + case IC_CREATE_VM: { + struct acrn_create_vm created_vm; + + if (copy_from_user(&created_vm, (void *)ioctl_param, + sizeof(struct acrn_create_vm))) + return -EFAULT; + + ret = hcall_create_vm(virt_to_phys(&created_vm)); + if ((ret < 0) || + (created_vm.vmid == ACRN_INVALID_VMID)) { + pr_err("vhm: failed to create VM from Hypervisor !\n"); + return -EFAULT; + } + + if (copy_to_user((void *)ioctl_param, &created_vm, + sizeof(struct acrn_create_vm))) + return -EFAULT; + + vm->vmid = created_vm.vmid; + + pr_info("vhm: VM %d created\n", created_vm.vmid); + break; + } + + case IC_START_VM: { + ret = hcall_start_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to start VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_PAUSE_VM: { + ret = hcall_pause_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_DESTROY_VM: { + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { + pr_err("failed to destroy VM %ld\n", vm->vmid); + return -EFAULT; + } + vm->vmid = ACRN_INVALID_VMID; + break; + } + + case IC_CREATE_VCPU: { + struct acrn_create_vcpu cv; + + if (copy_from_user(&cv, (void *)ioctl_param, + sizeof(struct acrn_create_vcpu))) + return -EFAULT; + + ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid, + virt_to_phys(&cv)); + if (ret < 0) { + pr_err("vhm: failed to create vcpu %d!\n", cv.vcpu_id); + return -EFAULT; + } + + return ret; + } + + case IC_ALLOC_MEMSEG: { + struct vm_memseg memseg; + + if (copy_from_user(&memseg, (void *)ioctl_param, + sizeof(struct vm_memseg))) + return -EFAULT; + + return alloc_guest_memseg(vm, &memseg); + } + + case IC_SET_MEMSEG: { + struct vm_memmap memmap; + + if (copy_from_user(&memmap, (void *)ioctl_param, + sizeof(struct vm_memmap))) + return -EFAULT; + + ret = map_guest_memseg(vm, &memmap); + break; + } + + case IC_SET_IOREQ_BUFFER: { + /* init ioreq buffer */ + ret = acrn_ioreq_init(vm, (unsigned long)ioctl_param); + if (ret < 0) + return ret; + break; + } + + case IC_CREATE_IOREQ_CLIENT: { + int client_id; + + client_id = acrn_ioreq_create_fallback_client(vm->vmid, "acrndm"); + if (client_id < 0) + return -EFAULT; + return client_id; + } + + case IC_DESTROY_IOREQ_CLIENT: { + int client = ioctl_param; + + acrn_ioreq_destroy_client(client); + break; + } + + case IC_ATTACH_IOREQ_CLIENT: { + int client = ioctl_param; + + return acrn_ioreq_attach_client(client, 0); + } + + case IC_NOTIFY_REQUEST_FINISH: { + struct ioreq_notify notify; + + if (copy_from_user(¬ify, (void *)ioctl_param, + sizeof(notify))) + return -EFAULT; + + ret = acrn_ioreq_complete_request(notify.client_id, 
notify.vcpu); + if (ret < 0) + return -EFAULT; + break; + } + + case IC_ASSERT_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_assert_irqline(vm->vmid, virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to assert irq!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSERT_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_deassert_irqline(vm->vmid, virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to deassert irq!\n"); + return -EFAULT; + } + break; + } + case IC_PULSE_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_pulse_irqline(vm->vmid, + virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to assert irq!\n"); + return -EFAULT; + } + break; + } + + case IC_INJECT_MSI: { + struct acrn_msi_entry msi; + + if (copy_from_user(&msi, (void *)ioctl_param, sizeof(msi))) + return -EFAULT; + + ret = hcall_inject_msi(vm->vmid, virt_to_phys(&msi)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + break; + } + + case IC_ASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_assign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to assign ptdev!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_deassign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to deassign ptdev!\n"); + return -EFAULT; + } + break; + } + + case IC_SET_PTDEV_INTR_INFO: { + struct ic_ptdev_irq ic_pt_irq; + struct hc_ptdev_irq hc_pt_irq; + struct table_iomems *new; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_set_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to set intr info for ptdev!\n"); + return -EFAULT; + } + + if ((ic_pt_irq.type == IRQ_MSIX) && + ic_pt_irq.msix.table_paddr) { + new = kmalloc(sizeof(struct table_iomems), GFP_KERNEL); + if (new == NULL) + return -EFAULT; + new->phys_bdf = ic_pt_irq.phys_bdf; + new->mmap_addr = (unsigned long) + ioremap_nocache(ic_pt_irq.msix.table_paddr, + ic_pt_irq.msix.table_size); + + mutex_lock(&table_iomems_lock); + list_add(&new->list, &table_iomems_list); + mutex_unlock(&table_iomems_lock); + } + + break; + } + case IC_RESET_PTDEV_INTR_INFO: { + struct ic_ptdev_irq ic_pt_irq; + struct hc_ptdev_irq hc_pt_irq; + struct table_iomems *ptr; + int dev_found = 0; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_reset_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to reset intr info for ptdev!\n"); + return -EFAULT; + } + + if (ic_pt_irq.type == IRQ_MSIX) { + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == ic_pt_irq.phys_bdf) { + dev_found = 1; + break; + } + } + if (dev_found) { + iounmap((void __iomem *)ptr->mmap_addr); + list_del(&ptr->list); + } + mutex_unlock(&table_iomems_lock); + } + + break; + } + + case 
IC_VM_PCI_MSIX_REMAP: { + struct acrn_vm_pci_msix_remap msix_remap; + + if (copy_from_user(&msix_remap, + (void *)ioctl_param, sizeof(msix_remap))) + return -EFAULT; + + ret = hcall_remap_pci_msix(vm->vmid, virt_to_phys(&msix_remap)); + + if (copy_to_user((void *)ioctl_param, + &msix_remap, sizeof(msix_remap))) + return -EFAULT; + + if (msix_remap.msix) { + void __iomem *msix_entry; + struct table_iomems *ptr; + int dev_found = 0; + + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == msix_remap.phys_bdf) { + dev_found = 1; + break; + } + } + mutex_unlock(&table_iomems_lock); + + if (!dev_found || !ptr->mmap_addr) + return -EFAULT; + + msix_entry = (void __iomem *) (ptr->mmap_addr + + msix_remap.msix_entry_index * + PCI_MSIX_ENTRY_SIZE); + + /* mask the entry when setup */ + writel(PCI_MSIX_ENTRY_CTRL_MASKBIT, + msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); + + /* setup the msi entry */ + writel((uint32_t)msix_remap.msi_addr, + msix_entry + PCI_MSIX_ENTRY_LOWER_ADDR); + writel((uint32_t)(msix_remap.msi_addr >> 32), + msix_entry + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msix_remap.msi_data, + msix_entry + PCI_MSIX_ENTRY_DATA); + + /* unmask the entry */ + writel(msix_remap.vector_ctl & + PCI_MSIX_ENTRY_CTRL_MASKBIT, + msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); + } + break; + } + + case IC_PM_GET_CPU_STATE: { + uint64_t cmd; + + if (copy_from_user(&cmd, + (void *)ioctl_param, sizeof(cmd))) + return -EFAULT; + + switch (cmd & PMCMD_TYPE_MASK) { + case PMCMD_GET_PX_CNT: { + uint8_t px_cnt; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_cnt)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &px_cnt, sizeof(px_cnt))) + ret = -EFAULT; + + break; + } + case PMCMD_GET_PX_DATA: { + struct cpu_px_data px_data; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_data)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &px_data, sizeof(px_data))) + ret = -EFAULT; + break; + } + default: + ret = -EFAULT; + break; + } + } + + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; + break; + } + + return ret; +} + +static void io_req_tasklet(unsigned long data) +{ + struct vhm_vm *vm; + + list_for_each_entry(vm, &vhm_vm_list, list) { + if (!vm || !vm->req_buf) + break; + + acrn_ioreq_distribute_request(vm); + } + + if (atomic_read(&ioreq_retry) > 0) { + atomic_dec(&ioreq_retry); + tasklet_schedule(&vhm_io_req_tasklet); + } +} + +static void vhm_intr_handler(void) +{ + if (test_bit(TASKLET_STATE_SCHED, &(vhm_io_req_tasklet.state))) + atomic_inc(&ioreq_retry); + else + tasklet_schedule(&vhm_io_req_tasklet); +} + +static int vhm_dev_release(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm = filep->private_data; + + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + put_vm(vm); + filep->private_data = NULL; + return 0; +} + +static const struct file_operations fops = { + .open = vhm_dev_open, + .read = vhm_dev_read, + .write = vhm_dev_write, + .mmap = vhm_dev_mmap, + .release = vhm_dev_release, + .unlocked_ioctl = vhm_dev_ioctl, + .poll = vhm_dev_poll, +}; + +#define SUPPORT_HV_API_VERSION_MAJOR 1 +#define SUPPORT_HV_API_VERSION_MINOR 0 +static int __init vhm_init(void) +{ + unsigned long flag; + struct hc_api_version api_version = {0, 0}; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + pr_info("vhm: initializing\n"); + + if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) { + pr_err("vhm: failed to get api version from 
Hypervisor !\n"); + return -EINVAL; + } + + if (api_version.major_version == SUPPORT_HV_API_VERSION_MAJOR && + api_version.minor_version == SUPPORT_HV_API_VERSION_MINOR) { + pr_info("vhm: hv api version %d.%d\n", + api_version.major_version, api_version.minor_version); + } else { + pr_err("vhm: not support hv api version %d.%d!\n", + api_version.major_version, api_version.minor_version); + return -EINVAL; + } + + /* Try to dynamically allocate a major number for the device */ + major = register_chrdev(0, DEVICE_NAME, &fops); + if (major < 0) { + pr_warn("vhm: failed to register a major number\n"); + return major; + } + pr_info("vhm: registered correctly with major number %d\n", major); + + /* Register the device class */ + vhm_class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(vhm_class)) { + unregister_chrdev(major, DEVICE_NAME); + pr_warn("vhm: failed to register device class\n"); + return PTR_ERR(vhm_class); + } + pr_info("vhm: device class registered correctly\n"); + + /* Register the device driver */ + vhm_device = device_create(vhm_class, NULL, MKDEV(major, 0), + NULL, DEVICE_NAME); + if (IS_ERR(vhm_device)) { + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); + pr_warn("vhm: failed to create the device\n"); + return PTR_ERR(vhm_device); + } + pr_info("register IPI handler\n"); + tasklet_init(&vhm_io_req_tasklet, io_req_tasklet, 0); + if (x86_platform_ipi_callback) { + pr_warn("vhm: ipi callback was occupied\n"); + return -EINVAL; + } + local_irq_save(flag); + x86_platform_ipi_callback = vhm_intr_handler; + local_irq_restore(flag); + + pr_info("vhm: Virtio & Hypervisor service module initialized\n"); + return 0; +} +static void __exit vhm_exit(void) +{ + tasklet_kill(&vhm_io_req_tasklet); + device_destroy(vhm_class, MKDEV(major, 0)); + class_unregister(vhm_class); + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); + pr_info("vhm: exit\n"); +} + +module_init(vhm_init); +module_exit(vhm_exit); + +MODULE_AUTHOR("Intel"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("This is a char device driver, acts as a route " + "responsible for transferring IO requsts from other modules " + "either in user-space or in kernel to and from hypervisor"); +MODULE_VERSION("0.1"); diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 775af473fe11..5c2b26de303e 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -107,10 +107,20 @@ static int pmc_suspend(void) return 0; } +static bool pmc_ready(unsigned int mask) +{ + unsigned int status; + + regmap_read(pmcreg, AT91_PMC_SR, &status); + + return ((status & mask) == mask) ? 
1 : 0; +} + static void pmc_resume(void) { - int i, ret = 0; + int i; u32 tmp; + u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA; regmap_read(pmcreg, AT91_PMC_MCKR, &tmp); if (pmc_cache.mckr != tmp) @@ -134,13 +144,11 @@ static void pmc_resume(void) AT91_PMC_PCR_CMD); } - if (pmc_cache.uckr & AT91_PMC_UPLLEN) { - ret = regmap_read_poll_timeout(pmcreg, AT91_PMC_SR, tmp, - !(tmp & AT91_PMC_LOCKU), - 10, 5000); - if (ret) - pr_crit("USB PLL didn't lock when resuming\n"); - } + if (pmc_cache.uckr & AT91_PMC_UPLLEN) + mask |= AT91_PMC_LOCKU; + + while (!pmc_ready(mask)) + cpu_relax(); } static struct syscore_ops pmc_syscore_ops = { diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 58ce6af8452d..eec52734d6ac 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -479,17 +479,17 @@ struct bcm2835_pll_ana_bits { static const struct bcm2835_pll_ana_bits bcm2835_ana_default = { .mask0 = 0, .set0 = 0, - .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK), + .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK, .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), - .mask3 = (u32)~A2W_PLL_KA_MASK, + .mask3 = A2W_PLL_KA_MASK, .set3 = (2 << A2W_PLL_KA_SHIFT), .fb_prediv_mask = BIT(14), }; static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { - .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK), + .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK, .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), - .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK), + .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK, .set1 = (6 << A2W_PLLH_KP_SHIFT), .mask3 = 0, .set3 = 0, @@ -653,8 +653,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) ~A2W_PLL_CTRL_PWRDN); /* Take the PLL out of reset. */ + spin_lock(&cprman->regs_lock); cprman_write(cprman, data->cm_ctrl_reg, cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); + spin_unlock(&cprman->regs_lock); /* Wait for the PLL to lock. */ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); @@ -731,9 +733,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, } /* Unmask the reference clock from the oscillator. 
*/ + spin_lock(&cprman->regs_lock); cprman_write(cprman, A2W_XOSC_CTRL, cprman_read(cprman, A2W_XOSC_CTRL) | data->reference_enable_mask); + spin_unlock(&cprman->regs_lock); if (do_ana_setup_first) bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c index 5e918e7afaba..95a6e9834392 100644 --- a/drivers/clk/clk-axi-clkgen.c +++ b/drivers/clk/clk-axi-clkgen.c @@ -40,6 +40,10 @@ #define MMCM_REG_FILTER1 0x4e #define MMCM_REG_FILTER2 0x4f +#define MMCM_CLKOUT_NOCOUNT BIT(6) + +#define MMCM_CLK_DIV_NOCOUNT BIT(12) + struct axi_clkgen { void __iomem *base; struct clk_hw clk_hw; @@ -315,12 +319,27 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw, unsigned int reg; unsigned long long tmp; - axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, ®); - dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_2, ®); + if (reg & MMCM_CLKOUT_NOCOUNT) { + dout = 1; + } else { + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, ®); + dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); + } + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, ®); - d = (reg & 0x3f) + ((reg >> 6) & 0x3f); - axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, ®); - m = (reg & 0x3f) + ((reg >> 6) & 0x3f); + if (reg & MMCM_CLK_DIV_NOCOUNT) + d = 1; + else + d = (reg & 0x3f) + ((reg >> 6) & 0x3f); + + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB2, ®); + if (reg & MMCM_CLKOUT_NOCOUNT) { + m = 1; + } else { + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, ®); + m = (reg & 0x3f) + ((reg >> 6) & 0x3f); + } if (d == 0 || dout == 0) return 0; diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 4ed516cb7276..b49942b9fe50 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -118,12 +118,11 @@ static unsigned int _get_val(const struct clk_div_table *table, unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, unsigned int val, const struct clk_div_table *table, - unsigned long flags) + unsigned long flags, unsigned long width) { - struct clk_divider *divider = to_clk_divider(hw); unsigned int div; - div = _get_div(table, val, flags, divider->width); + div = _get_div(table, val, flags, width); if (!div) { WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO), "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", @@ -145,7 +144,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, val &= div_mask(divider->width); return divider_recalc_rate(hw, parent_rate, val, divider->table, - divider->flags); + divider->flags, divider->width); } static bool _is_valid_table_div(const struct clk_div_table *table, diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 20d90769cced..653b0f38d475 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = { "xtal", "clkin" }; static const char * const si5351_pll_names[] = { - "plla", "pllb", "vxco" + "si5351_plla", "si5351_pllb", "si5351_vxco" }; static const char * const si5351_msynth_names[] = { "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7" diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c index a94c3f56c590..61c3e40507d3 100644 --- a/drivers/clk/clk-stm32h7.c +++ b/drivers/clk/clk-stm32h7.c @@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg, mux_ops = div_ops = gate_ops = NULL; mux_hw = div_hw = gate_hw = NULL; - if (gcfg->mux && gcfg->mux) { + if 
(gcfg->mux && cfg->mux) { mux = _get_cmux(base + cfg->mux->offset, cfg->mux->shift, cfg->mux->width, @@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg, } } - if (gcfg->gate && gcfg->gate) { + if (gcfg->gate && cfg->gate) { gate = _get_cgate(base + cfg->gate->offset, cfg->gate->bit_idx, gcfg->gate->flags, lock); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c8d83acda006..60c7fde37d23 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2470,6 +2470,21 @@ static int __clk_core_init(struct clk_core *core) rate = 0; core->rate = core->req_rate = rate; + /* + * Enable CLK_IS_CRITICAL clocks so newly added critical clocks + * don't get accidentally disabled when walking the orphan tree and + * reparenting clocks + */ + if (core->flags & CLK_IS_CRITICAL) { + unsigned long flags; + + clk_core_prepare(core); + + flags = clk_enable_lock(); + clk_core_enable(core); + clk_enable_unlock(flags); + } + /* * walk the list of orphan clocks and reparent any that newly finds a * parent. @@ -2478,10 +2493,13 @@ static int __clk_core_init(struct clk_core *core) struct clk_core *parent = __clk_init_parent(orphan); /* - * we could call __clk_set_parent, but that would result in a - * redundant call to the .set_rate op, if it exists + * We need to use __clk_set_parent_before() and _after() to + * to properly migrate any prepare/enable count of the orphan + * clock. This is important for CLK_IS_CRITICAL clocks, which + * are enabled during init but might not have a parent yet. */ if (parent) { + /* update the clk tree topology */ __clk_set_parent_before(orphan, parent); __clk_set_parent_after(orphan, parent, NULL); __clk_recalc_accuracies(orphan); @@ -2500,16 +2518,6 @@ static int __clk_core_init(struct clk_core *core) if (core->ops->init) core->ops->init(core->hw); - if (core->flags & CLK_IS_CRITICAL) { - unsigned long flags; - - clk_core_prepare(core); - - flags = clk_enable_lock(); - clk_core_enable(core); - clk_enable_unlock(flags); - } - kref_init(&core->ref); out: clk_prepare_unlock(); diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c index a18258eb89cb..f40419959656 100644 --- a/drivers/clk/hisilicon/clk-hi3660.c +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = { /* crgctrl */ static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = { - { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, }, + { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, }, { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, }, { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, }, { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, }, diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index e786d717f75d..a87809d4bd52 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -145,7 +145,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = { { HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, }, { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, }, { HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, }, - { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, }, + { 
HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, }, }; static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = { diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c index a1c1f684ad58..9f46cf9dcc65 100644 --- a/drivers/clk/hisilicon/clkdivider-hi6220.c +++ b/drivers/clk/hisilicon/clkdivider-hi6220.c @@ -56,7 +56,7 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw, val &= div_mask(dclk->width); return divider_recalc_rate(hw, parent_rate, val, dclk->table, - CLK_DIVIDER_ROUND_CLOSEST); + CLK_DIVIDER_ROUND_CLOSEST, dclk->width); } static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index c07df719b8a3..8d518ad5dc13 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -761,7 +761,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); - clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4); + clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4); clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 2305699db467..0ac9b30c8b90 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -797,7 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0); clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); - clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0); + clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0); clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0); clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index f5d6b70ce189..210ce8e8025e 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -216,6 +216,7 @@ struct mtk_pll_data { uint32_t pcw_reg; int pcw_shift; const struct mtk_pll_div_table *div_table; + const char *parent_name; }; void mtk_clk_register_plls(struct device_node *node, diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index a409142e9346..7598477ff60f 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -303,7 +303,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, init.name = data->name; init.flags = (data->flags & PLL_AO) ? 
CLK_IS_CRITICAL : 0; init.ops = &mtk_pll_ops; - init.parent_names = &parent_name; + if (data->parent_name) + init.parent_names = &data->parent_name; + else + init.parent_names = &parent_name; init.num_parents = 1; clk = clk_register(NULL, &pll->hw); diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 44a5a535ca63..5144360e2c80 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -98,7 +98,7 @@ static void params_from_rate(unsigned long requested_rate, *sdm = SDM_DEN - 1; } else { *n2 = div; - *sdm = DIV_ROUND_UP(rem * SDM_DEN, requested_rate); + *sdm = DIV_ROUND_UP_ULL((u64)rem * SDM_DEN, requested_rate); } } diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index b2d1e8ed7152..92168348ffa6 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -1139,7 +1139,7 @@ static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6); static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7); static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8); static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9); -static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG0, 10); +static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10); static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11); static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12); static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13); @@ -1190,7 +1190,7 @@ static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9); static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11); static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12); static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15); -static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG2, 22); +static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22); static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25); static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26); static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29); diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c index 7b359afd620e..a6438f50e6db 100644 --- a/drivers/clk/nxp/clk-lpc32xx.c +++ b/drivers/clk/nxp/clk-lpc32xx.c @@ -956,7 +956,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, val &= div_mask(divider->width); return divider_recalc_rate(hw, parent_rate, val, divider->table, - divider->flags); + divider->flags, divider->width); } static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c index 53484912301e..928fcc16ee27 100644 --- a/drivers/clk/qcom/clk-regmap-divider.c +++ b/drivers/clk/qcom/clk-regmap-divider.c @@ -59,7 +59,7 @@ static unsigned long div_recalc_rate(struct clk_hw *hw, div &= BIT(divider->width) - 1; return divider_recalc_rate(hw, parent_rate, div, NULL, - CLK_DIVIDER_ROUND_CLOSEST); + CLK_DIVIDER_ROUND_CLOSEST, divider->width); } const struct clk_ops clk_regmap_div_ops = { diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index d523991c945f..28ceaf1e9937 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -143,8 +143,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path, int ret; clocks_node = of_find_node_by_path("/clocks"); - if (clocks_node) - node = of_find_node_by_name(clocks_node, path); + if (clocks_node) { + node = of_get_child_by_name(clocks_node, path); + of_node_put(clocks_node); + } if (!node) { fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL); diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 3410ee68d4bc..2057809219f4 100644 --- 
a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -1438,6 +1438,7 @@ static const struct freq_tbl ftbl_codec_clk[] = { static struct clk_rcg2 codec_digcodec_clk_src = { .cmd_rcgr = 0x1c09c, + .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll1_emclk_sleep_map, .freq_tbl = ftbl_codec_clk, diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index ab9e850b3707..2f385a57cd91 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -982,8 +982,8 @@ static void __init sun5i_ccu_init(struct device_node *node, /* Force the PLL-Audio-1x divider to 4 */ val = readl(reg + SUN5I_PLL_AUDIO_REG); - val &= ~GENMASK(19, 16); - writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG); + val &= ~GENMASK(29, 26); + writel(val | (3 << 26), reg + SUN5I_PLL_AUDIO_REG); /* * Use the peripheral PLL as the AHB parent, instead of CPU / diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 8af434815fba..40d5f74cb2ac 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents, 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); -static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0); +static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0); static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); @@ -750,7 +750,7 @@ static struct ccu_mp out_a_clk = { .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("out-a", clk_out_parents, - &ccu_div_ops, + &ccu_mp_ops, 0), }, }; @@ -771,7 +771,7 @@ static struct ccu_mp out_b_clk = { .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("out-b", clk_out_parents, - &ccu_div_ops, + &ccu_mp_ops, 0), }, }; @@ -792,7 +792,7 @@ static struct ccu_mp out_c_clk = { .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("out-c", clk_out_parents, - &ccu_div_ops, + &ccu_mp_ops, 0), }, }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index e43acebdfbcd..c10160d7a556 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1", static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x06c, BIT(0), 0); static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", - 0x06c, BIT(0), 0); + 0x06c, BIT(1), 0); static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", - 0x06c, BIT(0), 0); + 0x06c, BIT(2), 0); static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x06c, BIT(16), 0); static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", @@ -493,8 +493,8 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents, 0x118, 24, 3, BIT(31), CLK_SET_RATE_PARENT); static const char * const tcon1_parents[] = { "pll-video1" }; -static SUNXI_CCU_MUX_WITH_GATE(tcon1_clk, "tcon1", tcon1_parents, - 0x11c, 24, 3, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_M_WITH_MUX_GATE(tcon1_clk, "tcon1", tcon1_parents, + 0x11c, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0); diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c index baa3cf96507b..302a18efd39f 100644 --- a/drivers/clk/sunxi-ng/ccu_div.c +++ b/drivers/clk/sunxi-ng/ccu_div.c @@ -71,7 +71,7 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw, parent_rate); val = 
divider_recalc_rate(hw, parent_rate, val, cd->div.table, - cd->div.flags); + cd->div.flags, cd->div.width); if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV) val /= cd->fixed_post_div; diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index a32158e8f2e3..84a5e7f17f6f 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -99,6 +99,9 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, struct ccu_nm *nm = hw_to_ccu_nm(hw); struct _ccu_nm _nm; + if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) + return rate; + _nm.min_n = nm->n.min ?: 1; _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 6041bdba2e97..f69f9e8c6f38 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev, return 0; } +static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + sun9i_mmc_reset_assert(rcdev, id); + udelay(10); + sun9i_mmc_reset_deassert(rcdev, id); + + return 0; +} + static const struct reset_control_ops sun9i_mmc_reset_ops = { .assert = sun9i_mmc_reset_assert, .deassert = sun9i_mmc_reset_deassert, + .reset = sun9i_mmc_reset_reset, }; static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 6d7a613f2656..b92867814e2d 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2566,8 +2566,8 @@ static int tegra210_enable_pllu(void) reg |= PLL_ENABLE; writel(reg, clk_base + PLLU_BASE); - readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg, - reg & PLL_BASE_LOCK, 2, 1000); + readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg, + reg & PLL_BASE_LOCK, 2, 1000); if (!(reg & PLL_BASE_LOCK)) { pr_err("Timed out waiting for PLL_U to lock\n"); return -ETIMEDOUT; diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index a2d163f759b4..07f5203df01c 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -964,7 +964,7 @@ static void __init tegra30_super_clk_init(void) * U71 divider of cclk_lp. 
*/ clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3", - clk_base + SUPER_CCLKG_DIVIDER, 0, + clk_base + SUPER_CCLKLP_DIVIDER, 0, TEGRA_DIVIDER_INT, 16, 8, 1, NULL); clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL); diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 13eb04f72389..148815470431 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); - of_node_get(node); - cfg_node = of_find_node_by_name(node, prop); + cfg_node = of_get_child_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", &cdesc->bws); diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c index 07f3b91a7daf..d244e724e198 100644 --- a/drivers/clk/uniphier/clk-uniphier-sys.c +++ b/drivers/clk/uniphier/clk-uniphier-sys.c @@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = { const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */ UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */ - UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */ + UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), UNIPHIER_PRO5_SYS_CLK_NAND(2), diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd0..14e2419063e9 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1268,10 +1268,6 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) iounmap(cntctlbase); - if (!best_frame) - pr_err("Unable to find a suitable frame in timer @ %pa\n", - &timer_mem->cntctlbase); - return best_frame; } @@ -1372,6 +1368,8 @@ static int __init arch_timer_mem_of_init(struct device_node *np) frame = arch_timer_mem_find_best_frame(timer_mem); if (!frame) { + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer_mem->cntctlbase); ret = -EINVAL; goto out; } @@ -1420,7 +1418,7 @@ arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) static int __init arch_timer_mem_acpi_init(int platform_timer_count) { struct arch_timer_mem *timers, *timer; - struct arch_timer_mem_frame *frame; + struct arch_timer_mem_frame *frame, *best_frame = NULL; int timer_count, i, ret = 0; timers = kcalloc(platform_timer_count, sizeof(*timers), @@ -1432,14 +1430,6 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) if (ret || !timer_count) goto out; - for (i = 0; i < timer_count; i++) { - ret = arch_timer_mem_verify_cntfrq(&timers[i]); - if (ret) { - pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); - goto out; - } - } - /* * While unlikely, it's theoretically possible that none of the frames * in a timer expose the combination of feature we want. 
@@ -1448,12 +1438,26 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) timer = &timers[i]; frame = arch_timer_mem_find_best_frame(timer); - if (frame) - break; + if (!best_frame) + best_frame = frame; + + ret = arch_timer_mem_verify_cntfrq(timer); + if (ret) { + pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); + goto out; + } + + if (!best_frame) /* implies !frame */ + /* + * Only complain about missing suitable frames if we + * haven't already found one in a previous iteration. + */ + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer->cntctlbase); } - if (frame) - ret = arch_timer_mem_frame_register(frame); + if (best_frame) + ret = arch_timer_mem_frame_register(best_frame); out: kfree(timers); return ret; diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c index 8f2423789ba9..4bfeb9929ab2 100644 --- a/drivers/clocksource/timer-stm32.c +++ b/drivers/clocksource/timer-stm32.c @@ -106,6 +106,10 @@ static int __init stm32_clockevent_init(struct device_node *np) unsigned long rate, max_delta; int irq, ret, bits, prescaler = 1; + data = kmemdup(&clock_event_ddata, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + clk = of_clk_get(np, 0); if (IS_ERR(clk)) { ret = PTR_ERR(clk); @@ -156,8 +160,8 @@ static int __init stm32_clockevent_init(struct device_node *np) writel_relaxed(prescaler - 1, data->base + TIM_PSC); writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR); - writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); writel_relaxed(0, data->base + TIM_SR); + writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ); @@ -184,6 +188,7 @@ static int __init stm32_clockevent_init(struct device_node *np) err_clk_enable: clk_put(clk); err_clk_get: + kfree(data); return ret; } diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 4ebae43118ef..d8addbce40bc 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -275,6 +275,7 @@ config BMIPS_CPUFREQ config LOONGSON2_CPUFREQ tristate "Loongson2 CPUFreq Driver" + depends on LEMOTE_MACH2F help This option adds a CPUFreq driver for loongson processors which support software configurable cpu frequency. @@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ config LOONGSON1_CPUFREQ tristate "Loongson1 CPUFreq Driver" + depends on LOONGSON1_LS1B help This option adds a CPUFreq driver for loongson1 processors which support software configurable cpu frequency. 
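The clk hunks earlier in this patch all stem from one interface change: divider_recalc_rate() no longer derives the bit-field width from to_clk_divider(hw), so every caller now passes its own width as an explicit final argument. The sketch below is not part of the patch; it assumes a hypothetical driver-private divider (my_div and my_div_recalc_rate are illustrative names only) and shows the updated call under that assumption.

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical driver-private divider: it embeds clk_hw directly, so the
 * generic helper cannot recover the field width via to_clk_divider(hw). */
struct my_div {
	struct clk_hw hw;
	void __iomem *reg;
	u8 shift;
	u8 width;		/* divider bit-field width */
	unsigned long flags;
};

static unsigned long my_div_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct my_div *div = container_of(hw, struct my_div, hw);
	unsigned int val;

	val = readl(div->reg) >> div->shift;
	val &= (1U << div->width) - 1;

	/* width is now the explicit last argument; the table may be NULL */
	return divider_recalc_rate(hw, parent_rate, val, NULL,
				   div->flags, div->width);
}

Passing the width explicitly is what lets wrappers such as the hi6220, lpc32xx and sunxi-ng dividers reuse the helper without embedding a struct clk_divider, which is exactly what the remaining recalc_rate hunks above do.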
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3a2ca0f79daf..d0c34df0529c 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) if (c->x86_vendor == X86_VENDOR_INTEL) { if ((c->x86 == 15) && (c->x86_model == 6) && - (c->x86_mask == 8)) { + (c->x86_stepping == 8)) { pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); return -ENODEV; } diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 17504129fd77..0c41ab3b16eb 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -213,6 +213,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, { u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; unsigned int freqs_new; + int ret; cur_cluster = cpu_to_cluster(cpu); new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); @@ -229,7 +230,14 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, } } - return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); + ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); + + if (!ret) { + arch_set_freq_scale(policy->related_cpus, freqs_new, + policy->cpuinfo.max_freq); + } + + return ret; } static inline u32 get_table_count(struct cpufreq_frequency_table *table) diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a753c50e9e41..2db1525a3d7b 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -83,8 +83,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, - { .compatible = "socionext,uniphier-ld6b", }, - { .compatible = "st-ericsson,u8500", }, { .compatible = "st-ericsson,u8540", }, { .compatible = "st-ericsson,u9500", }, @@ -111,6 +109,14 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "marvell,armadaxp", }, + { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt2712", }, + { .compatible = "mediatek,mt7622", }, + { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt817x", }, + { .compatible = "mediatek,mt8173", }, + { .compatible = "mediatek,mt8176", }, + { .compatible = "nvidia,tegra124", }, { .compatible = "st,stih407", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index d83ab94d041a..545946ad0752 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -43,9 +43,17 @@ static struct freq_attr *cpufreq_dt_attr[] = { static int set_target(struct cpufreq_policy *policy, unsigned int index) { struct private_data *priv = policy->driver_data; + unsigned long freq = policy->freq_table[index].frequency; + int ret; + + ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000); - return dev_pm_opp_set_rate(priv->cpu_dev, - policy->freq_table[index].frequency * 1000); + if (!ret) { + arch_set_freq_scale(policy->related_cpus, freq, + policy->cpuinfo.max_freq); + } + + return ret; } /* diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ea43b147a7fe..183e1edaeece 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2232,6 +2232,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->min = new_policy->min; policy->max = new_policy->max; + trace_cpu_frequency_limits(policy->max, policy->min, 
policy->cpu); policy->cached_target_freq = UINT_MAX; @@ -2430,6 +2431,17 @@ int cpufreq_boost_enabled(void) } EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); +/********************************************************************* + * FREQUENCY INVARIANT ACCOUNTING SUPPORT * + *********************************************************************/ + +__weak void arch_set_freq_scale(struct cpumask *cpus, + unsigned long cur_freq, + unsigned long max_freq) +{ +} +EXPORT_SYMBOL_GPL(arch_set_freq_scale); + /********************************************************************* * REGISTER / UNREGISTER CPUFREQ DRIVER * *********************************************************************/ diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 58d4f4e1ad6a..ca38229b045a 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -22,6 +22,8 @@ #include "cpufreq_governor.h" +#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) + static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs); static DEFINE_MUTEX(gov_dbs_data_mutex); @@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, { struct dbs_data *dbs_data = to_dbs_data(attr_set); struct policy_dbs_info *policy_dbs; + unsigned int sampling_interval; int ret; - ret = sscanf(buf, "%u", &dbs_data->sampling_rate); - if (ret != 1) + + ret = sscanf(buf, "%u", &sampling_interval); + if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL) return -EINVAL; + dbs_data->sampling_rate = sampling_interval; + /* * We are operating under dbs_data->mutex and so the list and its * entries can't be freed concurrently. @@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) if (ret) goto free_policy_dbs_info; - dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); + /* + * The sampling interval should not be less than the transition latency + * of the CPU and it also cannot be too small for dbs_update() to work + * correctly. + */ + dbs_data->sampling_rate = max_t(unsigned int, + CPUFREQ_DBS_MIN_SAMPLING_INTERVAL, + cpufreq_policy_transition_delay_us(policy)); if (!have_governor_per_policy()) gov->gdbs_data = dbs_data; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index c46a12df40dd..859a62ea6120 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 7: - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V2; - if (c->x86_mask < 8) { + if (c->x86_stepping < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) numscales = 32; memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0 ... 
1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; @@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) longhaul_setup_voltagescaling(); - policy->cpuinfo.transition_latency = 200000; /* nsec */ + policy->transition_delay_us = 200000; /* usec */ return cpufreq_table_validate_and_show(policy, longhaul_table); } diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd77812313f3..a25741b1281b 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) #endif /* Errata workaround */ - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; switch (cpuid) { case 0x0f07: case 0x0f0a: diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 80ac313e6c59..302e9ce793a0 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -131,7 +131,7 @@ static int check_powernow(void) return 0; } - if ((c->x86_model == 6) && (c->x86_mask == 0)) { + if ((c->x86_model == 6) && (c->x86_stepping == 0)) { pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); have_a0 = 1; } diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 062d71434e47..b01e31db5f83 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1043,7 +1043,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { - pr_err("unable to alloc powernow_k8_data"); + pr_err("unable to alloc powernow_k8_data\n"); return -ENOMEM; } diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 3ff5160451b4..6b3a63545619 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -41,11 +41,9 @@ #define POWERNV_MAX_PSTATES 256 #define PMSR_PSAFE_ENABLE (1UL << 30) #define PMSR_SPR_EM_DISABLE (1UL << 31) -#define PMSR_MAX(x) ((x >> 32) & 0xFF) +#define MAX_PSTATE_SHIFT 32 #define LPSTATE_SHIFT 48 #define GPSTATE_SHIFT 56 -#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF) -#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF) #define MAX_RAMP_DOWN_TIME 5120 /* @@ -93,6 +91,7 @@ struct global_pstate_info { }; static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; +u32 pstate_sign_prefix; static bool rebooting, throttled, occ_reset; static const char * const throttle_reason[] = { @@ -147,6 +146,20 @@ static struct powernv_pstate_info { bool wof_enabled; } powernv_pstate_info; +static inline int extract_pstate(u64 pmsr_val, unsigned int shift) +{ + int ret = ((pmsr_val >> shift) & 0xFF); + + if (!ret) + return ret; + + return (pstate_sign_prefix | ret); +} + +#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT) +#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT) +#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT) + /* Use following macros for conversions between pstate_id and index */ static inline int idx_to_pstate(unsigned int i) { @@ -277,6 +290,9 @@ static int init_powernv_pstates(void) powernv_pstate_info.nr_pstates = nr_pstates; pr_debug("NR PStates %d\n", nr_pstates); + + pstate_sign_prefix = pstate_min & ~0xFF; + for (i = 0; i < nr_pstates; i++) { u32 id = be32_to_cpu(pstate_ids[i]); u32 freq = be32_to_cpu(pstate_freqs[i]); @@ -287,9 +303,9 @@ static int 
init_powernv_pstates(void) if (id == pstate_max) powernv_pstate_info.max = i; - else if (id == pstate_nominal) + if (id == pstate_nominal) powernv_pstate_info.nominal = i; - else if (id == pstate_min) + if (id == pstate_min) powernv_pstate_info.min = i; if (powernv_pstate_info.wof_enabled && id == pstate_turbo) { @@ -437,17 +453,10 @@ struct powernv_smp_call_data { static void powernv_read_cpu_freq(void *arg) { unsigned long pmspr_val; - s8 local_pstate_id; struct powernv_smp_call_data *freq_data = arg; pmspr_val = get_pmspr(SPRN_PMSR); - - /* - * The local pstate id corresponds bits 48..55 in the PMSR. - * Note: Watch out for the sign! - */ - local_pstate_id = (pmspr_val >> 48) & 0xFF; - freq_data->pstate_id = local_pstate_id; + freq_data->pstate_id = extract_local_pstate(pmspr_val); freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n", @@ -521,7 +530,7 @@ static void powernv_cpufreq_throttle_check(void *data) chip = this_cpu_read(chip_info); /* Check for Pmax Capping */ - pmsr_pmax = (s8)PMSR_MAX(pmsr); + pmsr_pmax = extract_max_pstate(pmsr); pmsr_pmax_idx = pstate_to_idx(pmsr_pmax); if (pmsr_pmax_idx != powernv_pstate_info.max) { if (chip->throttled) @@ -644,8 +653,8 @@ void gpstate_timer_handler(unsigned long data) * value. Hence, read from PMCR to get correct data. */ val = get_pmspr(SPRN_PMCR); - freq_data.gpstate_id = (s8)GET_GPSTATE(val); - freq_data.pstate_id = (s8)GET_LPSTATE(val); + freq_data.gpstate_id = extract_global_pstate(val); + freq_data.pstate_id = extract_local_pstate(val); if (freq_data.gpstate_id == freq_data.pstate_id) { reset_gpstates(policy); spin_unlock(&gpstates->gpstate_lock); diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 7b596fa38ad2..6bebc1f9f55a 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) static int s3c_cpufreq_init(struct cpufreq_policy *policy) { policy->clk = clk_arm; - return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); + + policy->cpuinfo.transition_latency = cpu_cur.info->latency; + + if (ftab) + return cpufreq_table_validate_and_show(policy, ftab); + + return 0; } static int __init s3c_cpufreq_initclks(void) diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 4894924a3ca2..195f27f9c1cb 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -177,7 +177,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) np = of_cpu_device_node_get(0); if (!np) { - pr_err("No cpu node found"); + pr_err("No cpu node found\n"); return -ENODEV; } @@ -187,7 +187,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) prop = of_find_property(np, "cpufreq_tbl", NULL); if (!prop || !prop->value) { - pr_err("Invalid cpufreq_tbl"); + pr_err("Invalid cpufreq_tbl\n"); ret = -ENODEV; goto out_put_node; } diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f4bb..4fa5adf16c70 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -37,7 +37,7 @@ struct cpu_id { __u8 x86; /* CPU family */ __u8 x86_model; /* model */ - __u8 x86_mask; /* stepping */ + __u8 x86_stepping; /* stepping */ }; enum { @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, { if ((c->x86 == x->x86) && (c->x86_model == x->x86_model) && - (c->x86_mask == 
x->x86_mask)) + (c->x86_stepping == x->x86_stepping)) return 1; return 0; } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index ccab452a4ef5..dd7bb00991f4 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 4: /* * B-stepping [M-P4-M] @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void) msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { - if (c->x86_mask == 0x01) { + if (c->x86_stepping == 0x01) { pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 4bf47de6101f..ffcddcd4c5e6 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -217,7 +217,8 @@ static int ti_cpufreq_init(void) opp_data->cpu_dev = get_cpu_device(0); if (!opp_data->cpu_dev) { pr_err("%s: Failed to get device for CPU0\n", __func__); - return -ENODEV; + ret = ENODEV; + goto free_opp_data; } opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev); @@ -262,6 +263,8 @@ static int ti_cpufreq_init(void) fail_put_node: of_node_put(opp_data->opp_node); +free_opp_data: + kfree(opp_data); return ret; } diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 52a75053ee03..f47c54546752 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -104,13 +104,13 @@ static int __init arm_idle_init(void) ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); if (ret <= 0) { ret = ret ? : -ENODEV; - goto init_fail; + goto out_kfree_drv; } ret = cpuidle_register_driver(drv); if (ret) { pr_err("Failed to register cpuidle driver\n"); - goto init_fail; + goto out_kfree_drv; } /* @@ -128,14 +128,14 @@ static int __init arm_idle_init(void) if (ret) { pr_err("CPU %d failed to init idle CPU ops\n", cpu); - goto out_fail; + goto out_unregister_drv; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { pr_err("Failed to allocate cpuidle device\n"); ret = -ENOMEM; - goto out_fail; + goto out_unregister_drv; } dev->cpu = cpu; @@ -143,21 +143,25 @@ static int __init arm_idle_init(void) if (ret) { pr_err("Failed to register cpuidle device for CPU %d\n", cpu); - kfree(dev); - goto out_fail; + goto out_kfree_dev; } } return 0; -init_fail: + +out_kfree_dev: + kfree(dev); +out_unregister_drv: + cpuidle_unregister_driver(drv); +out_kfree_drv: kfree(drv); out_fail: while (--cpu >= 0) { dev = per_cpu(cpuidle_devices, cpu); + drv = cpuidle_get_cpu_driver(dev); cpuidle_unregister_device(dev); - kfree(dev); - drv = cpuidle_get_driver(); cpuidle_unregister_driver(drv); + kfree(dev); kfree(drv); } diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index ed6531f075c6..e06605b21841 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -384,9 +384,9 @@ static int powernv_add_idle_states(void) * Firmware passes residency and latency values in ns. * cpuidle expects it in us. 
*/ - exit_latency = latency_ns[i] / 1000; + exit_latency = DIV_ROUND_UP(latency_ns[i], 1000); if (!rc) - target_residency = residency_ns[i] / 1000; + target_residency = DIV_ROUND_UP(residency_ns[i], 1000); else target_residency = 0; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 484cc8909d5c..e47e14101768 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -208,10 +208,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, return -EBUSY; } target_state = &drv->states[index]; + broadcast = false; } /* Take note of the planned idle state. */ - sched_idle_set_state(target_state); + sched_idle_set_state(target_state, index); trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ns_to_ktime(local_clock()); @@ -225,7 +226,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ - sched_idle_set_state(NULL); + sched_idle_set_state(NULL, -1); if (broadcast) { if (WARN_ON_ONCE(!irqs_disabled())) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 48eaf2879228..a99d5620056d 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -180,7 +180,12 @@ static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned lo /* for higher loadavg, we are more reluctant */ - mult += 2 * get_loadavg(load); + /* + * this doesn't work as intended - it is almost always 0, but can + * sometimes, depending on workload, spike very high into the hundreds + * even when the average cpu load is under 10%. + */ + /* mult += 2 * get_loadavg(); */ /* for IO wait tasks (per cpu!) we add 5x each */ mult += 10 * nr_iowaiters; diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index ecfdcfe3698d..4f41d6da5acc 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -34,12 +34,12 @@ #define PPC405EX_CE_RESET 0x00000008 #define CRYPTO4XX_CRYPTO_PRIORITY 300 -#define PPC4XX_LAST_PD 63 -#define PPC4XX_NUM_PD 64 -#define PPC4XX_LAST_GD 1023 +#define PPC4XX_NUM_PD 256 +#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1) #define PPC4XX_NUM_GD 1024 -#define PPC4XX_LAST_SD 63 -#define PPC4XX_NUM_SD 64 +#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1) +#define PPC4XX_NUM_SD 256 +#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1) #define PPC4XX_SD_BUFFER_SIZE 2048 #define PD_ENTRY_INUSE 1 diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 0f9754e07719..6eb5cb92b986 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); // The HW omits the initial increment of the counter field. 
- crypto_inc(req_ctx->hw_ctx.J0+12, 4); + memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4); ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); @@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = { .setkey = artpec6_crypto_aead_set_key, .encrypt = artpec6_crypto_aead_encrypt, .decrypt = artpec6_crypto_aead_decrypt, - .ivsize = AES_BLOCK_SIZE, + .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .base = { diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 8685c7e4debd..ee52c355bee0 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg, return 0; } +static int mailbox_send_message(struct brcm_message *mssg, u32 flags, + u8 chan_idx) +{ + int err; + int retry_cnt = 0; + struct device *dev = &(iproc_priv.pdev->dev); + + err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) { + while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { + /* + * Mailbox queue is full. Since MAY_SLEEP is set, assume + * not in atomic context and we can wait and try again. + */ + retry_cnt++; + usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); + err = mbox_send_message(iproc_priv.mbox[chan_idx], + mssg); + atomic_inc(&iproc_priv.mb_no_spc); + } + } + if (err < 0) { + atomic_inc(&iproc_priv.mb_send_fail); + return err; + } + + /* Check error returned by mailbox controller */ + err = mssg->error; + if (unlikely(err < 0)) { + dev_err(dev, "message error %d", err); + /* Signal txdone for mailbox channel */ + } + + /* Signal txdone for mailbox channel */ + mbox_client_txdone(iproc_priv.mbox[chan_idx], err); + return err; +} + /** * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in * a single SPU request message, starting at the current position in the request @@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) u32 pad_len; /* total length of all padding */ bool update_key = false; struct brcm_message *mssg; /* mailbox message */ - int retry_cnt = 0; /* number of entries in src and dst sg in mailbox message. */ u8 rx_frag_num = 2; /* response header and STATUS */ @@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (unlikely(err < 0)) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } return -EINPROGRESS; } @@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) u32 spu_hdr_len; unsigned int digestsize; u16 rem = 0; - int retry_cnt = 0; /* * number of entries in src and dst sg. Always includes SPU msg header. 
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (err < 0) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } + return -EINPROGRESS; } @@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) int assoc_nents = 0; bool incl_icv = false; unsigned int digestsize = ctx->digestsize; - int retry_cnt = 0; /* number of entries in src and dst sg. Always includes SPU msg header. */ @@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (err < 0) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } return -EINPROGRESS; } @@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev) mcl->dev = dev; mcl->tx_block = false; mcl->tx_tout = 0; - mcl->knows_txdone = false; + mcl->knows_txdone = true; mcl->rx_callback = spu_rx_callback; mcl->tx_done = NULL; diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index a118b9bed669..bfbf8bf77f03 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c @@ -494,7 +494,8 @@ static struct ahash_alg algs = { .cra_driver_name = DRIVER_NAME, .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), .cra_alignmask = 3, diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 2eefc4a26bc2..b648e31673f9 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -668,7 +668,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, qm_sg_ents = 1 + !!ivsize + mapped_src_nents + (mapped_dst_nents > 1 ? mapped_dst_nents : 0); if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", qm_sg_ents, CAAM_QI_MAX_AEAD_SG); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, op_type, 0, 0); @@ -905,7 +905,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request qm_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, op_type, 0, 0); @@ -1058,7 +1058,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( } if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", + dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, GIVENCRYPT, 0, 0); diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 027e121c6f70..39f70411f28f 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, * without any error (HW optimizations for later * CAAM eras), then try again. */ + if (ret) + break; + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || - !(rdsta_val & (1 << sh_idx))) + !(rdsta_val & (1 << sh_idx))) { ret = -EAGAIN; - if (ret) break; + } + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); /* Clear the contents before recreating the descriptor */ memset(desc, 0x00, CAAM_CMD_SZ * 7); @@ -809,9 +813,6 @@ static int caam_probe(struct platform_device *pdev) return 0; caam_remove: -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(ctrlpriv->dfs_root); -#endif caam_remove(pdev); return ret; diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 169e66231bcf..b0ba4331944b 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); - return -ENOMEM; + ret = -ENOMEM; + goto request_cleanup; } result = (union cpt_res_s *)info->completion_addr; diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c index e6db8672d89c..05850dfd7940 100644 --- a/drivers/crypto/ccp/ccp-crypto-rsa.c +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) { - if (ccp_version() > CCP_VERSION(3, 0)) - return CCP5_RSA_MAXMOD; - else - return CCP_RSA_MAXMOD; + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ctx->u.rsa.n_len; } static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 3e104f5aa0c2..b56b3f711d94 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_AUTHENC + select CRYPTO_GF128MUL ---help--- The Chelsio Crypto Co-processor driver for T6 adapters. 
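The conversion-specifier changes in the caamalg_qi.c hunks above are a size_t portability fix: the CAAM_QI_MAX_AEAD_SG and CAAM_QI_MAX_ABLKCIPHER_SG limits are sizeof()-based expressions, so they have type size_t, which is unsigned int on 32-bit builds and unsigned long on 64-bit ones; %lu therefore triggers -Wformat warnings on 32-bit, while %zu is correct everywhere. A minimal sketch of the corrected usage follows; report_sg_overflow() is a made-up helper for illustration, not part of the driver.

#include <linux/device.h>
#include <linux/types.h>

/* Illustrative only: prints a size_t limit with %zu, matching the
 * dev_err() format-string fixes in the hunks above. */
static void report_sg_overflow(struct device *dev, int qm_sg_ents,
			       size_t max_sg)
{
	dev_err(dev, "Insufficient S/G entries: %d > %zu\n",
		qm_sg_ents, max_sg);
}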
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 89ba9e85c0f3..d4c81cb73bee 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv ndesc = ctx->handle_result(priv, ring, sreq->req, &should_complete, &ret); if (ndesc < 0) { + kfree(sreq); dev_err(priv->dev, "failed to handle result (%d)", ndesc); return; } @@ -788,7 +789,7 @@ static int safexcel_probe(struct platform_device *pdev) return PTR_ERR(priv->base); } - priv->clk = of_clk_get(dev->of_node, 0); + priv->clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(priv->clk)) { ret = clk_prepare_enable(priv->clk); if (ret) { diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 5438552bc6d7..fcc0a606d748 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -14,6 +14,7 @@ #include #include +#include #include "safexcel.h" @@ -33,6 +34,10 @@ struct safexcel_cipher_ctx { unsigned int key_len; }; +struct safexcel_cipher_req { + bool needs_inv; +}; + static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, struct crypto_async_request *async, struct safexcel_command_desc *cdesc, @@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, return 0; } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_result_desc *rdesc; @@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async, spin_unlock_bh(&priv->ring[ring].egress_lock); request->req = &req->base; - ctx->base.handle_result = safexcel_handle_result; *commands = n_cdesc; *results = n_rdesc; @@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ring = safexcel_select_ring(priv); ctx->base.ring = ring; - ctx->base.needs_inv = false; - ctx->base.send = safexcel_aes_send; spin_lock_bh(&priv->ring[ring].queue_lock); enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); @@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return ndesc; } +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + int err; + + if (sreq->needs_inv) { + sreq->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, + should_complete, ret); + } + + return err; +} + static int safexcel_cipher_send_inv(struct crypto_async_request *async, int ring, struct safexcel_request *request, int *commands, int *results) @@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, struct safexcel_crypto_priv *priv = ctx->priv; int ret; - ctx->base.handle_result = safexcel_handle_inv_result; - ret = safexcel_invalidate_cache(async, &ctx->base, priv, ctx->base.ctxr_dma, ring, request); if 
(unlikely(ret)) @@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, return 0; } +static int safexcel_send(struct crypto_async_request *async, + int ring, struct safexcel_request *request, + int *commands, int *results) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + int ret; + + if (sreq->needs_inv) + ret = safexcel_cipher_send_inv(async, ring, request, + commands, results); + else + ret = safexcel_aes_send(async, ring, request, + commands, results); + return ret; +} + static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - struct skcipher_request req; + SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm)); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; - memset(&req, 0, sizeof(struct skcipher_request)); + memset(req, 0, sizeof(struct skcipher_request)); /* create invalidation request */ init_completion(&result.completion); - skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + safexcel_inv_complete, &result); - skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm)); - ctx = crypto_tfm_ctx(req.base.tfm); + skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); + ctx = crypto_tfm_ctx(req->base.tfm); ctx->base.exit_inv = true; - ctx->base.send = safexcel_cipher_send_inv; + sreq->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (!priv->ring[ring].need_dequeue) @@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req, enum safexcel_cipher_direction dir, u32 mode) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; + sreq->needs_inv = false; ctx->direction = dir; ctx->mode = mode; if (ctx->base.ctxr) { - if (ctx->base.needs_inv) - ctx->base.send = safexcel_cipher_send_inv; + if (ctx->base.needs_inv) { + sreq->needs_inv = true; + ctx->base.needs_inv = false; + } } else { ctx->base.ring = safexcel_select_ring(priv); - ctx->base.send = safexcel_aes_send; - ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, EIP197_GFP_FLAGS(req->base), &ctx->base.ctxr_dma); @@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) alg.skcipher.base); ctx->priv = tmpl->priv; + ctx->base.send = safexcel_send; + ctx->base.handle_result = safexcel_handle_result; + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct safexcel_cipher_req)); return 0; } diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3980f946874f..d626aa485a76 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -32,6 +32,9 @@ struct safexcel_ahash_req { bool last_req; bool finish; bool hmac; + bool needs_inv; + + int nents; u8 state_sz; /* expected sate size, only set once */ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; @@ -119,9 +122,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx 
*ctx, } } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct safexcel_result_desc *rdesc; struct ahash_request *areq = ahash_request_cast(async); @@ -151,8 +154,10 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, result_sz = crypto_ahash_digestsize(ahash); memcpy(sreq->state, areq->result, result_sz); - dma_unmap_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); + if (sreq->nents) { + dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); + sreq->nents = 0; + } safexcel_free_context(priv, async, sreq->state_sz); @@ -165,9 +170,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, return 1; } -static int safexcel_ahash_send(struct crypto_async_request *async, int ring, - struct safexcel_request *request, int *commands, - int *results) +static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, + struct safexcel_request *request, + int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); @@ -177,7 +182,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, struct safexcel_command_desc *cdesc, *first_cdesc = NULL; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; + int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; queued = len = req->len - req->processed; if (queued < crypto_ahash_blocksize(ahash)) @@ -185,17 +190,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, else cache_len = queued - areq->nbytes; - /* - * If this is not the last request and the queued data does not fit - * into full blocks, cache it for the next send() call. - */ - extra = queued & (crypto_ahash_blocksize(ahash) - 1); - if (!req->last_req && extra) { - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), - req->cache_next, extra, areq->nbytes - extra); - - queued -= extra; - len -= extra; + if (!req->last_req) { + /* If this is not the last request and the queued data does not + * fit into full blocks, cache it for the next send() call. + */ + extra = queued & (crypto_ahash_blocksize(ahash) - 1); + if (!extra) + /* If this is not the last request and the queued data + * is a multiple of a block, cache the last one for now. 
+ */ + extra = queued - crypto_ahash_blocksize(ahash); + + if (extra) { + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), + req->cache_next, extra, + areq->nbytes - extra); + + queued -= extra; + len -= extra; + + if (!queued) { + *commands = 0; + *results = 0; + return 0; + } + } } spin_lock_bh(&priv->ring[ring].egress_lock); @@ -233,15 +252,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, } /* Now handle the current ahash request buffer(s) */ - nents = dma_map_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), - DMA_TO_DEVICE); - if (!nents) { + req->nents = dma_map_sg(priv->dev, areq->src, + sg_nents_for_len(areq->src, areq->nbytes), + DMA_TO_DEVICE); + if (!req->nents) { ret = -ENOMEM; goto cdesc_rollback; } - for_each_sg(areq->src, sg, nents, i) { + for_each_sg(areq->src, sg, req->nents, i) { int sglen = sg_dma_len(sg); /* Do not overflow the request */ @@ -292,7 +311,6 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, req->processed += len; request->req = &areq->base; - ctx->base.handle_result = safexcel_handle_result; *commands = n_cdesc; *results = 1; @@ -376,8 +394,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ring = safexcel_select_ring(priv); ctx->base.ring = ring; - ctx->base.needs_inv = false; - ctx->base.send = safexcel_ahash_send; spin_lock_bh(&priv->ring[ring].queue_lock); enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); @@ -394,6 +410,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return 1; } +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct ahash_request *areq = ahash_request_cast(async); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + int err; + + if (req->needs_inv) { + req->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, + should_complete, ret); + } + + return err; +} + static int safexcel_ahash_send_inv(struct crypto_async_request *async, int ring, struct safexcel_request *request, int *commands, int *results) @@ -402,7 +438,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); int ret; - ctx->base.handle_result = safexcel_handle_inv_result; ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, ctx->base.ctxr_dma, ring, request); if (unlikely(ret)) @@ -414,28 +449,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, return 0; } +static int safexcel_ahash_send(struct crypto_async_request *async, + int ring, struct safexcel_request *request, + int *commands, int *results) +{ + struct ahash_request *areq = ahash_request_cast(async); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + int ret; + + if (req->needs_inv) + ret = safexcel_ahash_send_inv(async, ring, request, + commands, results); + else + ret = safexcel_ahash_send_req(async, ring, request, + commands, results); + return ret; +} + static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - struct ahash_request req; + AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm)); + struct safexcel_ahash_req *rctx = ahash_request_ctx(req); struct safexcel_inv_result result = 
{}; int ring = ctx->base.ring; - memset(&req, 0, sizeof(struct ahash_request)); + memset(req, 0, sizeof(struct ahash_request)); /* create invalidation request */ init_completion(&result.completion); - ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, safexcel_inv_complete, &result); - ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm)); - ctx = crypto_tfm_ctx(req.base.tfm); + ahash_request_set_tfm(req, __crypto_ahash_cast(tfm)); + ctx = crypto_tfm_ctx(req->base.tfm); ctx->base.exit_inv = true; - ctx->base.send = safexcel_ahash_send_inv; + rctx->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (!priv->ring[ring].need_dequeue) @@ -483,14 +536,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; - ctx->base.send = safexcel_ahash_send; + req->needs_inv = false; if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); if (ctx->base.ctxr) { - if (ctx->base.needs_inv) - ctx->base.send = safexcel_ahash_send_inv; + if (ctx->base.needs_inv) { + ctx->base.needs_inv = false; + req->needs_inv = true; + } } else { ctx->base.ring = safexcel_select_ring(priv); ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, @@ -624,6 +679,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template, alg.ahash); ctx->priv = tmpl->priv; + ctx->base.send = safexcel_ahash_send; + ctx->base.handle_result = safexcel_handle_result; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct safexcel_ahash_req)); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index a9fd8b9e86cd..699ee5a9a8f9 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1625,6 +1625,7 @@ static int queue_cache_init(void) CWQ_ENTRY_SIZE, 0, NULL); if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; return -ENOMEM; } return 0; @@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; + queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; } static long spu_queue_register_workfn(void *arg) diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b3869748cc6b..c939f18f70cc 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -512,7 +512,7 @@ static int __init padlock_init(void) printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 7ac657f46d15..aec66159566d 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -601,15 +601,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) uint32_t aes_control; unsigned long flags; int err; + u8 *iv; aes_control = SSS_AES_KEY_CHANGE_MODE; if 
(mode & FLAGS_AES_DECRYPT) aes_control |= SSS_AES_MODE_DECRYPT; - if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) + if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) { aes_control |= SSS_AES_CHAIN_MODE_CBC; - else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) + iv = req->info; + } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) { aes_control |= SSS_AES_CHAIN_MODE_CTR; + iv = req->info; + } else { + iv = NULL; /* AES_ECB */ + } if (dev->ctx->keylen == AES_KEYSIZE_192) aes_control |= SSS_AES_KEY_SIZE_192; @@ -640,7 +646,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) goto outdata_error; SSS_AES_WRITE(dev, AES_CONTROL, aes_control); - s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); + s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen); s5p_set_dma_indata(dev, dev->sg_src); s5p_set_dma_outdata(dev, dev->sg_dst); diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index 090582baecfe..8f09b8430893 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c @@ -208,6 +208,7 @@ static struct shash_alg algs[] = { .cra_name = "crc32", .cra_driver_name = DRIVER_NAME, .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), @@ -229,6 +230,7 @@ static struct shash_alg algs[] = { .cra_name = "crc32c", .cra_driver_name = DRIVER_NAME, .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 0d01d1624252..63d636424161 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; - spin_lock(&ss->slock); + spin_lock_bh(&ss->slock); writel(mode, ss->base + SS_CTL); @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, } writel(0, ss->base + SS_CTL); - spin_unlock(&ss->slock); - return dlen; + spin_unlock_bh(&ss->slock); + return 0; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dff88838dce7..ceae25112acd 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1124,6 +1124,11 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + if (!src) { + *ptr = zero_entry; + return 1; + } + to_talitos_ptr_len(ptr, len, is_sec1); to_talitos_ptr_ext_set(ptr, 0, is_sec1); @@ -1232,12 +1237,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sg_link_tbl_len += authsize; } - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, - &desc->ptr[4], sg_count, areq->assoclen, - tbl_off); + ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc, + &desc->ptr[4], sg_count, areq->assoclen, tbl_off); - if (sg_count > 1) { - tbl_off += sg_count; + if (ret > 1) { + tbl_off += ret; sync_needed = true; } @@ -1248,14 +1252,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); } - sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc, - &desc->ptr[5], sg_count, areq->assoclen, - tbl_off); + ret = 
talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], + sg_count, areq->assoclen, tbl_off); if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); - if (sg_count > 1) { + /* ICV data */ + if (ret > 1) { + tbl_off += ret; edesc->icv_ool = true; sync_needed = true; @@ -1265,9 +1270,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sizeof(struct talitos_ptr) + authsize; /* Add an entry to the link table for ICV data */ - tbl_ptr += sg_count - 1; - to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1); - tbl_ptr++; + to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, is_sec1); to_talitos_ptr_len(tbl_ptr, authsize, is_sec1); @@ -1275,18 +1278,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* icv data follows link tables */ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, is_sec1); + } else { + dma_addr_t addr = edesc->dma_link_tbl; + + if (is_sec1) + addr += areq->assoclen + cryptlen; + else + addr += sizeof(struct talitos_ptr) * tbl_off; + + to_talitos_ptr(&desc->ptr[6], addr, is_sec1); + to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); + } + } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { + ret = talitos_sg_map(dev, areq->dst, authsize, edesc, + &desc->ptr[6], sg_count, areq->assoclen + + cryptlen, + tbl_off); + if (ret > 1) { + tbl_off += ret; + edesc->icv_ool = true; + sync_needed = true; + } else { + edesc->icv_ool = false; } } else { edesc->icv_ool = false; } - /* ICV data */ - if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { - to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); - to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl + - areq->assoclen + cryptlen, is_sec1); - } - /* iv out */ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, @@ -1494,12 +1512,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 tmp[DES_EXPKEY_WORDS]; if (keylen > TALITOS_MAX_KEY_SIZE) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } + if (unlikely(crypto_ablkcipher_get_flags(cipher) & + CRYPTO_TFM_REQ_WEAK_KEY) && + !des_ekey(tmp, key)) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; @@ -2614,7 +2640,7 @@ static struct talitos_alg_template driver_algs[] = { .ivsize = AES_BLOCK_SIZE, } }, - .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CTR, }, @@ -3047,6 +3073,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.aead.setkey = aead_setkey; t_alg->algt.alg.aead.encrypt = aead_encrypt; t_alg->algt.alg.aead.decrypt = aead_decrypt; + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && + !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) { + kfree(t_alg); + return ERR_PTR(-ENOTSUPP); + } break; case CRYPTO_ALG_TYPE_AHASH: alg = &t_alg->algt.alg.hash.halg.base; diff --git a/drivers/dax/device.c b/drivers/dax/device.c index e9f3b3e4bbf4..7b0bf825c4e7 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -222,7 +222,8 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size) { struct resource *res; - phys_addr_t phys; + /* gcc-4.6.3-nolibc for 
i386 complains that this is uninitialized */ + phys_addr_t uninitialized_var(phys); int i; for (i = 0; i < dev_dax->num_resources; i++) { @@ -427,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf) return dev_dax_huge_fault(vmf, PE_SIZE_PTE); } +static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr) +{ + struct file *filp = vma->vm_file; + struct dev_dax *dev_dax = filp->private_data; + struct dax_region *dax_region = dev_dax->region; + + if (!IS_ALIGNED(addr, dax_region->align)) + return -EINVAL; + return 0; +} + static const struct vm_operations_struct dax_vm_ops = { .fault = dev_dax_fault, .huge_fault = dev_dax_huge_fault, + .split = dev_dax_split, }; static int dax_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 557b93703532..c4cd034a3820 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -344,6 +344,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb) struct inode *inode; dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); + if (!dax_dev) + return NULL; + inode = &dax_dev->inode; inode->i_rdev = 0; return inode; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index a1c4ee818614..8a411514a7c5 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -676,7 +676,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev, devfreq = devfreq_add_device(dev, profile, governor_name, data); if (IS_ERR(devfreq)) { devres_free(ptr); - return ERR_PTR(-ENOMEM); + return devfreq; } *ptr = devfreq; @@ -935,7 +935,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, if (df->governor == governor) { ret = 0; goto out; - } else if (df->governor->immutable || governor->immutable) { + } else if ((df->governor && df->governor->immutable) || + governor->immutable) { ret = -EINVAL; goto out; } diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index ed3b785bae37..09ccac1768e3 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -30,4 +30,6 @@ config SW_SYNC WARNING: improper use of this can result in deadlocking kernel drivers from userspace. Intended for test and debug only. 
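Looking back at the inside-secure safexcel hunks at the top of this section: both the cipher and the hash path stop keeping needs_inv and the send/handle_result callbacks in the shared transform context and instead record needs_inv in each request's own context, registering one dispatcher pair once in cra_init(). A minimal standalone sketch of that dispatch pattern, using hypothetical stand-in types and stub operations rather than the driver's real ones:

#include <stdbool.h>

/* Hypothetical stand-ins for the driver's per-request operations. */
static int sketch_send_inv(void *req)         { return 0; }
static int sketch_send_data(void *req)        { return 0; }
static int sketch_handle_inv(void *req)       { return 0; }
static int sketch_handle_data(void *req)      { return 0; }

struct sketch_req_ctx {
	bool needs_inv;	/* set per request, e.g. right after a key change */
};

/* A single send() is registered at init time; the per-request flag decides
 * between context-cache invalidation and normal crypto processing. */
static int sketch_send(struct sketch_req_ctx *sreq)
{
	return sreq->needs_inv ? sketch_send_inv(sreq) : sketch_send_data(sreq);
}

/* The matching result handler consumes the flag so completion of an
 * invalidation request is not mistaken for crypto output. */
static int sketch_handle_result(struct sketch_req_ctx *sreq)
{
	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		return sketch_handle_inv(sreq);
	}
	return sketch_handle_data(sreq);
}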
+source "drivers/dma-buf/hyper_dmabuf/Kconfig" + endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index c33bf8863147..3f15a841502e 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,4 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o +obj-$(CONFIG_HYPER_DMABUF) += hyper_dmabuf/ diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 0350829ba62e..dd1edfb27b61 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence) return "unbound"; } +static void irq_dma_fence_array_work(struct irq_work *wrk) +{ + struct dma_fence_array *array = container_of(wrk, typeof(*array), work); + + dma_fence_signal(&array->base); + dma_fence_put(&array->base); +} + static void dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) { @@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_array *array = array_cb->array; if (atomic_dec_and_test(&array->num_pending)) - dma_fence_signal(&array->base); - dma_fence_put(&array->base); + irq_work_queue(&array->work); + else + dma_fence_put(&array->base); } static bool dma_fence_array_enable_signaling(struct dma_fence *fence) @@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, spin_lock_init(&array->lock); dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock, context, seqno); + init_irq_work(&array->work, irq_dma_fence_array_work); array->num_fences = num_fences; atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig b/drivers/dma-buf/hyper_dmabuf/Kconfig new file mode 100644 index 000000000000..88992167c645 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Kconfig @@ -0,0 +1,71 @@ +menu "hyper_dmabuf options" + +config HYPER_DMABUF + bool "Enables hyper dmabuf driver" + default y + +choice + prompt "Hypervisor" + depends on HYPER_DMABUF + default HYPER_DMABUF_XEN + +config HYPER_DMABUF_XEN + bool "Configure hyper_dmabuf for XEN hypervisor" + depends on HYPER_DMABUF && XEN + help + Configuring hyper_dmabuf driver for XEN hypervisor + +config HYPER_DMABUF_ACRN + bool "Configure hyper_dmabuf for ACRN hypervisor" + depends on HYPER_DMABUF && ACRN_VIRTIO_DEVICES + help + Configuring hyper_dmabuf driver for ACRN hypervisor +endchoice + +choice + prompt "Virtio driver type" + depends on HYPER_DMABUF && HYPER_DMABUF_ACRN + default HYPER_DMABUF_VIRTIO_BE + +config HYPER_DMABUF_VIRTIO_BE + depends on VBS && DRM_I915_GVT + bool "Configure hyper_dmabuf as virtio backend" + help + Configuring hyper_dmabuf driver as virtio backend + +config HYPER_DMABUF_VIRTIO_FE + depends on ACRN_VIRTIO_DEVICES + bool "Configure hyper_dmabuf as virtio frontend" + help + Configuring hyper_dmabuf driver as virtio frontend +endchoice + +config HYPER_DMABUF_SYSFS + bool "Enable sysfs information about hyper DMA buffers" + default y + depends on HYPER_DMABUF + help + Expose information about imported and exported buffers using + hyper_dmabuf driver + +config HYPER_DMABUF_EVENT_GEN + bool "Enable event-generation and polling operation" + default n + depends on HYPER_DMABUF + help + With this config enabled, hyper_dmabuf driver on the importer side + generates events and queue those up in the event list whenever a new + shared DMA-BUF 
is available. Events in the list can be retrieved by + read operation. + +config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD + bool "Enable automatic rx-ch add with 10 secs interval" + default y + depends on HYPER_DMABUF && HYPER_DMABUF_XEN + help + If enabled, driver reads a node in xenstore every 10 seconds + to check whether there is any tx comm ch configured by another + domain then initialize matched rx comm ch automatically for any + existing tx comm chs. + +endmenu diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile b/drivers/dma-buf/hyper_dmabuf/Makefile new file mode 100644 index 000000000000..f63967cc99f6 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Makefile @@ -0,0 +1,57 @@ +TARGET_MODULE:=hyper_dmabuf + +# If we running by kernel building system +ifneq ($(KERNELRELEASE),) + $(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \ + hyper_dmabuf_ioctl.o \ + hyper_dmabuf_list.o \ + hyper_dmabuf_sgl_proc.o \ + hyper_dmabuf_ops.o \ + hyper_dmabuf_msg.o \ + hyper_dmabuf_id.o \ + hyper_dmabuf_remote_sync.o \ + hyper_dmabuf_query.o \ + +ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y) + $(TARGET_MODULE)-objs += hyper_dmabuf_event.o +endif + +ifeq ($(CONFIG_HYPER_DMABUF_XEN), y) + $(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \ + xen/hyper_dmabuf_xen_comm_list.o \ + xen/hyper_dmabuf_xen_shm.o \ + xen/hyper_dmabuf_xen_drv.o +else ifeq ($(CONFIG_HYPER_DMABUF_ACRN), y) + ifeq ($(CONFIG_HYPER_DMABUF_VIRTIO_BE), y) + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_be_drv.o \ + virtio/hyper_dmabuf_virtio_fe_list.o + else + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_fe_drv.o + endif + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_common.o \ + virtio/hyper_dmabuf_virtio_shm.o \ + virtio/hyper_dmabuf_virtio_comm_ring.o +endif + +obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o + +# If we are running without kernel build system +else +BUILDSYSTEM_DIR?=../../../ +PWD:=$(shell pwd) + +all : +# run kernel build system to make module + $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules + +clean: +# run kernel build system to cleanup in current directory + $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean + +load: + insmod ./$(TARGET_MODULE).ko + +unload: + rmmod ./$(TARGET_MODULE).ko + +endif diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c new file mode 100644 index 000000000000..f1afce29d6af --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c @@ -0,0 +1,411 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
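The CONFIG_HYPER_DMABUF_EVENT_GEN help above notes that importer-side events are queued and retrieved through read(); in hyper_dmabuf_event_read() below, each event comes back as a fixed header followed by the exported buffer's private data. A small userspace reader sketch, assuming the misc device shows up as /dev/hyper_dmabuf and using local struct definitions that merely mirror the fields visible in the driver (the exact uapi layout is an assumption here):

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Local mirrors of the driver's id and event header; field order and types
 * are assumptions made for this sketch. */
struct hid { int32_t id; int32_t rng_key[3]; };
struct event_hdr { uint32_t event_type; struct hid hid; uint32_t size; };

int main(void)
{
	int fd = open("/dev/hyper_dmabuf", O_RDONLY);
	char buf[4096];

	if (fd < 0)	/* reading also needs CAP_DAC_OVERRIDE, per the driver */
		return 1;

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		ssize_t n, off = 0;

		if (poll(&pfd, 1, -1) <= 0)
			break;

		n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;

		while (n - off >= (ssize_t)sizeof(struct event_hdr)) {
			struct event_hdr hdr;

			memcpy(&hdr, buf + off, sizeof(hdr));
			printf("event 0x%x hid %d, %u bytes of private data\n",
			       hdr.event_type, hdr.hid.id, hdr.size);
			off += sizeof(hdr) + hdr.size;	/* data follows the header */
		}
	}
	close(fd);
	return 0;
}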
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_event.h" + +#ifdef CONFIG_HYPER_DMABUF_XEN +#include "xen/hyper_dmabuf_xen_drv.h" +#elif defined (CONFIG_HYPER_DMABUF_ACRN) +#include "virtio/hyper_dmabuf_virtio_common.h" +#endif + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Intel Corporation"); + +struct hyper_dmabuf_private *hy_drv_priv; + +static void force_free(struct exported_sgt_info *exported, + void *attr) +{ + struct ioctl_hyper_dmabuf_unexport unexport_attr; + struct file *filp = (struct file *)attr; + + if (!filp || !exported) + return; + + if (exported->filp == filp) { + dev_dbg(hy_drv_priv->dev, + "Forcefully releasing buffer {id:%d key:%d %d %d}\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + unexport_attr.hid = exported->hid; + unexport_attr.delay_ms = 0; + + hyper_dmabuf_unexport_ioctl(filp, &unexport_attr); + } +} + +static int hyper_dmabuf_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + + /* Do not allow exclusive open */ + if (filp->f_flags & O_EXCL) + return -EBUSY; + + return ret; +} + +static int hyper_dmabuf_release(struct inode *inode, struct file *filp) +{ + hyper_dmabuf_foreach_exported(force_free, filp); + + return 0; +} + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + +static unsigned int hyper_dmabuf_event_poll(struct file *filp, + struct poll_table_struct *wait) +{ + poll_wait(filp, &hy_drv_priv->event_wait, wait); + + if (!list_empty(&hy_drv_priv->event_list)) + return POLLIN | POLLRDNORM; + + return 0; +} + +static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + int ret; + + /* only root can read events */ + if (!capable(CAP_DAC_OVERRIDE)) { + dev_err(hy_drv_priv->dev, + "Only root can read events\n"); + return -EPERM; + } + + /* make sure user buffer can be written */ + if (!access_ok(VERIFY_WRITE, buffer, count)) { + dev_err(hy_drv_priv->dev, + "User buffer can't be written.\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock); + if (ret) + return ret; + + while (1) { + struct hyper_dmabuf_event *e = NULL; + + spin_lock_irq(&hy_drv_priv->event_lock); + if (!list_empty(&hy_drv_priv->event_list)) { + e = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&e->link); + } + spin_unlock_irq(&hy_drv_priv->event_lock); + + if (!e) { + if (ret) + break; + + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + ret = wait_event_interruptible(hy_drv_priv->event_wait, + !list_empty(&hy_drv_priv->event_list)); + + if (ret == 0) + ret = mutex_lock_interruptible( + &hy_drv_priv->event_read_lock); + + if (ret) + return ret; + } else { + unsigned int length = (sizeof(e->event_data.hdr) + + e->event_data.hdr.size); + + if (length > count - ret) { +put_back_event: + spin_lock_irq(&hy_drv_priv->event_lock); + list_add(&e->link, &hy_drv_priv->event_list); + 
spin_unlock_irq(&hy_drv_priv->event_lock); + break; + } + + if (copy_to_user(buffer + ret, &e->event_data.hdr, + sizeof(e->event_data.hdr))) { + if (ret == 0) + ret = -EFAULT; + + goto put_back_event; + } + + ret += sizeof(e->event_data.hdr); + + if (copy_to_user(buffer + ret, e->event_data.data, + e->event_data.hdr.size)) { + /* error while copying void *data */ + + struct hyper_dmabuf_event_hdr dummy_hdr = {0}; + + ret -= sizeof(e->event_data.hdr); + + /* nullifying hdr of the event in user buffer */ + if (copy_to_user(buffer + ret, &dummy_hdr, + sizeof(dummy_hdr))) { + dev_err(hy_drv_priv->dev, + "failed to nullify invalid hdr already in userspace\n"); + } + + ret = -EFAULT; + + goto put_back_event; + } + + ret += e->event_data.hdr.size; + hy_drv_priv->pending--; + kfree(e); + } + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + + return ret; +} + +#endif + +static const struct file_operations hyper_dmabuf_driver_fops = { + .owner = THIS_MODULE, + .open = hyper_dmabuf_open, + .release = hyper_dmabuf_release, + +/* poll and read interfaces are needed only for event-polling */ +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + .read = hyper_dmabuf_event_read, + .poll = hyper_dmabuf_event_poll, +#endif + + .unlocked_ioctl = hyper_dmabuf_ioctl, +}; + +static struct miscdevice hyper_dmabuf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hyper_dmabuf", + .fops = &hyper_dmabuf_driver_fops, +}; + +static int register_device(void) +{ + int ret = 0; + + ret = misc_register(&hyper_dmabuf_miscdev); + + if (ret) { + printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n"); + return ret; + } + + hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device; + + /* TODO: Check if there is a different way to initialize dma mask */ + dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64)); + + return ret; +} + +static void unregister_device(void) +{ + dev_info(hy_drv_priv->dev, + "hyper_dmabuf: unregister_device() is called\n"); + + misc_deregister(&hyper_dmabuf_miscdev); +} + +static int __init hyper_dmabuf_drv_init(void) +{ + int ret = 0; + + printk(KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n"); + + hy_drv_priv = kcalloc(1, sizeof(struct hyper_dmabuf_private), + GFP_KERNEL); + + if (!hy_drv_priv) + return -ENOMEM; + + ret = register_device(); + if (ret < 0) + return ret; + +/* currently only supports XEN hypervisor */ +#ifdef CONFIG_HYPER_DMABUF_XEN + hy_drv_priv->bknd_ops = &xen_bknd_ops; +#elif defined (CONFIG_HYPER_DMABUF_ACRN) + hy_drv_priv->bknd_ops = &virtio_bknd_ops; +#else + hy_drv_priv->bknd_ops = NULL; + printk(KERN_ERR "No backend configured for hyper_dmabuf in kernel config\n"); +#endif + + if (hy_drv_priv->bknd_ops == NULL) { + printk(KERN_ERR "Hyper_dmabuf: no backend found\n"); + return -1; + } + + mutex_init(&hy_drv_priv->lock); + + mutex_lock(&hy_drv_priv->lock); + + hy_drv_priv->initialized = false; + + dev_info(hy_drv_priv->dev, + "initializing database for imported/exported dmabufs\n"); + + hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue"); + + ret = hyper_dmabuf_table_init(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "fail to init table for exported/imported entries\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } + +#ifdef CONFIG_HYPER_DMABUF_SYSFS + ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to initialize sysfs\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } +#endif + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + 
mutex_init(&hy_drv_priv->event_read_lock); + spin_lock_init(&hy_drv_priv->event_lock); + + /* Initialize event queue */ + INIT_LIST_HEAD(&hy_drv_priv->event_list); + init_waitqueue_head(&hy_drv_priv->event_wait); + + /* resetting number of pending events */ + hy_drv_priv->pending = 0; +#endif + + if (hy_drv_priv->bknd_ops->init) { + ret = hy_drv_priv->bknd_ops->init(); + + if (ret < 0) { + dev_dbg(hy_drv_priv->dev, + "failed to initialize backend.\n"); + return ret; + } + } + + hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id(); + + hy_drv_priv->initialized = true; + if (hy_drv_priv->bknd_ops->init_comm_env) { + ret = hy_drv_priv->bknd_ops->init_comm_env(); + if (ret < 0) { + hy_drv_priv->initialized = false; + dev_dbg(hy_drv_priv->dev, + "failed to initialize comm-env.\n"); + } + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_info(hy_drv_priv->dev, + "Finishing up initialization of hyper_dmabuf drv\n"); + + /* interrupt for comm should be registered here: */ + return ret; +} + +static void hyper_dmabuf_drv_exit(void) +{ +#ifdef CONFIG_HYPER_DMABUF_SYSFS + hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev); +#endif + + mutex_lock(&hy_drv_priv->lock); + + /* hash tables for export/import entries and ring_infos */ + hyper_dmabuf_table_destroy(); + + if (hy_drv_priv->bknd_ops->destroy_comm) { + hy_drv_priv->bknd_ops->destroy_comm(); + } + + if (hy_drv_priv->bknd_ops->cleanup) { + hy_drv_priv->bknd_ops->cleanup(); + }; + + /* destroy workqueue */ + if (hy_drv_priv->work_queue) + destroy_workqueue(hy_drv_priv->work_queue); + + /* destroy id_queue */ + if (hy_drv_priv->id_queue) + hyper_dmabuf_free_hid_list(); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* clean up event queue */ + hyper_dmabuf_events_release(); +#endif + + mutex_unlock(&hy_drv_priv->lock); + + dev_info(hy_drv_priv->dev, + "hyper_dmabuf driver: Exiting\n"); + + kfree(hy_drv_priv); + + unregister_device(); +} + +module_init(hyper_dmabuf_drv_init); +module_exit(hyper_dmabuf_drv_exit); diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h new file mode 100644 index 000000000000..45c24fd8d25d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h @@ -0,0 +1,118 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ +#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ + +#include +#include + +struct hyper_dmabuf_req; + +struct hyper_dmabuf_event { + struct hyper_dmabuf_event_data event_data; + struct list_head link; +}; + +struct hyper_dmabuf_private { + struct device *dev; + + /* VM(domain) id of current VM instance */ + int domid; + + /* workqueue dedicated to hyper_dmabuf driver */ + struct workqueue_struct *work_queue; + + /* list of reusable hyper_dmabuf_ids */ + struct list_reusable_id *id_queue; + + /* backend ops - hypervisor specific */ + struct hyper_dmabuf_bknd_ops *bknd_ops; + + /* device global lock */ + /* TODO: might need a lock per resource (e.g. EXPORT LIST) */ + struct mutex lock; + + /* flag that shows whether backend is initialized */ + bool initialized; + + wait_queue_head_t event_wait; + struct list_head event_list; + + spinlock_t event_lock; + struct mutex event_read_lock; + + /* # of pending events */ + int pending; +}; + +struct list_reusable_id { + hyper_dmabuf_id_t hid; + struct list_head list; +}; + +struct hyper_dmabuf_bknd_ops { + /* backend initialization routine (optional) */ + int (*init)(void); + + /* backend cleanup routine (optional) */ + void (*cleanup)(void); + + /* retreiving id of current virtual machine */ + int (*get_vm_id)(void); + + /* get pages shared via hypervisor-specific method */ + int (*share_pages)(struct page **, int, int, void **); + + /* make shared pages unshared via hypervisor specific method */ + int (*unshare_pages)(void **, int); + + /* map remotely shared pages on importer's side via + * hypervisor-specific method + */ + struct page ** (*map_shared_pages)(unsigned long, int, int, void **); + + /* unmap and free shared pages on importer's side via + * hypervisor-specific method + */ + int (*unmap_shared_pages)(void **, int); + + /* initialize communication environment */ + int (*init_comm_env)(void); + + void (*destroy_comm)(void); + + /* upstream ch setup (receiving and responding) */ + int (*init_rx_ch)(int); + + /* downstream ch setup (transmitting and parsing responses) */ + int (*init_tx_ch)(int); + + int (*send_req)(int, struct hyper_dmabuf_req *, int); +}; + +/* exporting global drv private info */ +extern struct hyper_dmabuf_private *hy_drv_priv; + +#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c new file mode 100644 index 000000000000..392ea99e0784 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c @@ -0,0 +1,122 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
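The hyper_dmabuf_bknd_ops table above is the entire hypervisor abstraction: the core never calls Xen or virtio code directly, and hyper_dmabuf_drv_init() simply points hy_drv_priv->bknd_ops at xen_bknd_ops or virtio_bknd_ops depending on the Kconfig choice. A hypothetical backend skeleton, sketched on the assumption that it lives next to the existing backends and includes hyper_dmabuf_drv.h; hooks the core checks for NULL (init, cleanup, init_comm_env, destroy_comm and the comm channel setup) are left out, while the unconditionally used ones are stubbed:

static int stub_get_vm_id(void) { return 0; }

static int stub_share_pages(struct page **pages, int domid, int nents,
			    void **refs_info)
{
	/* share nents pages with domid; return a reference the importer
	 * can later hand to map_shared_pages() */
	return -ENOTSUPP;
}

static int stub_unshare_pages(void **refs_info, int nents) { return 0; }

static struct page **stub_map_shared_pages(unsigned long ref, int domid,
					   int nents, void **refs_info)
{
	return NULL;	/* map the exporter's pages on the importing side */
}

static int stub_unmap_shared_pages(void **refs_info, int nents) { return 0; }

static int stub_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
{
	return 0;	/* deliver req to domid, optionally waiting for the answer */
}

static struct hyper_dmabuf_bknd_ops stub_bknd_ops = {
	.get_vm_id          = stub_get_vm_id,
	.share_pages        = stub_share_pages,
	.unshare_pages      = stub_unshare_pages,
	.map_shared_pages   = stub_map_shared_pages,
	.unmap_shared_pages = stub_unmap_shared_pages,
	.send_req           = stub_send_req,
};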
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_event.h" + +static void send_event(struct hyper_dmabuf_event *e) +{ + struct hyper_dmabuf_event *oldest; + unsigned long irqflags; + + spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags); + + /* check current number of event then if it hits the max num allowed + * then remove the oldest event in the list + */ + if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) { + oldest = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&oldest->link); + hy_drv_priv->pending--; + kfree(oldest); + } + + list_add_tail(&e->link, + &hy_drv_priv->event_list); + + hy_drv_priv->pending++; + + wake_up_interruptible(&hy_drv_priv->event_wait); + + spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags); +} + +void hyper_dmabuf_events_release(void) +{ + struct hyper_dmabuf_event *e, *et; + unsigned long irqflags; + + spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags); + + list_for_each_entry_safe(e, et, &hy_drv_priv->event_list, + link) { + list_del(&e->link); + kfree(e); + hy_drv_priv->pending--; + } + + if (hy_drv_priv->pending) { + dev_err(hy_drv_priv->dev, + "possible leak on event_list\n"); + } + + spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags); +} + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid) +{ + struct hyper_dmabuf_event *e; + struct imported_sgt_info *imported; + + imported = hyper_dmabuf_find_imported(hid); + + if (!imported) { + dev_err(hy_drv_priv->dev, + "can't find imported_sgt_info in the list\n"); + return -EINVAL; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + + if (!e) + return -ENOMEM; + + e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT; + e->event_data.hdr.hid = hid; + e->event_data.data = (void *)imported->priv; + e->event_data.hdr.size = imported->sz_priv; + + send_event(e); + + dev_dbg(hy_drv_priv->dev, + "event number = %d :", hy_drv_priv->pending); + + dev_dbg(hy_drv_priv->dev, + "generating events for {%d, %d, %d, %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h new file mode 100644 index 000000000000..50db04faf222 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h @@ -0,0 +1,38 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_EVENT_H__ +#define __HYPER_DMABUF_EVENT_H__ + +#define MAX_DEPTH_EVENT_QUEUE 32 + +enum hyper_dmabuf_event_type { + HYPER_DMABUF_NEW_IMPORT = 0x10000, +}; + +void hyper_dmabuf_events_release(void); + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid); + +#endif /* __HYPER_DMABUF_EVENT_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c new file mode 100644 index 000000000000..e67b84a7e64c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c @@ -0,0 +1,133 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_id.h" + +void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid) +{ + struct list_reusable_id *reusable_head = hy_drv_priv->id_queue; + struct list_reusable_id *new_reusable; + + new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL); + + if (!new_reusable) + return; + + new_reusable->hid = hid; + + list_add(&new_reusable->list, &reusable_head->list); +} + +static hyper_dmabuf_id_t get_reusable_hid(void) +{ + struct list_reusable_id *reusable_head = hy_drv_priv->id_queue; + hyper_dmabuf_id_t hid = {-1, {0, 0, 0} }; + + /* check there is reusable id */ + if (!list_empty(&reusable_head->list)) { + reusable_head = list_first_entry(&reusable_head->list, + struct list_reusable_id, + list); + + list_del(&reusable_head->list); + hid = reusable_head->hid; + kfree(reusable_head); + } + + return hid; +} + +void hyper_dmabuf_free_hid_list(void) +{ + struct list_reusable_id *reusable_head = hy_drv_priv->id_queue; + struct list_reusable_id *temp_head; + + if (reusable_head) { + /* freeing mem space all reusable ids in the stack */ + while (!list_empty(&reusable_head->list)) { + temp_head = list_first_entry(&reusable_head->list, + struct list_reusable_id, + list); + list_del(&temp_head->list); + kfree(temp_head); + } + + /* freeing head */ + kfree(reusable_head); + } +} + +hyper_dmabuf_id_t hyper_dmabuf_get_hid(void) +{ + static int count; + hyper_dmabuf_id_t hid; + struct list_reusable_id *reusable_head; + + /* first call to hyper_dmabuf_get_id */ + if (count == 0) { + reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL); + + if (!reusable_head) + return (hyper_dmabuf_id_t){-1, {0, 0, 0} }; + + /* list head has an invalid count */ + reusable_head->hid.id = -1; + INIT_LIST_HEAD(&reusable_head->list); + hy_drv_priv->id_queue = reusable_head; + } + + hid = get_reusable_hid(); + + /*creating a new H-ID only if nothing in the reusable id queue + * and count is less than maximum allowed + */ + if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX) + hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++); + + /* random data embedded in the id for security */ + get_random_bytes(&hid.rng_key[0], 12); + + return hid; +} + +bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2) +{ + int i; + + /* compare keys */ + for (i = 0; i < 3; i++) { + if (hid1.rng_key[i] != hid2.rng_key[i]) + return false; + } + + return true; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h new file mode 100644 index 000000000000..ed690f3a478c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_ID_H__ +#define __HYPER_DMABUF_ID_H__ + +#define HYPER_DMABUF_ID_CREATE(domid, cnt) \ + ((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF)) + +#define HYPER_DMABUF_DOM_ID(hid) \ + (((hid.id) >> 24) & 0xFF) + +/* currently maximum number of buffers shared + * at any given moment is limited to 1000 + */ +#define HYPER_DMABUF_ID_MAX 1000 + +/* adding freed hid to the reusable list */ +void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid); + +/* freeing the reusasble list */ +void hyper_dmabuf_free_hid_list(void); + +/* getting a hid available to use. */ +hyper_dmabuf_id_t hyper_dmabuf_get_hid(void); + +/* comparing two different hid */ +bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2); + +#endif /*__HYPER_DMABUF_ID_H*/ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c new file mode 100644 index 000000000000..20274e1b9e20 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c @@ -0,0 +1,789 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
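hyper_dmabuf_id.h above packs the exporting domain into the top byte of the 32-bit id and a running count into the low 24 bits; HYPER_DMABUF_ID_MAX additionally limits concurrent exports to 1000, and three random 32-bit keys make ids hard to guess across domains. A standalone demonstration of the packing macros (the two macros are copied from the header; the wrapper program and the local hyper_dmabuf_id_t mirror exist only for illustration):

#include <stdio.h>

/* Packing macros as defined in hyper_dmabuf_id.h */
#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
	((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
#define HYPER_DMABUF_DOM_ID(hid) \
	(((hid.id) >> 24) & 0xFF)

/* Minimal local stand-in for hyper_dmabuf_id_t */
typedef struct { int id; int rng_key[3]; } hyper_dmabuf_id_t;

int main(void)
{
	hyper_dmabuf_id_t hid = { .id = HYPER_DMABUF_ID_CREATE(2, 57) };

	/* Prints 0x02000039: domain 2 in the top byte, count 57 below it */
	printf("id=0x%08x dom=%d cnt=%d\n",
	       hid.id, HYPER_DMABUF_DOM_ID(hid), hid.id & 0xFFFFFF);
	return 0;
}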
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_query.h" + +static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data; + + if (bknd_ops->init_tx_ch) { + ret = bknd_ops->init_tx_ch(tx_ch_attr->remote_domain); + } + + return ret; +} + +static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + + rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data; + + if (bknd_ops->init_rx_ch) + ret = bknd_ops->init_rx_ch(rx_ch_attr->source_domain); + + return ret; +} + +static int send_export_msg(struct exported_sgt_info *exported, + struct pages_info *pg_info) +{ + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct hyper_dmabuf_req *req; + int op[MAX_NUMBER_OF_OPERANDS] = {0}; + int ret, i; + + /* now create request for importer via ring */ + op[0] = exported->hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = exported->hid.rng_key[i]; + + if (pg_info) { + op[4] = pg_info->nents; + op[5] = pg_info->frst_ofst; + op[6] = pg_info->last_len; + op[7] = bknd_ops->share_pages(pg_info->pgs, exported->rdomid, + pg_info->nents, &exported->refs_info); + if (op[7] < 0) { + dev_err(hy_drv_priv->dev, "pages sharing failed\n"); + return op[7]; + } + } + + op[8] = exported->sz_priv; + + /* driver/application specific private info */ + memcpy(&op[9], exported->priv, op[8]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + /* composing a message to the importer */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]); + + ret = bknd_ops->send_req(exported->rdomid, req, true); + + kfree(req); + + return ret; +} + +/* Fast path exporting routine in case same buffer is already exported. + * In this function, we skip normal exporting process and just update + * private data on both VMs (importer and exporter) + * + * return '1' if reexport is needed, return '0' if succeeds, return + * Kernel error code if something goes wrong + */ +static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv) +{ + int reexport = 1; + int ret = 0; + struct exported_sgt_info *exported; + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) + return reexport; + + if (exported->valid == false) + return reexport; + + /* + * Check if unexport is already scheduled for that buffer, + * if so try to cancel it. If that will fail, buffer needs + * to be reexport once again. + */ + if (exported->unexport_sched) { + if (!cancel_delayed_work_sync(&exported->unexport)) + return reexport; + + exported->unexport_sched = false; + } + + /* if there's any change in size of private data. 
+ * we reallocate space for private data with new size + */ + if (sz_priv != exported->sz_priv) { + kfree(exported->priv); + + /* truncating size */ + if (sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = sz_priv; + + exported->priv = kcalloc(1, exported->sz_priv, + GFP_KERNEL); + + if (!exported->priv) { + hyper_dmabuf_remove_exported(exported->hid); + hyper_dmabuf_cleanup_sgt_info(exported, true); + kfree(exported); + return -ENOMEM; + } + } + + /* update private data in sgt_info with new ones */ + ret = copy_from_user(exported->priv, priv, exported->sz_priv); + if (ret) { + dev_err(hy_drv_priv->dev, + "Failed to load a new private data\n"); + ret = -EINVAL; + } else { + /* send an export msg for updating priv in importer */ + ret = send_export_msg(exported, NULL); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to send a new private data\n"); + ret = -EBUSY; + } + } + + return ret; +} + +static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_remote *export_remote_attr = + (struct ioctl_hyper_dmabuf_export_remote *)data; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct pages_info *pg_info; + struct exported_sgt_info *exported; + hyper_dmabuf_id_t hid; + int ret = 0; + + if (hy_drv_priv->domid == export_remote_attr->remote_domain) { + dev_err(hy_drv_priv->dev, + "exporting to the same VM is not permitted\n"); + return -EINVAL; + } + + dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd); + + if (IS_ERR(dma_buf)) { + dev_err(hy_drv_priv->dev, "Cannot get dma buf\n"); + return PTR_ERR(dma_buf); + } + + /* we check if this specific attachment was already exported + * to the same domain and if yes and it's valid sgt_info, + * it returns hyper_dmabuf_id of pre-exported sgt_info + */ + hid = hyper_dmabuf_find_hid_exported(dma_buf, + export_remote_attr->remote_domain); + + if (hid.id != -1) { + ret = fastpath_export(hid, export_remote_attr->sz_priv, + export_remote_attr->priv); + + /* return if fastpath_export succeeds or + * gets some fatal error + */ + if (ret <= 0) { + dma_buf_put(dma_buf); + export_remote_attr->hid = hid; + return ret; + } + } + + attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev); + if (IS_ERR(attachment)) { + dev_err(hy_drv_priv->dev, "cannot get attachment\n"); + ret = PTR_ERR(attachment); + goto fail_attach; + } + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + + if (IS_ERR(sgt)) { + dev_err(hy_drv_priv->dev, "cannot map attachment\n"); + ret = PTR_ERR(sgt); + goto fail_map_attachment; + } + + exported = kcalloc(1, sizeof(*exported), GFP_KERNEL); + + if (!exported) { + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + /* possible truncation */ + if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = export_remote_attr->sz_priv; + + /* creating buffer for private data of buffer */ + if (exported->sz_priv != 0) { + exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL); + + if (!exported->priv) { + ret = -ENOMEM; + goto fail_priv_creation; + } + } else { + dev_err(hy_drv_priv->dev, "size is 0\n"); + } + + exported->hid = hyper_dmabuf_get_hid(); + + /* no more exported dmabuf allowed */ + if (exported->hid.id == -1) { + dev_err(hy_drv_priv->dev, + "exceeds allowed number of dmabuf to be exported\n"); + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + exported->rdomid = export_remote_attr->remote_domain; + 
exported->dma_buf = dma_buf; + exported->valid = true; + + exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL); + if (!exported->active_sgts) { + ret = -ENOMEM; + goto fail_map_active_sgts; + } + + exported->active_attached = kmalloc(sizeof(struct attachment_list), + GFP_KERNEL); + if (!exported->active_attached) { + ret = -ENOMEM; + goto fail_map_active_attached; + } + + exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_kmapped) { + ret = -ENOMEM; + goto fail_map_va_kmapped; + } + + exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_vmapped) { + ret = -ENOMEM; + goto fail_map_va_vmapped; + } + + exported->active_sgts->sgt = sgt; + exported->active_attached->attach = attachment; + exported->va_kmapped->vaddr = NULL; + exported->va_vmapped->vaddr = NULL; + + /* initialize list of sgt, attachment and vaddr for dmabuf sync + * via shadow dma-buf + */ + INIT_LIST_HEAD(&exported->active_sgts->list); + INIT_LIST_HEAD(&exported->active_attached->list); + INIT_LIST_HEAD(&exported->va_kmapped->list); + INIT_LIST_HEAD(&exported->va_vmapped->list); + + /* copy private data to sgt_info */ + ret = copy_from_user(exported->priv, export_remote_attr->priv, + exported->sz_priv); + + if (ret) { + dev_err(hy_drv_priv->dev, + "failed to load private data\n"); + ret = -EINVAL; + goto fail_export; + } + + pg_info = hyper_dmabuf_ext_pgs(sgt); + if (!pg_info) { + dev_err(hy_drv_priv->dev, + "failed to construct pg_info\n"); + ret = -ENOMEM; + goto fail_export; + } + + exported->nents = pg_info->nents; + + /* now register it to export list */ + hyper_dmabuf_register_exported(exported); + + export_remote_attr->hid = exported->hid; + + ret = send_export_msg(exported, pg_info); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to send out the export request\n"); + goto fail_send_request; + } + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + + exported->filp = filp; + + return ret; + +/* Clean-up if error occurs */ + +fail_send_request: + hyper_dmabuf_remove_exported(exported->hid); + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + +fail_export: + kfree(exported->va_vmapped); + +fail_map_va_vmapped: + kfree(exported->va_kmapped); + +fail_map_va_kmapped: + kfree(exported->active_attached); + +fail_map_active_attached: + kfree(exported->active_sgts); + kfree(exported->priv); + +fail_priv_creation: + kfree(exported); + +fail_map_active_sgts: +fail_sgt_info_creation: + dma_buf_unmap_attachment(attachment, sgt, + DMA_BIDIRECTIONAL); + +fail_map_attachment: + dma_buf_detach(dma_buf, attachment); + +fail_attach: + dma_buf_put(dma_buf); + + return ret; +} + +static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_fd *export_fd_attr = + (struct ioctl_hyper_dmabuf_export_fd *)data; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct imported_sgt_info *imported; + struct hyper_dmabuf_req *req; + struct page **data_pgs; + int op[4]; + int i; + int ret = 0; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* look for dmabuf for the id */ + imported = hyper_dmabuf_find_imported(export_fd_attr->hid); + + /* can't find sgt from the table */ + if (!imported) { + dev_err(hy_drv_priv->dev, "can't find the entry\n"); + return -ENOENT; + } + + mutex_lock(&hy_drv_priv->lock); + + imported->importers++; + + /* send notification for export_fd to exporter */ + op[0] = imported->hid.id; + + for (i = 0; i < 3; 
i++) + op[i+1] = imported->hid.rng_key[i]; + + dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]); + + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true); + + if (ret < 0) { + /* in case of timeout other end eventually will receive request, + * so we need to undo it + */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false); + kfree(req); + dev_err(hy_drv_priv->dev, + "Failed to create sgt or notify exporter\n"); + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return ret; + } + + kfree(req); + + if (ret == HYPER_DMABUF_REQ_ERROR) { + dev_err(hy_drv_priv->dev, + "Buffer invalid {id:%d key:%d %d %d}, cannot import\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + ret = 0; + + dev_dbg(hy_drv_priv->dev, + "Found buffer gref %d off %d\n", + imported->ref_handle, imported->frst_ofst); + + dev_dbg(hy_drv_priv->dev, + "last len %d nents %d domain %d\n", + imported->last_len, imported->nents, + HYPER_DMABUF_DOM_ID(imported->hid)); + + if (!imported->sgt) { + dev_dbg(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} pages not mapped yet\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + data_pgs = bknd_ops->map_shared_pages(imported->ref_handle, + HYPER_DMABUF_DOM_ID(imported->hid), + imported->nents, + &imported->refs_info); + + if (!data_pgs) { + dev_err(hy_drv_priv->dev, + "can't map pages hid {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], + imported->hid.rng_key[2]); + + imported->importers--; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, + HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, + false); + kfree(req); + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + imported->sgt = hyper_dmabuf_create_sgt(data_pgs, + imported->frst_ofst, + imported->last_len, + imported->nents); + + } + + export_fd_attr->fd = hyper_dmabuf_export_fd(imported, + export_fd_attr->flags); + + if (export_fd_attr->fd < 0) { + /* fail to get fd */ + ret = export_fd_attr->fd; + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return ret; +} + +/* unexport dmabuf from the database and send int req to the source domain + * to unmap it. 
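+ * The buffer is first marked invalid locally, then a + * HYPER_DMABUF_NOTIFY_UNEXPORT request is sent to the importing domain. + * If no importer currently holds an fd for the buffer, the exported entry + * is torn down right away; otherwise the final cleanup happens when the + * last importer fd is released (HYPER_DMABUF_OPS_RELEASE).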
+ */ +static void delayed_unexport(struct work_struct *work) +{ + struct hyper_dmabuf_req *req; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct exported_sgt_info *exported = + container_of(work, struct exported_sgt_info, unexport.work); + int op[4]; + int i, ret; + + if (!exported) + return; + + dev_dbg(hy_drv_priv->dev, + "Marking buffer {id:%d key:%d %d %d} as invalid\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + /* no longer valid */ + exported->valid = false; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return; + + op[0] = exported->hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = exported->hid.rng_key[i]; + + hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]); + + /* Now send unexport request to remote domain, marking + * that buffer should not be used anymore + */ + ret = bknd_ops->send_req(exported->rdomid, req, true); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "unexport message for buffer {id:%d key:%d %d %d} failed\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + } + + kfree(req); + exported->unexport_sched = false; + + /* Immediately clean up if no importer currently holds an fd for it + * (so no SGT was constructed on the importer side). + * Otherwise it is cleaned up later in remote sync when the final + * release op is called (the importer does this only when there is no + * consumer of locally exported FDs). + */ + if (exported->active == 0) { + dev_dbg(hy_drv_priv->dev, + "cleaning up buffer {id:%d key:%d %d %d} completely\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + hyper_dmabuf_cleanup_sgt_info(exported, false); + hyper_dmabuf_remove_exported(exported->hid); + + /* register hyper_dmabuf_id to the list for reuse */ + hyper_dmabuf_store_hid(exported->hid); + + if (exported->sz_priv > 0 && exported->priv) + kfree(exported->priv); + + kfree(exported); + } +} + +/* Schedule unexport of dmabuf.
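+ * + * The actual unexport runs delay_ms milliseconds later in delayed_unexport(); + * calling this ioctl again while a work item is already pending is a no-op. + * A minimal sketch of the userspace side (illustrative only; dev_fd is + * assumed to be the opened hyper_dmabuf device node): + * + * struct ioctl_hyper_dmabuf_unexport arg; + * + * arg.hid = hid; (hid returned by IOCTL_HYPER_DMABUF_EXPORT_REMOTE) + * arg.delay_ms = 100; (grace period in ms before the remote unmap) + * ioctl(dev_fd, IOCTL_HYPER_DMABUF_UNEXPORT, &arg); + * + * If the buffer is not found in the export list, arg.status is set + * to -ENOENT.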
+ */ +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_unexport *unexport_attr = + (struct ioctl_hyper_dmabuf_unexport *)data; + struct exported_sgt_info *exported; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* find dmabuf in export list */ + exported = hyper_dmabuf_find_exported(unexport_attr->hid); + + dev_dbg(hy_drv_priv->dev, + "scheduling unexport of buffer {id:%d key:%d %d %d}\n", + unexport_attr->hid.id, unexport_attr->hid.rng_key[0], + unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]); + + /* failed to find corresponding entry in export list */ + if (exported == NULL) { + unexport_attr->status = -ENOENT; + return -ENOENT; + } + + if (exported->unexport_sched) + return 0; + + exported->unexport_sched = true; + INIT_DELAYED_WORK(&exported->unexport, delayed_unexport); + schedule_delayed_work(&exported->unexport, + msecs_to_jiffies(unexport_attr->delay_ms)); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +static int hyper_dmabuf_query_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_query *query_attr = + (struct ioctl_hyper_dmabuf_query *)data; + struct exported_sgt_info *exported = NULL; + struct imported_sgt_info *imported = NULL; + int ret = 0; + + if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) { + /* query for exported dmabuf */ + exported = hyper_dmabuf_find_exported(query_attr->hid); + if (exported) { + ret = hyper_dmabuf_query_exported(exported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in exp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } else { + /* query for imported dmabuf */ + imported = hyper_dmabuf_find_imported(query_attr->hid); + if (imported) { + ret = hyper_dmabuf_query_imported(imported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in imp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } + + return ret; +} + +const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = { + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, + hyper_dmabuf_tx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, + hyper_dmabuf_rx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, + hyper_dmabuf_export_remote_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, + hyper_dmabuf_export_fd_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, + hyper_dmabuf_unexport_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, + hyper_dmabuf_query_ioctl, 0), +}; + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param) +{ + const struct hyper_dmabuf_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + int ret; + hyper_dmabuf_ioctl_t func; + char *kdata; + + if (nr > ARRAY_SIZE(hyper_dmabuf_ioctls)) { + dev_err(hy_drv_priv->dev, "invalid ioctl\n"); + return -EINVAL; + } + + ioctl = &hyper_dmabuf_ioctls[nr]; + + func = ioctl->func; + + if (unlikely(!func)) { + dev_err(hy_drv_priv->dev, "no function\n"); + return -EINVAL; + } + + kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (!kdata) + return -ENOMEM; + + if (copy_from_user(kdata, (void __user *)param, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + 
"failed to copy from user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + + ret = func(filp, kdata); + + if (copy_to_user((void __user *)param, kdata, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + "failed to copy to user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + +ioctl_error: + kfree(kdata); + + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h new file mode 100644 index 000000000000..5991a87b194f --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h @@ -0,0 +1,50 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_IOCTL_H__ +#define __HYPER_DMABUF_IOCTL_H__ + +typedef int (*hyper_dmabuf_ioctl_t)(struct file *filp, void *data); + +struct hyper_dmabuf_ioctl_desc { + unsigned int cmd; + int flags; + hyper_dmabuf_ioctl_t func; + const char *name; +}; + +#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags) \ + [_IOC_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param); + +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data); + +#endif //__HYPER_DMABUF_IOCTL_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c new file mode 100644 index 000000000000..bba6d1d607a8 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c @@ -0,0 +1,293 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_event.h" + +DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED); +DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED); + +#ifdef CONFIG_HYPER_DMABUF_SYSFS +static ssize_t hyper_dmabuf_imported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_imported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->imported->hid; + int nents = info_entry->imported->nents; + bool valid = info_entry->imported->valid; + int num_importers = info_entry->imported->importers; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, numi:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 't' : 'f'), + num_importers); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static ssize_t hyper_dmabuf_exported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_exported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->exported->hid; + int nents = info_entry->exported->nents; + bool valid = info_entry->exported->valid; + int importer_exported = info_entry->exported->active; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, ie:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 
't' : 'f'), + importer_exported); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static DEVICE_ATTR(imported, 0400, hyper_dmabuf_imported_show, NULL); +static DEVICE_ATTR(exported, 0400, hyper_dmabuf_exported_show, NULL); + +int hyper_dmabuf_register_sysfs(struct device *dev) +{ + int err; + + err = device_create_file(dev, &dev_attr_imported); + if (err < 0) + goto err1; + err = device_create_file(dev, &dev_attr_exported); + if (err < 0) + goto err2; + + return 0; +err2: + device_remove_file(dev, &dev_attr_imported); +err1: + return -1; +} + +int hyper_dmabuf_unregister_sysfs(struct device *dev) +{ + device_remove_file(dev, &dev_attr_imported); + device_remove_file(dev, &dev_attr_exported); + return 0; +} + +#endif + +int hyper_dmabuf_table_init(void) +{ + hash_init(hyper_dmabuf_hash_imported); + hash_init(hyper_dmabuf_hash_exported); + return 0; +} + +int hyper_dmabuf_table_destroy(void) +{ + /* TODO: cleanup hyper_dmabuf_hash_imported + * and hyper_dmabuf_hash_exported + */ + return 0; +} + +int hyper_dmabuf_register_exported(struct exported_sgt_info *exported) +{ + struct list_entry_exported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->exported = exported; + + hash_add(hyper_dmabuf_hash_exported, &info_entry->node, + info_entry->exported->hid.id); + + return 0; +} + +int hyper_dmabuf_register_imported(struct imported_sgt_info *imported) +{ + struct list_entry_imported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->imported = imported; + + hash_add(hyper_dmabuf_hash_imported, &info_entry->node, + info_entry->imported->hid.id); + + return 0; +} + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) + return info_entry->exported; + + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid) +{ + struct list_entry_exported *info_entry; + hyper_dmabuf_id_t hid = {-1, {0, 0, 0} }; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + if (info_entry->exported->dma_buf == dmabuf && + info_entry->exported->rdomid == domid) + return info_entry->exported->hid; + + return hid; +} + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) + return info_entry->imported; + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id 
== hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +void hyper_dmabuf_foreach_exported( + void (*func)(struct exported_sgt_info *, void *attr), + void *attr) +{ + struct list_entry_exported *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp, + info_entry, node) { + func(info_entry->exported, attr); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h new file mode 100644 index 000000000000..f7102f5db75d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h @@ -0,0 +1,71 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_LIST_H__ +#define __HYPER_DMABUF_LIST_H__ + +#include "hyper_dmabuf_struct.h" + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_EXPORTED 7 +/* number of bits to be used for imported dmabufs hash table */ +#define MAX_ENTRY_IMPORTED 7 + +struct list_entry_exported { + struct exported_sgt_info *exported; + struct hlist_node node; +}; + +struct list_entry_imported { + struct imported_sgt_info *imported; + struct hlist_node node; +}; + +int hyper_dmabuf_table_init(void); + +int hyper_dmabuf_table_destroy(void); + +int hyper_dmabuf_register_exported(struct exported_sgt_info *info); + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid); + +int hyper_dmabuf_register_imported(struct imported_sgt_info *info); + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid); + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid); + +void hyper_dmabuf_foreach_exported(void (*func)(struct exported_sgt_info *, + void *attr), void *attr); + +int hyper_dmabuf_register_sysfs(struct device *dev); +int hyper_dmabuf_unregister_sysfs(struct device *dev); + +#endif /* __HYPER_DMABUF_LIST_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c new file mode 100644 index 000000000000..37ee894ec418 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c @@ -0,0 +1,413 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_remote_sync.h" +#include "hyper_dmabuf_event.h" +#include "hyper_dmabuf_list.h" + +struct cmd_process { + struct work_struct work; + struct hyper_dmabuf_req *rq; + int domid; +}; + +void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req, + enum hyper_dmabuf_command cmd, int *op) +{ + int i; + + req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED; + req->cmd = cmd; + + switch (cmd) { + /* as exporter, commands to importer */ + case HYPER_DMABUF_EXPORT: + /* exporting pages for dmabuf */ + /* command : HYPER_DMABUF_EXPORT, + * op0~op3 : hyper_dmabuf_id + * op4 : number of pages to be shared + * op5 : offset of data in the first page + * op6 : length of data in the last page + * op7 : top-level reference number for shared pages + * op8 : size of private data (from op9) + * op9 ~ : Driver-specific private data + * (e.g. graphic buffer's meta info) + */ + + memcpy(&req->op[0], &op[0], 9 * sizeof(int) + op[8]); + break; + + case HYPER_DMABUF_NOTIFY_UNEXPORT: + /* destroy sg_list for hyper_dmabuf_id on remote side */ + /* command : DMABUF_DESTROY, + * op0~op3 : hyper_dmabuf_id_t hid + */ + + for (i = 0; i < 4; i++) + req->op[i] = op[i]; + break; + + case HYPER_DMABUF_EXPORT_FD: + case HYPER_DMABUF_EXPORT_FD_FAILED: + /* dmabuf fd is being created on imported side or importing + * failed + * + * command : HYPER_DMABUF_EXPORT_FD or + * HYPER_DMABUF_EXPORT_FD_FAILED, + * op0~op3 : hyper_dmabuf_id + */ + + for (i = 0; i < 4; i++) + req->op[i] = op[i]; + break; + + case HYPER_DMABUF_OPS_TO_REMOTE: + /* notifying dmabuf map/unmap to importer (probably not needed) + * for dmabuf synchronization + */ + break; + + case HYPER_DMABUF_OPS_TO_SOURCE: + /* notifying dmabuf map/unmap to exporter, map will make + * the driver to do shadow mapping or unmapping for + * synchronization with original exporter (e.g. i915) + * + * command : DMABUF_OPS_TO_SOURCE. + * op0~3 : hyper_dmabuf_id + * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4) + */ + for (i = 0; i < 5; i++) + req->op[i] = op[i]; + break; + + default: + /* no command found */ + return; + } +} + +static void cmd_process_work(struct work_struct *work) +{ + struct imported_sgt_info *imported; + struct cmd_process *proc = container_of(work, + struct cmd_process, work); + struct hyper_dmabuf_req *req; + hyper_dmabuf_id_t hid; + int domid; + int i; + + req = proc->rq; + domid = proc->domid; + + switch (req->cmd) { + case HYPER_DMABUF_EXPORT: + /* exporting pages for dmabuf */ + /* command : HYPER_DMABUF_EXPORT, + * op0~op3 : hyper_dmabuf_id + * op4 : number of pages to be shared + * op5 : offset of data in the first page + * op6 : length of data in the last page + * op7 : top-level reference number for shared pages + * op8 : size of private data (from op9) + * op9 ~ : Driver-specific private data + * (e.g. graphic buffer's meta info) + */ + + /* if nents == 0, it means it is a message only for + * priv synchronization. for existing imported_sgt_info + * so not creating a new one + */ + if (req->op[4] == 0) { + hyper_dmabuf_id_t exist = {req->op[0], + {req->op[1], req->op[2], + req->op[3] } }; + + imported = hyper_dmabuf_find_imported(exist); + + if (!imported) { + dev_err(hy_drv_priv->dev, + "Can't find imported sgt_info\n"); + break; + } + + /* if size of new private data is different, + * we reallocate it. 
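+ * (op[8] is the private-data size and op[9] onwards the payload, exactly + * as laid out by hyper_dmabuf_create_req() for HYPER_DMABUF_EXPORT.)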
+ */ + if (imported->sz_priv != req->op[8]) { + kfree(imported->priv); + imported->sz_priv = req->op[8]; + imported->priv = kcalloc(1, req->op[8], + GFP_KERNEL); + if (!imported->priv) { + /* set it invalid */ + imported->valid = 0; + break; + } + } + + /* updating priv data */ + memcpy(imported->priv, &req->op[9], req->op[8]); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + } + + imported = kcalloc(1, sizeof(*imported), GFP_KERNEL); + + if (!imported) + break; + + imported->sz_priv = req->op[8]; + imported->priv = kcalloc(1, req->op[8], GFP_KERNEL); + + if (!imported->priv) { + kfree(imported); + break; + } + + imported->hid.id = req->op[0]; + + for (i = 0; i < 3; i++) + imported->hid.rng_key[i] = req->op[i+1]; + + imported->nents = req->op[4]; + imported->frst_ofst = req->op[5]; + imported->last_len = req->op[6]; + imported->ref_handle = req->op[7]; + + dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n"); + dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n", + req->op[0], req->op[1], req->op[2], + req->op[3]); + dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]); + dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]); + dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]); + dev_dbg(hy_drv_priv->dev, "\tgrefid %d\n", req->op[7]); + + memcpy(imported->priv, &req->op[9], req->op[8]); + + imported->valid = true; + hyper_dmabuf_register_imported(imported); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + + case HYPER_DMABUF_OPS_TO_SOURCE: + /* notifying dmabuf map/unmap to exporter, map will + * make the driver to do shadow mapping + * or unmapping for synchronization with original + * exporter (e.g. i915) + * + * command : DMABUF_OPS_TO_SOURCE. 
+ * op0~3 : hyper_dmabuf_id + * op1 : enum hyper_dmabuf_ops {....} + */ + dev_dbg(hy_drv_priv->dev, + "%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__); + + hid.id = req->op[0]; + hid.rng_key[0] = req->op[1]; + hid.rng_key[1] = req->op[2]; + hid.rng_key[2] = req->op[3]; + hyper_dmabuf_remote_sync(hid, req->op[4]); + + break; + + + case HYPER_DMABUF_OPS_TO_REMOTE: + /* notifying dmabuf map/unmap to importer + * (probably not needed) for dmabuf synchronization + */ + break; + + default: + /* shouldn't get here */ + break; + } + + kfree(req); + kfree(proc); +} + +int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req) +{ + struct cmd_process *proc; + struct hyper_dmabuf_req *temp_req; + struct imported_sgt_info *imported; + struct exported_sgt_info *exported; + hyper_dmabuf_id_t hid; + + if (!req) { + dev_err(hy_drv_priv->dev, "request is NULL\n"); + return -EINVAL; + } + + hid.id = req->op[0]; + hid.rng_key[0] = req->op[1]; + hid.rng_key[1] = req->op[2]; + hid.rng_key[2] = req->op[3]; + + if ((req->cmd < HYPER_DMABUF_EXPORT) || + (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) { + dev_err(hy_drv_priv->dev, "invalid command\n"); + return -EINVAL; + } + + req->stat = HYPER_DMABUF_REQ_PROCESSED; + + /* HYPER_DMABUF_DESTROY requires immediate + * follow up so can't be processed in workqueue + */ + if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) { + /* destroy sg_list for hyper_dmabuf_id on remote side */ + /* command : HYPER_DMABUF_NOTIFY_UNEXPORT, + * op0~3 : hyper_dmabuf_id + */ + dev_dbg(hy_drv_priv->dev, + "processing HYPER_DMABUF_NOTIFY_UNEXPORT\n"); + + imported = hyper_dmabuf_find_imported(hid); + + if (imported) { + /* if anything is still using dma_buf */ + if (imported->importers) { + /* Buffer is still in use, just mark that + * it should not be allowed to export its fd + * anymore. 
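+ * The entry itself is freed later, from hyper_dmabuf_ops_release(), + * once the last local user closes the imported fd.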
+ */ + imported->valid = false; + } else { + /* No one is using buffer, remove it from + * imported list + */ + hyper_dmabuf_remove_imported(hid); + kfree(imported->priv); + kfree(imported); + } + } else { + req->stat = HYPER_DMABUF_REQ_ERROR; + } + + return req->cmd; + } + + /* synchronous dma_buf_fd export */ + if (req->cmd == HYPER_DMABUF_EXPORT_FD) { + /* find a corresponding SGT for the id */ + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else if (!exported->valid) { + dev_dbg(hy_drv_priv->dev, + "Buffer no longer valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + dev_dbg(hy_drv_priv->dev, + "Buffer still valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + exported->active++; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) { + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + exported->active--; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + dev_dbg(hy_drv_priv->dev, + "%s: putting request to workqueue\n", __func__); + temp_req = kmalloc(sizeof(*temp_req), GFP_ATOMIC); + + if (!temp_req) + return -ENOMEM; + + memcpy(temp_req, req, sizeof(*temp_req)); + + proc = kcalloc(1, sizeof(struct cmd_process), GFP_ATOMIC); + + if (!proc) { + kfree(temp_req); + return -ENOMEM; + } + + proc->rq = temp_req; + proc->domid = domid; + + INIT_WORK(&(proc->work), cmd_process_work); + + queue_work(hy_drv_priv->work_queue, &(proc->work)); + + return req->cmd; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h new file mode 100644 index 000000000000..9c8a76bf261e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_MSG_H__ +#define __HYPER_DMABUF_MSG_H__ + +#define MAX_NUMBER_OF_OPERANDS 64 + +struct hyper_dmabuf_req { + unsigned int req_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +struct hyper_dmabuf_resp { + unsigned int resp_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +enum hyper_dmabuf_command { + HYPER_DMABUF_EXPORT = 0x10, + HYPER_DMABUF_EXPORT_FD, + HYPER_DMABUF_EXPORT_FD_FAILED, + HYPER_DMABUF_NOTIFY_UNEXPORT, + HYPER_DMABUF_OPS_TO_REMOTE, + HYPER_DMABUF_OPS_TO_SOURCE, +}; + +enum hyper_dmabuf_ops { + HYPER_DMABUF_OPS_ATTACH = 0x1000, + HYPER_DMABUF_OPS_DETACH, + HYPER_DMABUF_OPS_MAP, + HYPER_DMABUF_OPS_UNMAP, + HYPER_DMABUF_OPS_RELEASE, + HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS, + HYPER_DMABUF_OPS_END_CPU_ACCESS, + HYPER_DMABUF_OPS_KMAP_ATOMIC, + HYPER_DMABUF_OPS_KUNMAP_ATOMIC, + HYPER_DMABUF_OPS_KMAP, + HYPER_DMABUF_OPS_KUNMAP, + HYPER_DMABUF_OPS_MMAP, + HYPER_DMABUF_OPS_VMAP, + HYPER_DMABUF_OPS_VUNMAP, +}; + +enum hyper_dmabuf_req_feedback { + HYPER_DMABUF_REQ_PROCESSED = 0x100, + HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP, + HYPER_DMABUF_REQ_ERROR, + HYPER_DMABUF_REQ_NOT_RESPONDED +}; + +/* create a request packet with given command and operands */ +void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req, + enum hyper_dmabuf_command command, + int *operands); + +/* parse incoming request packet (or response) and take + * appropriate actions for those + */ +int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req); + +#endif // __HYPER_DMABUF_MSG_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c new file mode 100644 index 000000000000..915743741897 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c @@ -0,0 +1,414 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_list.h" + +#define WAIT_AFTER_SYNC_REQ 0 +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +static int dmabuf_refcount(struct dma_buf *dma_buf) +{ + if ((dma_buf != NULL) && (dma_buf->file != NULL)) + return file_count(dma_buf->file); + + return -EINVAL; +} + +static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops) +{ + struct hyper_dmabuf_req *req; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int op[5]; + int i; + int ret; + + op[0] = hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = hid.rng_key[i]; + + op[4] = dmabuf_ops; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]); + + /* send request and wait for a response */ + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, + WAIT_AFTER_SYNC_REQ); + + if (ret < 0) { + dev_dbg(hy_drv_priv->dev, + "dmabuf sync request failed:%d\n", req->op[4]); + } + + kfree(req); + + return ret; +} + +static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf, + struct device *dev, + struct dma_buf_attachment *attach) +{ + struct imported_sgt_info *imported; + int ret; + + if (!attach->dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)attach->dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_ATTACH); + + return ret; +} + +static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct imported_sgt_info *imported; + int ret; + + if (!attach->dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)attach->dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_DETACH); +} + +static struct sg_table *hyper_dmabuf_ops_map( + struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct sg_table *st; + struct imported_sgt_info *imported; + struct pages_info *pg_info; + int ret; + + if (!attachment->dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)attachment->dmabuf->priv; + + /* extract pages from sgt */ + pg_info = hyper_dmabuf_ext_pgs(imported->sgt); + + if (!pg_info) + return NULL; + + /* create a new sg_table with extracted pages */ + st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst, + pg_info->last_len, pg_info->nents); + if (!st) + goto err_free_sg; + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) + goto err_free_sg; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MAP); + + kfree(pg_info->pgs); + kfree(pg_info); + + return st; + +err_free_sg: + if (st) { + sg_free_table(st); + kfree(st); + } + + kfree(pg_info->pgs); + kfree(pg_info); + + return NULL; +} + +static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment, + struct sg_table *sg, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + int ret; + + if (!attachment->dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)attachment->dmabuf->priv; + + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); + + sg_free_table(sg); + kfree(sg); + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_UNMAP); +} + +static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf) +{ + struct imported_sgt_info *imported; + struct 
hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret; + int finish; + + if (!dma_buf->priv) + return; + + imported = (struct imported_sgt_info *)dma_buf->priv; + + if (!dmabuf_refcount(imported->dma_buf)) + imported->dma_buf = NULL; + + imported->importers--; + + if (imported->importers == 0) { + bknd_ops->unmap_shared_pages(&imported->refs_info, + imported->nents); + + if (imported->sgt) { + sg_free_table(imported->sgt); + kfree(imported->sgt); + imported->sgt = NULL; + } + } + + finish = imported && !imported->valid && + !imported->importers; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_RELEASE); + + /* + * Check if buffer is still valid and if not remove it + * from imported list. That has to be done after sending + * sync request + */ + if (finish) { + hyper_dmabuf_remove_imported(imported->hid); + kfree(imported->priv); + kfree(imported); + } +} + +static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS); + + return ret; +} + +static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_END_CPU_ACCESS); + + return 0; +} + +static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP_ATOMIC); + + /* TODO: NULL for now. Need to return the addr of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum, void *vaddr) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP_ATOMIC); +} + +static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP); + + /* for now NULL.. 
need to return the address of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, + void *vaddr) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP); +} + +static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MMAP); + + return ret; +} + +static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_VMAP); + + return NULL; +} + +static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_VUNMAP); +} + +static const struct dma_buf_ops hyper_dmabuf_ops = { + .attach = hyper_dmabuf_ops_attach, + .detach = hyper_dmabuf_ops_detach, + .map_dma_buf = hyper_dmabuf_ops_map, + .unmap_dma_buf = hyper_dmabuf_ops_unmap, + .release = hyper_dmabuf_ops_release, + .begin_cpu_access = (void *)hyper_dmabuf_ops_begin_cpu_access, + .end_cpu_access = (void *)hyper_dmabuf_ops_end_cpu_access, + .map_atomic = hyper_dmabuf_ops_kmap_atomic, + .unmap_atomic = hyper_dmabuf_ops_kunmap_atomic, + .map = hyper_dmabuf_ops_kmap, + .unmap = hyper_dmabuf_ops_kunmap, + .mmap = hyper_dmabuf_ops_mmap, + .vmap = hyper_dmabuf_ops_vmap, + .vunmap = hyper_dmabuf_ops_vunmap, +}; + +/* exporting dmabuf as fd */ +int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags) +{ + int fd = -1; + + /* call hyper_dmabuf_export_dmabuf and create + * and bind a handle for it then release + */ + hyper_dmabuf_export_dma_buf(imported); + + if (imported->dma_buf) + fd = dma_buf_fd(imported->dma_buf, flags); + + return fd; +} + +void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &hyper_dmabuf_ops; + + /* multiple of PAGE_SIZE, not considering offset */ + exp_info.size = imported->sgt->nents * PAGE_SIZE; + exp_info.flags = /* not sure about flag */ 0; + exp_info.priv = imported; + + imported->dma_buf = dma_buf_export(&exp_info); +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h new file mode 100644 index 000000000000..c5505a41f0fe --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h @@ -0,0 +1,32 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or 
substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_OPS_H__ +#define __HYPER_DMABUF_OPS_H__ + +int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags); + +void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported); + +#endif /* __HYPER_DMABUF_IMP_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c new file mode 100644 index 000000000000..1f2f56b1162d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c @@ -0,0 +1,172 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_id.h" + +#define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \ + ((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len)) + +int hyper_dmabuf_query_exported(struct exported_sgt_info *exported, + int query, unsigned long *info) +{ + switch (query) { + case HYPER_DMABUF_QUERY_TYPE: + *info = EXPORTED; + break; + + /* exporting domain of this specific dmabuf*/ + case HYPER_DMABUF_QUERY_EXPORTER: + *info = HYPER_DMABUF_DOM_ID(exported->hid); + break; + + /* importing domain of this specific dmabuf */ + case HYPER_DMABUF_QUERY_IMPORTER: + *info = exported->rdomid; + break; + + /* size of dmabuf in byte */ + case HYPER_DMABUF_QUERY_SIZE: + *info = exported->dma_buf->size; + break; + + /* whether the buffer is used by importer */ + case HYPER_DMABUF_QUERY_BUSY: + *info = (exported->active > 0); + break; + + /* whether the buffer is unexported */ + case HYPER_DMABUF_QUERY_UNEXPORTED: + *info = !exported->valid; + break; + + /* whether the buffer is scheduled to be unexported */ + case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED: + *info = !exported->unexport_sched; + break; + + /* size of private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE: + *info = exported->sz_priv; + break; + + /* copy private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO: + if (exported->sz_priv > 0) { + int n; + + n = copy_to_user((void __user *) *info, + exported->priv, + exported->sz_priv); + if (n != 0) + return -EINVAL; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + + +int hyper_dmabuf_query_imported(struct imported_sgt_info *imported, + int query, unsigned long *info) +{ + switch (query) { + case HYPER_DMABUF_QUERY_TYPE: + *info = IMPORTED; + break; + + /* exporting domain of this specific dmabuf*/ + case HYPER_DMABUF_QUERY_EXPORTER: + *info = HYPER_DMABUF_DOM_ID(imported->hid); + break; + + /* importing domain of this specific dmabuf */ + case HYPER_DMABUF_QUERY_IMPORTER: + *info = hy_drv_priv->domid; + break; + + /* size of dmabuf in byte */ + case HYPER_DMABUF_QUERY_SIZE: + if (imported->dma_buf) { + /* if local dma_buf is created (if it's + * ever mapped), retrieve it directly + * from struct dma_buf * + */ + *info = imported->dma_buf->size; + } else { + /* calcuate it from given nents, frst_ofst + * and last_len + */ + *info = HYPER_DMABUF_SIZE(imported->nents, + imported->frst_ofst, + imported->last_len); + } + break; + + /* whether the buffer is used or not */ + case HYPER_DMABUF_QUERY_BUSY: + /* checks if it's used by importer */ + *info = (imported->importers > 0); + break; + + /* whether the buffer is unexported */ + case HYPER_DMABUF_QUERY_UNEXPORTED: + *info = !imported->valid; + break; + + /* size of private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE: + *info = imported->sz_priv; + break; + + /* copy private info attached to buffer */ + case HYPER_DMABUF_QUERY_PRIV_INFO: + if (imported->sz_priv > 0) { + int n; + + n = copy_to_user((void __user *)*info, + imported->priv, + imported->sz_priv); + if (n != 0) + return -EINVAL; + } + break; + + default: + return -EINVAL; + } + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h new file mode 100644 index 000000000000..65ae738f8f53 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h @@ -0,0 +1,10 @@ +#ifndef 
__HYPER_DMABUF_QUERY_H__ +#define __HYPER_DMABUF_QUERY_H__ + +int hyper_dmabuf_query_imported(struct imported_sgt_info *imported, + int query, unsigned long *info); + +int hyper_dmabuf_query_exported(struct exported_sgt_info *exported, + int query, unsigned long *info); + +#endif // __HYPER_DMABUF_QUERY_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c new file mode 100644 index 000000000000..a82fd7b087b8 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c @@ -0,0 +1,322 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_sgl_proc.h" + +/* Whenever importer does dma operations from remote domain, + * a notification is sent to the exporter so that exporter + * issues equivalent dma operation on the original dma buf + * for indirect synchronization via shadow operations. + * + * All ptrs and references (e.g struct sg_table*, + * struct dma_buf_attachment) created via these operations on + * exporter's side are kept in stack (implemented as circular + * linked-lists) separately so that those can be re-referenced + * later when unmapping operations are invoked to free those. + * + * The very first element on the bottom of each stack holds + * is what is created when initial exporting is issued so it + * should not be modified or released by this fuction. 
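+ * + * For example, when a consumer of the imported dma_buf attaches and maps + * it, the exporter receives HYPER_DMABUF_OPS_ATTACH and then + * HYPER_DMABUF_OPS_MAP, and pushes the resulting dma_buf_attachment and + * sg_table onto these stacks; the matching OPS_UNMAP and OPS_DETACH later + * pop and release them in LIFO order.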
+ */ +int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops) +{ + struct exported_sgt_info *exported; + struct sgt_list *sgtl; + struct attachment_list *attachl; + struct kmap_vaddr_list *va_kmapl; + struct vmap_vaddr_list *va_vmapl; + int ret; + + /* find a coresponding SGT for the id */ + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "dmabuf remote sync::can't find exported list\n"); + return -ENOENT; + } + + switch (ops) { + case HYPER_DMABUF_OPS_ATTACH: + attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL); + + if (!attachl) + return -ENOMEM; + + attachl->attach = dma_buf_attach(exported->dma_buf, + hy_drv_priv->dev); + + if (!attachl->attach) { + kfree(attachl); + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_ATTACH\n"); + return -ENOMEM; + } + + list_add(&attachl->list, &exported->active_attached->list); + break; + + case HYPER_DMABUF_OPS_DETACH: + if (list_empty(&exported->active_attached->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_DETACH\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf attachment left to be detached\n"); + return -EFAULT; + } + + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + dma_buf_detach(exported->dma_buf, attachl->attach); + list_del(&attachl->list); + kfree(attachl); + break; + + case HYPER_DMABUF_OPS_MAP: + if (list_empty(&exported->active_attached->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_MAP\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf attachment left to be mapped\n"); + return -EFAULT; + } + + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL); + + if (!sgtl) + return -ENOMEM; + + sgtl->sgt = dma_buf_map_attachment(attachl->attach, + DMA_BIDIRECTIONAL); + if (!sgtl->sgt) { + kfree(sgtl); + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_MAP\n"); + return -ENOMEM; + } + list_add(&sgtl->list, &exported->active_sgts->list); + break; + + case HYPER_DMABUF_OPS_UNMAP: + if (list_empty(&exported->active_sgts->list) || + list_empty(&exported->active_attached->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_UNMAP\n"); + dev_err(hy_drv_priv->dev, + "no SGT or attach left to be unmapped\n"); + return -EFAULT; + } + + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + sgtl = list_first_entry(&exported->active_sgts->list, + struct sgt_list, list); + + dma_buf_unmap_attachment(attachl->attach, sgtl->sgt, + DMA_BIDIRECTIONAL); + list_del(&sgtl->list); + kfree(sgtl); + break; + + case HYPER_DMABUF_OPS_RELEASE: + dev_dbg(hy_drv_priv->dev, + "id:%d key:%d %d %d} released, ref left: %d\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2], + exported->active - 1); + + exported->active--; + + /* If there are still importers just break, if no then + * continue with final cleanup + */ + if (exported->active) + break; + + /* Importer just released buffer fd, check if there is + * any other importer still using it. + * If not and buffer was unexported, clean up shared + * data and remove that buffer. 
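+ * The cleanup below only runs once valid is false, the active count + * has dropped to zero and no delayed unexport is still pending.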
+ */ + dev_dbg(hy_drv_priv->dev, + "Buffer {id:%d key:%d %d %d} final released\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + if (!exported->valid && !exported->active && + !exported->unexport_sched) { + hyper_dmabuf_cleanup_sgt_info(exported, false); + hyper_dmabuf_remove_exported(hid); + kfree(exported); + /* store hyper_dmabuf_id in the list for reuse */ + hyper_dmabuf_store_hid(hid); + } + + break; + + case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS: + ret = dma_buf_begin_cpu_access(exported->dma_buf, + DMA_BIDIRECTIONAL); + if (ret) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n"); + return ret; + } + break; + + case HYPER_DMABUF_OPS_END_CPU_ACCESS: + ret = dma_buf_end_cpu_access(exported->dma_buf, + DMA_BIDIRECTIONAL); + if (ret) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_END_CPU_ACCESS\n"); + return ret; + } + break; + + case HYPER_DMABUF_OPS_KMAP_ATOMIC: + case HYPER_DMABUF_OPS_KMAP: + va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL); + if (!va_kmapl) + return -ENOMEM; + + /* dummy kmapping of 1 page */ + if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC) + va_kmapl->vaddr = dma_buf_kmap_atomic( + exported->dma_buf, 1); + else + va_kmapl->vaddr = dma_buf_kmap( + exported->dma_buf, 1); + + if (!va_kmapl->vaddr) { + kfree(va_kmapl); + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n"); + return -ENOMEM; + } + list_add(&va_kmapl->list, &exported->va_kmapped->list); + break; + + case HYPER_DMABUF_OPS_KUNMAP_ATOMIC: + case HYPER_DMABUF_OPS_KUNMAP: + if (list_empty(&exported->va_kmapped->list)) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf VA to be freed\n"); + return -EFAULT; + } + + va_kmapl = list_first_entry(&exported->va_kmapped->list, + struct kmap_vaddr_list, list); + if (!va_kmapl->vaddr) { + dev_err(hy_drv_priv->dev, + "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n"); + return PTR_ERR(va_kmapl->vaddr); + } + + /* unmapping 1 page */ + if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC) + dma_buf_kunmap_atomic(exported->dma_buf, + 1, va_kmapl->vaddr); + else + dma_buf_kunmap(exported->dma_buf, + 1, va_kmapl->vaddr); + + list_del(&va_kmapl->list); + kfree(va_kmapl); + break; + + case HYPER_DMABUF_OPS_MMAP: + /* currently not supported: looking for a way to create + * a dummy vma + */ + dev_warn(hy_drv_priv->dev, + "remote sync::sychronized mmap is not supported\n"); + break; + + case HYPER_DMABUF_OPS_VMAP: + va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL); + + if (!va_vmapl) + return -ENOMEM; + + /* dummy vmapping */ + va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf); + + if (!va_vmapl->vaddr) { + kfree(va_vmapl); + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_VMAP\n"); + return -ENOMEM; + } + list_add(&va_vmapl->list, &exported->va_vmapped->list); + break; + + case HYPER_DMABUF_OPS_VUNMAP: + if (list_empty(&exported->va_vmapped->list)) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_VUNMAP\n"); + dev_err(hy_drv_priv->dev, + "no more dmabuf VA to be freed\n"); + return -EFAULT; + } + va_vmapl = list_first_entry(&exported->va_vmapped->list, + struct vmap_vaddr_list, list); + if (!va_vmapl || va_vmapl->vaddr == NULL) { + dev_err(hy_drv_priv->dev, + "remote sync::HYPER_DMABUF_OPS_VUNMAP\n"); + return -EFAULT; + } + + dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr); + + list_del(&va_vmapl->list); + kfree(va_vmapl); + break; + + default: + /* program should not get here */ + break; + } + + return 0; +} diff 
--git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h new file mode 100644 index 000000000000..366389287f4e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h @@ -0,0 +1,30 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__ +#define __HYPER_DMABUF_REMOTE_SYNC_H__ + +int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops); + +#endif // __HYPER_DMABUF_REMOTE_SYNC_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c new file mode 100644 index 000000000000..c1887d1ad709 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c @@ -0,0 +1,261 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_sgl_proc.h" + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +/* return total number of pages referenced by a sgt + * for pre-calculation of # of pages behind a given sgt + */ +static int get_num_pgs(struct sg_table *sgt) +{ + struct scatterlist *sgl; + int length, i; + /* at least one page */ + int num_pages = 1; + + sgl = sgt->sgl; + + length = sgl->length - PAGE_SIZE + sgl->offset; + + /* round-up */ + num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE); + + for (i = 1; i < sgt->nents; i++) { + sgl = sg_next(sgl); + + /* round-up */ + num_pages += ((sgl->length + PAGE_SIZE - 1) / + PAGE_SIZE); /* round-up */ + } + + return num_pages; +} + +/* extract pages directly from struct sg_table */ +struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt) +{ + struct pages_info *pg_info; + int i, j, k; + int length; + struct scatterlist *sgl; + + pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL); + if (!pg_info) + return NULL; + + pg_info->pgs = kmalloc_array(get_num_pgs(sgt), + sizeof(struct page *), + GFP_KERNEL); + + if (!pg_info->pgs) { + kfree(pg_info); + return NULL; + } + + sgl = sgt->sgl; + + pg_info->nents = 1; + pg_info->frst_ofst = sgl->offset; + pg_info->pgs[0] = sg_page(sgl); + length = sgl->length - PAGE_SIZE + sgl->offset; + i = 1; + + while (length > 0) { + pg_info->pgs[i] = nth_page(sg_page(sgl), i); + length -= PAGE_SIZE; + pg_info->nents++; + i++; + } + + for (j = 1; j < sgt->nents; j++) { + sgl = sg_next(sgl); + pg_info->pgs[i++] = sg_page(sgl); + length = sgl->length - PAGE_SIZE; + pg_info->nents++; + k = 1; + + while (length > 0) { + pg_info->pgs[i++] = nth_page(sg_page(sgl), k++); + length -= PAGE_SIZE; + pg_info->nents++; + } + } + + /* + * lenght at that point will be 0 or negative, + * so to calculate last page size just add it to PAGE_SIZE + */ + pg_info->last_len = PAGE_SIZE + length; + + return pg_info; +} + +/* create sg_table with given pages and other parameters */ +struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs, + int frst_ofst, int last_len, + int nents) +{ + struct sg_table *sgt; + struct scatterlist *sgl; + int i, ret; + + sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sgt) + return NULL; + + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (ret) { + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } + + return NULL; + } + + sgl = sgt->sgl; + + sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst); + + for (i = 1; i < nents-1; i++) { + sgl = sg_next(sgl); + sg_set_page(sgl, pgs[i], PAGE_SIZE, 0); + } + + if (nents > 1) /* more than one page */ { + sgl = sg_next(sgl); + sg_set_page(sgl, pgs[i], last_len, 0); + } + + return sgt; +} + +int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported, + int force) +{ + struct sgt_list *sgtl; + struct attachment_list *attachl; + struct kmap_vaddr_list *va_kmapl; + struct vmap_vaddr_list *va_vmapl; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + + if (!exported) { + dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n"); + return -EINVAL; + } + + /* if force != 1, sgt_info can be released only if + * there's no activity on exported dma-buf on importer + * side. 
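+ * A forced cleanup (force == 1) tears down whatever kmap/vmap and + * attachment entries the importer still holds, so it should only be + * used when the importer side is known to be gone.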
+ */ + if (!force && + exported->active) { + dev_warn(hy_drv_priv->dev, + "dma-buf is used by importer\n"); + + return -EPERM; + } + + /* force == 1 is not recommended */ + while (!list_empty(&exported->va_kmapped->list)) { + va_kmapl = list_first_entry(&exported->va_kmapped->list, + struct kmap_vaddr_list, list); + + dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr); + list_del(&va_kmapl->list); + kfree(va_kmapl); + } + + while (!list_empty(&exported->va_vmapped->list)) { + va_vmapl = list_first_entry(&exported->va_vmapped->list, + struct vmap_vaddr_list, list); + + dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr); + list_del(&va_vmapl->list); + kfree(va_vmapl); + } + + while (!list_empty(&exported->active_sgts->list)) { + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + sgtl = list_first_entry(&exported->active_sgts->list, + struct sgt_list, list); + + dma_buf_unmap_attachment(attachl->attach, sgtl->sgt, + DMA_BIDIRECTIONAL); + list_del(&sgtl->list); + kfree(sgtl); + } + + while (!list_empty(&exported->active_sgts->list)) { + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + dma_buf_detach(exported->dma_buf, attachl->attach); + list_del(&attachl->list); + kfree(attachl); + } + + /* Start cleanup of buffer in reverse order to exporting */ + bknd_ops->unshare_pages(&exported->refs_info, exported->nents); + + /* unmap dma-buf */ + dma_buf_unmap_attachment(exported->active_attached->attach, + exported->active_sgts->sgt, + DMA_BIDIRECTIONAL); + + /* detatch dma-buf */ + dma_buf_detach(exported->dma_buf, exported->active_attached->attach); + + /* close connection to dma-buf completely */ + dma_buf_put(exported->dma_buf); + exported->dma_buf = NULL; + + kfree(exported->active_sgts); + kfree(exported->active_attached); + kfree(exported->va_kmapped); + kfree(exported->va_vmapped); + kfree(exported->priv); + + exported->active_sgts = NULL; + exported->active_attached = NULL; + exported->va_kmapped = NULL; + exported->va_vmapped = NULL; + exported->priv = NULL; + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h new file mode 100644 index 000000000000..869d98204e03 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h @@ -0,0 +1,41 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_IMP_H__ +#define __HYPER_DMABUF_IMP_H__ + +/* extract pages directly from struct sg_table */ +struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt); + +/* create sg_table with given pages and other parameters */ +struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs, + int frst_ofst, int last_len, + int nents); + +int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported, + int force); + +void hyper_dmabuf_free_sgt(struct sg_table *sgt); + +#endif /* __HYPER_DMABUF_IMP_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h new file mode 100644 index 000000000000..a11f804edfb3 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h @@ -0,0 +1,141 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_STRUCT_H__ +#define __HYPER_DMABUF_STRUCT_H__ + +/* stack of mapped sgts */ +struct sgt_list { + struct sg_table *sgt; + struct list_head list; +}; + +/* stack of attachments */ +struct attachment_list { + struct dma_buf_attachment *attach; + struct list_head list; +}; + +/* stack of vaddr mapped via kmap */ +struct kmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* stack of vaddr mapped via vmap */ +struct vmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* Exporter builds pages_info before sharing pages */ +struct pages_info { + int frst_ofst; + int last_len; + int nents; + struct page **pgs; +}; + + +/* Exporter stores references to sgt in a hash table + * Exporter keeps these references for synchronization + * and tracking purposes + */ +struct exported_sgt_info { + hyper_dmabuf_id_t hid; + + /* VM ID of importer */ + int rdomid; + + struct dma_buf *dma_buf; + int nents; + + /* list for tracking activities on dma_buf */ + struct sgt_list *active_sgts; + struct attachment_list *active_attached; + struct kmap_vaddr_list *va_kmapped; + struct vmap_vaddr_list *va_vmapped; + + /* set to 0 when unexported. 
Importer doesn't + * do a new mapping of buffer if valid == false + */ + bool valid; + + /* active == true if the buffer is actively used + * (mapped) by importer + */ + int active; + + /* hypervisor specific reference data for shared pages */ + void *refs_info; + + struct delayed_work unexport; + bool unexport_sched; + + /* list for file pointers associated with all user space + * application that have exported this same buffer to + * another VM. This needs to be tracked to know whether + * the buffer can be completely freed. + */ + struct file *filp; + + /* size of private */ + size_t sz_priv; + + /* private data associated with the exported buffer */ + char *priv; +}; + +/* imported_sgt_info contains information about imported DMA_BUF + * this info is kept in IMPORT list and asynchorously retrieved and + * used to map DMA_BUF on importer VM's side upon export fd ioctl + * request from user-space + */ + +struct imported_sgt_info { + hyper_dmabuf_id_t hid; /* unique id for shared dmabuf imported */ + + /* hypervisor-specific handle to pages */ + int ref_handle; + + /* offset and size info of DMA_BUF */ + int frst_ofst; + int last_len; + int nents; + + struct dma_buf *dma_buf; + struct sg_table *sgt; + + void *refs_info; + bool valid; + int importers; + + /* size of private */ + size_t sz_priv; + + /* private data associated with the exported buffer */ + char *priv; +}; + +#endif /* __HYPER_DMABUF_STRUCT_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c new file mode 100644 index 000000000000..bb16360d06d5 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c @@ -0,0 +1,505 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_msg.h" +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_fe_list.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_comm_ring.h" + +/* + * Identifies which queue is used for TX and RX + * Note: it is opposite regarding to frontent definition + */ +enum virio_queue_type { + HDMA_VIRTIO_RX_QUEUE = 0, + HDMA_VIRTIO_TX_QUEUE, + HDMA_VIRTIO_QUEUE_MAX +}; + +/* Data required for sending TX messages using virtqueues*/ +struct virtio_be_tx_data { + struct iovec tx_iov; + uint16_t tx_idx; +}; + +struct virtio_be_priv { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[HDMA_VIRTIO_QUEUE_MAX]; + bool busy; + struct hyper_dmabuf_req *pending_tx_req; + struct virtio_comm_ring tx_ring; + struct mutex lock; +}; + + +/* + * Received response to TX request, + * or empty buffer to be used for TX requests in future + */ +static void virtio_be_handle_tx_kick(struct virtio_vq_info *vq, + struct virtio_fe_info *fe_info) +{ + struct virtio_be_priv *priv = fe_info->priv; + /* Fill last used buffer with received buffer details */ + struct virtio_be_tx_data *tx_data = + (struct virtio_be_tx_data *) + virtio_comm_ring_pop(&priv->tx_ring); + + virtio_vq_getchain(vq, &tx_data->tx_idx, &tx_data->tx_iov, 1, NULL); + + /* Copy response if request was synchronous */ + if (priv->busy) { + memcpy(priv->pending_tx_req, + tx_data->tx_iov.iov_base, + tx_data->tx_iov.iov_len); + priv->busy = false; + } +} + +/* + * Received request from frontend + */ +static void virtio_be_handle_rx_kick(struct virtio_vq_info *vq, + struct virtio_fe_info *fe_info) +{ + struct iovec iov; + uint16_t idx; + struct hyper_dmabuf_req *req = NULL; + int len; + int ret; + + /* Make sure we will process all pending requests */ + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + + if (iov.iov_len != sizeof(struct hyper_dmabuf_req)) { + /* HACK: if int size buffer was provided, + * treat that as request to get frontend vmid */ + if (iov.iov_len == sizeof(int)) { + *((int *)iov.iov_base) = fe_info->vmid; + len = iov.iov_len; + } else { + len = 0; + dev_warn(hy_drv_priv->dev, + "received request with wrong size"); + dev_warn(hy_drv_priv->dev, + "%zu != %zu\n", + iov.iov_len, + sizeof(struct hyper_dmabuf_req)); + } + + virtio_vq_relchain(vq, idx, len); + continue; + } + + req = (struct hyper_dmabuf_req *)iov.iov_base; + + ret = hyper_dmabuf_msg_parse(1, req); + + len = iov.iov_len; + + virtio_vq_relchain(vq, idx, len); + } + virtio_vq_endchains(vq, 1); +} + +/* + * Check in what virtqueue we received buffer and process it accordingly. 
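+ * RX buffers carry new requests from the frontend, while TX buffers + * carry either responses to earlier requests or empty buffers that + * can be reused for future TX messages.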
+ */ +static void virtio_be_handle_vq_kick( + int vq_idx, struct virtio_fe_info *fe_info) +{ + struct virtio_vq_info *vq; + + vq = &fe_info->priv->vqs[vq_idx]; + + if (vq_idx == HDMA_VIRTIO_RX_QUEUE) + virtio_be_handle_rx_kick(vq, fe_info); + else + virtio_be_handle_tx_kick(vq, fe_info); +} + +/* + * Received new buffer in virtqueue + */ +static int virtio_be_handle_kick(int client_id, int req_cnt) +{ + int val = -1; + struct vhm_request *req; + struct virtio_fe_info *fe_info; + int i; + + if (unlikely(req_cnt <= 0)) + return -EINVAL; + + fe_info = virtio_fe_find(client_id); + if (fe_info == NULL) { + dev_warn(hy_drv_priv->dev, "Client %d not found\n", client_id); + return -EINVAL; + } + + for (i = 0; i < fe_info->max_vcpu; ++i) { + req = &fe_info->req_buf[i]; + if (req->valid && + req->processed == REQ_STATE_PROCESSING && + req->client == fe_info->client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) + req->reqs.pio_request.value = 0; + else + val = req->reqs.pio_request.value; + + req->processed = REQ_STATE_SUCCESS; + acrn_ioreq_complete_request(fe_info->client_id, i); + } + } + + if (val >= 0) + virtio_be_handle_vq_kick(val, fe_info); + + return 0; +} + +/* + * New frontend is connecting to backend. + * Creates virtqueues for it and registers internally. + */ +static int virtio_be_register_vhm_client(struct virtio_dev_info *d) +{ + unsigned int vmid; + struct vm_info info; + struct virtio_fe_info *fe_info; + int ret; + + fe_info = kcalloc(1, sizeof(*fe_info), GFP_KERNEL); + if (fe_info == NULL) + return -ENOMEM; + + fe_info->priv = + container_of(d, struct virtio_be_priv, dev); + vmid = d->_ctx.vmid; + fe_info->vmid = vmid; + + dev_dbg(hy_drv_priv->dev, + "Virtio frontend from vm %d connected\n", vmid); + + fe_info->client_id = + acrn_ioreq_create_client(vmid, + virtio_be_handle_kick, + "hyper dmabuf kick"); + if (fe_info->client_id < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create client of ACRN ioreq\n"); + goto err; + } + + ret = acrn_ioreq_add_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to add iorange to acrn ioreq\n"); + goto err; + } + + ret = vhm_get_vm_info(vmid, &info); + if (ret < 0) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len); + + dev_err(hy_drv_priv->dev, "Failed in vhm_get_vm_info\n"); + goto err; + } + + fe_info->max_vcpu = info.max_vcpu; + + fe_info->req_buf = acrn_ioreq_get_reqbuf(fe_info->client_id); + if (fe_info->req_buf == NULL) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len); + + dev_err(hy_drv_priv->dev, "Failed in acrn_ioreq_get_reqbuf\n"); + goto err; + } + + acrn_ioreq_attach_client(fe_info->client_id, 0); + + virtio_fe_add(fe_info); + + return 0; + +err: + acrn_ioreq_destroy_client(fe_info->client_id); + kfree(fe_info); + + return -EINVAL; +} + +/* + * DM is opening our VBS interface to create new frontend instance. 
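+ * A private backend context (virtqueues, TX ring and a pending + * request buffer) is allocated per opened file descriptor.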
+ */ +static int vbs_k_open(struct inode *inode, struct file *f) +{ + struct virtio_be_priv *priv; + struct virtio_dev_info *dev; + struct virtio_vq_info *vqs; + int i; + + priv = kcalloc(1, sizeof(*priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + vqs = &priv->vqs[0]; + + dev = &priv->dev; + + for (i = 0; i < HDMA_VIRTIO_QUEUE_MAX; i++) { + vqs[i].dev = dev; + vqs[i].vq_notify = NULL; + } + dev->vqs = vqs; + + virtio_dev_init(dev, vqs, HDMA_VIRTIO_QUEUE_MAX); + + priv->pending_tx_req = + kcalloc(1, sizeof(struct hyper_dmabuf_req), GFP_KERNEL); + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct virtio_be_tx_data), + REQ_RING_SIZE); + + mutex_init(&priv->lock); + + f->private_data = priv; + + return 0; +} + +static int vbs_k_release(struct inode *inode, struct file *f) +{ + struct virtio_be_priv *priv = + (struct virtio_be_priv *) f->private_data; + int i; + +// virtio_dev_stop(&priv->dev); +// virtio_dev_cleanup(&priv->dev, false); + + for (i = 0; i < HDMA_VIRTIO_QUEUE_MAX; i++) + virtio_vq_reset(&priv->vqs[i]); + + kfree(priv->pending_tx_req); + virtio_comm_ring_free(&priv->tx_ring); + kfree(priv); + return 0; +} + +static long vbs_k_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct virtio_be_priv *priv = + (struct virtio_be_priv *) f->private_data; + void __user *argp = (void __user *)arg; + int r; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "No backend private data\n"); + + return -EINVAL; + } + + if (ioctl == VBS_SET_VQ) { + /* Overridden to call additionally + * virtio_be_register_vhm_client */ + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + return -EFAULT; + + if (virtio_be_register_vhm_client(&priv->dev) < 0) + return -EFAULT; + } else { + r = virtio_dev_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + } + + return r; +} + +static const struct file_operations vbs_hyper_dmabuf_fops = { + .owner = THIS_MODULE, + .open = vbs_k_open, + .release = vbs_k_release, + .unlocked_ioctl = vbs_k_ioctl, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_hyper_dmabuf_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_hyper_dmabuf", + .fops = &vbs_hyper_dmabuf_fops, +}; + +static int virtio_be_register(void) +{ + return misc_register(&vbs_hyper_dmabuf_misc); +} + +static void virtio_be_unregister(void) +{ + misc_deregister(&vbs_hyper_dmabuf_misc); +} + +/* + * ACRN SOS will always has vmid 0 + * TODO: check if that always will be true + */ +static int virtio_be_get_vmid(void) +{ + return 0; +} + +static int virtio_be_send_req(int vmid, struct hyper_dmabuf_req *req, + int wait) +{ + int timeout = 1000; + struct virtio_fe_info *fe_info; + struct virtio_be_priv *priv; + struct virtio_be_tx_data *tx_data; + struct virtio_vq_info *vq; + int len; + + fe_info = virtio_fe_find_by_vmid(vmid); + + if (fe_info == NULL) { + dev_err(hy_drv_priv->dev, + "No frontend registered for vmid %d\n", vmid); + return -ENOENT; + } + + priv = fe_info->priv; + + mutex_lock(&priv->lock); + + /* Check if we have any free buffers for sending new request */ + while (virtio_comm_ring_full(&priv->tx_ring) && + timeout--) { + usleep_range(100, 120); + } + + if (timeout <= 0) { + dev_warn(hy_drv_priv->dev, "Requests ring full\n"); + return -EBUSY; + } + + /* Get free buffer for sending request from ring */ + tx_data = (struct virtio_be_tx_data *) + virtio_comm_ring_push(&priv->tx_ring); + + vq = &priv->vqs[HDMA_VIRTIO_TX_QUEUE]; + + if (tx_data->tx_iov.iov_len != sizeof(struct 
hyper_dmabuf_req)) { + dev_warn(hy_drv_priv->dev, + "received request with wrong size\n"); + virtio_vq_relchain(vq, tx_data->tx_idx, 0); + mutex_unlock(&priv->lock); + return -EINVAL; + } + + req->req_id = hyper_dmabuf_virtio_get_next_req_id(); + + /* Copy request data to virtqueue buffer */ + memcpy(tx_data->tx_iov.iov_base, req, sizeof(*req)); + len = tx_data->tx_iov.iov_len; + + /* update req_pending with current request */ + if (wait) { + priv->busy = true; + memcpy(priv->pending_tx_req, req, sizeof(*req)); + } + + virtio_vq_relchain(vq, tx_data->tx_idx, len); + + virtio_vq_endchains(vq, 1); + + if (wait) { + while (timeout--) { + if (priv->pending_tx_req->stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + mutex_unlock(&priv->lock); + dev_err(hy_drv_priv->dev, "request timed-out\n"); + return -EBUSY; + } + } + + mutex_unlock(&priv->lock); + return 0; +}; + +struct hyper_dmabuf_bknd_ops virtio_bknd_ops = { + .init = virtio_be_register, + .cleanup = virtio_be_unregister, + .get_vm_id = virtio_be_get_vmid, + .share_pages = virtio_share_pages, + .unshare_pages = virtio_unshare_pages, + .map_shared_pages = virtio_map_shared_pages, + .unmap_shared_pages = virtio_unmap_shared_pages, + .init_comm_env = NULL, + .destroy_comm = NULL, + .init_rx_ch = NULL, + .init_tx_ch = NULL, + .send_req = virtio_be_send_req, +}; + + +MODULE_DESCRIPTION("Hyper dmabuf virtio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c new file mode 100644 index 000000000000..d73bcbcc8e87 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c @@ -0,0 +1,89 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include "hyper_dmabuf_virtio_comm_ring.h" + +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries) +{ + ring->data = kcalloc(num_entries, entry_size, GFP_KERNEL); + + if (!ring->data) + return -ENOMEM; + + ring->head = 0; + ring->tail = 0; + ring->used = 0; + ring->num_entries = num_entries; + ring->entry_size = entry_size; + + return 0; +} + +void virtio_comm_ring_free(struct virtio_comm_ring *ring) +{ + kfree(ring->data); + ring->data = NULL; +} + +bool virtio_comm_ring_full(struct virtio_comm_ring *ring) +{ + if (ring->used == ring->num_entries) + return true; + + return false; +} + +void *virtio_comm_ring_push(struct virtio_comm_ring *ring) +{ + int old_head; + + if (virtio_comm_ring_full(ring)) + return NULL; + + old_head = ring->head; + + ring->head++; + ring->head %= ring->num_entries; + ring->used++; + + return ring->data + (ring->entry_size * old_head); +} + +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring) +{ + int old_tail = ring->tail; + + ring->tail++; + ring->tail %= ring->num_entries; + ring->used--; + + return ring->data + (ring->entry_size * old_tail); +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h new file mode 100644 index 000000000000..a95a63af2ba0 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMM_RING_H__ +#define __HYPER_DMABUF_VIRTIO_COMM_RING_H__ + +/* Generic ring buffer */ +struct virtio_comm_ring { + /* Buffer allocated for keeping ring entries */ + void *data; + + /* Index pointing to next free element in ring */ + int head; + + /* Index pointing to last released element in ring */ + int tail; + + /* Total number of elements that ring can contain */ + int num_entries; + + /* Size of single ring element in bytes */ + int entry_size; + + /* Number of currently used elements */ + int used; +}; + +/* Initializes given ring for keeping given a + * number of entries of specific size */ +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries); + +/* Frees buffer used for storing ring entries */ +void virtio_comm_ring_free(struct virtio_comm_ring *ring); + +/* Checks if ring is full */ +bool virtio_comm_ring_full(struct virtio_comm_ring *ring); + +/* Gets next free element from ring and marks it as used + * or NULL if ring is full */ +void *virtio_comm_ring_push(struct virtio_comm_ring *ring); + +/* Pops oldest element from ring and marks it as free */ +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring); + +#endif /* __HYPER_DMABUF_VIRTIO_COMM_RING_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c new file mode 100644 index 000000000000..05be74358a74 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c @@ -0,0 +1,35 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include "hyper_dmabuf_virtio_common.h" + +int hyper_dmabuf_virtio_get_next_req_id(void) +{ + static int req_id; + + return req_id++; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h new file mode 100644 index 000000000000..24a652ef54c0 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h @@ -0,0 +1,55 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMMON_H__ +#define __HYPER_DMABUF_VIRTIO_COMMON_H__ + +/* + * ACRN uses physicall addresses for memory sharing, + * so size of one page ref will be 64-bits + */ + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64)) + +/* Defines size of requests circular buffer */ +#define REQ_RING_SIZE 128 + +extern struct hyper_dmabuf_bknd_ops virtio_bknd_ops; +struct virtio_be_priv; +struct vhm_request; + +/* Entry describing each connected frontend */ +struct virtio_fe_info { + struct virtio_be_priv *priv; + int client_id; + int vmid; + int max_vcpu; + struct vhm_request *req_buf; +}; + +extern struct hyper_dmabuf_private hyper_dmabuf_private; + +int hyper_dmabuf_virtio_get_next_req_id(void); + +#endif /* __HYPER_DMABUF_VIRTIO_COMMON_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c new file mode 100644 index 000000000000..9ae290435d70 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c @@ -0,0 +1,385 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_msg.h" +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_comm_ring.h" + +/* + * Identifies which queue is used for TX and RX + * Note: it is opposite regarding to backend definition + */ +enum virio_queue_type { + HDMA_VIRTIO_TX_QUEUE = 0, + HDMA_VIRTIO_RX_QUEUE, + HDMA_VIRTIO_QUEUE_MAX +}; + +struct virtio_hdma_fe_priv { + struct virtqueue *vqs[HDMA_VIRTIO_QUEUE_MAX]; + struct virtio_comm_ring tx_ring; + struct virtio_comm_ring rx_ring; + int vmid; +}; + +/* Assuming there will be one FE instance per VM */ +static struct virtio_hdma_fe_priv *hyper_dmabuf_virtio_fe; + +/* + * Received response for request. + * No need for copying request with updated result, + * as backend is processing original request data directly. + */ +static void virtio_hdma_fe_tx_done(struct virtqueue *vq) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vq->vdev->priv; + int len; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + return; + } + + /* Make sure that all pending responses are processed */ + while (virtqueue_get_buf(vq, &len)) { + if (len == sizeof(struct hyper_dmabuf_req)) { + /* Mark that response was received + * and buffer can be reused */ + virtio_comm_ring_pop(&priv->tx_ring); + } + } +} + +/* + * Sends given data buffer via given virtqueue. 
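+ * The buffer is wrapped in a single-entry scatterlist, added to the + * selected virtqueue and the backend is kicked so it can consume it.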
+ */ +static void virtio_hdma_fe_queue_buffer(struct virtio_hdma_fe_priv *priv, + unsigned int queue_nr, + void *buf, size_t size) +{ + struct scatterlist sg; + + if (queue_nr >= HDMA_VIRTIO_QUEUE_MAX) { + dev_dbg(hy_drv_priv->dev, + "queue_nr exceeding max queue number\n"); + return; + } + + sg_init_one(&sg, buf, size); + + virtqueue_add_inbuf(priv->vqs[queue_nr], &sg, 1, buf, GFP_KERNEL); + + virtqueue_kick(priv->vqs[queue_nr]); +} + +/* + * Handle requests coming from other VMs + */ +static void virtio_hdma_fe_handle_rx(struct virtqueue *vq) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vq->vdev->priv; + struct hyper_dmabuf_req *rx_req; + int size, ret; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + return; + } + + /* Make sure all pending requests will be processed */ + while (virtqueue_get_buf(vq, &size)) { + + /* Get next request from ring */ + rx_req = (struct hyper_dmabuf_req *) + virtio_comm_ring_pop(&priv->rx_ring); + + if (size != sizeof(struct hyper_dmabuf_req)) { + dev_dbg(hy_drv_priv->dev, + "Received malformed request\n"); + } else { + ret = hyper_dmabuf_msg_parse(1, rx_req); + } + + /* Send updated request back to virtqueue as a response.*/ + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE, + rx_req, sizeof(*rx_req)); + } +} + +static int virtio_hdma_fe_probe_common(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv; + vq_callback_t *callbacks[] = {virtio_hdma_fe_tx_done, + virtio_hdma_fe_handle_rx}; + static const char *names[] = {"txqueue", "rxqueue"}; + int ret; + + priv = kzalloc(sizeof(struct virtio_hdma_fe_priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct hyper_dmabuf_req), + REQ_RING_SIZE); + virtio_comm_ring_init(&priv->rx_ring, + sizeof(struct hyper_dmabuf_req), + REQ_RING_SIZE); + + /* Set vmid to -1 to mark that it is not initialized yet */ + priv->vmid = -1; + + vdev->priv = priv; + + ret = virtio_find_vqs(vdev, HDMA_VIRTIO_QUEUE_MAX, + priv->vqs, callbacks, names, NULL); + if (ret) + goto err; + + hyper_dmabuf_virtio_fe = priv; + + return 0; +err: + virtio_comm_ring_free(&priv->tx_ring); + virtio_comm_ring_free(&priv->rx_ring); + kfree(priv); + return ret; +} + +static void virtio_hdma_fe_remove_common(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vdev->priv; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "No frontend private data\n"); + + return; + } + + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + virtio_comm_ring_free(&priv->tx_ring); + virtio_comm_ring_free(&priv->rx_ring); + kfree(priv); + hyper_dmabuf_virtio_fe = NULL; +} + +static int virtio_hdma_fe_probe(struct virtio_device *vdev) +{ + return virtio_hdma_fe_probe_common(vdev); +} + +static void virtio_hdma_fe_remove(struct virtio_device *vdev) +{ + virtio_hdma_fe_remove_common(vdev); +} + +/* + * Queues empty requests buffers to backend, + * which will be used by it to send requests back to frontend. + */ +static void virtio_hdma_fe_scan(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vdev->priv; + struct hyper_dmabuf_req *rx_req; + int timeout = 1000; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + + return; + } + + /* Send request to query vmid, in ACRN guest instances don't + * know their ids, but host does. 
Here a small hack is used, + * and buffer of int size is sent to backend, in that case + * backend will fill it with vmid of instance that sent that request + */ + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_TX_QUEUE, + &priv->vmid, sizeof(priv->vmid)); + + while (timeout--) { + if (priv->vmid > 0) + break; + usleep_range(100, 120); + } + + if (timeout < 0) + dev_err(hy_drv_priv->dev, + "Cannot query vmid\n"); + + while (!virtio_comm_ring_full(&priv->rx_ring)) { + rx_req = virtio_comm_ring_push(&priv->rx_ring); + + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE, + rx_req, sizeof(*rx_req)); + } +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_hdma_fe_freeze(struct virtio_device *vdev) +{ + virtio_hdma_fe_remove_common(vdev); + return 0; +} + +static int virtio_hdma_fe_restore(struct virtio_device *vdev) +{ + return virtio_hdma_fe_probe_common(vdev); +} +#endif + + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_HYPERDMABUF, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_hdma_fe_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_hdma_fe_probe, + .remove = virtio_hdma_fe_remove, + .scan = virtio_hdma_fe_scan, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_hdma_fe_freeze, + .restore = virtio_hdma_fe_restore, +#endif +}; + +int virtio_hdma_fe_register(void) +{ + return register_virtio_driver(&virtio_hdma_fe_driver); +} + +void virtio_hdma_fe_unregister(void) +{ + unregister_virtio_driver(&virtio_hdma_fe_driver); +} + +static int virtio_hdma_fe_get_vmid(void) +{ + struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe; + + if (hyper_dmabuf_virtio_fe == NULL) { + dev_err(hy_drv_priv->dev, + "Backend not connected\n"); + return -1; + } + + return priv->vmid; +} + +static int virtio_hdma_fe_send_req(int vmid, struct hyper_dmabuf_req *req, + int wait) +{ + struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe; + struct hyper_dmabuf_req *tx_req; + int timeout = 1000; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "Backend not connected\n"); + return -ENOENT; + } + + /* Check if there are any free buffers in ring */ + while (timeout--) { + if (!virtio_comm_ring_full(&priv->tx_ring)) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + dev_err(hy_drv_priv->dev, + "Timedout while waiting for free request buffers\n"); + return -EBUSY; + } + + /* Get free buffer for sending request from ring */ + tx_req = (struct hyper_dmabuf_req *) + virtio_comm_ring_push(&priv->tx_ring); + req->req_id = hyper_dmabuf_virtio_get_next_req_id(); + + /* copy request to buffer that will be used in virtqueue */ + memcpy(tx_req, req, sizeof(*req)); + + virtio_hdma_fe_queue_buffer(hyper_dmabuf_virtio_fe, + HDMA_VIRTIO_TX_QUEUE, + tx_req, sizeof(*tx_req)); + + if (wait) { + while (timeout--) { + if (tx_req->stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) + return -EBUSY; + } + + return 0; +} + +struct hyper_dmabuf_bknd_ops virtio_bknd_ops = { + .init = virtio_hdma_fe_register, + .cleanup = virtio_hdma_fe_unregister, + .get_vm_id = virtio_hdma_fe_get_vmid, + .share_pages = virtio_share_pages, + .unshare_pages = virtio_unshare_pages, + .map_shared_pages = virtio_map_shared_pages, + .unmap_shared_pages = virtio_unmap_shared_pages, + .send_req = virtio_hdma_fe_send_req, + .init_comm_env = NULL, + .destroy_comm = NULL, + .init_rx_ch = NULL, + .init_tx_ch = NULL, +}; + + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Hyper dmabuf virtio 
driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c new file mode 100644 index 000000000000..79b30e286b5e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c @@ -0,0 +1,99 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_fe_list.h" + +DECLARE_HASHTABLE(virtio_fe_hash, MAX_ENTRY_FE); + +void virtio_fe_table_init(void) +{ + hash_init(virtio_fe_hash); +} + +int virtio_fe_add(struct virtio_fe_info *fe_info) +{ + struct virtio_fe_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = fe_info; + + hash_add(virtio_fe_hash, &info_entry->node, + info_entry->info->client_id); + + return 0; +} + +struct virtio_fe_info *virtio_fe_find(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) + return info_entry->info; + + return NULL; +} + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->vmid == vmid) + return info_entry->info; + + return NULL; +} + +int virtio_fe_remove(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h new file mode 100644 index 000000000000..bc7ef843161c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h @@ -0,0 +1,48 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, 
including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_FE_LIST_H__ +#define __HYPER_DMABUF_VIRTIO_FE_LIST_H__ + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_FE 7 + +struct virtio_fe_info; + +struct virtio_fe_info_entry { + struct virtio_fe_info *info; + struct hlist_node node; +}; + +void virtio_fe_table_init(void); + +int virtio_fe_add(struct virtio_fe_info *fe_info); + +int virtio_remove_fe(int client_id); + +struct virtio_fe_info *virtio_fe_find(int client_id); + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid); + +#endif /* __HYPER_DMABUF_VIRTIO_FE_LIST_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c new file mode 100644 index 000000000000..be5141c25191 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c @@ -0,0 +1,343 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +#include +#endif +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_common.h" + +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +struct virtio_shared_pages_info { + u64 *lvl3_table; + u64 **lvl2_table; + u64 lvl3_gref; + struct page **data_pages; + int n_lvl2_refs; + int nents_last; + int vmid; +}; +#else +struct virtio_shared_pages_info { + u64 *lvl3_table; + u64 *lvl2_table; + u64 lvl3_gref; +}; +#endif + +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +static int virtio_be_share_pages(struct page **pages, + int vmid, + int nents, + void **refs_info) +{ + dev_err(hy_drv_priv->dev, + "Pages sharing not available with ACRN backend in SOS\n"); + + return -EINVAL; +} + +static int virtio_be_unshare_pages(void **refs_info, + int nents) +{ + dev_err(hy_drv_priv->dev, + "Pages sharing not available with ACRN backend in SOS\n"); + + return -EINVAL; +} + +static struct page **virtio_be_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + u64 *lvl3_table = NULL; + u64 **lvl2_table = NULL; + struct page **data_pages = NULL; + struct virtio_shared_pages_info *sh_pages_info = NULL; + void *pageaddr; + + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_refs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + if (sh_pages_info == NULL) + goto map_failed; + + *refs_info = (void *) sh_pages_info; + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + if (data_pages == NULL) + goto map_failed; + + lvl2_table = kcalloc(n_lvl2_refs, sizeof(u64 *), GFP_KERNEL); + if (lvl2_table == NULL) + goto map_failed; + + lvl3_table = (u64 *)map_guest_phys(vmid, lvl3_gref, PAGE_SIZE); + if (lvl3_table == NULL) + goto map_failed; + + for (i = 0; i < n_lvl2_refs; i++) { + lvl2_table[i] = (u64 *)map_guest_phys(vmid, + lvl3_table[i], + PAGE_SIZE); + if (lvl2_table[i] == NULL) + goto map_failed; + } + + k = 0; + for (i = 0; i < n_lvl2_refs - 1; i++) { + for (j = 0; j < REFS_PER_PAGE; j++) { + pageaddr = map_guest_phys(vmid, + lvl2_table[i][j], + PAGE_SIZE); + if (pageaddr == NULL) + goto map_failed; + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + } + + for (j = 0; j < nents_last; j++) { + pageaddr = map_guest_phys(vmid, + lvl2_table[i][j], + PAGE_SIZE); + if (pageaddr == NULL) + goto map_failed; + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl3_gref = lvl3_gref; + sh_pages_info->n_lvl2_refs = n_lvl2_refs; + sh_pages_info->nents_last = nents_last; + sh_pages_info->data_pages = data_pages; + sh_pages_info->vmid = vmid; + + return data_pages; + +map_failed: + dev_err(hy_drv_priv->dev, + "Cannot map guest memory\n"); + + kfree(lvl2_table); + kfree(data_pages); + kfree(sh_pages_info); + + return NULL; +} + +/* + * TODO: In theory pages don't need to be unmaped, + * as ACRN is just translating memory addresses, + * but not sure if that will work the same way in future + */ +static int virtio_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int vmid; + int i, j; + + sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info); + + if (sh_pages_info->data_pages == NULL) { + dev_warn(hy_drv_priv->dev, + 
"Imported pages already cleaned up"); + dev_warn(hy_drv_priv->dev, + "or buffer was not imported yet\n"); + return 0; + } + vmid = sh_pages_info->vmid; + + for (i = 0; i < sh_pages_info->n_lvl2_refs - 1; i++) { + for (j = 0; j < REFS_PER_PAGE; j++) + unmap_guest_phys(vmid, + sh_pages_info->lvl2_table[i][j]); + } + + for (j = 0; j < sh_pages_info->nents_last; j++) + unmap_guest_phys(vmid, sh_pages_info->lvl2_table[i][j]); + + for (i = 0; i < sh_pages_info->n_lvl2_refs; i++) + unmap_guest_phys(vmid, sh_pages_info->lvl3_table[i]); + + unmap_guest_phys(vmid, sh_pages_info->lvl3_gref); + + kfree(sh_pages_info->lvl2_table); + kfree(sh_pages_info->data_pages); + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + return 0; +} +#else +static int virtio_fe_share_pages(struct page **pages, + int domid, int nents, + void **refs_info) +{ + struct virtio_shared_pages_info *sh_pages_info; + u64 lvl3_gref; + u64 *lvl2_table; + u64 *lvl3_table; + int i; + + /* + * Calculate number of pages needed for 2nd level addresing: + */ + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 1 : 0)); + + lvl3_table = (u64 *)__get_free_pages(GFP_KERNEL, 1); + lvl2_table = (u64 *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + + if (sh_pages_info == NULL) + return -ENOMEM; + + *refs_info = (void *)sh_pages_info; + + /* Share physical address of pages */ + for (i = 0; i < nents; i++) + lvl2_table[i] = page_to_phys(pages[i]); + + for (i = 0; i < n_lvl2_grefs; i++) + lvl3_table[i] = + virt_to_phys((void *)lvl2_table + i * PAGE_SIZE); + + lvl3_gref = virt_to_phys(lvl3_table); + + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_gref = lvl3_gref; + + return lvl3_gref; +} + +static int virtio_fe_unshare_pages(void **refs_info, + int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 
1 : 0)); + + sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info); + + if (sh_pages_info == NULL) { + dev_err(hy_drv_priv->dev, + "No pages info\n"); + return -EINVAL; + } + + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + kfree(sh_pages_info); + + return 0; +} + +static struct page **virtio_fe_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + dev_dbg(hy_drv_priv->dev, + "Virtio frontend not supporting currently page mapping\n"); + return NULL; +} + +static int virtio_fe_unmap_shared_pages(void **refs_info, int nents) +{ + dev_dbg(hy_drv_priv->dev, + "Virtio frontend not supporting currently page mapping\n"); + return -EINVAL; +} + +#endif + +int virtio_share_pages(struct page **pages, + int domid, int nents, + void **refs_info) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_share_pages(pages, domid, nents, refs_info); +#else + ret = virtio_fe_share_pages(pages, domid, nents, refs_info); +#endif + return ret; +} + +int virtio_unshare_pages(void **refs_info, int nents) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_unshare_pages(refs_info, nents); +#else + ret = virtio_fe_unshare_pages(refs_info, nents); +#endif + return ret; +} + +struct page **virtio_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + struct page **ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_map_shared_pages(lvl3_gref, vmid, + nents, refs_info); +#else + ret = virtio_fe_map_shared_pages(lvl3_gref, vmid, + nents, refs_info); +#endif + return ret; +} + +int virtio_unmap_shared_pages(void **refs_info, int nents) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_unmap_shared_pages(refs_info, nents); +#else + ret = virtio_fe_unmap_shared_pages(refs_info, nents); +#endif + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h new file mode 100644 index 000000000000..05cbf5779f86 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h @@ -0,0 +1,40 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_SHM_H__ +#define __HYPER_DMABUF_VIRTIO_SHM_H__ + +int virtio_share_pages(struct page **pages, + int domid, int nents, + void **refs_info); + +int virtio_unshare_pages(void **refs_info, int nents); + +struct page **virtio_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info); + +int virtio_unmap_shared_pages(void **refs_info, int nents); + +#endif /* __HYPER_DMABUF_VIRTIO_SHM_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c new file mode 100644 index 000000000000..3dd49db66e31 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c @@ -0,0 +1,951 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_comm_list.h" +#include "../hyper_dmabuf_drv.h" + +static int export_req_id; + +struct hyper_dmabuf_req req_pending = {0}; + +static void xen_get_domid_delayed(struct work_struct *unused); +static void xen_init_comm_env_delayed(struct work_struct *unused); + +static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed); +static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed); + +/* Creates entry in xen store that will keep details of all + * exporter rings created by this domain + */ +static int xen_comm_setup_data_dir(void) +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", + hy_drv_priv->domid); + + return xenbus_mkdir(XBT_NIL, buf, ""); +} + +/* Removes entry from xenstore with exporter ring details. + * Other domains that has connected to any of exporter rings + * created by this domain, will be notified about removal of + * this entry and will treat that as signal to cleanup importer + * rings created for this domain + */ +static int xen_comm_destroy_data_dir(void) +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", + hy_drv_priv->domid); + + return xenbus_rm(XBT_NIL, buf, ""); +} + +/* Adds xenstore entries with details of exporter ring created + * for given remote domain. It requires special daemon running + * in dom0 to make sure that given remote domain will have right + * permissions to access that data. 
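+ *
+ * e.g. an exporter running in domain 1 that exposes a ring to domain 2
+ * ends up with /local/domain/1/data/hyper_dmabuf/2/grefid and
+ * /local/domain/1/data/hyper_dmabuf/2/port entries in xenstore.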
+ */
+static int xen_comm_expose_ring_details(int domid, int rdomid,
+ int gref, int port)
+{
+ char buf[255];
+ int ret;
+
+ sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+ domid, rdomid);
+
+ ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
+
+ if (ret) {
+ dev_err(hy_drv_priv->dev,
+ "Failed to write xenbus entry %s: %d\n",
+ buf, ret);
+
+ return ret;
+ }
+
+ ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
+
+ if (ret) {
+ dev_err(hy_drv_priv->dev,
+ "Failed to write xenbus entry %s: %d\n",
+ buf, ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Queries details of ring exposed by remote domain.
+ */
+static int xen_comm_get_ring_details(int domid, int rdomid,
+ int *grefid, int *port)
+{
+ char buf[255];
+ int ret;
+
+ sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+ rdomid, domid);
+
+ ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
+
+ if (ret <= 0) {
+ dev_err(hy_drv_priv->dev,
+ "Failed to read xenbus entry %s: %d\n",
+ buf, ret);
+
+ return 1;
+ }
+
+ ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
+
+ if (ret <= 0) {
+ dev_err(hy_drv_priv->dev,
+ "Failed to read xenbus entry %s: %d\n",
+ buf, ret);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static void xen_get_domid_delayed(struct work_struct *unused)
+{
+ struct xenbus_transaction xbt;
+ int domid, ret;
+
+ /* schedule another attempt if driver is still running
+ * and xenstore has not been initialized
+ */
+ if (likely(xenstored_ready == 0)) {
+ dev_dbg(hy_drv_priv->dev,
+ "Xenstore is not ready yet. Will retry in 500ms\n");
+ schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+ } else {
+ xenbus_transaction_start(&xbt);
+
+ ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
+
+ if (ret <= 0)
+ domid = -1;
+
+ xenbus_transaction_end(xbt, 0);
+
+ /* try again since -1 is an invalid id for domain
+ * (but only if driver is still running)
+ */
+ if (unlikely(domid == -1)) {
+ dev_dbg(hy_drv_priv->dev,
+ "domid==-1 is invalid. Will retry it in 500ms\n");
+ schedule_delayed_work(&get_vm_id_work,
+ msecs_to_jiffies(500));
+ } else {
+ dev_info(hy_drv_priv->dev,
+ "Successfully retrieved domid from Xenstore:%d\n",
+ domid);
+ hy_drv_priv->domid = domid;
+ }
+ }
+}
+
+int xen_be_get_domid(void)
+{
+ struct xenbus_transaction xbt;
+ int domid;
+
+ if (unlikely(xenstored_ready == 0)) {
+ xen_get_domid_delayed(NULL);
+ return -1;
+ }
+
+ xenbus_transaction_start(&xbt);
+
+ if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
+ domid = -1;
+
+ xenbus_transaction_end(xbt, 0);
+
+ return domid;
+}
+
+static int xen_comm_next_req_id(void)
+{
+ export_req_id++;
+ return export_req_id;
+}
+
+/* For now cache latest rings as global variables. TODO: keep them in a list */
+static irqreturn_t front_ring_isr(int irq, void *info);
+static irqreturn_t back_ring_isr(int irq, void *info);
+
+/* Callback function that will be called on any change of xenbus path
+ * being watched. Used for detecting creation/destruction of remote
+ * domain exporter ring.
+ *
+ * When a remote domain's exporter ring is detected, an importer ring
+ * on this domain will be created.
+ *
+ * When destruction of the remote domain's exporter ring is detected,
+ * this domain's importer ring will be cleaned up.
+ *
+ * Destruction can be caused by the remote domain unloading the module or
+ * by its crash/force shutdown.
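+ *
+ * The watched xenbus path is the remote domain's
+ * /local/domain/<rdomid>/data/hyper_dmabuf/<domid>/port entry, registered
+ * in xen_be_init_tx_rbuf() when the tx ring towards that domain is created.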
+ */
+static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
+ const char *path, const char *token)
+{
+ int rdom, ret;
+ uint32_t grefid, port;
+ struct xen_comm_rx_ring_info *ring_info;
+
+ /* Check which domain has changed its exporter rings */
+ ret = sscanf(watch->node, "/local/domain/%d/", &rdom);
+ if (ret <= 0)
+ return;
+
+ /* Check if we have importer ring for given remote domain already
+ * created
+ */
+ ring_info = xen_comm_find_rx_ring(rdom);
+
+ /* Try to query the remote domain's exporter ring details - if
+ * that fails and we have an importer ring, it means the remote
+ * domain has cleaned up its exporter ring, so our importer ring
+ * is no longer useful.
+ *
+ * If the query succeeds and we don't have an importer ring yet,
+ * it means the remote domain has set one up for us and we should
+ * connect to it.
+ */
+
+ ret = xen_comm_get_ring_details(xen_be_get_domid(),
+ rdom, &grefid, &port);
+
+ if (ring_info && ret != 0) {
+ dev_info(hy_drv_priv->dev,
+ "Remote exporter closed, cleaning up importer\n");
+ xen_be_cleanup_rx_rbuf(rdom);
+ } else if (!ring_info && ret == 0) {
+ dev_info(hy_drv_priv->dev,
+ "Registering importer\n");
+ xen_be_init_rx_rbuf(rdom);
+ }
+}
+
+/* exporter needs to generate info for page sharing */
+int xen_be_init_tx_rbuf(int domid)
+{
+ struct xen_comm_tx_ring_info *ring_info;
+ struct xen_comm_sring *sring;
+ struct evtchn_alloc_unbound alloc_unbound;
+ struct evtchn_close close;
+
+ void *shared_ring;
+ int ret;
+
+ /* check if there's any existing tx channel in the table */
+ ring_info = xen_comm_find_tx_ring(domid);
+
+ if (ring_info) {
+ dev_info(hy_drv_priv->dev,
+ "tx ring ch to domid = %d already exists\ngref = %d, port = %d\n",
+ ring_info->rdomain, ring_info->gref_ring, ring_info->port);
+ return 0;
+ }
+
+ ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
+
+ if (!ring_info)
+ return -ENOMEM;
+
+ /* from exporter to importer */
+ shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
+ if (shared_ring == 0) {
+ kfree(ring_info);
+ return -ENOMEM;
+ }
+
+ sring = (struct xen_comm_sring *) shared_ring;
+
+ SHARED_RING_INIT(sring);
+
+ FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE);
+
+ ring_info->gref_ring = gnttab_grant_foreign_access(domid,
+ virt_to_mfn(shared_ring),
+ 0);
+ if (ring_info->gref_ring < 0) {
+ /* fail to get gref */
+ kfree(ring_info);
+ return -EFAULT;
+ }
+
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = domid;
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (ret) {
+ dev_err(hy_drv_priv->dev,
+ "Cannot allocate event channel\n");
+ kfree(ring_info);
+ return -EIO;
+ }
+
+ /* setting up interrupt */
+ ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
+ front_ring_isr, 0,
+ NULL, (void *) ring_info);
+
+ if (ret < 0) {
+ dev_err(hy_drv_priv->dev,
+ "Failed to setup event channel\n");
+ close.port = alloc_unbound.port;
+ HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+ gnttab_end_foreign_access(ring_info->gref_ring, 0,
+ virt_to_mfn(shared_ring));
+ kfree(ring_info);
+ return -EIO;
+ }
+
+ ring_info->rdomain = domid;
+ ring_info->irq = ret;
+ ring_info->port = alloc_unbound.port;
+
+ mutex_init(&ring_info->lock);
+
+ dev_dbg(hy_drv_priv->dev,
+ "%s: allocated eventchannel gref %d port: %d irq: %d\n",
+ __func__,
+ ring_info->gref_ring,
+ ring_info->port,
+ ring_info->irq);
+
+ ret = xen_comm_add_tx_ring(ring_info);
+
+ if (ret < 0) {
+ kfree(ring_info);
+ return -ENOMEM;
+ }
+
+ ret = xen_comm_expose_ring_details(xen_be_get_domid(),
domid, + ring_info->gref_ring, + ring_info->port); + + /* Register watch for remote domain exporter ring. + * When remote domain will setup its exporter ring, + * we will automatically connect our importer ring to it. + */ + ring_info->watch.callback = remote_dom_exporter_watch_cb; + ring_info->watch.node = kmalloc(255, GFP_KERNEL); + + if (!ring_info->watch.node) { + kfree(ring_info); + return -ENOMEM; + } + + sprintf((char *)ring_info->watch.node, + "/local/domain/%d/data/hyper_dmabuf/%d/port", + domid, xen_be_get_domid()); + + register_xenbus_watch(&ring_info->watch); + + return ret; +} + +/* cleans up exporter ring created for given remote domain */ +void xen_be_cleanup_tx_rbuf(int domid) +{ + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_rx_ring_info *rx_ring_info; + + /* check if we at all have exporter ring for given rdomain */ + ring_info = xen_comm_find_tx_ring(domid); + + if (!ring_info) + return; + + xen_comm_remove_tx_ring(domid); + + unregister_xenbus_watch(&ring_info->watch); + kfree(ring_info->watch.node); + + /* No need to close communication channel, will be done by + * this function + */ + unbind_from_irqhandler(ring_info->irq, (void *) ring_info); + + /* No need to free sring page, will be freed by this function + * when other side will end its access + */ + gnttab_end_foreign_access(ring_info->gref_ring, 0, + (unsigned long) ring_info->ring_front.sring); + + kfree(ring_info); + + rx_ring_info = xen_comm_find_rx_ring(domid); + if (!rx_ring_info) + return; + + BACK_RING_INIT(&(rx_ring_info->ring_back), + rx_ring_info->ring_back.sring, + PAGE_SIZE); +} + +/* importer needs to know about shared page and port numbers for + * ring buffer and event channel + */ +int xen_be_init_rx_rbuf(int domid) +{ + struct xen_comm_rx_ring_info *ring_info; + struct xen_comm_sring *sring; + + struct page *shared_ring; + + struct gnttab_map_grant_ref *map_ops; + + int ret; + int rx_gref, rx_port; + + /* check if there's existing rx ring channel */ + ring_info = xen_comm_find_rx_ring(domid); + + if (ring_info) { + dev_info(hy_drv_priv->dev, + "rx ring ch from domid = %d already exist\n", + ring_info->sdomain); + + return 0; + } + + ret = xen_comm_get_ring_details(xen_be_get_domid(), domid, + &rx_gref, &rx_port); + + if (ret) { + dev_err(hy_drv_priv->dev, + "Domain %d has not created exporter ring for current domain\n", + domid); + + return ret; + } + + ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL); + + if (!ring_info) + return -ENOMEM; + + ring_info->sdomain = domid; + ring_info->evtchn = rx_port; + + map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL); + + if (!map_ops) { + ret = -ENOMEM; + goto fail_no_map_ops; + } + + if (gnttab_alloc_pages(1, &shared_ring)) { + ret = -ENOMEM; + goto fail_others; + } + + gnttab_set_map_op(&map_ops[0], + (unsigned long)pfn_to_kaddr( + page_to_pfn(shared_ring)), + GNTMAP_host_map, rx_gref, domid); + + gnttab_set_unmap_op(&ring_info->unmap_op, + (unsigned long)pfn_to_kaddr( + page_to_pfn(shared_ring)), + GNTMAP_host_map, -1); + + ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1); + if (ret < 0) { + dev_err(hy_drv_priv->dev, "Cannot map ring\n"); + ret = -EFAULT; + goto fail_others; + } + + if (map_ops[0].status) { + dev_err(hy_drv_priv->dev, "Ring mapping failed\n"); + ret = -EFAULT; + goto fail_others; + } else { + ring_info->unmap_op.handle = map_ops[0].handle; + } + + kfree(map_ops); + + sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring)); + + BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE); + + ret = 
bind_interdomain_evtchn_to_irq(domid, rx_port);
+
+ if (ret < 0) {
+ ret = -EIO;
+ goto fail_others;
+ }
+
+ ring_info->irq = ret;
+
+ dev_dbg(hy_drv_priv->dev,
+ "%s: bound to eventchannel port: %d irq: %d\n", __func__,
+ rx_port,
+ ring_info->irq);
+
+ ret = xen_comm_add_rx_ring(ring_info);
+
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto fail_others;
+ }
+
+ /* Set up communication channel in opposite direction */
+ if (!xen_comm_find_tx_ring(domid))
+ ret = xen_be_init_tx_rbuf(domid);
+
+ ret = request_irq(ring_info->irq,
+ back_ring_isr, 0,
+ NULL, (void *)ring_info);
+
+ return ret;
+
+fail_others:
+ kfree(map_ops);
+
+fail_no_map_ops:
+ kfree(ring_info);
+
+ return ret;
+}
+
+/* cleans up importer ring created for given source domain */
+void xen_be_cleanup_rx_rbuf(int domid)
+{
+ struct xen_comm_rx_ring_info *ring_info;
+ struct xen_comm_tx_ring_info *tx_ring_info;
+ struct page *shared_ring;
+
+ /* check if we have importer ring created for given sdomain */
+ ring_info = xen_comm_find_rx_ring(domid);
+
+ if (!ring_info)
+ return;
+
+ xen_comm_remove_rx_ring(domid);
+
+ /* no need to close event channel, will be done by that function */
+ unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
+
+ /* unmapping shared ring page */
+ shared_ring = virt_to_page(ring_info->ring_back.sring);
+ gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
+ gnttab_free_pages(1, &shared_ring);
+
+ kfree(ring_info);
+
+ tx_ring_info = xen_comm_find_tx_ring(domid);
+ if (!tx_ring_info)
+ return;
+
+ SHARED_RING_INIT(tx_ring_info->ring_front.sring);
+ FRONT_RING_INIT(&(tx_ring_info->ring_front),
+ tx_ring_info->ring_front.sring,
+ PAGE_SIZE);
+}
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
+
+#define DOMID_SCAN_START 1 /* domid = 1 */
+#define DOMID_SCAN_END 10 /* domid = 10 */
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused)
+{
+ int ret;
+ char buf[128];
+ int i, dummy;
+
+ dev_dbg(hy_drv_priv->dev,
+ "Scanning new tx channel coming from another domain\n");
+
+ /* check other domains and schedule another work if driver
+ * is still running and backend is valid
+ */
+ if (hy_drv_priv &&
+ hy_drv_priv->initialized) {
+ for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
+ if (i == hy_drv_priv->domid)
+ continue;
+
+ sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+ i, hy_drv_priv->domid);
+
+ ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
+
+ if (ret > 0) {
+ if (xen_comm_find_rx_ring(i) != NULL)
+ continue;
+
+ ret = xen_be_init_rx_rbuf(i);
+
+ if (!ret)
+ dev_info(hy_drv_priv->dev,
+ "Done rx ch init for VM %d\n",
+ i);
+ }
+ }
+
+ /* check every 10 seconds */
+ schedule_delayed_work(&xen_rx_ch_auto_add_work,
+ msecs_to_jiffies(10000));
+ }
+}
+
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+
+void xen_init_comm_env_delayed(struct work_struct *unused)
+{
+ int ret;
+
+ /* scheduling another work if driver is still running
+ * and xenstore hasn't been initialized or dom_id hasn't
+ * been correctly retrieved.
+ */ + if (likely(xenstored_ready == 0 || + hy_drv_priv->domid == -1)) { + dev_dbg(hy_drv_priv->dev, + "Xenstore not ready Will re-try in 500ms\n"); + schedule_delayed_work(&xen_init_comm_env_work, + msecs_to_jiffies(500)); + } else { + ret = xen_comm_setup_data_dir(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create data dir in Xenstore\n"); + } else { + dev_info(hy_drv_priv->dev, + "Successfully finished comm env init\n"); + hy_drv_priv->initialized = true; + +#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD + xen_rx_ch_add_delayed(NULL); +#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */ + } + } +} + +int xen_be_init_comm_env(void) +{ + int ret; + + xen_comm_ring_table_init(); + + if (unlikely(xenstored_ready == 0 || + hy_drv_priv->domid == -1)) { + xen_init_comm_env_delayed(NULL); + return -1; + } + + ret = xen_comm_setup_data_dir(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create data dir in Xenstore\n"); + } else { + dev_info(hy_drv_priv->dev, + "Successfully finished comm env initialization\n"); + + hy_drv_priv->initialized = true; + } + + return ret; +} + +/* cleans up all tx/rx rings */ +static void xen_be_cleanup_all_rbufs(void) +{ + xen_comm_foreach_tx_ring(xen_be_cleanup_tx_rbuf); + xen_comm_foreach_rx_ring(xen_be_cleanup_rx_rbuf); +} + +void xen_be_destroy_comm(void) +{ + xen_be_cleanup_all_rbufs(); + xen_comm_destroy_data_dir(); +} + +int xen_be_send_req(int domid, struct hyper_dmabuf_req *req, + int wait) +{ + struct xen_comm_front_ring *ring; + struct hyper_dmabuf_req *new_req; + struct xen_comm_tx_ring_info *ring_info; + int notify; + + struct timeval tv_start, tv_end; + struct timeval tv_diff; + + int timeout = 1000; + + /* find a ring info for the channel */ + ring_info = xen_comm_find_tx_ring(domid); + if (!ring_info) { + dev_err(hy_drv_priv->dev, + "Can't find ring info for the channel\n"); + return -ENOENT; + } + + + ring = &ring_info->ring_front; + + do_gettimeofday(&tv_start); + + while (RING_FULL(ring)) { + dev_dbg(hy_drv_priv->dev, "RING_FULL\n"); + + if (timeout == 0) { + dev_err(hy_drv_priv->dev, + "Timeout while waiting for an entry in the ring\n"); + return -EIO; + } + usleep_range(100, 120); + timeout--; + } + + timeout = 1000; + + mutex_lock(&ring_info->lock); + + new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt); + if (!new_req) { + mutex_unlock(&ring_info->lock); + dev_err(hy_drv_priv->dev, + "NULL REQUEST\n"); + return -EIO; + } + + req->req_id = xen_comm_next_req_id(); + + /* update req_pending with current request */ + memcpy(&req_pending, req, sizeof(req_pending)); + + /* pass current request to the ring */ + memcpy(new_req, req, sizeof(*new_req)); + + ring->req_prod_pvt++; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); + if (notify) + notify_remote_via_irq(ring_info->irq); + + if (wait) { + while (timeout--) { + if (req_pending.stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + mutex_unlock(&ring_info->lock); + dev_err(hy_drv_priv->dev, + "request timed-out\n"); + return -EBUSY; + } + + mutex_unlock(&ring_info->lock); + do_gettimeofday(&tv_end); + + /* checking time duration for round-trip of a request + * for debugging + */ + if (tv_end.tv_usec >= tv_start.tv_usec) { + tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec; + tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec; + } else { + tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1; + tv_diff.tv_usec = tv_end.tv_usec+1000000- + tv_start.tv_usec; + } + + if (tv_diff.tv_sec != 0 && tv_diff.tv_usec > 16000) + 
dev_dbg(hy_drv_priv->dev, + "send_req:time diff: %ld sec, %ld usec\n", + tv_diff.tv_sec, tv_diff.tv_usec); + } + + mutex_unlock(&ring_info->lock); + + return 0; +} + +/* ISR for handling request */ +static irqreturn_t back_ring_isr(int irq, void *info) +{ + RING_IDX rc, rp; + struct hyper_dmabuf_req req; + struct hyper_dmabuf_resp resp; + + int notify, more_to_do; + int ret; + + struct xen_comm_rx_ring_info *ring_info; + struct xen_comm_back_ring *ring; + + ring_info = (struct xen_comm_rx_ring_info *)info; + ring = &ring_info->ring_back; + + dev_dbg(hy_drv_priv->dev, "%s\n", __func__); + + do { + rc = ring->req_cons; + rp = ring->sring->req_prod; + more_to_do = 0; + while (rc != rp) { + if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) + break; + + memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req)); + ring->req_cons = ++rc; + + ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req); + + if (ret > 0) { + /* preparing a response for the request and + * send it to the requester + */ + memcpy(&resp, &req, sizeof(resp)); + memcpy(RING_GET_RESPONSE(ring, + ring->rsp_prod_pvt), + &resp, sizeof(resp)); + ring->rsp_prod_pvt++; + + dev_dbg(hy_drv_priv->dev, + "responding to exporter for req:%d\n", + resp.resp_id); + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, + notify); + + if (notify) + notify_remote_via_irq(ring_info->irq); + } + + RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do); + } + } while (more_to_do); + + return IRQ_HANDLED; +} + +/* ISR for handling responses */ +static irqreturn_t front_ring_isr(int irq, void *info) +{ + /* front ring only care about response from back */ + struct hyper_dmabuf_resp *resp; + RING_IDX i, rp; + int more_to_do, ret; + + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_front_ring *ring; + + ring_info = (struct xen_comm_tx_ring_info *)info; + ring = &ring_info->ring_front; + + dev_dbg(hy_drv_priv->dev, "%s\n", __func__); + + do { + more_to_do = 0; + rp = ring->sring->rsp_prod; + for (i = ring->rsp_cons; i != rp; i++) { + resp = RING_GET_RESPONSE(ring, i); + + /* update pending request's status with what is + * in the response + */ + + dev_dbg(hy_drv_priv->dev, + "getting response from importer\n"); + + if (req_pending.req_id == resp->resp_id) + req_pending.stat = resp->stat; + + if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) { + /* parsing response */ + ret = hyper_dmabuf_msg_parse(ring_info->rdomain, + (struct hyper_dmabuf_req *)resp); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "err while parsing resp\n"); + } + } else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) { + /* for debugging dma_buf remote synch */ + dev_dbg(hy_drv_priv->dev, + "original request = 0x%x\n", resp->cmd); + dev_dbg(hy_drv_priv->dev, + "got HYPER_DMABUF_REQ_PROCESSED\n"); + } else if (resp->stat == HYPER_DMABUF_REQ_ERROR) { + /* for debugging dma_buf remote synch */ + dev_dbg(hy_drv_priv->dev, + "original request = 0x%x\n", resp->cmd); + dev_dbg(hy_drv_priv->dev, + "got HYPER_DMABUF_REQ_ERROR\n"); + } + } + + ring->rsp_cons = i; + + if (i != ring->req_prod_pvt) + RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do); + else + ring->sring->rsp_event = i+1; + + } while (more_to_do); + + return IRQ_HANDLED; +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h new file mode 100644 index 000000000000..70a2b704badd --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h @@ -0,0 +1,78 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to 
any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_COMM_H__ +#define __HYPER_DMABUF_XEN_COMM_H__ + +#include "xen/interface/io/ring.h" +#include "xen/xenbus.h" +#include "../hyper_dmabuf_msg.h" + +extern int xenstored_ready; + +DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp); + +struct xen_comm_tx_ring_info { + struct xen_comm_front_ring ring_front; + int rdomain; + int gref_ring; + int irq; + int port; + struct mutex lock; + struct xenbus_watch watch; +}; + +struct xen_comm_rx_ring_info { + int sdomain; + int irq; + int evtchn; + struct xen_comm_back_ring ring_back; + struct gnttab_unmap_grant_ref unmap_op; +}; + +int xen_be_get_domid(void); + +int xen_be_init_comm_env(void); + +/* exporter needs to generated info for page sharing */ +int xen_be_init_tx_rbuf(int domid); + +/* importer needs to know about shared page and port numbers + * for ring buffer and event channel + */ +int xen_be_init_rx_rbuf(int domid); + +/* cleans up exporter ring created for given domain */ +void xen_be_cleanup_tx_rbuf(int domid); + +/* cleans up importer ring created for given domain */ +void xen_be_cleanup_rx_rbuf(int domid); + +void xen_be_destroy_comm(void); + +/* send request to the remote domain */ +int xen_be_send_req(int domid, struct hyper_dmabuf_req *req, + int wait); + +#endif /* __HYPER_DMABUF_XEN_COMM_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c new file mode 100644 index 000000000000..15023dbc8ced --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c @@ -0,0 +1,158 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_comm_list.h" + +DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING); +DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING); + +void xen_comm_ring_table_init(void) +{ + hash_init(xen_comm_rx_ring_hash); + hash_init(xen_comm_tx_ring_hash); +} + +int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = ring_info; + + hash_add(xen_comm_tx_ring_hash, &info_entry->node, + info_entry->info->rdomain); + + return 0; +} + +int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = ring_info; + + hash_add(xen_comm_rx_ring_hash, &info_entry->node, + info_entry->info->sdomain); + + return 0; +} + +struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node) + if (info_entry->info->rdomain == domid) + return info_entry->info; + + return NULL; +} + +struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node) + if (info_entry->info->sdomain == domid) + return info_entry->info; + + return NULL; +} + +int xen_comm_remove_tx_ring(int domid) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node) + if (info_entry->info->rdomain == domid) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +int xen_comm_remove_rx_ring(int domid) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node) + if (info_entry->info->sdomain == domid) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +void xen_comm_foreach_tx_ring(void (*func)(int domid)) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info->rdomain); + } +} + +void xen_comm_foreach_rx_ring(void (*func)(int domid)) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info->sdomain); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h new file mode 100644 
index 000000000000..8502fe7df578 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h @@ -0,0 +1,67 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_COMM_LIST_H__ +#define __HYPER_DMABUF_XEN_COMM_LIST_H__ + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_TX_RING 7 +/* number of bits to be used for imported dmabufs hash table */ +#define MAX_ENTRY_RX_RING 7 + +struct xen_comm_tx_ring_info_entry { + struct xen_comm_tx_ring_info *info; + struct hlist_node node; +}; + +struct xen_comm_rx_ring_info_entry { + struct xen_comm_rx_ring_info *info; + struct hlist_node node; +}; + +void xen_comm_ring_table_init(void); + +int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info); + +int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info); + +int xen_comm_remove_tx_ring(int domid); + +int xen_comm_remove_rx_ring(int domid); + +struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid); + +struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid); + +/* iterates over all exporter rings and calls provided + * function for each of them + */ +void xen_comm_foreach_tx_ring(void (*func)(int domid)); + +/* iterates over all importer rings and calls provided + * function for each of them + */ +void xen_comm_foreach_rx_ring(void (*func)(int domid)); + +#endif // __HYPER_DMABUF_XEN_COMM_LIST_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c new file mode 100644 index 000000000000..14ed3bc51e6a --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c @@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_shm.h" + +struct hyper_dmabuf_bknd_ops xen_bknd_ops = { + .init = NULL, /* not needed for xen */ + .cleanup = NULL, /* not needed for xen */ + .get_vm_id = xen_be_get_domid, + .share_pages = xen_be_share_pages, + .unshare_pages = xen_be_unshare_pages, + .map_shared_pages = (void *)xen_be_map_shared_pages, + .unmap_shared_pages = xen_be_unmap_shared_pages, + .init_comm_env = xen_be_init_comm_env, + .destroy_comm = xen_be_destroy_comm, + .init_rx_ch = xen_be_init_rx_rbuf, + .init_tx_ch = xen_be_init_tx_rbuf, + .send_req = xen_be_send_req, +}; diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h new file mode 100644 index 000000000000..a4902b747a87 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h @@ -0,0 +1,53 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_DRV_H__ +#define __HYPER_DMABUF_XEN_DRV_H__ +#include + +extern struct hyper_dmabuf_bknd_ops xen_bknd_ops; + +/* Main purpose of this structure is to keep + * all references created or acquired for sharing + * pages with another domain for freeing those later + * when unsharing. 
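+ *
+ * lvl3_gref, lvl3_table and lvl2_table are filled on the exporting side
+ * (xen_be_share_pages), while unmap_ops and data_pages are filled on the
+ * importing side (xen_be_map_shared_pages).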
+ */ +struct xen_shared_pages_info { + /* top level refid */ + grant_ref_t lvl3_gref; + + /* page of top level addressing, it contains refids of 2nd lvl pages */ + grant_ref_t *lvl3_table; + + /* table of 2nd level pages, that contains refids to data pages */ + grant_ref_t *lvl2_table; + + /* unmap ops for mapped pages */ + struct gnttab_unmap_grant_ref *unmap_ops; + + /* data pages to be unmapped */ + struct page **data_pages; +}; + +#endif // __HYPER_DMABUF_XEN_COMM_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c new file mode 100644 index 000000000000..c6a15f187fe3 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c @@ -0,0 +1,525 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include "hyper_dmabuf_xen_drv.h" +#include "../hyper_dmabuf_drv.h" + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +/* + * Creates 2 level page directory structure for referencing shared pages. + * Top level page is a single page that contains up to 1024 refids that + * point to 2nd level pages. + * + * Each 2nd level page contains up to 1024 refids that point to shared + * data pages. + * + * There will always be one top level page and number of 2nd level pages + * depends on number of shared data pages. + * + * 3rd level page 2nd level pages Data pages + * +-------------------------+ ┌>+--------------------+ ┌>+------------+ + * |2nd level page 0 refid |---┘ |Data page 0 refid |-┘ |Data page 0 | + * |2nd level page 1 refid |---┐ |Data page 1 refid |-┐ +------------+ + * | ... | | | .... | | + * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └>+------------+ + * +-------------------------+ | | +--------------------+ |Data page 1 | + * | | +------------+ + * | └>+--------------------+ + * | |Data page 1024 refid| + * | |Data page 1025 refid| + * | | ... | + * | |Data page 2047 refid| + * | +--------------------+ + * | + * | ..... + * └-->+-----------------------+ + * |Data page 1047552 refid| + * |Data page 1047553 refid| + * | ... | + * |Data page 1048575 refid| + * +-----------------------+ + * + * Using such 2 level structure it is possible to reference up to 4GB of + * shared data using single refid pointing to top level page. + * + * Returns refid of top level page. 
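+ *
+ * e.g. assuming 4KB pages and 32-bit grant references (REFS_PER_PAGE == 1024),
+ * sharing nents == 2500 data pages (~10MB) needs
+ * n_lvl2_grefs = 2500/1024 + 1 = 3 second level pages, all referenced
+ * from the single top level page.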
+ */ +int xen_be_share_pages(struct page **pages, int domid, int nents, + void **refs_info) +{ + grant_ref_t lvl3_gref; + grant_ref_t *lvl2_table; + grant_ref_t *lvl3_table; + + /* + * Calculate number of pages needed for 2nd level addresing: + */ + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 1 : 0)); + + struct xen_shared_pages_info *sh_pages_info; + int i; + + lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1); + lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + + if (!sh_pages_info) + return -ENOMEM; + + *refs_info = (void *)sh_pages_info; + + /* share data pages in readonly mode for security */ + for (i = 0; i < nents; i++) { + lvl2_table[i] = gnttab_grant_foreign_access(domid, + pfn_to_mfn(page_to_pfn(pages[i])), + true /* read only */); + if (lvl2_table[i] == -ENOSPC) { + dev_err(hy_drv_priv->dev, + "No more space left in grant table\n"); + + /* Unshare all already shared pages for lvl2 */ + while (i--) { + gnttab_end_foreign_access_ref(lvl2_table[i], 0); + gnttab_free_grant_reference(lvl2_table[i]); + } + goto err_cleanup; + } + } + + /* Share 2nd level addressing pages in readonly mode*/ + for (i = 0; i < n_lvl2_grefs; i++) { + lvl3_table[i] = gnttab_grant_foreign_access(domid, + virt_to_mfn( + (unsigned long)lvl2_table+i*PAGE_SIZE), + true); + + if (lvl3_table[i] == -ENOSPC) { + dev_err(hy_drv_priv->dev, + "No more space left in grant table\n"); + + /* Unshare all already shared pages for lvl3 */ + while (i--) { + gnttab_end_foreign_access_ref(lvl3_table[i], 1); + gnttab_free_grant_reference(lvl3_table[i]); + } + + /* Unshare all pages for lvl2 */ + while (nents--) { + gnttab_end_foreign_access_ref( + lvl2_table[nents], 0); + gnttab_free_grant_reference(lvl2_table[nents]); + } + + goto err_cleanup; + } + } + + /* Share lvl3_table in readonly mode*/ + lvl3_gref = gnttab_grant_foreign_access(domid, + virt_to_mfn((unsigned long)lvl3_table), + true); + + if (lvl3_gref == -ENOSPC) { + dev_err(hy_drv_priv->dev, + "No more space left in grant table\n"); + + /* Unshare all pages for lvl3 */ + while (i--) { + gnttab_end_foreign_access_ref(lvl3_table[i], 1); + gnttab_free_grant_reference(lvl3_table[i]); + } + + /* Unshare all pages for lvl2 */ + while (nents--) { + gnttab_end_foreign_access_ref(lvl2_table[nents], 0); + gnttab_free_grant_reference(lvl2_table[nents]); + } + + goto err_cleanup; + } + + /* Store lvl3_table page to be freed later */ + sh_pages_info->lvl3_table = lvl3_table; + + /* Store lvl2_table pages to be freed later */ + sh_pages_info->lvl2_table = lvl2_table; + + + /* Store exported pages refid to be unshared later */ + sh_pages_info->lvl3_gref = lvl3_gref; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return lvl3_gref; + +err_cleanup: + free_pages((unsigned long)lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)lvl3_table, 1); + + return -ENOSPC; +} + +int xen_be_unshare_pages(void **refs_info, int nents) +{ + struct xen_shared_pages_info *sh_pages_info; + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 
1 : 0)); + int i; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->lvl3_table == NULL || + sh_pages_info->lvl2_table == NULL || + sh_pages_info->lvl3_gref == -1) { + dev_warn(hy_drv_priv->dev, + "gref table for hyper_dmabuf already cleaned up\n"); + return 0; + } + + /* End foreign access for data pages, but do not free them */ + for (i = 0; i < nents; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0); + gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]); + } + + /* End foreign access for 2nd level addressing pages */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + if (!gnttab_end_foreign_access_ref( + sh_pages_info->lvl3_table[i], 1)) + dev_warn(hy_drv_priv->dev, "refid still in use!!!\n"); + + gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]); + } + + /* End foreign access for top level addressing page */ + if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) + dev_warn(hy_drv_priv->dev, "gref not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1); + gnttab_free_grant_reference(sh_pages_info->lvl3_gref); + + /* freeing all pages used for 2 level addressing */ + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + sh_pages_info->lvl3_gref = -1; + sh_pages_info->lvl2_table = NULL; + sh_pages_info->lvl3_table = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +/* Maps provided top level ref id and then return array of pages + * containing data refs. + */ +struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid, + int nents, void **refs_info) +{ + struct page *lvl3_table_page; + struct page **lvl2_table_pages; + struct page **data_pages; + struct xen_shared_pages_info *sh_pages_info; + + grant_ref_t *lvl3_table; + grant_ref_t *lvl2_table; + + struct gnttab_map_grant_ref lvl3_map_ops; + struct gnttab_unmap_grant_ref lvl3_unmap_ops; + + struct gnttab_map_grant_ref *lvl2_map_ops; + struct gnttab_unmap_grant_ref *lvl2_unmap_ops; + + struct gnttab_map_grant_ref *data_map_ops; + struct gnttab_unmap_grant_ref *data_unmap_ops; + + /* # of grefs in the last page of lvl2 table */ + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_grefs = (nents / REFS_PER_PAGE) + + ((nents_last > 0) ? 
1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + *refs_info = (void *) sh_pages_info; + + lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *), + GFP_KERNEL); + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + + lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops), + GFP_KERNEL); + + lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops), + GFP_KERNEL); + + data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL); + data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL); + + /* Map top level addressing page */ + if (gnttab_alloc_pages(1, &lvl3_table_page)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + return NULL; + } + + lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page)); + + gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, + (grant_ref_t)lvl3_gref, domid); + + gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, -1); + + if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + if (lvl3_map_ops.status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl3_map_ops.status); + + goto error_cleanup_lvl3; + } else { + lvl3_unmap_ops.handle = lvl3_map_ops.handle; + } + + /* Map all second level pages */ + if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + goto error_cleanup_lvl3; + } + + for (i = 0; i < n_lvl2_grefs; i++) { + lvl2_table = (grant_ref_t *)pfn_to_kaddr( + page_to_pfn(lvl2_table_pages[i])); + gnttab_set_map_op(&lvl2_map_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, + lvl3_table[i], domid); + gnttab_set_unmap_op(&lvl2_unmap_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, -1); + } + + /* Unmap top level page, as it won't be needed any longer */ + if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "xen: cannot unmap top level page\n"); + return NULL; + } + + /* Mark that page was unmapped */ + lvl3_unmap_ops.handle = -1; + + if (gnttab_map_refs(lvl2_map_ops, NULL, + lvl2_table_pages, n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + /* Checks if pages were mapped correctly */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (lvl2_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl2_map_ops[i].status); + goto error_cleanup_lvl2; + } else { + lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle; + } + } + + if (gnttab_alloc_pages(nents, data_pages)) { + dev_err(hy_drv_priv->dev, + "Cannot allocate pages\n"); + goto error_cleanup_lvl2; + } + + k = 0; + + for (i = 0; i < n_lvl2_grefs - 1; i++) { + lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i])); + for (j = 0; j < REFS_PER_PAGE; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + } + + /* for grefs in the last lvl2 table page */ + lvl2_table = 
pfn_to_kaddr(page_to_pfn( + lvl2_table_pages[n_lvl2_grefs - 1])); + + for (j = 0; j < nents_last; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + + if (gnttab_map_refs(data_map_ops, NULL, + data_pages, nents)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed\n"); + return NULL; + } + + /* unmapping lvl2 table pages */ + if (gnttab_unmap_refs(lvl2_unmap_ops, + NULL, lvl2_table_pages, + n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "Cannot unmap 2nd level refs\n"); + return NULL; + } + + /* Mark that pages were unmapped */ + for (i = 0; i < n_lvl2_grefs; i++) + lvl2_unmap_ops[i].handle = -1; + + for (i = 0; i < nents; i++) { + if (data_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d\n", + data_map_ops[i].status); + goto error_cleanup_data; + } else { + data_unmap_ops[i].handle = data_map_ops[i].handle; + } + } + + /* store these references for unmapping in the future */ + sh_pages_info->unmap_ops = data_unmap_ops; + sh_pages_info->data_pages = data_pages; + + gnttab_free_pages(1, &lvl3_table_page); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return data_pages; + +error_cleanup_data: + gnttab_unmap_refs(data_unmap_ops, NULL, data_pages, + nents); + + gnttab_free_pages(nents, data_pages); + +error_cleanup_lvl2: + if (lvl2_unmap_ops[0].handle != -1) + gnttab_unmap_refs(lvl2_unmap_ops, NULL, + lvl2_table_pages, n_lvl2_grefs); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + +error_cleanup_lvl3: + if (lvl3_unmap_ops.handle != -1) + gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1); + gnttab_free_pages(1, &lvl3_table_page); + + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + + return NULL; +} + +int xen_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct xen_shared_pages_info *sh_pages_info; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->unmap_ops == NULL || + sh_pages_info->data_pages == NULL) { + dev_warn(hy_drv_priv->dev, + "pages already cleaned up or buffer not imported yet\n"); + return 0; + } + + if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL, + sh_pages_info->data_pages, nents)) { + dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n"); + return -EFAULT; + } + + gnttab_free_pages(nents, sh_pages_info->data_pages); + + kfree(sh_pages_info->data_pages); + kfree(sh_pages_info->unmap_ops); + sh_pages_info->unmap_ops = NULL; + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h new file mode 100644 index 000000000000..d5236b500075 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h @@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated 
documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_SHM_H__ +#define __HYPER_DMABUF_XEN_SHM_H__ + +/* This collects all reference numbers for 2nd level shared pages and + * create a table with those in 1st level shared pages then return reference + * numbers for this top level table. + */ +int xen_be_share_pages(struct page **pages, int domid, int nents, + void **refs_info); + +int xen_be_unshare_pages(void **refs_info, int nents); + +/* Maps provided top level ref id and then return array of pages containing + * data refs. + */ +struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid, + int nents, + void **refs_info); + +int xen_be_unmap_shared_pages(void **refs_info, int nents); + +#endif /* __HYPER_DMABUF_XEN_SHM_H__ */ diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index dec3a815455d..012fa3d1f407 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence); * @dst: the destination reservation object * @src: the source reservation object * -* Copy all fences from src to dst. Both src->lock as well as dst-lock must be -* held. +* Copy all fences from src to dst. dst-lock must be held. 
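An aside on the hyper_dmabuf_xen_shm.h interface introduced above: the four helpers form an exporter/importer pair around the two-level grant-reference table. The sketch below shows the intended pairing, assuming that xen_be_share_pages() returns the top-level (lvl3) grant reference on success, as its header comment describes; the inter-domain messaging that carries that reference to the importer, and all error handling, are omitted.

#include "hyper_dmabuf_xen_shm.h"

/* Exporter side: share nents pages with importer_domid and hand back the
 * top-level grant reference that the importer will map. */
static int example_export_pages(struct page **pages, int nents,
				int importer_domid, void **refs_info)
{
	return xen_be_share_pages(pages, importer_domid, nents, refs_info);
}

/* Importer side: map the top-level table, then every data page, read-only. */
static struct page **example_import_pages(unsigned long lvl3_gref,
					  int exporter_domid, int nents,
					  void **refs_info)
{
	return xen_be_map_shared_pages(lvl3_gref, exporter_domid, nents,
				       refs_info);
}

/* Teardown order (in practice split across the two domains): the importer
 * unmaps its pages first, then the exporter revokes the grants. */
static void example_teardown(void **import_refs, void **export_refs, int nents)
{
	xen_be_unmap_shared_pages(import_refs, nents);
	xen_be_unshare_pages(export_refs, nents);
}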
*/ int reservation_object_copy_fences(struct reservation_object *dst, struct reservation_object *src) @@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst, size_t size; unsigned i; - src_list = reservation_object_get_list(src); + rcu_read_lock(); + src_list = rcu_dereference(src->fence); +retry: if (src_list) { - size = offsetof(typeof(*src_list), - shared[src_list->shared_count]); + unsigned shared_count = src_list->shared_count; + + size = offsetof(typeof(*src_list), shared[shared_count]); + rcu_read_unlock(); + dst_list = kmalloc(size, GFP_KERNEL); if (!dst_list) return -ENOMEM; - dst_list->shared_count = src_list->shared_count; - dst_list->shared_max = src_list->shared_count; - for (i = 0; i < src_list->shared_count; ++i) - dst_list->shared[i] = - dma_fence_get(src_list->shared[i]); + rcu_read_lock(); + src_list = rcu_dereference(src->fence); + if (!src_list || src_list->shared_count > shared_count) { + kfree(dst_list); + goto retry; + } + + dst_list->shared_count = 0; + dst_list->shared_max = shared_count; + for (i = 0; i < src_list->shared_count; ++i) { + struct dma_fence *fence; + + fence = rcu_dereference(src_list->shared[i]); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + continue; + + if (!dma_fence_get_rcu(fence)) { + kfree(dst_list); + src_list = rcu_dereference(src->fence); + goto retry; + } + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + continue; + } + + dst_list->shared[dst_list->shared_count++] = fence; + } } else { dst_list = NULL; } + new = dma_fence_get_rcu_safe(&src->fence_excl); + rcu_read_unlock(); + kfree(dst->staged); dst->staged = NULL; src_list = reservation_object_get_list(dst); - old = reservation_object_get_excl(dst); - new = reservation_object_get_excl(src); - - dma_fence_get(new); preempt_disable(); write_seqcount_begin(&dst->seq); @@ -427,13 +455,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, unsigned long timeout) { struct dma_fence *fence; - unsigned seq, shared_count, i = 0; + unsigned seq, shared_count; long ret = timeout ? 
timeout : 1; + int i; retry: shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); + i = -1; fence = rcu_dereference(obj->fence_excl); if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { @@ -449,14 +479,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, fence = NULL; } - if (!fence && wait_all) { + if (wait_all) { struct reservation_object_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; - for (i = 0; i < shared_count; ++i) { + for (i = 0; !fence && i < shared_count; ++i) { struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 38cc7389a6c1..24f83f9eeaed 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file) static int sw_sync_debugfs_release(struct inode *inode, struct file *file) { struct sync_timeline *obj = file->private_data; + struct sync_pt *pt, *next; + + spin_lock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + dma_fence_set_error(&pt->base, -ENOENT); + dma_fence_signal_locked(&pt->base); + } - smp_wmb(); + spin_unlock_irq(&obj->lock); sync_timeline_put(obj); return 0; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index fbab271b3bf9..a861b5b4d443 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan, unsigned long flags) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct data_chunk *first = xt->sgl; + struct data_chunk *first; struct at_desc *desc = NULL; size_t xfer_count; unsigned int dwidth; @@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) return NULL; + first = xt->sgl; + dev_info(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", __func__, &xt->src_start, &xt->dst_start, xt->numf, diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index d50273fed715..afd5e10f8927 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev) ret = dma_async_device_register(dd); if (ret) - return ret; + goto err_clk; irq = platform_get_irq(pdev, 0); ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); @@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev) err_unregister: dma_async_device_unregister(dd); +err_clk: + clk_disable_unprepare(dmadev->clk); return ret; } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 34ff53290b03..80cc2be6483c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); #define PATTERN_COUNT_MASK 0x1f #define PATTERN_MEMSET_IDX 0x01 +/* poor man's completion - we want to use wait_event_freezable() on it */ +struct dmatest_done { + bool done; + wait_queue_head_t *wait; +}; + struct dmatest_thread { struct list_head node; struct dmatest_info *info; @@ -165,6 +171,8 @@ struct dmatest_thread { u8 **dsts; u8 **udsts; enum dma_transaction_type type; + wait_queue_head_t done_wait; + struct dmatest_done test_done; bool done; }; @@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, return error_count; } -/* poor man's completion - we want to use 
wait_event_freezable() on it */ -struct dmatest_done { - bool done; - wait_queue_head_t *wait; -}; static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; - - done->done = true; - wake_up_all(done->wait); + struct dmatest_thread *thread = + container_of(done, struct dmatest_thread, test_done); + if (!thread->done) { + done->done = true; + wake_up_all(done->wait); + } else { + /* + * If thread->done, it means that this callback occurred + * after the parent thread has cleaned up. This can + * happen in the case that driver doesn't implement + * the terminate_all() functionality and a dma operation + * did not occur within the timeout period + */ + WARN(1, "dmatest: Kernel memory may be corrupted!!\n"); + } } static unsigned int min_odd(unsigned int x, unsigned int y) @@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) */ static int dmatest_func(void *data) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; - struct dmatest_done done = { .wait = &done_wait }; + struct dmatest_done *done = &thread->test_done; struct dmatest_info *info; struct dmatest_params *params; struct dma_chan *chan; @@ -673,9 +687,9 @@ static int dmatest_func(void *data) continue; } - done.done = false; + done->done = false; tx->callback = dmatest_callback; - tx->callback_param = &done; + tx->callback_param = done; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { @@ -688,20 +702,12 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, done.done, + wait_event_freezable_timeout(thread->done_wait, done->done, msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - if (!done.done) { - /* - * We're leaving the timed out dma operation with - * dangling pointer to done_wait. To make this - * correct, we'll need to allocate wait_done for - * each test iteration and perform "who's gonna - * free it this time?" dancing. For now, just - * leave it dangling. 
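For context, the rework here is purely about object lifetime: the completion flag and its wait queue move from dmatest_func()'s stack into the long-lived struct dmatest_thread, so a callback that fires after a timeout can no longer dereference freed stack memory. A minimal sketch of the shape of the fix (illustrative, simplified from the real struct):

#include <linux/wait.h>

struct example_thread {
	wait_queue_head_t done_wait;	/* lives as long as the thread struct */
	struct dmatest_done test_done;	/* its .wait points at done_wait */
	bool done;			/* set once the thread has cleaned up */
};

static void example_init(struct example_thread *t)
{
	init_waitqueue_head(&t->done_wait);
	t->test_done.wait = &t->done_wait;
	/* tx->callback_param is then &t->test_done rather than an on-stack
	 * variable, which is exactly what the hunks below switch over to. */
}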
- */ + if (!done->done) { dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); @@ -788,7 +794,7 @@ static int dmatest_func(void *data) dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ - if (ret) + if (ret || failed_tests) dmaengine_terminate_all(chan); thread->done = true; @@ -848,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info, thread->info = info; thread->chan = dtc->chan; thread->type = type; + thread->test_done.wait = &thread->done_wait; + init_waitqueue_head(&thread->done_wait); smp_wmb(); thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 6775f2c74e25..c7568869284e 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -863,11 +863,11 @@ static void fsl_edma_irq_exit( } } -static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) +static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) { int i; - for (i = 0; i < DMAMUX_NR; i++) + for (i = 0; i < nr_clocks; i++) clk_disable_unprepare(fsl_edma->muxclk[i]); } @@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(fsl_edma->muxbase[i])) + if (IS_ERR(fsl_edma->muxbase[i])) { + /* on error: disable all previously enabled clks */ + fsl_disable_clocks(fsl_edma, i); return PTR_ERR(fsl_edma->muxbase[i]); + } sprintf(clkname, "dmamux%d", i); fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); if (IS_ERR(fsl_edma->muxclk[i])) { dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); + /* on error: disable all previously enabled clks */ + fsl_disable_clocks(fsl_edma, i); return PTR_ERR(fsl_edma->muxclk[i]); } ret = clk_prepare_enable(fsl_edma->muxclk[i]); - if (ret) { - /* disable only clks which were enabled on error */ - for (; i >= 0; i--) - clk_disable_unprepare(fsl_edma->muxclk[i]); - - dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); - return ret; - } + if (ret) + /* on error: disable all previously enabled clks */ + fsl_disable_clocks(fsl_edma, i); } @@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Can't register Freescale eDMA engine. (%d)\n", ret); - fsl_disable_clocks(fsl_edma); + fsl_disable_clocks(fsl_edma, DMAMUX_NR); return ret; } @@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma. 
(%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma); + fsl_disable_clocks(fsl_edma, DMAMUX_NR); return ret; } @@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev) fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma); + fsl_disable_clocks(fsl_edma, DMAMUX_NR); return 0; } diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1953e57505f4..076eca8a7b5f 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -701,7 +701,17 @@ static struct platform_driver idma64_platform_driver = { }, }; -module_platform_driver(idma64_platform_driver); +static int __init idma64_platform_driver_init(void) +{ + return platform_driver_register(&idma64_platform_driver); +} +fs_initcall(idma64_platform_driver_init); + +static void __exit idma64_platform_driver_exit(void) +{ + platform_driver_unregister(&idma64_platform_driver); +} +module_exit(idma64_platform_driver_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("iDMA64 core driver"); diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 93e006c3441d..854deb0da07c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) if (memcmp(src, dest, IOAT_TEST_SIZE)) { dev_err(dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; - goto free_resources; + goto unmap_dma; } unmap_dma: diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 4999e266b2de..7c6e2ff212a2 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev) */ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) { + unsigned long irqflags; + if (cause & HIDMA_ERR_INT_MASK) { dev_err(lldev->dev, "error 0x%x, disabling...\n", cause); @@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) return; } + spin_lock_irqsave(&lldev->lock, irqflags); + writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + spin_unlock_irqrestore(&lldev->lock, irqflags); + /* * Fine tuned for this HW... * @@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) * Try to consume as many EVREs as possible. */ hidma_handle_tre_completion(lldev); - - /* We consumed TREs or there are pending TREs or EVREs. */ - writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); } irqreturn_t hidma_ll_inthandler(int chirq, void *arg) diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index f1d04b70ee67..9272b173c746 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -49,12 +49,20 @@ struct ti_am335x_xbar_data { struct ti_am335x_xbar_map { u16 dma_line; - u16 mux_val; + u8 mux_val; }; -static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) +static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { - writeb_relaxed(val & 0x1f, iomem + event); + /* + * TPCC_EVT_MUX_60_63 register layout is different than the + * rest, in the sense, that event 63 is mapped to lowest byte + * and event 60 is mapped to highest, handle it separately. 
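To make the swizzle concrete, the conditional that follows encodes this event-to-byte mapping inside TPCC_EVT_MUX_60_63; a small illustrative helper, not part of the patch:

/* event 60 -> byte 63, 61 -> 62, 62 -> 61, 63 -> 60; all other events 1:1 */
static inline unsigned int example_tpcc_evt_offset(int event)
{
	if (event >= 60 && event <= 63)
		return 63 - event % 4;
	return event;
}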
+ */ + if (event >= 60 && event <= 63) + writeb_relaxed(val, iomem + (63 - event % 4)); + else + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) @@ -105,7 +113,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, } map->dma_line = (u16)dma_spec->args[0]; - map->mux_val = (u16)dma_spec->args[2]; + map->mux_val = (u8)dma_spec->args[2]; dma_spec->args[2] = 0; dma_spec->args_count = 2; diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 1ee1241ca797..5cc8ed31f26b 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -838,7 +838,8 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) if (!chan) return; - devm_free_irq(chan->zdev->dev, chan->irq, chan); + if (chan->irq) + devm_free_irq(chan->zdev->dev, chan->irq, chan); tasklet_kill(&chan->tasklet); list_del(&chan->common.device_node); clk_disable_unprepare(chan->clk_apb); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ac2f30295efe..59ce32e405ac 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) struct amd64_family_type *fam_type = NULL; pvt->ext_model = boot_cpu_data.x86_model >> 4; - pvt->stepping = boot_cpu_data.x86_mask; + pvt->stepping = boot_cpu_data.x86_stepping; pvt->model = boot_cpu_data.x86_model; pvt->fam = boot_cpu_data.x86; diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index a11a671c7a38..2ab4d61ee47e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -854,21 +854,24 @@ static void decode_mc6_mce(struct mce *m) static void decode_smca_error(struct mce *m) { struct smca_hwid *hwid; - unsigned int bank_type; + enum smca_bank_types bank_type; const char *ip_name; u8 xec = XEC(m->status, xec_mask); if (m->bank >= ARRAY_SIZE(smca_banks)) return; - if (x86_family(m->cpuid) >= 0x17 && m->bank == 4) - pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n"); - hwid = smca_banks[m->bank].hwid; if (!hwid) return; bank_type = hwid->bank_type; + + if (bank_type == SMCA_RESERVED) { + pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank); + return; + } + ip_name = smca_get_long_name(bank_type); pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec); diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index ec5d695bbb72..3c68bb525d5d 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c @@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) /* Non-ECC RAM? 
*/ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); res = -ENODEV; - goto err2; + goto err; } edac_dbg(3, "init mci\n"); diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c index 9c1ffe3e912b..aeb222ca3ed1 100644 --- a/drivers/edac/octeon_edac-lmc.c +++ b/drivers/edac/octeon_edac-lmc.c @@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) if (!pvt->inject) int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); else { + int_reg.u64 = 0; if (pvt->error_type == 1) int_reg.s.sec_err = 1; if (pvt->error_type == 2) diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index dc0591654011..0dc0d595c47c 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = { * sbridge structs */ -#define NUM_CHANNELS 4 /* Max channels per MC */ +#define NUM_CHANNELS 6 /* Max channels per MC */ #define MAX_DIMMS 3 /* Max DIMMS per channel */ #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ @@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { static const struct pci_id_descr pci_dev_descr_ibridge[] = { /* Processor Home Agent */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, /* Memory controller */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) }, @@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) }, /* Optional, mode 2HA */ - { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) }, @@ -2291,6 +2291,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev, next_imc: sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev); if (!sbridge_dev) { + /* If the HA1 wasn't found, don't create EDAC second memory controller */ + if (dev_descr->dom == IMC1 && devno != 1) { + edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n", + PCI_VENDOR_ID_INTEL, dev_descr->dev_id); + pci_dev_put(pdev); + return 0; + } if (dev_descr->dom == SOCK) goto out_imc; @@ -2491,6 +2498,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: pvt->pci_ta = pdev; + break; case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: pvt->pci_ras = pdev; diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index a7bca4207f44..1101eb3a2242 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -157,4 +157,21 @@ config EXTCON_USBC_CROS_EC Say Y here to enable USB Type C cable detection extcon support when using Chrome OS EC based USB Type-C ports. +config EXTCON_INTEL_USB + bool "Intel USB MUX support" + depends on X86 && USB + help + Intel SoCs and chipsets often have an internal USB mux that is used to + share one USB port between an USB Device Controller and xHCI. The mux + is by default controlled by BIOS/FW, but on some platforms that is not + possible and the OS has to configure the mux with this driver. + + The driver relies on events from some external source. The mux has no + means to detect a change in the cable connection status on its own. 
+ That makes this driver useful only on platforms where a separate + component exists, for example PMIC, that can detect connection status + changes on the USB port behind the mux. + + If unsure, say N. + endif diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index 0888fdeded72..1cb698882fa1 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o +obj-$(CONFIG_EXTCON_INTEL_USB) += extcon-intel-usb.o diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index 1a45e745717d..a6661097b2f9 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev) return ret; } - /* queue initial processing of id-pin */ + /* process id-pin so that we start with the right status */ queue_delayed_work(system_wq, &data->work, 0); + flush_delayed_work(&data->work); platform_set_drvdata(pdev, data); diff --git a/drivers/extcon/extcon-intel-usb.c b/drivers/extcon/extcon-intel-usb.c new file mode 100644 index 000000000000..26b043db0350 --- /dev/null +++ b/drivers/extcon/extcon-intel-usb.c @@ -0,0 +1,115 @@ +/** + * extcon-intel-usb.c - Driver for Intel USB mux + * + * Copyright (C) 2015 Intel Corporation + * Author: Heikki Krogerus + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#include + +#include "extcon.h" + +#define INTEL_MUX_CFG0 0x00 +#define INTEL_MUX_CFG1 0x04 + +#define CFG0_SW_DRD_MODE_MASK 0x3 +#define CFG0_SW_DRD_DYN 0 +#define CFG0_SW_DRD_STATIC_HOST 1 +#define CFG0_SW_DRD_STATIC_DEV 2 +#define CFG0_SW_SYNC_SS_AND_HS BIT(2) +#define CFG0_SW_SWITCH_EN BIT(16) +#define CFG0_SW_IDPIN BIT(20) +#define CFG0_SW_IDPIN_EN BIT(21) +#define CFG0_SW_VBUS_VALID BIT(24) + +#define CFG1_MODE BIT(29) + +struct intel_usb_mux { + struct notifier_block nb; + struct extcon_dev edev; + void __iomem *regs; +}; + +static const int intel_mux_cable[] = { + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +static int intel_usb_mux_notifier(struct notifier_block *nb, + unsigned long old, void *ptr) +{ + struct intel_usb_mux *mux = container_of(nb, struct intel_usb_mux, nb); + u32 val; + + if (mux->edev.state) + val = CFG0_SW_IDPIN_EN | CFG0_SW_DRD_STATIC_HOST | CFG0_SW_SWITCH_EN; + else + val = CFG0_SW_IDPIN_EN | CFG0_SW_IDPIN | CFG0_SW_VBUS_VALID | + CFG0_SW_DRD_STATIC_DEV | CFG0_SW_SWITCH_EN ; + + writel(val, mux->regs); + return NOTIFY_OK; +} + +struct intel_usb_mux *intel_usb_mux_register(struct device *dev, + struct resource *r) +{ + struct intel_usb_mux *mux; + int ret; + u32 val; + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + mux->regs = ioremap_nocache(r->start, resource_size(r)); + if (!mux->regs) { + kfree(mux); + return ERR_PTR(-ENOMEM); + } + + val = CFG0_SW_IDPIN_EN | CFG0_SW_IDPIN | CFG0_SW_VBUS_VALID | + CFG0_SW_DRD_STATIC_DEV | CFG0_SW_SWITCH_EN; + writel(val, mux->regs); + + mux->edev.dev.parent = dev; + mux->edev.supported_cable = intel_mux_cable; + + ret = extcon_dev_register(&mux->edev); + if (ret) + goto err; + + mux->edev.name = "intel_usb_mux"; + mux->edev.state = !!(readl(mux->regs + INTEL_MUX_CFG1) & CFG1_MODE); + 
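	/*
	 * Editor's sketch, not part of the patch: the state read back from
	 * CFG1_MODE above only seeds the boot-time role.  The mux cannot
	 * observe later cable changes itself, so some external detection
	 * driver holding the handle from intel_usb_mux_register() is
	 * expected to push updates through the extcon core, for example:
	 *
	 *	extcon_set_state_sync(&mux->edev, EXTCON_USB_HOST, attached);
	 *
	 * which ends up in intel_usb_mux_notifier() registered just below
	 * and reprograms CFG0 for host or device mode.
	 */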
+ /* An external source needs to tell us what to do */ + mux->nb.notifier_call = intel_usb_mux_notifier; + ret = extcon_register_notifier(&mux->edev, EXTCON_USB_HOST, &mux->nb); + if (ret) { + dev_err(&mux->edev.dev, "failed to register notifier\n"); + extcon_dev_unregister(&mux->edev); + goto err; + } + return mux; +err: + iounmap(mux->regs); + kfree(mux); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(intel_usb_mux_register); + +void intel_usb_mux_unregister(struct intel_usb_mux *mux) +{ + extcon_unregister_notifier(&mux->edev, EXTCON_USB_HOST, &mux->nb); + extcon_dev_unregister(&mux->edev); + iounmap(mux->regs); + kfree(mux); +} +EXPORT_SYMBOL_GPL(intel_usb_mux_unregister); diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 2b4c39fdfa91..86210f75d233 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION using the TCG Platform Reset Attack Mitigation specification. This protects against an attacker forcibly rebooting the system while it still contains secrets in RAM, booting another OS and extracting the - secrets. + secrets. This should only be enabled when userland is configured to + clear the MemoryOverwriteRequest flag on clean shutdown after secrets + have been evicted, since otherwise it will trigger even on clean + reboots. endmenu diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index ec8ac5c4dd84..055e2e8f985a 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -20,10 +20,6 @@ #define NO_FURTHER_WRITE_ACTION -1 -#ifndef phys_to_page -#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT) -#endif - /** * efi_free_all_buff_pages - free all previous allocated buffer pages * @cap_info: pointer to current instance of capsule_info structure @@ -35,7 +31,7 @@ static void efi_free_all_buff_pages(struct capsule_info *cap_info) { while (cap_info->index > 0) - __free_page(phys_to_page(cap_info->pages[--cap_info->index])); + __free_page(cap_info->pages[--cap_info->index]); cap_info->index = NO_FURTHER_WRITE_ACTION; } @@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info) cap_info->pages = temp_page; + temp_page = krealloc(cap_info->phys, + pages_needed * sizeof(phys_addr_t *), + GFP_KERNEL | __GFP_ZERO); + if (!temp_page) + return -ENOMEM; + + cap_info->phys = temp_page; + return 0; } @@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, **/ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) { + bool do_vunmap = false; int ret; - ret = efi_capsule_update(&cap_info->header, cap_info->pages); + /* + * cap_info->capsule may have been assigned already by a quirk + * handler, so only overwrite it if it is NULL + */ + if (!cap_info->capsule) { + cap_info->capsule = vmap(cap_info->pages, cap_info->index, + VM_MAP, PAGE_KERNEL); + if (!cap_info->capsule) + return -ENOMEM; + do_vunmap = true; + } + + ret = efi_capsule_update(cap_info->capsule, cap_info->phys); + if (do_vunmap) + vunmap(cap_info->capsule); if (ret) { pr_err("capsule update failed\n"); return ret; @@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff, goto failed; } - cap_info->pages[cap_info->index++] = page_to_phys(page); + cap_info->pages[cap_info->index] = page; + cap_info->phys[cap_info->index] = page_to_phys(page); cap_info->page_bytes_remain = PAGE_SIZE; + cap_info->index++; } else { - page = 
phys_to_page(cap_info->pages[cap_info->index - 1]); + page = cap_info->pages[cap_info->index - 1]; } kbuff = kmap(page); @@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file) struct capsule_info *cap_info = file->private_data; kfree(cap_info->pages); + kfree(cap_info->phys); kfree(file->private_data); file->private_data = NULL; return 0; @@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file) return -ENOMEM; } + cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + if (!cap_info->phys) { + kfree(cap_info->pages); + kfree(cap_info); + return -ENOMEM; + } + file->private_data = cap_info; return 0; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index f70febf680c3..c3eefa126e3b 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -143,8 +143,7 @@ static ssize_t systab_show(struct kobject *kobj, return str - buf; } -static struct kobj_attribute efi_attr_systab = - __ATTR(systab, 0400, systab_show, NULL); +static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400); #define EFI_FIELD(var) efi.var diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index bd7ed3c1148a..c47e0c6ec00f 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = { }; /* Generic ESRT Entry ("ESRE") support. */ -static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) +static ssize_t fw_class_show(struct esre_entry *entry, char *buf) { char *str = buf; @@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) return str - buf; } -static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, - esre_fw_class_show, NULL); +static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400); #define esre_attr_decl(name, size, fmt) \ -static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ +static ssize_t name##_show(struct esre_entry *entry, char *buf) \ { \ return sprintf(buf, fmt "\n", \ le##size##_to_cpu(entry->esre.esre1->name)); \ } \ \ -static struct esre_attribute esre_##name = __ATTR(name, 0400, \ - esre_##name##_show, NULL) +static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400) esre_attr_decl(fw_type, 32, "%u"); esre_attr_decl(fw_version, 32, "%u"); @@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) /* support for displaying ESRT fields at the top level */ #define esrt_attr_decl(name, size, fmt) \ -static ssize_t esrt_##name##_show(struct kobject *kobj, \ +static ssize_t name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ { \ return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ } \ \ -static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ - esrt_##name##_show, NULL) +static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400) esrt_attr_decl(fw_resource_count, 32, "%u"); esrt_attr_decl(fw_resource_count_max, 32, "%u"); @@ -431,7 +428,7 @@ static int __init esrt_sysfs_init(void) err_remove_esrt: kobject_put(esrt_kobj); err: - kfree(esrt); + memunmap(esrt); esrt = NULL; return error; } diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 8e64b77aeac9..f377609ff141 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, return map_attr->show(entry, 
buf); } -static struct map_attribute map_type_attr = __ATTR_RO(type); -static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); -static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); -static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); -static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); +static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400); +static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400); +static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400); +static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400); +static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400); /* * These are default attributes that are added for every memmap entry. diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 35e553b3b190..e4b40f2b4627 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev) if (ret) return ret; - return vpd_sections_init(entry.cbmem_addr); + vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); + if (!vpd_kobj) + return -ENOMEM; + + ret = vpd_sections_init(entry.cbmem_addr); + if (ret) { + kobject_put(vpd_kobj); + return ret; + } + + return 0; +} + +static int vpd_remove(struct platform_device *pdev) +{ + vpd_section_destroy(&ro_vpd); + vpd_section_destroy(&rw_vpd); + + kobject_put(vpd_kobj); + + return 0; } static struct platform_driver vpd_driver = { .probe = vpd_probe, + .remove = vpd_remove, .driver = { .name = "vpd", }, }; +static struct platform_device *vpd_pdev; + static int __init vpd_platform_init(void) { - struct platform_device *pdev; - - pdev = platform_device_register_simple("vpd", -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + int ret; - vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); - if (!vpd_kobj) - return -ENOMEM; + ret = platform_driver_register(&vpd_driver); + if (ret) + return ret; - platform_driver_register(&vpd_driver); + vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0); + if (IS_ERR(vpd_pdev)) { + platform_driver_unregister(&vpd_driver); + return PTR_ERR(vpd_pdev); + } return 0; } static void __exit vpd_platform_exit(void) { - vpd_section_destroy(&ro_vpd); - vpd_section_destroy(&rw_vpd); - kobject_put(vpd_kobj); + platform_device_unregister(vpd_pdev); + platform_driver_unregister(&vpd_driver); } module_init(vpd_platform_init); diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d687ca3d5049..c80ec1d03274 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu) return cpu == resident_cpu; } -struct psci_operations psci_ops; +struct psci_operations psci_ops = { + .conduit = PSCI_CONDUIT_NONE, + .smccc_version = SMCCC_VERSION_1_0, +}; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); @@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void) 0, 0, 0); } +static void set_conduit(enum psci_conduit conduit) +{ + switch (conduit) { + case PSCI_CONDUIT_HVC: + invoke_psci_fn = __invoke_psci_fn_hvc; + break; + case PSCI_CONDUIT_SMC: + invoke_psci_fn = __invoke_psci_fn_smc; + break; + default: + WARN(1, "Unexpected PSCI conduit %d\n", conduit); + } + + psci_ops.conduit = conduit; +} + static int get_set_conduit_method(struct device_node *np) { const char *method; @@ -222,9 +241,9 @@ static int 
get_set_conduit_method(struct device_node *np) } if (!strcmp("hvc", method)) { - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); } else if (!strcmp("smc", method)) { - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); } else { pr_warn("invalid \"method\" property: %s\n", method); return -EINVAL; @@ -493,9 +512,36 @@ static void __init psci_init_migrate(void) pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); } +static void __init psci_init_smccc(void) +{ + u32 ver = ARM_SMCCC_VERSION_1_0; + int feature; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + + if (feature != PSCI_RET_NOT_SUPPORTED) { + u32 ret; + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + if (ret == ARM_SMCCC_VERSION_1_1) { + psci_ops.smccc_version = SMCCC_VERSION_1_1; + ver = ret; + } + } + + /* + * Conveniently, the SMCCC and PSCI versions are encoded the + * same way. No, this isn't accidental. + */ + pr_info("SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + +} + static void __init psci_0_2_set_functions(void) { pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_ops.get_version = psci_get_version; + psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_FN_NATIVE(0_2, CPU_SUSPEND); psci_ops.cpu_suspend = psci_cpu_suspend; @@ -539,6 +585,7 @@ static int __init psci_probe(void) psci_init_migrate(); if (PSCI_VERSION_MAJOR(ver) >= 1) { + psci_init_smccc(); psci_init_cpu_suspend(); psci_init_system_suspend(); } @@ -652,9 +699,9 @@ int __init psci_acpi_init(void) pr_info("probing for conduit method from ACPI.\n"); if (acpi_psci_use_hvc()) - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); else - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); return psci_probe(); } diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index d9ab7c75b14f..e0c73ceba2ed 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -147,6 +147,7 @@ static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region) mgr_node = of_parse_phandle(np, "fpga-mgr", 0); if (mgr_node) { mgr = of_fpga_mgr_get(mgr_node); + of_node_put(mgr_node); of_node_put(np); return mgr; } @@ -192,10 +193,13 @@ static int fpga_region_get_bridges(struct fpga_region *region, parent_br = region_np->parent; /* If overlay has a list of bridges, use it. */ - if (of_parse_phandle(overlay, "fpga-bridges", 0)) + br = of_parse_phandle(overlay, "fpga-bridges", 0); + if (br) { + of_node_put(br); np = overlay; - else + } else { np = region_np; + } for (i = 0; ; i++) { br = of_parse_phandle(np, "fpga-bridges", i); @@ -203,12 +207,15 @@ static int fpga_region_get_bridges(struct fpga_region *region, break; /* If parent bridge is in list, skip it. */ - if (br == parent_br) + if (br == parent_br) { + of_node_put(br); continue; + } /* If node is a bridge, get it and add to list */ ret = fpga_bridge_get_to_list(br, region->info, ®ion->bridge_list); + of_node_put(br); /* If any of the bridges are in use, give up */ if (ret == -EBUSY) { diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c index 6b535ec858cc..15a1f4b348c4 100644 --- a/drivers/gpio/gpio-74x164.c +++ b/drivers/gpio/gpio-74x164.c @@ -23,6 +23,7 @@ struct gen_74x164_chip { struct gpio_chip gpio_chip; struct mutex lock; + struct gpio_desc *gpiod_oe; u32 registers; /* * Since the registers are chained, every byte sent will make @@ -31,8 +32,7 @@ struct gen_74x164_chip { * register at the end of the transfer. 
So, to have a logical * numbering, store the bytes in reverse order. */ - u8 buffer[0]; - struct gpio_desc *gpiod_oe; + u8 buffer[]; }; static int __gen_74x164_write_config(struct gen_74x164_chip *chip) diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index f33d4a5fe671..af0baf8da295 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = { }; module_platform_driver(ath79_gpio_driver); + +MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index f75d8443ecaf..e4b3d7db68c9 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) u32 mask; d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data); - g = (struct davinci_gpio_regs __iomem *)d->regs; + g = (struct davinci_gpio_regs __iomem *)d->regs[0]; mask = __gpio_mask(data->irq - d->base_irq); if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c index 98c7ff2a76e7..8d62db447ec1 100644 --- a/drivers/gpio/gpio-iop.c +++ b/drivers/gpio/gpio-iop.c @@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void) return platform_driver_register(&iop3xx_gpio_driver); } arch_initcall(iop3xx_gpio_init); + +MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors"); +MODULE_AUTHOR("Lennert Buytenhek "); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 16cbc5702865..491b0974c0fe 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) }; int i, j; + /* + * STMPE1600: to be able to get IRQ from pins, + * a read must be done on GPMR register, or a write in + * GPSR or GPCR registers + */ + if (stmpe->partnum == STMPE1600) { + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]); + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]); + } + for (i = 0; i < CACHE_NR_REGS; i++) { /* STMPE801 and STMPE1600 don't have RE and FE registers */ if ((stmpe->partnum == STMPE801 || @@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc); - struct stmpe *stmpe = stmpe_gpio->stmpe; int offset = d->hwirq; int regoffset = offset / 8; int mask = BIT(offset % 8); stmpe_gpio->regs[REG_IE][regoffset] |= mask; - - /* - * STMPE1600 workaround: to be able to get IRQ from pins, - * a read must be done on GPMR register, or a write in - * GPSR or GPCR registers - */ - if (stmpe->partnum == STMPE1600) - stmpe_reg_read(stmpe, - stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]); } static void stmpe_dbg_show_one(struct seq_file *s, diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 57efb251f9c4..10523ce00c38 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -566,8 +566,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, 0, 0, of_node_to_fwnode(dev->of_node), &thunderx_gpio_irqd_ops, txgpio); - if (!txgpio->irqd) + if (!txgpio->irqd) { + err = -ENOMEM; goto out; + } /* Push on irq_data and the domain for each line. 
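Stepping back to the gen_74x164_chip change above: C only allows a flexible array member as the last member of a struct, which is why gpiod_oe had to move ahead of buffer[]. A minimal sketch of the pattern, with a hypothetical allocation helper (the driver's real allocation code is not shown in this hunk):

#include <linux/slab.h>
#include <linux/types.h>

struct example_chip {
	struct gpio_desc *gpiod_oe;	/* fixed-size members come first */
	u32 registers;
	u8 buffer[];			/* flexible array member must be last */
};

static struct example_chip *example_alloc(u32 registers)
{
	/* storage for the trailing buffer[] is reserved at allocation time */
	return kzalloc(sizeof(struct example_chip) + registers, GFP_KERNEL);
}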
*/ for (i = 0; i < ngpio; i++) { diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index eb4528c87c0b..d6f3d9ee1350 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) } if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent)); acpi_gpiochip_request_regions(acpi_gpio); acpi_gpiochip_scan_gpios(acpi_gpio); diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c index 27f383bda7d9..f748aa3e77f7 100644 --- a/drivers/gpio/gpiolib-devprop.c +++ b/drivers/gpio/gpiolib-devprop.c @@ -19,30 +19,27 @@ /** * devprop_gpiochip_set_names - Set GPIO line names using device properties * @chip: GPIO chip whose lines should be named, if possible + * @fwnode: Property Node containing the gpio-line-names property * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned * names belong to the underlying firmware node and should not be released * by the caller. */ -void devprop_gpiochip_set_names(struct gpio_chip *chip) +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode) { struct gpio_device *gdev = chip->gpiodev; const char **names; int ret, i; - if (!chip->parent) { - dev_warn(&gdev->dev, "GPIO chip parent is NULL\n"); - return; - } - - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", NULL, 0); if (ret < 0) return; if (ret != gdev->ngpio) { - dev_warn(chip->parent, + dev_warn(&gdev->dev, "names %d do not match number of GPIOs %d\n", ret, gdev->ngpio); return; @@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return; - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", names, gdev->ngpio); if (ret < 0) { - dev_warn(chip->parent, "failed to read GPIO line names\n"); + dev_warn(&gdev->dev, "failed to read GPIO line names\n"); kfree(names); return; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index bfcd20699ec8..ba38f530e403 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip) /* If the chip defines names itself, these take precedence */ if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, + of_fwnode_handle(chip->of_node)); of_node_get(chip->of_node); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index eb80dac4e26a..b4c8b25453a6 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -723,6 +723,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) struct gpioevent_data ge; int ret, level; + /* Do not leak kernel stack to userspace */ + memset(&ge, 0, sizeof(ge)); + ge.timestamp = ktime_get_real_ns(); level = gpiod_get_value_cansleep(le->desc); @@ -3337,7 +3340,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - status = gpiod_request(desc, con_id); + /* If a connection label was passed use that, else use the device name as label */ + status = gpiod_request(desc, con_id ? 
con_id : dev_name(dev)); if (status < 0) return ERR_PTR(status); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index d003ccb12781..3d4d0634c9dd 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -224,7 +224,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc) return desc - &desc->gdev->descs[0]; } -void devprop_gpiochip_set_names(struct gpio_chip *chip); +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode); /* With descriptor prefix */ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 83cb2a88c204..b1d12a0c77a2 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -110,7 +110,7 @@ config DRM_FBDEV_OVERALLOC config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" - depends on DRM_KMS_HELPER + depends on DRM help Say Y here, if you want to use EDID data to be loaded from the /lib/firmware directory or one of the provided built-in diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 8ce07039bb89..7e1442fa1755 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -29,6 +29,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o drm-$(CONFIG_AGP) += drm_agpsupport.o drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o +drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ @@ -37,7 +38,6 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_scdc_helper.o drm_gem_framebuffer_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o -drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 103635ab784c..712ad8c2bdc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -697,7 +697,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr); int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr, - int hw_ip, int instance, int ring, + u32 hw_ip, u32 instance, u32 ring, struct amdgpu_ring **out_ring); /* @@ -1536,18 +1536,14 @@ struct amdgpu_device { /* sdma */ struct amdgpu_sdma sdma; - union { - struct { - /* uvd */ - struct amdgpu_uvd uvd; + /* uvd */ + struct amdgpu_uvd uvd; - /* vce */ - struct amdgpu_vce vce; - }; + /* vce */ + struct amdgpu_vce vce; - /* vcn */ - struct amdgpu_vcn vcn; - }; + /* vcn */ + struct amdgpu_vcn vcn; /* firmwares */ struct amdgpu_firmware firmware; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 57afad79f55d..8fa850a070e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, size_t size; u32 retry = 3; + if (amdgpu_acpi_pcie_notify_device_ready(adev)) + return -EINVAL; + /* Get the device handle */ handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5432af39a674..f7fa7675215c 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -265,6 +265,9 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - /* The sclk is in quantas of 10kHz */ - return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100; + /* the sclk is in quantas of 10kHz */ + if (amdgpu_sriov_vf(adev)) + return adev->clock.default_sclk / 100; + + return amdgpu_dpm_get_sclk(adev, false) / 100; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b9dbbf9cb8b0..bdabaa3399db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -369,29 +369,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; + unsigned long end_jiffies; uint32_t sdma_base_addr; + uint32_t data; m = get_sdma_mqd(mqd); sdma_base_addr = get_sdma_base_addr(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, - m->sdma_rlc_virtual_addr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, - m->sdma_rlc_rb_base); + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + if (m->sdma_engine_id) { + data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); + data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); + } else { + data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); + } + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, + m->sdma_rlc_doorbell); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + m->sdma_rlc_virtual_addr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, m->sdma_rlc_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdma_rlc_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdma_rlc_rb_rptr_addr_hi); - - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, - m->sdma_rlc_doorbell); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, m->sdma_rlc_rb_cntl); @@ -564,9 +585,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, } WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index ce443586a0c7..cc4e18dcd8b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) return true; } -/* Atom needs data in little endian format - * so swap as 
appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. */ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index c13c51af0b68..c53095b3b0fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -14,6 +14,16 @@ #include "amd_acpi.h" +#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0) + +struct amdgpu_px_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 px_quirk_flags; +}; + struct amdgpu_atpx_functions { bool px_params; bool power_cntl; @@ -35,6 +45,7 @@ struct amdgpu_atpx { static struct amdgpu_atpx_priv { bool atpx_detected; bool bridge_pm_usable; + unsigned int quirks; /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle other_handle; @@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { - printk("ATPX Hybrid Graphics\n"); - /* - * Disable legacy PM methods only when pcie port PM is usable, - * otherwise the device might fail to power off or power on. - */ - atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; - atpx->is_hybrid = true; + if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) { + printk("ATPX Hybrid Graphics, forcing to ATPX\n"); + atpx->functions.power_cntl = true; + atpx->is_hybrid = false; + } else { + printk("ATPX Hybrid Graphics\n"); + /* + * Disable legacy PM methods only when pcie port PM is usable, + * otherwise the device might fail to power off or power on. 
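As an aside to the amdgpu_atombios_copy_swap() rework above: the helper now rounds every copy up to whole dwords with ALIGN(num_bytes, 4), so callers have to size the destination (to_le=true) or source (to_le=false) buffer accordingly. A minimal standalone sketch of the same dword-swap idea, with an illustrative helper name and the driver's 20-byte scratch limit assumed:

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/string.h>	/* memcpy() */
#include <asm/byteorder.h>	/* cpu_to_le32() */

/* Sketch only: pack a byte stream into little-endian dwords the way atom
 * expects. The destination must hold ALIGN(num_bytes, 4) bytes; the unused
 * tail bytes of the last dword are don't-care, as in the driver helper.
 */
static void copy_to_atom_le(u8 *dst, const u8 *src, u8 num_bytes)
{
	u32 tmp[5];			/* enough for the 20-byte maximum used here */
	u8 aligned = ALIGN(num_bytes, 4);
	int i;

	memcpy(tmp, src, num_bytes);
	for (i = 0; i < aligned / 4; i++)
		tmp[i] = cpu_to_le32(tmp[i]);
	memcpy(dst, tmp, aligned);
}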
+ */ + atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; + atpx->is_hybrid = true; + } } atpx->dgpu_req_power_for_displays = false; @@ -547,6 +564,31 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .get_client_id = amdgpu_atpx_get_client_id, }; +static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { + /* HG _PR3 doesn't seem to work on this A+A weston board */ + { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0, 0, 0, 0, 0 }, +}; + +static void amdgpu_atpx_get_quirks(struct pci_dev *pdev) +{ + const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list; + + /* Apply PX quirks */ + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device) { + amdgpu_atpx_priv.quirks |= p->px_quirk_flags; + break; + } + ++p; + } +} + /** * amdgpu_atpx_detect - detect whether we have PX * @@ -570,6 +612,7 @@ static bool amdgpu_atpx_detect(void) parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; + amdgpu_atpx_get_quirks(pdev); } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { @@ -579,6 +622,7 @@ static bool amdgpu_atpx_detect(void) parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; + amdgpu_atpx_get_quirks(pdev); } if (has_atpx && vga_count == 2) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index c21adf60a7f2..057e1ecd83ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } - tmp = bios[0x18] | (bios[0x19] << 8); - if (bios[tmp + 0x14] != 0x0) { - DRM_INFO("Not an x86 BIOS ROM\n"); - return false; - } - bios_header_start = bios[0x48] | (bios[0x49] << 8); if (!bios_header_start) { DRM_INFO("Can't locate bios header\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 8d1cf2d3e663..1eff36a87595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. 
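A quick aside on the ATPX quirk table added a few hunks above: it follows the usual zero-terminated PCI quirk-list pattern, matching vendor/device/subsystem IDs and OR-ing in per-board flags. A minimal sketch under that assumption; the lookup function name here is illustrative, the struct mirrors the patch:

#include <linux/pci.h>

struct amdgpu_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

/* Return the flags of the first table entry matching the device, or 0.
 * The table is terminated by an all-zero entry (chip_device == 0).
 */
static u32 px_quirk_flags_for(const struct pci_dev *pdev,
			      const struct amdgpu_px_quirk *p)
{
	for (; p->chip_device != 0; p++) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device)
			return p->px_quirk_flags;
	}
	return 0;
}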
- */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && + amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { + /* Don't start link training before we have the DPCD */ + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? */ amdgpu_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -871,9 +870,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = amdgpu_connector_best_single_encoder(connector); if (!encoder) @@ -927,8 +928,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -991,9 +994,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1118,8 +1123,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1362,9 +1369,11 @@ amdgpu_connector_dp_detect(struct drm_connector 
*connector, bool force) struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1432,8 +1441,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 60d8bedb694d..b5aa8e6f8e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -403,6 +403,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, if (candidate->robj == validated) break; + /* We can't move pinned BOs here */ + if (bo->pin_count) + continue; + other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); /* Check if this BO is in one of the domains we need space for */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e630d918fefc..bc746a6e0ecc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2076,8 +2076,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, * ignore it */ vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); - if (amdgpu_runtime_pm == 1) - runtime = true; if (amdgpu_device_is_px(ddev)) runtime = true; if (!pci_is_thunderbolt_attached(adev->pdev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..837332e84d78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); if (robj) { - if (robj->gem_base.import_attach) - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); amdgpu_mn_unregister(robj); amdgpu_bo_unref(&robj); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9e495da0bb03..4d08957d2108 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -46,6 +46,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_kunmap(bo); + if (bo->gem_base.import_attach) + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); if (!list_empty(&bo->shadow_list)) { @@ -391,6 +393,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); + if (unlikely(r != 0)) + return r; + bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved; if (adev->mc.visible_vram_size < adev->mc.real_vram_size && @@ -400,9 +405,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, else amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); - if (unlikely(r 
!= 0)) - return r; - if (kernel) bo->tbo.priority = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index befc09b68543..b293380bd46c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper, static int amdgpu_identity_map(struct amdgpu_device *adev, struct amdgpu_queue_mapper *mapper, - int ring, + u32 ring, struct amdgpu_ring **out_ring) { switch (mapper->hw_ip) { @@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip) static int amdgpu_lru_map(struct amdgpu_device *adev, struct amdgpu_queue_mapper *mapper, - int user_ring, + u32 user_ring, struct amdgpu_ring **out_ring) { int r, i, j; @@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, */ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr, - int hw_ip, int instance, int ring, + u32 hw_ip, u32 instance, u32 ring, struct amdgpu_ring **out_ring) { int r, ip_num_rings; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5ce65280b396..90adff83e489 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) if (ring->funcs->end_use) ring->funcs->end_use(ring); - amdgpu_ring_lru_touch(ring->adev, ring); + if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) + amdgpu_ring_lru_touch(ring->adev, ring); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e19928dae8e3..17deca0f6255 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -293,12 +293,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (adev->uvd.vcpu_bo == NULL) return 0; - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - break; + /* only valid for physical mode */ + if (adev->asic_type < CHIP_POLARIS10) { + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.handles[i])) + break; - if (i == AMDGPU_MAX_UVD_HANDLES) - return 0; + if (i == adev->uvd.max_handles) + return 0; + } cancel_delayed_work_sync(&adev->uvd.idle_work); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index c855366521ab..9fc3d387eae3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -647,7 +647,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r, idx = 0; + int i, r = 0, idx = 0; p->job->vm = NULL; ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bd20ff018512..863c6dd0123a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1201,7 +1201,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - int r; + int r = 0; r = amdgpu_vm_update_level(adev, vm, &vm->root, 0); if (r) @@ -2586,7 +2586,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; 
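Stepping back to the amdgpu_connectors.c ->detect() hunks above: the recurring pattern is to skip the runtime-PM get/put when the call comes from the DRM output-poll worker, which already runs with the device powered up. A condensed sketch of that guard; my_detect_hw() is a hypothetical stand-in for the actual DDC/EDID probing:

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>

/* Hypothetical hardware probe; real drivers read HPD state or EDID here. */
static enum drm_connector_status my_detect_hw(struct drm_connector *connector,
					      bool force)
{
	return connector_status_connected;
}

static enum drm_connector_status my_detect(struct drm_connector *connector,
					   bool force)
{
	enum drm_connector_status status;
	int r;

	/* The poll worker already holds a runtime-PM reference; only take
	 * one ourselves when called from another path (ioctl, sysfs, ...).
	 */
	if (!drm_kms_helper_is_poll_worker()) {
		r = pm_runtime_get_sync(connector->dev->dev);
		if (r < 0)
			return connector_status_disconnected;
	}

	status = my_detect_hw(connector, force);

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}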
- int i; + struct amdgpu_bo *root; + int i, r; amd_sched_entity_fini(vm->entity.sched, &vm->entity); @@ -2609,7 +2610,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - amdgpu_vm_free_levels(&vm->root); + root = amdgpu_bo_ref(vm->root.bo); + r = amdgpu_bo_reserve(root, true); + if (r) { + dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); + } else { + amdgpu_vm_free_levels(&vm->root); + amdgpu_bo_unreserve(root); + } + amdgpu_bo_unref(&root); dma_fence_put(vm->last_dir_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vm_free_reserved_vmid(adev, vm, i); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index b9ee9073cb0d..f3f93b6b51ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -437,6 +437,8 @@ static int dce_virtual_sw_fini(void *handle) drm_kms_helper_poll_fini(adev->ddev); drm_mode_config_cleanup(adev->ddev); + /* clear crtcs pointer to avoid dce irq finish routine access freed data */ + memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS); adev->mode_info.mode_config_initialized = false; return 0; } @@ -723,7 +725,7 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad int crtc, enum amdgpu_interrupt_state state) { - if (crtc >= adev->mode_info.num_crtc) { + if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) { DRM_DEBUG("invalid crtc %d\n", crtc); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 00868764a0dd..6f76b2646465 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4387,34 +4387,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) case CHIP_KAVERI: adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 4; - if ((adev->pdev->device == 0x1304) || - (adev->pdev->device == 0x1305) || - (adev->pdev->device == 0x130C) || - (adev->pdev->device == 0x130F) || - (adev->pdev->device == 0x1310) || - (adev->pdev->device == 0x1311) || - (adev->pdev->device == 0x131C)) { - adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1309) || - (adev->pdev->device == 0x130A) || - (adev->pdev->device == 0x130D) || - (adev->pdev->device == 0x1313) || - (adev->pdev->device == 0x131D)) { - adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1306) || - (adev->pdev->device == 0x1307) || - (adev->pdev->device == 0x130B) || - (adev->pdev->device == 0x130E) || - (adev->pdev->device == 0x1315) || - (adev->pdev->device == 0x131B)) { - adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; - } else { - adev->gfx.config.max_cu_per_sh = 3; - adev->gfx.config.max_backends_per_se = 1; - } + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_texture_channel_caches = 4; adev->gfx.config.max_gprs = 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d04d0b123212..6dc0f6e346e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -395,7 +395,16 @@ static int gmc_v9_0_early_init(void *handle) static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device 
*adev = (struct amdgpu_device *)handle; - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 }; + /* + * The latest engine allocation on gfx9 is: + * Engine 0, 1: idle + * Engine 2, 3: firmware + * Engine 4~13: amdgpu ring, subject to change when ring number changes + * Engine 14~15: idle + * Engine 16: kfd tlb invalidation + * Engine 17: Gart flushes + */ + unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; unsigned i; for(i = 0; i < adev->num_rings; ++i) { @@ -408,9 +417,9 @@ static int gmc_v9_0_late_init(void *handle) ring->funcs->vmhub); } - /* Engine 17 is used for GART flushes */ + /* Engine 16 is used for KFD and 17 for GART flushes */ for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 17); + BUG_ON(vm_inv_eng[i] > 16); return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 2812d88a8bdd..9b7b01333fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -276,9 +276,17 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, /* see what event we get */ r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); - /* only handle FLR_NOTIFY now */ - if (!r) - schedule_work(&adev->virt.flr_work); + /* sometimes the interrupt is delayed to inject to VM, so under such case + * the IDH_FLR_NOTIFICATION is overwritten by VF FLR from GIM side, thus + * above recieve message could be failed, we should schedule the flr_work + * anyway + */ + if (r) { + DRM_ERROR("FLR_NOTIFICATION is missed\n"); + xgpu_ai_mailbox_send_ack(adev); + } + + schedule_work(&adev->virt.flr_work); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 8284d5dbfc30..4c178feeb4bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -31,6 +31,7 @@ #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" +#include "amd_pcie.h" #include "amdgpu_powerplay.h" #include "sid.h" #include "si_ih.h" @@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; int bridge_pos, gpu_pos; - u32 speed_cntl, mask, current_data_rate; - int ret, i; + u32 speed_cntl, current_data_rate; + int i; u16 tmp16; if (pci_is_root_bus(adev->pdev->bus)) @@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) return; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> LC_CURRENT_DATA_RATE_SHIFT; - if (mask & DRM_PCIE_SPEED_80) { + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { if (current_data_rate == 2) { DRM_INFO("PCIE gen 3 link speeds already enabled\n"); return; } DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); - } else if (mask & DRM_PCIE_SPEED_50) { + } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) { if (current_data_rate == 1) { DRM_INFO("PCIE gen 2 link speeds already enabled\n"); return; @@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) if (!gpu_pos) return; - if (mask & DRM_PCIE_SPEED_80) { + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { if 
(current_data_rate != 2) { u16 bridge_cfg, gpu_cfg; u16 bridge_cfg2, gpu_cfg2; @@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); tmp16 &= ~0xf; - if (mask & DRM_PCIE_SPEED_80) + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) tmp16 |= 3; - else if (mask & DRM_PCIE_SPEED_50) + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) tmp16 |= 2; else tmp16 |= 1; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index d63873f3f574..abb0a2341a41 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -26,6 +26,7 @@ #include "amdgpu_pm.h" #include "amdgpu_dpm.h" #include "amdgpu_atombios.h" +#include "amd_pcie.h" #include "sid.h" #include "r600_dpm.h" #include "si_dpm.h" @@ -3332,29 +3333,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, } } -static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum amdgpu_pcie_gen asic_gen, - enum amdgpu_pcie_gen default_gen) -{ - switch (asic_gen) { - case AMDGPU_PCIE_GEN1: - return AMDGPU_PCIE_GEN1; - case AMDGPU_PCIE_GEN2: - return AMDGPU_PCIE_GEN2; - case AMDGPU_PCIE_GEN3: - return AMDGPU_PCIE_GEN3; - default: - if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) - return AMDGPU_PCIE_GEN3; - else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) - return AMDGPU_PCIE_GEN2; - else - return AMDGPU_PCIE_GEN1; - } - return AMDGPU_PCIE_GEN1; -} - static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, u32 *p, u32 *u) { @@ -3465,6 +3443,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6667)) { max_sclk = 75000; } + if ((adev->pdev->revision == 0xC3) || + (adev->pdev->device == 0x6665)) { + max_sclk = 60000; + max_mclk = 80000; + } } else if (adev->asic_type == CHIP_OLAND) { if ((adev->pdev->revision == 0xC7) || (adev->pdev->revision == 0x80) || @@ -5023,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, table->ACPIState.levels[0].vddc.index, &table->ACPIState.levels[0].std_vddc); } - table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - AMDGPU_PCIE_GEN1); + table->ACPIState.levels[0].gen2PCIE = + (u8)amdgpu_get_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + AMDGPU_PCIE_GEN1); if (si_pi->vddc_phase_shed_control) si_populate_phase_shedding_value(adev, @@ -7157,10 +7141,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev, pl->vddc = le16_to_cpu(clock_info->si.usVDDC); pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); pl->flags = le32_to_cpu(clock_info->si.ulFlags); - pl->pcie_gen = r600_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - clock_info->si.ucPCIEGen); + pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + clock_info->si.ucPCIEGen); /* patch up vddc if necessary */ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, @@ -7315,7 +7299,6 @@ static int si_dpm_init(struct amdgpu_device *adev) struct si_power_info *si_pi; struct atom_clock_dividers dividers; int ret; - u32 mask; si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); if (si_pi == NULL) @@ -7325,11 +7308,9 @@ static int si_dpm_init(struct amdgpu_device *adev) eg_pi = &ni_pi->eg; pi = &eg_pi->rv7xx; - 
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret) - si_pi->sys_pcie_mask = 0; - else - si_pi->sys_pcie_mask = mask; + si_pi->sys_pcie_mask = + (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> + CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f2c3a49f73a0..ff7d4827385e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev) } static u32 soc15_get_xclk(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_VEGA10) - return adev->clock.spll.reference_freq/4; - else - return adev->clock.spll.reference_freq; + return adev->clock.spll.reference_freq; } @@ -664,8 +661,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB; + adev->pg_flags = AMD_PG_SUPPORT_SDMA; + adev->external_rev_id = 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 21e7b88401e1..a098712bdd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9ff69b90df36..4968b6bb9466 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -448,14 +448,19 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, static void vi_detect_hw_virtualization(struct amdgpu_device *adev) { - uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); - /* bit0: 0 means pf and 1 means vf */ - /* bit31: 0 means disable IOV and 1 means enable */ - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + uint32_t reg = 0; + + if (adev->asic_type == CHIP_TONGA || + adev->asic_type == CHIP_FIJI) { + reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + /* bit0: 0 means pf and 1 means vf */ + /* bit31: 0 means disable IOV and 1 means enable */ + if (reg & 1) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; + + if (reg & 0x80000000) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + } if (reg == 0) { if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 44ffd23348fc..164fa4b1f9a9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -205,8 +205,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, struct cik_sdma_rlc_registers *m; m = get_sdma_mqd(mqd); - m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << - SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | + m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 6 << 
SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 03bec765b03d..f9a1a4db9be7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -184,6 +184,24 @@ int pqm_create_queue(struct process_queue_manager *pqm, switch (type) { case KFD_QUEUE_TYPE_SDMA: + if (dev->dqm->queue_count >= + CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) { + pr_err("Over-subscription is not allowed for SDMA.\n"); + retval = -EPERM; + goto err_create_queue; + } + + retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + if (retval != 0) + goto err_create_queue; + pqn->q = q; + pqn->kq = NULL; + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, + &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); + print_queue(q); + break; + case KFD_QUEUE_TYPE_COMPUTE: /* check if there is over subscription */ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 19ce59028d6b..e0b78fd9804d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -501,11 +501,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr, return ret; } +static void kfd_topology_kobj_release(struct kobject *kobj) +{ + kfree(kobj); +} + static const struct sysfs_ops sysprops_ops = { .show = sysprops_show, }; static struct kobj_type sysprops_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &sysprops_ops, }; @@ -541,6 +547,7 @@ static const struct sysfs_ops iolink_ops = { }; static struct kobj_type iolink_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &iolink_ops, }; @@ -568,6 +575,7 @@ static const struct sysfs_ops mem_ops = { }; static struct kobj_type mem_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &mem_ops, }; @@ -607,6 +615,7 @@ static const struct sysfs_ops cache_ops = { }; static struct kobj_type cache_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &cache_ops, }; @@ -729,6 +738,7 @@ static const struct sysfs_ops node_ops = { }; static struct kobj_type node_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &node_ops, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 84f01fd33aff..b50aa292d026 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -850,9 +850,9 @@ static int init_over_drive_limits( const ATOM_Tonga_POWERPLAYTABLE *powerplay_table) { hwmgr->platform_descriptor.overdriveLimit.engineClock = - le16_to_cpu(powerplay_table->ulMaxODEngineClock); + le32_to_cpu(powerplay_table->ulMaxODEngineClock); hwmgr->platform_descriptor.overdriveLimit.memoryClock = - le16_to_cpu(powerplay_table->ulMaxODMemoryClock); + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); hwmgr->platform_descriptor.minOverdriveVDDC = 0; hwmgr->platform_descriptor.maxOverdriveVDDC = 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index b526f49be65d..336fdd8c7db0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2788,10 +2788,13 @@ static int 
smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - disable_mclk_switching = ((1 < info.display_count) || - disable_mclk_switching_for_frame_lock || - smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || - (mode_info.refresh_rate > 120)); + if (info.display_count == 0) + disable_mclk_switching = false; + else + disable_mclk_switching = ((1 < info.display_count) || + disable_mclk_switching_for_frame_lock || + smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || + (mode_info.refresh_rate > 120)); sclk = smu7_ps->performance_levels[0].engine_clock; mclk = smu7_ps->performance_levels[0].memory_clock; @@ -4576,13 +4579,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, int tmp_result, result = 0; uint32_t sclk_mask = 0, mclk_mask = 0; - if (hwmgr->chip_id == CHIP_FIJI) { - if (request->type == AMD_PP_GFX_PROFILE) - smu7_enable_power_containment(hwmgr); - else if (request->type == AMD_PP_COMPUTE_PROFILE) - smu7_disable_power_containment(hwmgr); - } - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f8f02e70b8bc..ca232a9e2334 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3243,10 +3243,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ForceMclkHigh); - disable_mclk_switching = (info.display_count > 1) || - disable_mclk_switching_for_frame_lock || - disable_mclk_switching_for_vr || - force_mclk_high; + if (info.display_count == 0) + disable_mclk_switching = false; + else + disable_mclk_switching = (info.display_count > 1) || + disable_mclk_switching_for_frame_lock || + disable_mclk_switching_for_vr || + force_mclk_high; sclk = vega10_ps->performance_levels[0].gfx_clock; mclk = vega10_ps->performance_levels[0].mem_clock; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index 262c8ded87c0..dafc9c4b1e6f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h @@ -40,7 +40,7 @@ struct smu_table_entry { uint32_t table_addr_high; uint32_t table_addr_low; uint8_t *table; - uint32_t handle; + unsigned long handle; }; struct smu_table_array { diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 2a4d163ac76f..79ce877bf45f 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1225,17 +1225,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", dcrtc); - if (ret < 0) { - kfree(dcrtc); - return ret; - } + if (ret < 0) + goto err_crtc; if (dcrtc->variant->init) { ret = dcrtc->variant->init(dcrtc, dev); - if (ret) { - kfree(dcrtc); - return ret; - } + if (ret) + goto err_crtc; } /* Ensure AXI pipeline is enabled */ @@ -1246,13 +1242,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, dcrtc->crtc.port = port; primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (!primary) - return -ENOMEM; + if (!primary) { + ret = -ENOMEM; + goto err_crtc; + } ret = armada_drm_plane_init(primary); if (ret) { kfree(primary); - return ret; + goto err_crtc; } ret = 
drm_universal_plane_init(drm, &primary->base, 0, @@ -1263,7 +1261,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); - return ret; + goto err_crtc; } ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, @@ -1282,6 +1280,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, err_crtc_init: primary->base.funcs->destroy(&primary->base); +err_crtc: + kfree(dcrtc); + return ret; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 6f3849ec0c1d..e9f1e6fe7b94 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc) { struct ast_private *ast = crtc->dev->dev_private; ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0); + ast_crtc_load_lut(crtc); } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 5dd3f1cd074a..a8905049b9da 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector) return 0; } + pm_runtime_get_sync(dp->dev); edid = drm_get_edid(connector, &dp->aux.ddc); + pm_runtime_put(dp->dev); if (edid) { drm_mode_connector_update_edid_property(&dp->connector, edid); diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c index 0903ba574f61..75b0d3f6e4de 100644 --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ b/drivers/gpu/drm/bridge/lvds-encoder.c @@ -13,13 +13,37 @@ #include +struct lvds_encoder { + struct drm_bridge bridge; + struct drm_bridge *panel_bridge; +}; + +static int lvds_encoder_attach(struct drm_bridge *bridge) +{ + struct lvds_encoder *lvds_encoder = container_of(bridge, + struct lvds_encoder, + bridge); + + return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, + bridge); +} + +static struct drm_bridge_funcs funcs = { + .attach = lvds_encoder_attach, +}; + static int lvds_encoder_probe(struct platform_device *pdev) { struct device_node *port; struct device_node *endpoint; struct device_node *panel_node; struct drm_panel *panel; - struct drm_bridge *bridge; + struct lvds_encoder *lvds_encoder; + + lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder), + GFP_KERNEL); + if (!lvds_encoder) + return -ENOMEM; /* Locate the panel DT node. */ port = of_graph_get_port_by_id(pdev->dev.of_node, 1); @@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); - if (IS_ERR(bridge)) - return PTR_ERR(bridge); + lvds_encoder->panel_bridge = + devm_drm_panel_bridge_add(&pdev->dev, + panel, DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(lvds_encoder->panel_bridge)) + return PTR_ERR(lvds_encoder->panel_bridge); + + /* The panel_bridge bridge is attached to the panel's of_node, + * but we need a bridge attached to our of_node for our user + * to look up. 
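An aside on the lvds-encoder rework above: embedding struct drm_bridge in a driver-private structure and recovering it with container_of() in the callbacks is the standard way to carry private state (here, the chained panel bridge) into the bridge ops. A minimal sketch with illustrative names:

#include <linux/kernel.h>	/* container_of() */
#include <drm/drm_bridge.h>

struct my_lvds {
	struct drm_bridge bridge;	/* registered with drm_bridge_add() */
	struct drm_bridge *panel_bridge;
};

static int my_lvds_attach(struct drm_bridge *bridge)
{
	struct my_lvds *lvds = container_of(bridge, struct my_lvds, bridge);

	/* Chain the panel's bridge behind this one, as the patch does. */
	return drm_bridge_attach(bridge->encoder, lvds->panel_bridge, bridge);
}

static const struct drm_bridge_funcs my_lvds_funcs = {
	.attach = my_lvds_attach,
};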
+ */ + lvds_encoder->bridge.of_node = pdev->dev.of_node; + lvds_encoder->bridge.funcs = &funcs; + drm_bridge_add(&lvds_encoder->bridge); - platform_set_drvdata(pdev, bridge); + platform_set_drvdata(pdev, lvds_encoder); return 0; } static int lvds_encoder_remove(struct platform_device *pdev) { - struct drm_bridge *bridge = platform_get_drvdata(pdev); + struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); - drm_bridge_remove(bridge); + drm_bridge_remove(&lvds_encoder->bridge); return 0; } diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8571cfd877c5..8636e7eeb731 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -97,7 +97,7 @@ #define DP0_ACTIVEVAL 0x0650 #define DP0_SYNCVAL 0x0654 #define DP0_MISC 0x0658 -#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ +#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ #define BPC_6 (0 << 5) #define BPC_8 (1 << 5) @@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, tmp = (tmp << 8) | buf[i]; i++; if (((i % 4) == 0) || (i == size)) { - tc_write(DP0_AUXWDATA(i >> 2), tmp); + tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp); tmp = 0; } } @@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc) ret = drm_dp_link_probe(&tc->aux, &tc->link.base); if (ret < 0) goto err_dpcd_read; - if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) - goto err_dpcd_inval; + if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { + dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); + tc->link.base.rate = 270000; + } + + if (tc->link.base.num_lanes > 2) { + dev_dbg(tc->dev, "Falling to 2 lanes\n"); + tc->link.base.num_lanes = 2; + } ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); if (ret < 0) @@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc) err_dpcd_read: dev_err(tc->dev, "failed to read DPCD: %d\n", ret); return ret; -err_dpcd_inval: - dev_err(tc->dev, "invalid DPCD\n"); - return -EINVAL; } static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) @@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) int lower_margin = mode->vsync_start - mode->vdisplay; int vsync_len = mode->vsync_end - mode->vsync_start; + /* + * Recommended maximum number of symbols transferred in a transfer unit: + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, + * (output active video bandwidth in bytes)) + * Must be less than tu_size. 
+ */ + max_tu_symbol = TU_SIZE_RECOMMENDED - 1; + dev_dbg(tc->dev, "set mode %dx%d\n", mode->hdisplay, mode->vdisplay); dev_dbg(tc->dev, "H margin %d,%d sync %d\n", @@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); - /* LCD Ctl Frame Size */ - tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | + /* + * LCD Ctl Frame Size + * datasheet is not clear of vsdelay in case of DPI + * assume we do not need any delay when DPI is a source of + * sync signals + */ + tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ | OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); - tc_write(HTIM01, (left_margin << 16) | /* H back porch */ - (hsync_len << 0)); /* Hsync */ - tc_write(HTIM02, (right_margin << 16) | /* H front porch */ - (mode->hdisplay << 0)); /* width */ + tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */ + (ALIGN(hsync_len, 2) << 0)); /* Hsync */ + tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */ + (ALIGN(mode->hdisplay, 2) << 0)); /* width */ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ (vsync_len << 0)); /* Vsync */ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ @@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) /* DP Main Stream Attributes */ vid_sync_dly = hsync_len + left_margin + mode->hdisplay; tc_write(DP0_VIDSYNCDELAY, - (0x003e << 16) | /* thresh_dly */ + (max_tu_symbol << 16) | /* thresh_dly */ (vid_sync_dly << 0)); tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); @@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); - /* - * Recommended maximum number of symbols transferred in a transfer unit: - * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, - * (output active video bandwidth in bytes)) - * Must be less than tu_size. 
- */ - max_tu_symbol = TU_SIZE_RECOMMENDED - 1; - tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); + tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | + BPC_8); return 0; err: @@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc) unsigned int rate; u32 dp_phy_ctrl; int timeout; - bool aligned; - bool ready; u32 value; int ret; u8 tmp[8]; @@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc) ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); if (ret < 0) goto err_dpcd_read; - ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ - DP_CHANNEL_EQ_BITS)); /* Lane0 */ - aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; - } while ((--timeout) && !(ready && aligned)); + } while ((--timeout) && + !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes))); if (timeout == 0) { /* Read DPCD 0x200-0x201 */ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); if (ret < 0) goto err_dpcd_read; + dev_err(dev, "channel(s) EQ not ok\n"); dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", tmp[1]); @@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc) dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[6]); - if (!ready) - dev_err(dev, "Lane0/1 not ready\n"); - if (!aligned) - dev_err(dev, "Lane0/1 not aligned\n"); return -EAGAIN; } @@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, static int tc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - /* Accept any mode */ + /* DPI interface clock limitation: upto 154 MHz */ + if (mode->clock > 154000) + return MODE_CLOCK_HIGH; + return MODE_OK; } diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index a4c4a465b385..130483f2cd7f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc) { } -/* - * This is called after a mode is programmed. It should reverse anything done - * by the prepare function - */ -static void cirrus_crtc_commit(struct drm_crtc *crtc) -{ -} - -/* - * The core can pass us a set of gamma values to program. We actually only - * use this for 8-bit mode so can't perform smooth fades on deeper modes, - * but it's a requirement that we provide the function - */ -static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t size, - struct drm_modeset_acquire_ctx *ctx) +static void cirrus_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct cirrus_device *cdev = dev->dev_private; @@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, int i; if (!crtc->enabled) - return 0; + return; r = crtc->gamma_store; g = r + crtc->gamma_size; @@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, WREG8(PALETTE_DATA, *g++ >> 8); WREG8(PALETTE_DATA, *b++ >> 8); } +} + +/* + * This is called after a mode is programmed. It should reverse anything done + * by the prepare function + */ +static void cirrus_crtc_commit(struct drm_crtc *crtc) +{ + cirrus_crtc_load_lut(crtc); +} + +/* + * The core can pass us a set of gamma values to program. 
We actually only + * use this for 8-bit mode so can't perform smooth fades on deeper modes, + * but it's a requirement that we provide the function + */ +static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size, + struct drm_modeset_acquire_ctx *ctx) +{ + cirrus_crtc_load_lut(crtc); return 0; } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 2fd383d7253a..52b08ec5fa2b 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -485,6 +485,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, &replaced); state->color_mgmt_changed |= replaced; return ret; + } else if (property == config->ctm_post_offset_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->ctm_post_offset, + val, + sizeof(struct drm_color_ctm_post_offset), + &replaced); + state->color_mgmt_changed |= replaced; + return ret; } else if (property == config->gamma_lut_property) { ret = drm_atomic_replace_property_blob_from_id(dev, &state->gamma_lut, @@ -543,6 +551,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; else if (property == config->ctm_property) *val = (state->ctm) ? state->ctm->base.id : 0; + else if (property == config->ctm_post_offset_property) + *val = (state->ctm_post_offset) ? state->ctm_post_offset->base.id : 0; else if (property == config->gamma_lut_property) *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; else if (property == config->prop_out_fence_ptr) @@ -1202,6 +1212,12 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, state->picture_aspect_ratio = val; } else if (property == connector->scaling_mode_property) { state->scaling_mode = val; + } else if (property == connector->content_protection_property) { + if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); + return -EINVAL; + } + state->content_protection = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); @@ -1281,6 +1297,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->picture_aspect_ratio; } else if (property == connector->scaling_mode_property) { *val = state->scaling_mode; + } else if (property == connector->content_protection_property) { + *val = state->content_protection; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0028591f3f95..097757d75ae3 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3179,6 +3179,8 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, drm_property_blob_get(state->degamma_lut); if (state->ctm) drm_property_blob_get(state->ctm); + if (state->ctm_post_offset) + drm_property_blob_get(state->ctm_post_offset); if (state->gamma_lut) drm_property_blob_get(state->gamma_lut); state->mode_changed = false; @@ -3228,6 +3230,7 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) drm_property_blob_put(state->mode_blob); drm_property_blob_put(state->degamma_lut); drm_property_blob_put(state->ctm); + drm_property_blob_put(state->ctm_post_offset); drm_property_blob_put(state->gamma_lut); } EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); @@ -3609,6 +3612,7 @@ int 
drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, /* Reset DEGAMMA_LUT and CTM properties. */ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm_post_offset, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); crtc_state->color_mgmt_changed |= replaced; diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index fe0982708e95..7f23bfc89248 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -64,6 +64,16 @@ * boot-up state too. Drivers can access the blob for the color conversion * matrix through &drm_crtc_state.ctm. * + * “CTM_POST_OFFSET”: + * Blob property to set post offset vector used to convert colors after + * applying ctm. The data is interpreted as a struct + * &drm_color_ctm_post_offset. + * + * Setting this to NULL (blob property value set to 0) means a pass-thru + * vector should be used. This is generally the driver boot-up state too. + * Drivers can access the blob for post offset vector through + * &drm_crtc_state.ctm_post_offset. + * * “GAMMA_LUT”: * Blob property to set the gamma lookup table (LUT) mapping pixel data * after the transformation matrix to data sent to the connector. The @@ -148,9 +158,12 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, degamma_lut_size); } - if (has_ctm) + if (has_ctm) { drm_object_attach_property(&crtc->base, config->ctm_property, 0); + drm_object_attach_property(&crtc->base, + config->ctm_post_offset_property, 0); + } if (gamma_lut_size) { drm_object_attach_property(&crtc->base, diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index ba9f36cef68c..ac4a9047570a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -699,6 +699,13 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) +static struct drm_prop_enum_list drm_cp_enum_list[] = { + { DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" }, + { DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" }, + { DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" }, +}; +DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) + /** * DOC: standard connector properties * @@ -741,6 +748,41 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, * value of link-status is "GOOD". If something fails during or after modeset, * the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers * should update this value using drm_mode_connector_set_link_status_property(). + * Content Protection: + * This property is used by userspace to request the kernel protect future + * content communicated over the link. When requested, kernel will apply + * the appropriate means of protection (most often HDCP), and use the + * property to tell userspace the protection is active. + * + * Drivers can set this up by calling + * drm_connector_attach_content_protection_property() on initialization. + * + * The value of this property can be one of the following: + * + * - DRM_MODE_CONTENT_PROTECTION_UNDESIRED = 0 + * The link is not protected, content is transmitted in the clear. + * - DRM_MODE_CONTENT_PROTECTION_DESIRED = 1 + * Userspace has requested content protection, but the link is not + * currently protected. When in this state, kernel should enable + * Content Protection as soon as possible. 
+ * - DRM_MODE_CONTENT_PROTECTION_ENABLED = 2 + * Userspace has requested content protection, and the link is + * protected. Only the driver can set the property to this value. + * If userspace attempts to set to ENABLED, kernel will return + * -EINVAL. + * + * A few guidelines: + * + * - DESIRED state should be preserved until userspace de-asserts it by + * setting the property to UNDESIRED. This means ENABLED should only + * transition to UNDESIRED when the user explicitly requests it. + * - If the state is DESIRED, kernel should attempt to re-authenticate the + * link whenever possible. This includes across disable/enable, dpms, + * hotplug, downstream device changes, link status failures, etc.. + * - Userspace is responsible for polling the property to determine when + * the value transitions from ENABLED to DESIRED. This signifies the link + * is no longer protected and userspace should take appropriate action + * (whatever that might be). * * Connectors also have one standardized atomic property: * @@ -1024,6 +1066,42 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property); +/** + * drm_connector_attach_content_protection_property - attach content protection + * property + * + * @connector: connector to attach CP property on. + * + * This is used to add support for content protection on select connectors. + * Content Protection is intentionally vague to allow for different underlying + * technologies, however it is most implemented by HDCP. + * + * The content protection will be set to &drm_connector_state.content_protection + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_connector_attach_content_protection_property( + struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + prop = drm_property_create_enum(dev, 0, "Content Protection", + drm_cp_enum_list, + ARRAY_SIZE(drm_cp_enum_list)); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&connector->base, prop, + DRM_MODE_CONTENT_PROTECTION_UNDESIRED); + + connector->content_protection_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_content_protection_property); + /** * drm_mode_create_aspect_ratio_property - create aspect ratio property * @dev: DRM device diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 0ef9011a1856..02a50929af67 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, { u8 data; int ret = 0; + int retry; if (!mode) { DRM_ERROR("NULL input\n"); @@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, } /* Read Status: i2c over aux */ - ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE, - &data, sizeof(data)); + for (retry = 0; retry < 6; retry++) { + if (retry) + usleep_range(500, 1000); + + ret = drm_dp_dual_mode_read(adapter, + DP_DUAL_MODE_LSPCON_CURRENT_MODE, + &data, sizeof(data)); + if (!ret) + break; + } + if (ret < 0) { - DRM_ERROR("LSPCON read(0x80, 0x41) failed\n"); + DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n"); return -EFAULT; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6bb6337be920..240e86a7f91e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -111,6 +111,9 @@ static const struct edid_quirk { /* AEO model 0 reports 8 bpc, but is 
a 6 bpc panel */ { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, + /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ + { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -1533,6 +1536,10 @@ static void connector_bad_edid(struct drm_connector *connector, * level, drivers must make all reasonable efforts to expose it as an I2C * adapter and use drm_get_edid() instead of abusing this function. * + * The EDID may be overridden using debugfs override_edid or firmare EDID + * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority + * order. Having either of them bypasses actual EDID reads. + * * Return: Pointer to valid EDID or NULL if we couldn't find any. */ struct edid *drm_do_get_edid(struct drm_connector *connector, @@ -1542,6 +1549,17 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; + struct edid *override = NULL; + + if (connector->override_edid) + override = drm_edid_duplicate((const struct edid *) + connector->edid_blob_ptr->data); + + if (!override) + override = drm_load_edid_firmware(connector); + + if (!IS_ERR_OR_NULL(override)) + return override; if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; @@ -3820,8 +3838,7 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name); * @edid: EDID to parse * * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The - * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to - * fill in. + * HDCP and Port_ID ELD fields are left for the graphics driver to fill in. */ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) { @@ -3902,6 +3919,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) } eld[5] |= total_sad_count << 4; + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP; + else + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI; + eld[DRM_ELD_BASELINE_ELD_LEN] = DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); @@ -4809,7 +4832,8 @@ void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range, - bool rgb_quant_range_selectable) + bool rgb_quant_range_selectable, + bool is_hdmi2_sink) { /* * CEA-861: @@ -4833,8 +4857,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, * YQ-field to match the RGB Quantization Range being transmitted * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB, * set YQ=1) and the Sink shall ignore the YQ-field." + * + * Unfortunate certain sinks (eg. VIZ Model 67/E261VA) get confused + * by non-zero YQ when receiving RGB. There doesn't seem to be any + * good way to tell which version of CEA-861 the sink supports, so + * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based + * on on CEA-861-F. 
*/ - if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) + if (!is_hdmi2_sink || + rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; else diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1b8f013ffa65..0914684c0c0a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1350,6 +1350,7 @@ static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info) replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm_post_offset, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, gamma_lut); crtc_state->color_mgmt_changed |= replaced; @@ -1809,6 +1810,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { DRM_INFO("Cannot find any crtc or sizes\n"); + + /* First time: disable all crtc's.. */ + if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master)) + restore_fbdev_mode(fb_helper); return -EAGAIN; } diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index af279844d7ce..c21e10c780ac 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -118,6 +118,10 @@ int drm_mode_addfb(struct drm_device *dev, r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); r.handles[0] = or->handle; + if (r.pixel_format == DRM_FORMAT_XRGB2101010 && + dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) + r.pixel_format = DRM_FORMAT_XBGR2101010; + ret = drm_mode_addfb2(dev, &r, file_priv); if (ret) return ret; @@ -454,6 +458,12 @@ int drm_mode_getfb(struct drm_device *dev, if (!fb) return -ENOENT; + /* Multi-planar framebuffers need getfb2. */ + if (fb->format->num_planes > 1) { + ret = -EINVAL; + goto out; + } + r->height = fb->height; r->width = fb->width; r->depth = fb->format->depth; @@ -477,6 +487,7 @@ int drm_mode_getfb(struct drm_device *dev, ret = -ENODEV; } +out: drm_framebuffer_put(fb); return ret; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index fbc3f308fa19..5de4c6e7435e 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -46,6 +46,7 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr struct dma_buf *dma_buf); /* drm_drv.c */ +#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. 
*/ struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index a9ae6dd2d593..d01cf222f320 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -547,7 +547,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -618,10 +618,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), @@ -630,7 +630,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), @@ -642,7 +642,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 61a1c8ea74bc..1acf3b1479a1 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -834,9 +834,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) if (!mm->color_adjust) return NULL; - hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); - hole_start = __drm_mm_hole_node_start(hole); - hole_end = hole_start + hole->hole_size; + /* + * The hole found during scanning should ideally be the first element + * in the hole_stack list, but due to side-effects in the driver it + * may not be. + */ + list_for_each_entry(hole, &mm->hole_stack, hole_stack) { + hole_start = __drm_mm_hole_node_start(hole); + hole_end = hole_start + hole->hole_size; + + if (hole_start <= scan->hit_start && + hole_end >= scan->hit_end) + break; + } + + /* We should only be called after we found the hole previously */ + DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); + if (unlikely(&hole->hole_stack == &mm->hole_stack)) + return NULL; DRM_MM_BUG_ON(hole_start > scan->hit_start); DRM_MM_BUG_ON(hole_end < scan->hit_end); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 74f6ff5df656..47647bcef32c 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -323,6 +323,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.ctm_property = prop; + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "CTM_POST_OFFSET", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.ctm_post_offset_property = prop; + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "GAMMA_LUT", 0); diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index af4e906c630d..56b9f9b1c3ae 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -88,7 +88,7 @@ void drm_modeset_lock_all(struct drm_device *dev) struct drm_modeset_acquire_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL); if (WARN_ON(!ctx)) return; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 904966cde32b..7cf30d5683c6 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -353,8 +353,6 @@ EXPORT_SYMBOL(drm_helper_probe_detect); * drm_mode_probed_add(). New modes start their life with status as OK. * Modes are added from a single source using the following priority order. 
* - * - debugfs 'override_edid' (used for testing only) - * - firmware EDID (drm_load_edid_firmware()) * - &drm_connector_helper_funcs.get_modes vfunc * - if the connector status is connector_status_connected, standard * VESA DMT modes up to 1024x768 are automatically added @@ -483,22 +481,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, goto prune; } - if (connector->override_edid) { - struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; - - count = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - } else { - struct edid *edid = drm_load_edid_firmware(connector); - if (!IS_ERR_OR_NULL(edid)) { - drm_mode_connector_update_edid_property(connector, edid); - count = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - kfree(edid); - } - if (count == 0) - count = (*connector_funcs->get_modes)(connector); - } + count = (*connector_funcs->get_modes)(connector); if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); @@ -671,6 +654,26 @@ static void output_poll_execute(struct work_struct *work) schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); } +/** + * drm_kms_helper_is_poll_worker - is %current task an output poll worker? + * + * Determine if %current task is an output poll worker. This can be used + * to select distinct code paths for output polling versus other contexts. + * + * One use case is to avoid a deadlock between the output poll worker and + * the autosuspend worker wherein the latter waits for polling to finish + * upon calling drm_kms_helper_poll_disable(), while the former waits for + * runtime suspend to finish upon calling pm_runtime_get_sync() in a + * connector ->detect hook. 
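+ *
+ * As a rough sketch only (not lifted from any particular driver), a
+ * runtime-PM aware &drm_connector_helper_funcs.detect implementation
+ * could bracket its hardware probe like this:
+ *
+ *	if (!drm_kms_helper_is_poll_worker())
+ *		pm_runtime_get_sync(connector->dev->dev);
+ *
+ *	... probe the connector hardware ...
+ *
+ *	if (!drm_kms_helper_is_poll_worker())
+ *		pm_runtime_put_autosuspend(connector->dev->dev);
+ *
+ * Skipping the synchronous resume on the poll-worker path is what breaks
+ * the deadlock described above.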
+ */ +bool drm_kms_helper_is_poll_worker(void) +{ + struct work_struct *work = current_work(); + + return work && work->func == output_poll_execute; +} +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); + /** * drm_kms_helper_poll_disable - disable output polling * @dev: drm_device diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 0422b8c2c2e7..7bcf5702c91c 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -328,28 +328,11 @@ static const struct file_operations drm_syncobj_file_fops = { .release = drm_syncobj_file_release, }; -static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj) -{ - struct file *file = anon_inode_getfile("syncobj_file", - &drm_syncobj_file_fops, - syncobj, 0); - if (IS_ERR(file)) - return PTR_ERR(file); - - drm_syncobj_get(syncobj); - if (cmpxchg(&syncobj->file, NULL, file)) { - /* lost the race */ - fput(file); - } - - return 0; -} - static int drm_syncobj_handle_to_fd(struct drm_file *file_private, u32 handle, int *p_fd) { struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); - int ret; + struct file *file; int fd; if (!syncobj) @@ -361,46 +344,40 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private, return fd; } - if (!syncobj->file) { - ret = drm_syncobj_alloc_file(syncobj); - if (ret) - goto out_put_fd; + file = anon_inode_getfile("syncobj_file", + &drm_syncobj_file_fops, + syncobj, 0); + if (IS_ERR(file)) { + put_unused_fd(fd); + drm_syncobj_put(syncobj); + return PTR_ERR(file); } - fd_install(fd, syncobj->file); - drm_syncobj_put(syncobj); + + drm_syncobj_get(syncobj); + fd_install(fd, file); + *p_fd = fd; return 0; -out_put_fd: - put_unused_fd(fd); - drm_syncobj_put(syncobj); - return ret; } -static struct drm_syncobj *drm_syncobj_fdget(int fd) -{ - struct file *file = fget(fd); - - if (!file) - return NULL; - if (file->f_op != &drm_syncobj_file_fops) - goto err; - - return file->private_data; -err: - fput(file); - return NULL; -}; - static int drm_syncobj_fd_to_handle(struct drm_file *file_private, int fd, u32 *handle) { - struct drm_syncobj *syncobj = drm_syncobj_fdget(fd); + struct drm_syncobj *syncobj; + struct file *file; int ret; - if (!syncobj) + file = fget(fd); + if (!file) + return -EINVAL; + + if (file->f_op != &drm_syncobj_file_fops) { + fput(file); return -EINVAL; + } /* take a reference to put in the idr */ + syncobj = file->private_data; drm_syncobj_get(syncobj); idr_preload(GFP_KERNEL); @@ -409,12 +386,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, spin_unlock(&file_private->syncobj_table_lock); idr_preload_end(); - if (ret < 0) { - fput(syncobj->file); - return ret; - } - *handle = ret; - return 0; + if (ret > 0) { + *handle = ret; + ret = 0; + } else + drm_syncobj_put(syncobj); + + fput(file); + return ret; } int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 70f2b9593edc..17e8ef9a1c11 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -311,8 +311,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) u32 vblank; unsigned long flags; - WARN(!dev->driver->get_vblank_timestamp, - "This function requires support for accurate vblank timestamps."); + WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp, + "This function requires support for accurate vblank timestamps."); spin_lock_irqsave(&dev->vblank_time_lock, flags); @@ -869,7 +869,7 @@ void drm_crtc_arm_vblank_event(struct 
drm_crtc *crtc, assert_spin_locked(&dev->event_lock); e->pipe = pipe; - e->event.sequence = drm_vblank_count(dev, pipe); + e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1; e->event.crtc_id = crtc->base.id; list_add_tail(&e->base.link, &dev->vblank_event_list); } diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index 38b477b5fbf9..4df3c48adcec 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -6,6 +6,7 @@ config DRM_ETNAVIV depends on MMU select SHMEM select SYNC_FILE + select THERMAL if DRM_ETNAVIV_THERMAL select TMPFS select IOMMU_API select IOMMU_SUPPORT @@ -15,6 +16,14 @@ config DRM_ETNAVIV help DRM driver for Vivante GPUs. +config DRM_ETNAVIV_THERMAL + bool "enable ETNAVIV thermal throttling" + depends on DRM_ETNAVIV + default y + help + Compile in support for thermal throttling. + Say Y unless you want to risk burning your SoC. + config DRM_ETNAVIV_REGISTER_LOGGING bool "enable ETNAVIV register logging" depends on DRM_ETNAVIV diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index fc9a6a83dfc7..a1562f89c3d7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1622,7 +1622,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, struct etnaviv_gpu *gpu = dev_get_drvdata(dev); int ret; - if (IS_ENABLED(CONFIG_THERMAL)) { + if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) { gpu->cooling = thermal_of_cooling_device_register(dev->of_node, (char *)dev_name(dev), gpu, &cooling_ops); if (IS_ERR(gpu->cooling)) @@ -1635,7 +1635,8 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, ret = etnaviv_gpu_clk_enable(gpu); #endif if (ret < 0) { - thermal_cooling_device_unregister(gpu->cooling); + if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) + thermal_cooling_device_unregister(gpu->cooling); return ret; } @@ -1692,7 +1693,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, gpu->drm = NULL; - thermal_cooling_device_unregister(gpu->cooling); + if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) + thermal_cooling_device_unregister(gpu->cooling); gpu->cooling = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 077de014d610..4400efe3974a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev, if (IS_ERR(exynos_gem)) return exynos_gem; + if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) { + /* + * when no IOMMU is available, all allocated buffers are + * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag + */ + flags &= ~EXYNOS_BO_NONCONTIG; + DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n"); + } + /* set memory type and cache attribute from user side. 
*/ exynos_gem->flags = flags; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 58e9e0601a61..faf17b83b910 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) return PTR_ERR(fsl_dev->state); } - clk_disable_unprepare(fsl_dev->pix_clk); clk_disable_unprepare(fsl_dev->clk); return 0; @@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); + enable_irq(fsl_dev->irq); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); console_lock(); @@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) console_unlock(); drm_kms_helper_poll_enable(fsl_dev->drm); - enable_irq(fsl_dev->irq); return 0; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index edd7d8127d19..c54806d08dd7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, { struct drm_encoder *encoder = &fsl_dev->encoder; struct drm_connector *connector = &fsl_dev->connector.base; - struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; int ret; fsl_dev->connector.encoder = encoder; @@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_sysfs; - drm_object_property_set_value(&connector->base, - mode_config->dpms_property, - DRM_MODE_DPMS_OFF); - ret = drm_panel_attach(panel, connector); if (ret) { dev_err(fsl_dev->dev, "failed to attach panel\n"); diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 9823477b1855..2269be91f3e1 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc, { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode; if (!ctx->power_on) (void)ade_power_up(ctx); + ade_ldi_set_mode(acrtc, mode, adj_mode); } static void ade_crtc_atomic_flush(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index e9e64e8e9765..c4ef83863947 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -124,6 +124,22 @@ config DRM_I915_GVT_KVMGT help Choose this option if you want to enable KVMGT support for Intel GVT-g. +config DRM_I915_GVT_ACRN_GVT + bool "Enable ACRN support for Intel GVT-g" + depends on DRM_I915_GVT + depends on ACRN + default n + help + Choose this option if you want to enable ACRN_GVT support for + Intel GVT-g under ACRN hypervisor environment. 
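As a minimal sketch (assuming the tree provides the ACRN hypervisor option CONFIG_ACRN that this entry depends on), a config fragment enabling the new option would look roughly like:

	CONFIG_DRM_I915=y
	CONFIG_DRM_I915_GVT=y
	CONFIG_ACRN=y
	CONFIG_DRM_I915_GVT_ACRN_GVT=y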
+ +config DRM_I915_LOAD_ASYNC_SUPPORT + bool "Async i915_driver_load support" + default n + depends on DRM_I915 + help + Choose this option to support async i915_driver_load for boot-up time saving + menu "drm/i915 Debugging" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 2e034efc4d6d..9153fad2b831 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -51,6 +51,7 @@ i915-y += i915_cmd_parser.o \ i915_trace_points.o \ i915_vma.o \ intel_breadcrumbs.o \ + i915_gem_gvtbuffer.o \ intel_engine_cs.o \ intel_hangcheck.o \ intel_lrc.o \ @@ -85,6 +86,7 @@ i915-y += intel_audio.o \ intel_fbc.o \ intel_fifo_underrun.o \ intel_frontbuffer.o \ + intel_hdcp.o \ intel_hotplug.o \ intel_modes.o \ intel_overlay.o \ diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 2641ba510a61..50c1cc9f5005 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -2,8 +2,9 @@ GVT_DIR := gvt GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ - execlist.o scheduler.o sched_policy.o render.o cmd_parser.o + execlist.o scheduler.o sched_policy.o render.o cmd_parser.o fb_decoder.o -ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) +ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o +obj-$(CONFIG_DRM_I915_GVT_ACRN_GVT) += $(GVT_DIR)/acrn-gvt.o diff --git a/drivers/gpu/drm/i915/gvt/acrn-gvt.c b/drivers/gpu/drm/i915/gvt/acrn-gvt.c new file mode 100644 index 000000000000..1d629f8679ca --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/acrn-gvt.c @@ -0,0 +1,985 @@ +/* + * Interfaces coupled to ACRN + * + * Copyright(c) 2011-2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of Version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + */ + +/* + * NOTE: + * This file contains hypervisor specific interactions to + * implement the concept of mediated pass-through framework. + * What this file provides is actually a general abstraction + * of in-kernel device model, which is not gvt specific. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include "acrn-gvt.h" + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("ACRNGT mediated passthrough driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); + +#define ASSERT(x) \ +do { if (x) break; \ + printk(KERN_EMERG "### ASSERTION FAILED %s: %s: %d: %s\n", \ + __FILE__, __func__, __LINE__, #x); dump_stack(); BUG(); \ +} while (0) + + +struct kobject *acrn_gvt_ctrl_kobj; +static struct kset *acrn_gvt_kset; +static DEFINE_MUTEX(acrn_gvt_sysfs_lock); + +struct gvt_acrngt acrngt_priv; +const struct intel_gvt_ops *intel_gvt_ops; + +static void disable_domu_plane(int pipe, int plane) +{ + struct drm_i915_private *dev_priv = acrngt_priv.gvt->dev_priv; + + I915_WRITE(PLANE_CTL(pipe, plane), 0); + + I915_WRITE(PLANE_SURF(pipe, plane), 0); + POSTING_READ(PLANE_SURF(pipe, plane)); +} + +void acrngt_instance_destroy(struct intel_vgpu *vgpu) +{ + int pipe, plane; + struct acrngt_hvm_dev *info = NULL; + struct intel_gvt *gvt = acrngt_priv.gvt; + + if (vgpu) { + info = (struct acrngt_hvm_dev *)vgpu->handle; + if (info && info->emulation_thread != NULL) + kthread_stop(info->emulation_thread); + + for_each_pipe(gvt->dev_priv, pipe) { + for_each_universal_plane(gvt->dev_priv, pipe, plane) { + if (gvt->pipe_info[pipe].plane_owner[plane] == + vgpu->id) { + disable_domu_plane(pipe, plane); + } + } + } + + intel_gvt_ops->vgpu_deactivate(vgpu); + intel_gvt_ops->vgpu_destroy(vgpu); + } + + if (info) { + gvt_dbg_core("destroy vgpu instance, vm id: %d, client %d", + info->vm_id, info->client); + + if (info->client != 0) + acrn_ioreq_destroy_client(info->client); + + if (info->vm) + put_vm(info->vm); + + kfree(info); + } +} + +static bool acrngt_write_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long val) +{ + if (intel_gvt_ops->emulate_cfg_write(vgpu, port, &val, bytes)) { + gvt_err("failed to write config space port 0x%x\n", port); + return false; + } + return true; +} + +static bool acrngt_read_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long *val) +{ + unsigned long data; + + if (intel_gvt_ops->emulate_cfg_read(vgpu, port, &data, bytes)) { + gvt_err("failed to read config space port 0x%x\n", port); + return false; + } + memcpy(val, &data, bytes); + return true; +} + +static int acrngt_hvm_pio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.pci_request.direction == REQUEST_READ) { + /* PIO READ */ + gvt_dbg_core("handle pio read emulation at port 0x%x\n", + req->reqs.pci_request.reg); + if (!acrngt_read_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long *)&req->reqs.pci_request.value)) { + gvt_err("failed to read pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } else if (req->reqs.pci_request.direction == REQUEST_WRITE) { + /* PIO WRITE */ + gvt_dbg_core("handle pio write emulation at address 0x%x, " + "value 0x%x\n", + req->reqs.pci_request.reg, req->reqs.pci_request.value); + if (!acrngt_write_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long)req->reqs.pci_request.value)) { + gvt_err("failed to write pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } + return 0; +} + +static int acrngt_hvm_mmio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if 
(req->reqs.mmio_request.direction == REQUEST_READ) { + /* MMIO READ */ + gvt_dbg_core("handle mmio read emulation at address 0x%llx\n", + req->reqs.mmio_request.address); + if (intel_gvt_ops->emulate_mmio_read(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to read mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + } else if (req->reqs.mmio_request.direction == REQUEST_WRITE) { + /* MMIO Write */ + if (intel_gvt_ops->emulate_mmio_write(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to write mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + gvt_dbg_core("handle mmio write emulation at address 0x%llx, " + "value 0x%llx\n", + req->reqs.mmio_request.address, req->reqs.mmio_request.value); + } + + return 0; +} + +static int acrngt_emulation_thread(void *priv) +{ + struct intel_vgpu *vgpu = (struct intel_vgpu *)priv; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)vgpu->handle; + struct vhm_request *req; + + int vcpu, ret; + int nr_vcpus = info->nr_vcpu; + + gvt_dbg_core("start kthread for VM%d\n", info->vm_id); + ASSERT(info->nr_vcpu <= MAX_HVM_VCPUS_SUPPORTED); + + set_freezable(); + while (1) { + acrn_ioreq_attach_client(info->client, 1); + + if (kthread_should_stop()) + return 0; + + for (vcpu = 0; vcpu < nr_vcpus; vcpu++) { + req = &info->req_buf[vcpu]; + if (req->valid && + req->processed == REQ_STATE_PROCESSING && + req->client == info->client) { + gvt_dbg_core("handle ioreq type %d\n", + req->type); + switch (req->type) { + case REQ_PCICFG: + ret = acrngt_hvm_pio_emulation(vgpu, req); + break; + case REQ_MMIO: + case REQ_WP: + ret = acrngt_hvm_mmio_emulation(vgpu, req); + break; + default: + gvt_err("Unknown ioreq type %x\n", + req->type); + ret = -EINVAL; + break; + } + /* error handling */ + if (ret) + BUG(); + + req->processed = REQ_STATE_SUCCESS; + /* complete request */ + if (acrn_ioreq_complete_request(info->client, + vcpu)) + gvt_err("failed complete request\n"); + } + } + } + + BUG(); /* It's actually impossible to reach here */ + return 0; +} + +struct intel_vgpu *acrngt_instance_create(domid_t vm_id, + struct intel_vgpu_type *vgpu_type) +{ + struct acrngt_hvm_dev *info; + struct intel_vgpu *vgpu; + int ret = 0; + struct task_struct *thread; + struct vm_info vm_info; + + gvt_dbg_core("acrngt_instance_create enter\n"); + if (!intel_gvt_ops || !acrngt_priv.gvt) + return NULL; + + vgpu = intel_gvt_ops->vgpu_create(acrngt_priv.gvt, vgpu_type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create vgpu\n"); + return NULL; + } + + info = kzalloc(sizeof(struct acrngt_hvm_dev), GFP_KERNEL); + if (info == NULL) { + gvt_err("failed to alloc acrngt_hvm_dev\n"); + goto err; + } + + info->vm_id = vm_id; + info->vgpu = vgpu; + vgpu->handle = (unsigned long)info; + + if ((info->vm = find_get_vm(vm_id)) == NULL) { + gvt_err("failed to get vm %d\n", vm_id); + acrngt_instance_destroy(vgpu); + return NULL; + } + if (info->vm->req_buf == NULL) { + gvt_err("failed to get req buf for vm %d\n", vm_id); + goto err; + } + gvt_dbg_core("get vm req_buf from vm_id %d\n", vm_id); + + /* create client: no handler -> handle request by itself */ + info->client = acrn_ioreq_create_client(vm_id, NULL, "ioreq gvt-g"); + if (info->client < 0) { + gvt_err("failed to create ioreq client for vm id %d\n", vm_id); + goto err; + } + + /* get vm info */ + ret = vhm_get_vm_info(vm_id, &vm_info); + if 
(ret < 0) { + gvt_err("failed to get vm info for vm id %d\n", vm_id); + goto err; + } + + info->nr_vcpu = vm_info.max_vcpu; + + /* get req buf */ + info->req_buf = acrn_ioreq_get_reqbuf(info->client); + if (info->req_buf == NULL) { + gvt_err("failed to get req_buf for client %d\n", info->client); + goto err; + } + + /* trap config space access */ + acrn_ioreq_intercept_bdf(info->client, 0, 2, 0); + + thread = kthread_run(acrngt_emulation_thread, vgpu, + "acrngt_emulation:%d", vm_id); + if (IS_ERR(thread)) { + gvt_err("failed to run emulation thread for vm %d\n", vm_id); + goto err; + } + info->emulation_thread = thread; + gvt_dbg_core("create vgpu instance success, vm_id %d, client %d," + " nr_vcpu %d\n", info->vm_id,info->client, info->nr_vcpu); + + return vgpu; + +err: + acrngt_instance_destroy(vgpu); + return NULL; +} + +static ssize_t kobj_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->show) + ret = kattr->show(kobj, kattr, buf); + return ret; +} + +static ssize_t kobj_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->store) + ret = kattr->store(kobj, kattr, buf, count); + return ret; +} + +const struct sysfs_ops acrngt_kobj_sysfs_ops = { + .show = kobj_attr_show, + .store = kobj_attr_store, +}; + +static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i; + + for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++) { + if (acrngt_priv.vgpus[i] && + (kobj == &((struct acrngt_hvm_dev *) + (acrngt_priv.vgpus[i]->handle))->kobj)) { + return sprintf(buf, "%d\n", acrngt_priv.vgpus[i]->id); + } + } + return 0; +} + +static struct kobj_attribute acrngt_vm_attr = +__ATTR(vgpu_id, 0440, acrngt_sysfs_vgpu_id, NULL); + + +static struct attribute *acrngt_vm_attrs[] = { + &acrngt_vm_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_instance_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_vm_attrs, +}; + +static int acrngt_sysfs_add_instance(struct acrngt_hvm_params *vp) +{ + int ret = 0; + struct intel_vgpu *vgpu; + struct acrngt_hvm_dev *info; + + struct intel_vgpu_type type = acrngt_priv.gvt->types[0]; + + /* todo: wa patch due to plane restriction patches are not porting */ + acrngt_priv.gvt->pipe_info[1].plane_owner[0] = 1; + acrngt_priv.gvt->pipe_info[1].plane_owner[1] = 1; + acrngt_priv.gvt->pipe_info[1].plane_owner[2] = 1; + acrngt_priv.gvt->pipe_info[1].plane_owner[3] = 1; + + type.low_gm_size = vp->aperture_sz * VMEM_1MB; + type.high_gm_size = (vp->gm_sz - vp->aperture_sz) * VMEM_1MB; + type.fence = vp->fence_sz; + mutex_lock(&acrn_gvt_sysfs_lock); + vgpu = acrngt_instance_create(vp->vm_id, &type); + mutex_unlock(&acrn_gvt_sysfs_lock); + if (vgpu == NULL) { + gvt_err("acrngt_sysfs_add_instance failed.\n"); + ret = -EINVAL; + } else { + info = (struct acrngt_hvm_dev *) vgpu->handle; + info->vm_id = vp->vm_id; + acrngt_priv.vgpus[vgpu->id - 1] = vgpu; + gvt_dbg_core("add acrngt instance for vm-%d with vgpu-%d.\n", + vp->vm_id, vgpu->id); + + kobject_init(&info->kobj, &acrngt_instance_ktype); + info->kobj.kset = acrn_gvt_kset; + /* add kobject, NULL parent indicates using kset as parent */ + ret = kobject_add(&info->kobj, NULL, "vm%u", info->vm_id); + 
if (ret) { + gvt_err("%s: kobject add error: %d\n", __func__, ret); + kobject_put(&info->kobj); + } + } + + return ret; +} + +static struct intel_vgpu *vgpu_from_id(int vm_id) +{ + int i; + struct acrngt_hvm_dev *hvm_dev = NULL; + + /* vm_id is negtive in del_instance call */ + if (vm_id < 0) + vm_id = -vm_id; + for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++) + if (acrngt_priv.vgpus[i]) { + hvm_dev = (struct acrngt_hvm_dev *) + acrngt_priv.vgpus[i]->handle; + if (hvm_dev && (vm_id == hvm_dev->vm_id)) + return acrngt_priv.vgpus[i]; + } + return NULL; +} + +static int acrngt_sysfs_del_instance(struct acrngt_hvm_params *vp) +{ + int ret = 0; + struct intel_vgpu *vgpu = vgpu_from_id(vp->vm_id); + struct acrngt_hvm_dev *info = NULL; + + if (vgpu) { + info = (struct acrngt_hvm_dev *) vgpu->handle; + gvt_dbg_core("remove vm-%d sysfs node.\n", vp->vm_id); + kobject_put(&info->kobj); + + mutex_lock(&acrn_gvt_sysfs_lock); + acrngt_priv.vgpus[vgpu->id - 1] = NULL; + acrngt_instance_destroy(vgpu); + mutex_unlock(&acrn_gvt_sysfs_lock); + } + + return ret; +} + +static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct acrngt_hvm_params vp; + int param_cnt; + char param_str[64]; + int rc; + int high_gm_sz; + int low_gm_sz; + + /* We expect the param_str should be vmid,a,b,c (where the guest + * wants a MB aperture and b MB gm, and c fence registers) or -vmid + * (where we want to release the gvt instance). + */ + (void)sscanf(buf, "%63s", param_str); + param_cnt = sscanf(param_str, "%d,%d,%d,%d", &vp.vm_id, + &low_gm_sz, &high_gm_sz, &vp.fence_sz); + gvt_dbg_core("create vm-%d sysfs node, low gm size %d," + " high gm size %d, fence size %d\n", + vp.vm_id, low_gm_sz, high_gm_sz, vp.fence_sz); + vp.aperture_sz = low_gm_sz; + vp.gm_sz = high_gm_sz + low_gm_sz; + if (param_cnt == 1) { + if (vp.vm_id >= 0) + return -EINVAL; + } else if (param_cnt == 4) { + if (!(vp.vm_id > 0 && vp.aperture_sz > 0 && + vp.aperture_sz <= vp.gm_sz && vp.fence_sz > 0)) + return -EINVAL; + } else { + gvt_err("%s: parameter counter incorrect\n", __func__); + return -EINVAL; + } + + rc = (vp.vm_id > 0) ? acrngt_sysfs_add_instance(&vp) : + acrngt_sysfs_del_instance(&vp); + + return rc < 0 ? 
rc : count; +} + +static ssize_t show_plane_owner(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "Planes:\nPipe A: %d %d %d %d\n" + "Pipe B: %d %d %d %d\nPipe C: %d %d %d\n", + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE1], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE2], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE1], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE2], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE1]); +} + +static struct kobj_attribute acrngt_instance_attr = +__ATTR(create_gvt_instance, 0220, NULL, acrngt_sysfs_instance_manage); + +static struct kobj_attribute plane_owner_attr = +__ATTR(plane_owner_show, 0440, show_plane_owner, NULL); + +static struct attribute *acrngt_ctrl_attrs[] = { + &acrngt_instance_attr.attr, + &plane_owner_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_ctrl_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_ctrl_attrs, +}; + +int acrngt_sysfs_init(struct intel_gvt *gvt) +{ + int ret; + + acrn_gvt_kset = kset_create_and_add("gvt", NULL, kernel_kobj); + if (!acrn_gvt_kset) { + ret = -ENOMEM; + goto kset_fail; + } + + acrn_gvt_ctrl_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (!acrn_gvt_ctrl_kobj) { + ret = -ENOMEM; + goto ctrl_fail; + } + + acrn_gvt_ctrl_kobj->kset = acrn_gvt_kset; + ret = kobject_init_and_add(acrn_gvt_ctrl_kobj, &acrngt_ctrl_ktype, + NULL, "control"); + if (ret) { + ret = -EINVAL; + goto kobj_fail; + } + + return 0; + +kobj_fail: + kobject_put(acrn_gvt_ctrl_kobj); +ctrl_fail: + kset_unregister(acrn_gvt_kset); +kset_fail: + return ret; +} + +void acrngt_sysfs_del(void) +{ + kobject_put(acrn_gvt_ctrl_kobj); + kset_unregister(acrn_gvt_kset); +} + +static int acrngt_host_init(struct device *dev, void *gvt, const void *ops) +{ + int ret = -EFAULT; + + if (!gvt || !ops) + return -EINVAL; + + acrngt_priv.gvt = (struct intel_gvt *)gvt; + intel_gvt_ops = (const struct intel_gvt_ops *)ops; + + ret = acrngt_sysfs_init(acrngt_priv.gvt); + if (ret) { + gvt_err("failed call acrngt_sysfs_init, error: %d\n", ret); + acrngt_priv.gvt = NULL; + intel_gvt_ops = NULL; + } + + return ret; +} + +static void acrngt_host_exit(struct device *dev, void *gvt) +{ + acrngt_sysfs_del(); + acrngt_priv.gvt = NULL; + intel_gvt_ops = NULL; +} + +static int acrngt_attach_vgpu(void *vgpu, unsigned long *handle) +{ + return 0; +} + +static void acrngt_detach_vgpu(unsigned long handle) +{ + return; +} + +static int acrngt_inject_msi(unsigned long handle, u32 addr_lo, u16 data) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("inject msi irq, addr 0x%x, data 0x%hx\n", addr_lo, data); + + ret = vhm_inject_msi(info->vm_id, addr_lo, data); + if (ret) + gvt_err("failed to inject msi for vm %d\n", info->vm_id); + return ret; +} + +static unsigned long acrngt_virt_to_mfn(void *addr) +{ + uint64_t gpa; + uint64_t hpa; + gvt_dbg_core("virt 0x%lx to mfn\n", (unsigned long)addr); + + gpa = virt_to_phys(addr); + hpa = vhm_vm_gpa2hpa(0, gpa); 
+ + return (unsigned long) (hpa >> PAGE_SHIFT); +} + +static int acrngt_set_wp_page(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set wp page for gfn 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = acrn_ioreq_add_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) { + gvt_err("failed acrn_ioreq_add_iorange for gfn 0x%llx\n", gfn); + return ret; + } + ret = update_memmap_attr(info->vm_id, gfn << PAGE_SHIFT, + acrn_hpa2gpa(hpa), 0x1000, + MEM_TYPE_WB, + (MEM_ACCESS_READ | MEM_ACCESS_EXEC)); + if (ret) + gvt_err("failed update_memmap_attr set for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_unset_wp_page(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("unset wp page for gfx 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + /* TODO: need to read back default value before write */ + ret = update_memmap_attr(info->vm_id, gfn << PAGE_SHIFT, + acrn_hpa2gpa(hpa), 0x1000, MEM_TYPE_WB, MEM_ACCESS_RWX); + if (ret) { + gvt_err("failed update_memmap_attr unset for gfn 0x%llx\n", + gfn); + return ret; + } + ret = acrn_ioreq_del_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) + gvt_err("failed acrn_ioreq_del_iorange for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_read_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("read gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not read gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) buf) = *((uint8_t *) va); + break; + case 2: + *((uint16_t *) buf) = *((uint16_t *) va); + break; + case 4: + *((uint32_t *) buf) = *((uint32_t *) va); + break; + case 8: + *((uint64_t *) buf) = *((uint64_t *) va); + break; + default: + memcpy(buf, va, len); + } + return 0; +} + +static int acrngt_write_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("write gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not write gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) va) = *((uint8_t *) buf); + break; + case 2: + *((uint16_t *) va) = *((uint16_t *) buf); + break; + case 4: + *((uint32_t *) va) = *((uint32_t *) buf); + break; + case 8: + *((uint64_t *) va) = *((uint64_t *) buf); + break; + default: + memcpy(va, buf, len); + } + return 0; +} + +static unsigned long acrngt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +{ + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("convert gfn 0x%lx to pfn\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + return hpa >> PAGE_SHIFT; +} + +static int acrngt_map_gfn_to_mfn(unsigned long handle, unsigned long gfn, + unsigned long mfn, unsigned int nr, bool map) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("map/unmap gfn 0x%lx to mfn 0x%lx with %u pages, map %d\n", + gfn, mfn, 
nr, map); + + if (map) + ret = set_mmio_map(info->vm_id, gfn << PAGE_SHIFT, + mfn << PAGE_SHIFT, nr << PAGE_SHIFT, + MEM_TYPE_UC, MEM_ACCESS_RWX); + else + ret = unset_mmio_map(info->vm_id, gfn << PAGE_SHIFT, + mfn << PAGE_SHIFT, nr << PAGE_SHIFT); + if (ret) + gvt_err("failed map/unmap gfn 0x%lx to mfn 0x%lx with %u pages," + " map %d\n", gfn, mfn, nr, map); + return ret; +} + +static int acrngt_set_trap_area(unsigned long handle, u64 start, + u64 end, bool map) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set trap area, start 0x%llx, end 0x%llx, map %d\n", + start, end, map); + + if (map) + ret = acrn_ioreq_add_iorange(info->client, REQ_MMIO, + start, end); + else + ret = acrn_ioreq_del_iorange(info->client, REQ_MMIO, + start, end); + if (ret) + gvt_err("failed set trap, start 0x%llx, end 0x%llx, map %d\n", + start, end, map); + return ret; +} + +static int acrngt_set_pvmmio(unsigned long handle, u64 start, u64 end, bool map) +{ + int rc; + unsigned long mfn, shared_mfn; + unsigned long pfn = start >> PAGE_SHIFT; + u32 mmio_size_fn = acrngt_priv.gvt->device_info.mmio_size >> PAGE_SHIFT; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + + if (map) { + mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg); + rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + + /* map the shared page to guest */ + shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page); + rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map); + if (rc) { + gvt_err("acrn-gvt: map shared page fail with ret %d\n", rc); + return rc; + } + + /* mmio access is trapped like memory write protection */ + rc = acrn_ioreq_add_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn); + return rc; + } + rc = update_memmap_attr(info->vm_id, pfn << PAGE_SHIFT, + mfn << PAGE_SHIFT, + mmio_size_fn << PAGE_SHIFT, + MEM_TYPE_WB, + (MEM_ACCESS_READ | MEM_ACCESS_EXEC)); + if (rc) + gvt_err("failed update_memmap_attr set for pfn 0x%lx\n", pfn); + + /* scratch reg access is trapped like mmio access, 1 page */ + rc = acrngt_map_gfn_to_mfn(handle, pfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), + mfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), 1, 0); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + rc = acrn_ioreq_add_iorange(info->client, REQ_MMIO, + (pfn << PAGE_SHIFT) + VGT_PVINFO_PAGE, + ((pfn + 1) << PAGE_SHIFT) + VGT_PVINFO_PAGE - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", + (pfn << PAGE_SHIFT) + VGT_PVINFO_PAGE); + return rc; + } + + /* shared page is not trapped, directly pass through */ + //todo: MEM_ATTR_ALL_WB or MEM_ATTR_ALL? 
+ rc = update_memmap_attr(info->vm_id, + (pfn + mmio_size_fn) << PAGE_SHIFT, + shared_mfn << PAGE_SHIFT, + 0x1000, MEM_TYPE_WB, MEM_ACCESS_RWX); + if (rc) + gvt_err("failed update_memmap_attr set for gfn 0x%lx\n", + pfn + mmio_size_fn); + } else { + mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg); + rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + rc = acrn_ioreq_del_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn); + return rc; + } + rc = acrn_ioreq_add_iorange(info->client, REQ_MMIO, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_del_iorange for pfn 0x%lx\n", pfn); + return rc; + } + + /* unmap the shared page to guest */ + shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page); + rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map); + if (rc) { + gvt_err("acrn-gvt: map shared page fail with ret %d\n", rc); + return rc; + } + } + return rc; +} + +static int acrn_pause_domain(unsigned long handle) +{ + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + if (info == NULL) + return 0; + + /* todo: should be implemented to workaroud hw bug */ + gvt_dbg_core("pause domain\n"); + return 0; +} + +static int acrn_unpause_domain(unsigned long handle) +{ + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + if (info == NULL) + return 0; + + /* todo: should be implemented to workaroud hw bug */ + gvt_dbg_core("unpause domain\n"); + return 0; +} + +static int acrngt_dom0_ready(void) +{ + char *env[] = {"GVT_DOM0_READY=1", NULL}; + if(!acrn_gvt_ctrl_kobj) + return 0; + gvt_dbg_core("acrngt: Dom 0 ready to accept Dom U guests\n"); + return kobject_uevent_env(acrn_gvt_ctrl_kobj, KOBJ_ADD, env); +} + +struct intel_gvt_mpt acrn_gvt_mpt = { + //.detect_host = acrngt_detect_host, + .host_init = acrngt_host_init, + .host_exit = acrngt_host_exit, + .attach_vgpu = acrngt_attach_vgpu, + .detach_vgpu = acrngt_detach_vgpu, + .inject_msi = acrngt_inject_msi, + .from_virt_to_mfn = acrngt_virt_to_mfn, + .set_wp_page = acrngt_set_wp_page, + .unset_wp_page = acrngt_unset_wp_page, + .read_gpa = acrngt_read_gpa, + .write_gpa = acrngt_write_gpa, + .gfn_to_mfn = acrngt_gfn_to_pfn, + .map_gfn_to_mfn = acrngt_map_gfn_to_mfn, + .set_trap_area = acrngt_set_trap_area, + .set_pvmmio = acrngt_set_pvmmio, + .pause_domain = acrn_pause_domain, + .unpause_domain= acrn_unpause_domain, + .dom0_ready = acrngt_dom0_ready, +}; +EXPORT_SYMBOL_GPL(acrn_gvt_mpt); + +static int __init acrngt_init(void) +{ + /* todo: to support need implment check_gfx_iommu_enabled func */ + gvt_dbg_core("acrngt loaded\n"); + return 0; +} + +static void __exit acrngt_exit(void) +{ + gvt_dbg_core("acrngt: unloaded\n"); +} + +module_init(acrngt_init); +module_exit(acrngt_exit); diff --git a/drivers/gpu/drm/i915/gvt/acrn-gvt.h b/drivers/gpu/drm/i915/gvt/acrn-gvt.h new file mode 100644 index 000000000000..0799df2ec557 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/acrn-gvt.h @@ -0,0 +1,81 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef INTEL_GVT_ACRNGT_H +#define INTEL_GVT_ACRNGT_H + +extern struct intel_gvt *gvt_instance; +extern const struct intel_gvt_ops *acrn_intel_gvt_ops; + +#define MAX_HVM_VCPUS_SUPPORTED 127 + +#define VMEM_1MB (1ULL << 20) /* the size of the first 1MB */ + +typedef uint16_t domid_t; + +/* + * acrngt_hvm_dev is a wrapper of a vGPU instance which is reprensented by the + * intel_vgpu structure. Under acrn hypervisor, the acrngt_instance stands for a + * HVM device, which the related resource. + */ +struct acrngt_hvm_dev { + domid_t vm_id; + struct kobject kobj; + struct intel_vgpu *vgpu; + + int nr_vcpu; + struct task_struct *emulation_thread; + + int client; + struct vhm_request *req_buf; + struct vhm_vm *vm; +}; + +struct acrngt_hvm_params { + int vm_id; + int aperture_sz; /* in MB */ + int gm_sz; /* in MB */ + int fence_sz; +}; + +/* + * struct gvt_acrngt should be a single instance to share global + * information for ACRNGT module. 
+ */ +#define GVT_MAX_VGPU_INSTANCE 15 +struct gvt_acrngt { + struct intel_gvt *gvt; + struct intel_vgpu *vgpus[GVT_MAX_VGPU_INSTANCE]; +}; + +static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count); +static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf); + +struct intel_vgpu *acrngt_instance_create(domid_t vm_id, + struct intel_vgpu_type *type); +void acrngt_instance_destroy(struct intel_vgpu *vgpu); + +#endif diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d4726a3358a4..fd6a8ce70fa2 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -173,6 +173,8 @@ struct decode_info { #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3) #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4) +#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5) + #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0) #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2) #define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3) @@ -811,7 +813,7 @@ static bool is_shadowed_mmio(unsigned int offset) return ret; } -static inline bool is_force_nonpriv_mmio(unsigned int offset) +bool is_force_nonpriv_mmio(unsigned int offset) { return (offset >= 0x24d0 && offset < 0x2500); } @@ -863,6 +865,15 @@ static int cmd_reg_handler(struct parser_exec_state *s, patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); } + /* Re-direct the non-context MMIO access to VGT_SCRATCH_REG, it + * has no functional impact to HW. + */ + if (!strcmp(cmd, "lri") || !strcmp(cmd, "lrr-dst") + || !strcmp(cmd, "lrm") || !strcmp(cmd, "pipe_ctrl")) { + if (intel_gvt_mmio_is_non_context(gvt, offset)) + patch_value(s, cmd_ptr(s, index), VGT_SCRATCH_REG); + } + /* TODO: Update the global mask if this MMIO is a masked-MMIO */ intel_gvt_mmio_set_cmd_accessed(gvt, offset); return 0; @@ -899,6 +910,34 @@ static int cmd_handler_lri(struct parser_exec_state *s) if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); + + if (s->vgpu->entire_nonctxmmio_checked + && intel_gvt_mmio_is_non_context(s->vgpu->gvt, cmd_reg(s, i))) { + int offset = cmd_reg(s, i); + int value = cmd_val(s, i + 1); + u32 *host_cache = s->vgpu->gvt->mmio.mmio_host_cache; + + if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) { + u32 mask = value >> 16; + + vgpu_vreg(s->vgpu, offset) = + (vgpu_vreg(s->vgpu, offset) & ~mask) + | (value & mask); + } else { + vgpu_vreg(s->vgpu, offset) = value; + } + + if (host_cache[cmd_reg(s, i) >> 2] != + vgpu_vreg(s->vgpu, offset)) { + + gvt_err("vgpu%d unexpected non-context MMIOs" + "access by cmd 0x%x:0x%x,0x%x\n", + s->vgpu->id, + (u32)cmd_reg(s, i), + cmd_val(s, i + 1), + host_cache[cmd_reg(s, i) >> 2]); + } + } } return ret; } @@ -1050,6 +1089,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, s->workload->pending_events); + patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } @@ -1209,7 +1249,8 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, if (!info->async_flip) return 0; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_BROXTON(dev_priv)) { stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1237,7 +1278,8 @@ static int gen8_update_plane_mmio_from_mi_display_flip( 
set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_BROXTON(dev_priv)) { set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10), @@ -1261,7 +1303,8 @@ static int decode_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv)) return gen8_decode_mi_display_flip(s, info); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_BROXTON(dev_priv)) return skl_decode_mi_display_flip(s, info); return -ENODEV; @@ -1274,6 +1317,7 @@ static int check_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) + || IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) return gen8_check_mi_display_flip(s, info); return -ENODEV; @@ -1287,6 +1331,7 @@ static int update_plane_mmio_from_mi_display_flip( if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) + || IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) return gen8_update_plane_mmio_from_mi_display_flip(s, info); return -ENODEV; @@ -1568,6 +1613,7 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) struct intel_gvt *gvt = s->vgpu->gvt; if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) @@ -2278,6 +2324,9 @@ static struct cmd_info cmd_info[] = { {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, + {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, + NULL}, + {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, @@ -2599,11 +2648,39 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return ret; } +#define GEN8_PDPES 4 +int gvt_emit_pdps(struct intel_vgpu_workload *workload) +{ + const int num_cmds = GEN8_PDPES * 2; + struct drm_i915_gem_request *req = workload->req; + struct intel_engine_cs *engine = req->engine; + u32 *cs; + u32 *pdps = (u32 *)workload->shadow_mm->shadow_page_table; + int i; + + cs = intel_ring_begin(req, num_cmds * 2 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(num_cmds); + for (i = 0; i < GEN8_PDPES; i++) { + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = pdps[i * 2]; + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = pdps[i * 2 + 1]; + } + *cs++ = MI_NOOP; + intel_ring_advance(req, cs); + + return 0; +} + static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; unsigned long gma_head, gma_tail, gma_top, guest_rb_size; - u32 *cs; + void *shadow_ring_buffer_va; + int ring_id = workload->ring_id; int ret; guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); @@ -2616,34 +2693,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_tail = workload->rb_start + workload->rb_tail; gma_top = workload->rb_start + guest_rb_size; - /* allocate shadow ring buffer */ - cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); - if (IS_ERR(cs)) - return PTR_ERR(cs); + if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) { + void *va = vgpu->reserve_ring_buffer_va[ring_id]; + /* realloc the new ring buffer if needed */ + vgpu->reserve_ring_buffer_va[ring_id] = + 
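/*
 * Illustrative user-space analogue (not driver code) of the grow-only
 * reserve ring buffer pattern used in shadow_workload_ring_buffer(): the
 * buffer is reallocated only when a workload's ring contents exceed the
 * currently reserved size, and otherwise keeps its largest-seen allocation.
 * The struct and function names are made up; the kernel side uses krealloc().
 */
#include <stdlib.h>
#include <stddef.h>

struct reserve_buf {
	void *va;
	size_t size;
};

static int reserve_buf_fit(struct reserve_buf *buf, size_t needed)
{
	void *va;

	if (needed <= buf->size)
		return 0;		/* current reservation is enough */

	va = realloc(buf->va, needed);
	if (!va)
		return -1;		/* old buffer is still valid */

	buf->va = va;
	buf->size = needed;
	return 0;
}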
krealloc(va, workload->rb_len, GFP_KERNEL); + if (!vgpu->reserve_ring_buffer_va[ring_id]) { + gvt_vgpu_err("fail to alloc reserve ring buffer\n"); + return -ENOMEM; + } + vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len; + } + + shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id]; /* get shadow ring buffer va */ - workload->shadow_ring_buffer_va = cs; + workload->shadow_ring_buffer_va = shadow_ring_buffer_va; /* head > tail --> copy head <-> top */ if (gma_head > gma_tail) { ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, - gma_head, gma_top, cs); + gma_head, gma_top, shadow_ring_buffer_va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } - cs += ret / sizeof(u32); + shadow_ring_buffer_va += ret; gma_head = workload->rb_start; } /* copy head or start <-> tail */ - ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs); + ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, + shadow_ring_buffer_va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } - cs += ret / sizeof(u32); - intel_ring_advance(workload->req, cs); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 286703643002..1356803a0586 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -46,4 +46,5 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload); int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); +int gvt_emit_pdps(struct intel_vgpu_workload *workload); #endif diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 3c318439a659..71279fad904e 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -169,6 +169,23 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + if (IS_BROXTON(dev_priv)) { + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | + BXT_DE_PORT_HP_DDIB | + BXT_DE_PORT_HP_DDIC); + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIA; + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIB; + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIC; + return; + } + vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -279,15 +296,19 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) port->dpcd = NULL; } -static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, - int type, unsigned int resolution) +static int setup_virtual_monitor(struct intel_vgpu *vgpu, int port_num, + int type, unsigned int resolution, void *edid, bool is_dp) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); + int valid_extensions = 1; if (WARN_ON(resolution >= GVT_EDID_NUM)) return -EINVAL; - port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); + if (edid) + valid_extensions += ((struct edid *)edid)->extensions; + port->edid = kzalloc(sizeof(*(port->edid)) + + valid_extensions * EDID_SIZE, GFP_KERNEL); if (!port->edid) return -ENOMEM; @@ -297,13 +318,24 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, return -ENOMEM; } - memcpy(port->edid->edid_block, 
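/*
 * Illustrative sketch, not part of the patch: sizing a virtual monitor's
 * EDID when the source EDID carries extension blocks, as the
 * valid_extensions handling above does.  Each EDID block is 128 bytes and
 * byte 126 of the base block holds the extension count, so the total copy
 * length is 128 * (1 + extensions).  Names below are made up for the sketch.
 */
#include <stddef.h>
#include <stdint.h>

#define EDID_BLOCK_SIZE	128

static size_t edid_total_length(const uint8_t *base_block)
{
	size_t extensions = base_block[126];	/* extension block count */

	return EDID_BLOCK_SIZE * (1 + extensions);
}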
virtual_dp_monitor_edid[resolution], - EDID_SIZE); + if (edid) + memcpy(port->edid->edid_block, edid, + EDID_SIZE * valid_extensions); + else + memcpy(port->edid->edid_block, + virtual_dp_monitor_edid[resolution], + EDID_SIZE); + port->edid->data_valid = true; - memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); - port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + if (is_dp) { + memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); + port->dpcd->data_valid = true; + + + port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + } + port->type = type; emulate_monitor_status_change(vgpu); @@ -403,6 +435,69 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) emulate_vblank(vgpu); } +static void intel_gvt_vblank_work(struct work_struct *w) +{ + struct intel_gvt_pipe_info *pipe_info = container_of(w, + struct intel_gvt_pipe_info, vblank_work); + struct intel_gvt *gvt = pipe_info->gvt; + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + for_each_active_vgpu(gvt, vgpu, id) + emulate_vblank_on_pipe(vgpu, pipe_info->pipe_num); + mutex_unlock(&gvt->lock); +} + +void intel_gvt_init_pipe_info(struct intel_gvt *gvt) +{ + int pipe; + + for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { + gvt->pipe_info[pipe].pipe_num = pipe; + gvt->pipe_info[pipe].gvt = gvt; + INIT_WORK(&gvt->pipe_info[pipe].vblank_work, + intel_gvt_vblank_work); + } +} + +int bxt_setup_virtual_monitors(struct intel_vgpu *vgpu) +{ + struct intel_connector *connector = NULL; + struct drm_connector_list_iter conn_iter; + int pipe = 0; + int ret = 0; + int port = 0; + + drm_connector_list_iter_begin(&vgpu->gvt->dev_priv->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->encoder->get_hw_state(connector->encoder, &pipe) + && connector->detect_edid) { + /* Get (Dom0) port associated with current pipe. 
*/ + port = enc_to_dig_port( + &(connector->encoder->base))->port; + ret = setup_virtual_monitor(vgpu, port, + GVT_HDMI_A + port, 0, + connector->detect_edid, false); + if (ret) + return ret; + } + } + return 0; +} + +void bxt_clean_virtual_monitors(struct intel_vgpu *vgpu) +{ + int port = 0; + + for (port = PORT_A; port < INTEL_GVT_MAX_PORT; port++) { + struct intel_vgpu_port *p = intel_vgpu_port(vgpu, port); + + if (p->edid) + clean_virtual_dp_monitor(vgpu, port); + } +} + /** * intel_vgpu_clean_display - clean vGPU virtual display emulation * @vgpu: a vGPU @@ -414,7 +509,9 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_BROXTON(dev_priv)) + bxt_clean_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -436,12 +533,14 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, - resolution); + if (IS_BROXTON(dev_priv)) + return bxt_setup_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + return setup_virtual_monitor(vgpu, + PORT_D, GVT_DP_D, resolution, NULL, true); else - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, - resolution); + return setup_virtual_monitor(vgpu, + PORT_B, GVT_DP_B, resolution, NULL, true); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index d73de22102e2..3b8c52cd61b2 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -140,6 +140,7 @@ enum intel_vgpu_port_type { GVT_DP_B, GVT_DP_C, GVT_DP_D, + GVT_HDMI_A, GVT_HDMI_B, GVT_HDMI_C, GVT_HDMI_D, diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 42cd09ec63fa..af06f6374163 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -55,10 +55,6 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } - if (edid->current_edid_read >= EDID_SIZE) { - gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); - return 0; - } if (!edid->edid_available) { gvt_vgpu_err("Reading EDID but EDID is not available!\n"); @@ -77,19 +73,30 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) return chr; } -static inline int get_port_from_gmbus0(u32 gmbus0) +static inline int get_port_from_gmbus0(struct intel_vgpu *vgpu, u32 gmbus0) { + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; - if (port_select == 2) - port = PORT_E; - else if (port_select == 4) - port = PORT_C; - else if (port_select == 5) - port = PORT_B; - else if (port_select == 6) - port = PORT_D; + if (IS_BROXTON(dev_priv)) { + if (port_select == 1) + port = PORT_B; + else if (port_select == 2) + port = PORT_C; + else if (port_select == 3) + port = PORT_A; + } else { + if (port_select == 2) + port = PORT_E; + else if (port_select == 4) + port = PORT_C; + else if (port_select == 5) + port = PORT_B; + else if (port_select == 6) + port = PORT_D; + } + return port; } @@ -116,7 +123,7 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, if (pin_select == 0) return 0; - port = get_port_from_gmbus0(pin_select); + port = 
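/*
 * Illustrative sketch, not part of the patch: the GMBUS pin-select to port
 * mapping from get_port_from_gmbus0() above, expressed as lookup tables.
 * The pin values mirror the if/else chain (Broxton uses pins 1..3, older
 * platforms use pins 2/4/5/6).  All demo_* names are made up; the driver
 * uses the PORT_A..PORT_E enum from i915.
 */
#include <stdbool.h>

enum demo_port {
	DEMO_PORT_INVALID = 0,
	DEMO_PORT_A, DEMO_PORT_B, DEMO_PORT_C, DEMO_PORT_D, DEMO_PORT_E,
};

static const enum demo_port bxt_gmbus_pin_to_port[] = {
	[1] = DEMO_PORT_B,
	[2] = DEMO_PORT_C,
	[3] = DEMO_PORT_A,
};

static const enum demo_port gen_gmbus_pin_to_port[] = {
	[2] = DEMO_PORT_E,
	[4] = DEMO_PORT_C,
	[5] = DEMO_PORT_B,
	[6] = DEMO_PORT_D,
};

static enum demo_port demo_pin_to_port(bool is_bxt, unsigned int pin)
{
	if (is_bxt)
		return pin < 4 ? bxt_gmbus_pin_to_port[pin] : DEMO_PORT_INVALID;
	return pin < 7 ? gen_gmbus_pin_to_port[pin] : DEMO_PORT_INVALID;
}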
get_port_from_gmbus0(vgpu, pin_select); if (WARN_ON(port < 0)) return 0; @@ -434,6 +441,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, u32 value = *(u32 *)p_data; int aux_data_for_write = 0; int reg = get_aux_ch_reg(offset); + uint8_t rxbuf[20]; + size_t rxsize; if (reg != AUX_CH_CTL) { vgpu_vreg(vgpu, offset) = value; @@ -441,6 +450,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } msg_length = AUX_CTL_MSG_LENGTH(value); + for (rxsize = 0; rxsize < msg_length; rxsize += 4) + intel_dp_unpack_aux(vgpu_vreg(vgpu, offset + 4 + rxsize), + rxbuf + rxsize, msg_length - rxsize); // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); addr = (msg >> 8) & 0xffff; @@ -480,12 +492,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } } } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { - /* TODO - * We only support EDID reading from I2C_over_AUX. And - * we do not expect the index mode to be used. Right now - * the WRITE operation is ignored. It is good enough to - * support the gfx driver to do EDID access. + /* We only support EDID reading from I2C_over_AUX. + * But if EDID has extension blocks, we use this write + * operation to set block starting address */ + if (addr == EDID_ADDR) { + i2c_edid->current_edid_read = rxbuf[4]; + } } else { if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ)) return; diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index f6dfc8b795ec..11a75d69062d 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -48,7 +48,7 @@ struct intel_vgpu_edid_data { bool data_valid; - unsigned char edid_block[EDID_SIZE]; + unsigned char edid_block[0]; }; enum gmbus_cycle_type { diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index e5320b4eb698..e7638444ebed 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -46,6 +46,7 @@ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) +bool gvt_shadow_wa_ctx = false; static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); static int context_switch_events[] = { @@ -368,7 +369,7 @@ static void free_workload(struct intel_vgpu_workload *workload) #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) -static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) +static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; struct intel_shadow_bb_entry *entry_obj; @@ -379,7 +380,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); if (IS_ERR(vma)) { - return; + return PTR_ERR(vma); } /* FIXME: we are not tracking our pinned VMA leaving it @@ -392,6 +393,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) if (gmadr_bytes == 8) entry_obj->bb_start_cmd_va[2] = 0; } + return 0; } static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) @@ -420,7 +422,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) return 0; } -static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { struct i915_vma *vma; unsigned char *per_ctx_va = @@ -428,12 +430,12 @@ static void prepare_shadow_wa_ctx(struct 
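/*
 * Illustrative sketch, not part of the patch: why edid_block becomes a
 * zero-length (flexible) array in the edid.h change above.  The EDID payload
 * now varies with the number of extension blocks, so the structure is
 * allocated with the payload appended, much as setup_virtual_monitor() does
 * with kzalloc().  The demo_* names are made up for this sketch.
 */
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct demo_edid_data {
	bool data_valid;
	unsigned char edid_block[];	/* C99 flexible array member */
};

static struct demo_edid_data *demo_edid_alloc(const void *edid, size_t len)
{
	struct demo_edid_data *d = calloc(1, sizeof(*d) + len);

	if (!d)
		return NULL;
	memcpy(d->edid_block, edid, len);
	d->data_valid = true;
	return d;
}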
intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.size; if (wa_ctx->indirect_ctx.size == 0) - return; + return 0; vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, CACHELINE_BYTES, 0); if (IS_ERR(vma)) { - return; + return PTR_ERR(vma); } /* FIXME: we are not tracking our pinned VMA leaving it @@ -447,26 +449,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) memset(per_ctx_va, 0, CACHELINE_BYTES); update_wa_ctx_2_shadow_ctx(wa_ctx); -} - -static int prepare_execlist_workload(struct intel_vgpu_workload *workload) -{ - struct intel_vgpu *vgpu = workload->vgpu; - struct execlist_ctx_descriptor_format ctx[2]; - int ring_id = workload->ring_id; - - intel_vgpu_pin_mm(workload->shadow_mm); - intel_vgpu_sync_oos_pages(workload->vgpu); - intel_vgpu_flush_post_shadow(workload->vgpu); - prepare_shadow_batch_buffer(workload); - prepare_shadow_wa_ctx(&workload->wa_ctx); - if (!workload->emulate_schedule_in) - return 0; - - ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); - ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); - - return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); + return 0; } static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) @@ -489,13 +472,64 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) } } -static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +static int prepare_execlist_workload(struct intel_vgpu_workload *workload) { - if (!wa_ctx->indirect_ctx.obj) - return; + struct intel_vgpu *vgpu = workload->vgpu; + struct execlist_ctx_descriptor_format ctx[2]; + int ring_id = workload->ring_id; + int ret; + + ret = intel_vgpu_pin_mm(workload->shadow_mm); + if (ret) { + gvt_vgpu_err("fail to vgpu pin mm\n"); + goto out; + } + + ret = intel_vgpu_sync_oos_pages(workload->vgpu); + if (ret) { + gvt_vgpu_err("fail to vgpu sync oos pages\n"); + goto err_unpin_mm; + } + + ret = intel_vgpu_flush_post_shadow(workload->vgpu); + if (ret) { + gvt_vgpu_err("fail to flush post shadow\n"); + goto err_unpin_mm; + } + + ret = prepare_shadow_batch_buffer(workload); + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); + goto err_unpin_mm; + } - i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); - i915_gem_object_put(wa_ctx->indirect_ctx.obj); + if (gvt_shadow_wa_ctx) + ret = prepare_shadow_wa_ctx(&workload->wa_ctx); + + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); + goto err_shadow_batch; + } + + if (!workload->emulate_schedule_in) + return 0; + + ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); + ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); + + ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); + if (!ret) + goto out; + else + gvt_vgpu_err("fail to emulate execlist schedule in\n"); + + release_shadow_wa_ctx(&workload->wa_ctx); +err_shadow_batch: + release_shadow_batch_buffer(workload); +err_unpin_mm: + intel_vgpu_unpin_mm(workload->shadow_mm); +out: + return ret; } static int complete_execlist_workload(struct intel_vgpu_workload *workload) @@ -511,8 +545,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); + if(!workload->status || gvt_shadow_wa_ctx) { + release_shadow_batch_buffer(workload); + release_shadow_wa_ctx(&workload->wa_ctx); + } if (workload->status || 
(vgpu->resetting_eng & ENGINE_MASK(ring_id))) { /* if workload->status is not successful means HW GPU @@ -738,7 +774,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) { struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; struct execlist_ctx_descriptor_format desc[2]; - int i, ret; + int i, ret = 0; desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1); desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0); @@ -757,6 +793,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) } } + mutex_unlock(&vgpu->gvt->lock); + mutex_lock(&vgpu->gvt->sched_lock); + mutex_lock(&vgpu->gvt->lock); /* submit workload */ for (i = 0; i < ARRAY_SIZE(desc); i++) { if (!desc[i].valid) @@ -764,11 +803,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) ret = submit_context(vgpu, ring_id, &desc[i], i == 0); if (ret) { gvt_vgpu_err("failed to submit desc %d\n", i); - return ret; + goto out; } } - return 0; +out: + mutex_unlock(&vgpu->gvt->sched_lock); + return ret; inv_desc: gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n", @@ -819,10 +860,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) { + enum intel_engine_id i; + struct intel_engine_cs *engine; + clean_workloads(vgpu, ALL_ENGINES); kmem_cache_destroy(vgpu->workloads); + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + kfree(vgpu->reserve_ring_buffer_va[i]); + vgpu->reserve_ring_buffer_va[i] = NULL; + vgpu->reserve_ring_buffer_size[i] = 0; + } + } +#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8) int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) { enum intel_engine_id i; @@ -842,7 +894,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) if (!vgpu->workloads) return -ENOMEM; + /* each ring has a shadow ring buffer until vgpu destroyed */ + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu->reserve_ring_buffer_va[i] = + kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL); + if (!vgpu->reserve_ring_buffer_va[i]) { + gvt_vgpu_err("fail to alloc reserve ring buffer\n"); + goto out; + } + vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; + } return 0; +out: + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + if (vgpu->reserve_ring_buffer_size[i]) { + kfree(vgpu->reserve_ring_buffer_va[i]); + vgpu->reserve_ring_buffer_va[i] = NULL; + vgpu->reserve_ring_buffer_size[i] = 0; + } + } + return -ENOMEM; } void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c new file mode 100644 index 000000000000..020b447f801e --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -0,0 +1,422 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "../i915_drv.h" +#include +#include + +#define FORMAT_NUM 16 +struct pixel_format { + int drm_format; /* Pixel format in DRM definition */ + int bpp; /* Bits per pixel, 0 indicates invalid */ + char *desc; /* The description */ +}; + +/* non-supported format has bpp default to 0 */ +static struct pixel_format primary_pixel_formats[FORMAT_NUM] = { + [0b0010] = {DRM_FORMAT_C8, 8, "8-bit Indexed"}, + [0b0101] = {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, + [0b0110] = {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, + [0b1000] = {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"}, + [0b1010] = {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, + [0b1100] = {DRM_FORMAT_XRGB161616_VGT, 64, + "64-bit RGBX Floating Point(16:16:16:16 MSB-X:B:G:R)"}, + [0b1110] = {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, +}; + +/* non-supported format has bpp default to 0 */ +static struct pixel_format skl_pixel_formats[] = { + {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"}, + {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"}, + {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"}, + {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"}, + + {DRM_FORMAT_C8, 8, "8-bit Indexed"}, + {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, + {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"}, + {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, + + {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"}, + {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, + {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"}, + {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, + + {DRM_FORMAT_XRGB161616_VGT, 64, "64-bit XRGB (16:16:16:16 MSB-X:R:G:B)"}, + {DRM_FORMAT_XBGR161616_VGT, 64, "64-bit XBGR (16:16:16:16 MSB-X:B:G:R)"}, + + /* non-supported format has bpp default to 0 */ + {0, 0, NULL}, +}; + +static int skl_format_to_drm(int format, bool rgb_order, bool alpha, int yuv_order) +{ + int skl_pixel_formats_index = 14; + + switch (format) { + case PLANE_CTL_FORMAT_INDEXED: + skl_pixel_formats_index = 4; + break; + case PLANE_CTL_FORMAT_RGB_565: + skl_pixel_formats_index = 5; + break; + case PLANE_CTL_FORMAT_XRGB_8888: + if (rgb_order) + skl_pixel_formats_index = alpha ? 6 : 7; + else + skl_pixel_formats_index = alpha ? 8 : 9; + break; + case PLANE_CTL_FORMAT_XRGB_2101010: + skl_pixel_formats_index = rgb_order ? 10 : 11; + break; + + case PLANE_CTL_FORMAT_XRGB_16161616F: + skl_pixel_formats_index = rgb_order ? 
12 : 13; + break; + + case PLANE_CTL_FORMAT_YUV422: + skl_pixel_formats_index = yuv_order >> 16; + if (skl_pixel_formats_index > 3) + return -EINVAL; + break; + + default: + break; + } + + return skl_pixel_formats_index; +} + +static u32 gvt_get_stride(struct intel_vgpu *vgpu, int pipe, u32 tiled, + int stride_mask, int bpp) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask; + u32 stride = stride_reg; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { + switch (tiled) { + case PLANE_CTL_TILED_LINEAR: + stride = stride_reg * 64; + break; + case PLANE_CTL_TILED_X: + stride = stride_reg * 512; + break; + case PLANE_CTL_TILED_Y: + stride = stride_reg * 128; + break; + case PLANE_CTL_TILED_YF: + if (bpp == 8) + stride = stride_reg * 64; + else if (bpp == 16 || bpp == 32 || bpp == 64) + stride = stride_reg * 128; + else + DRM_DEBUG_KMS("skl: unsupported bpp:%d\n", bpp); + break; + default: + DRM_DEBUG_KMS("skl: unsupported tile format:%x\n", tiled); + } + } + + return stride; +} + +static int gvt_decode_primary_plane_format(struct intel_vgpu *vgpu, + int pipe, struct gvt_primary_plane_format *plane) +{ + u32 val, fmt; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + val = vgpu_vreg(vgpu, DSPCNTR(pipe)); + plane->enabled = !!(val & DISPLAY_PLANE_ENABLE); + if (!plane->enabled) + return 0; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { + plane->tiled = (val & PLANE_CTL_TILED_MASK) >> _PLANE_CTL_TILED_SHIFT; + fmt = skl_format_to_drm( + val & PLANE_CTL_FORMAT_MASK, + val & PLANE_CTL_ORDER_RGBX, + val & PLANE_CTL_ALPHA_MASK, + val & PLANE_CTL_YUV422_ORDER_MASK); + plane->bpp = skl_pixel_formats[fmt].bpp; + plane->drm_format = skl_pixel_formats[fmt].drm_format; + } else { + plane->tiled = !!(val & DISPPLANE_TILED); + fmt = (val & DISPPLANE_PIXFORMAT_MASK) >> _PRI_PLANE_FMT_SHIFT; + plane->bpp = primary_pixel_formats[fmt].bpp; + plane->drm_format = primary_pixel_formats[fmt].drm_format; + } + + if (((IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && !skl_pixel_formats[fmt].bpp) + || (!(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && !primary_pixel_formats[fmt].bpp)) { + gvt_err("Non-supported pixel format (0x%x)\n", fmt); + return -EINVAL; + } + + plane->hw_format = fmt; + + plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & GTT_PAGE_MASK; + + plane->stride = gvt_get_stride(vgpu, pipe, (plane->tiled << 10), + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))? 
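/*
 * Illustrative sketch, not part of the patch: the gen9 stride units decoded
 * by gvt_get_stride() above.  The stride register counts tile rows (or
 * 64-byte units for linear surfaces), so the byte stride is the register
 * value times a per-tiling multiplier; e.g. an X-tiled plane with a register
 * value of 8 has a byte stride of 8 * 512 = 4096.  demo_* names are made up.
 */
#include <stdint.h>

enum demo_tiling { DEMO_LINEAR, DEMO_TILE_X, DEMO_TILE_Y };

static uint32_t demo_gen9_stride_bytes(enum demo_tiling t, uint32_t reg_val)
{
	switch (t) {
	case DEMO_TILE_X:
		return reg_val * 512;	/* X tile: 512-byte rows */
	case DEMO_TILE_Y:
		return reg_val * 128;	/* Y tile: 128-byte rows */
	case DEMO_LINEAR:
	default:
		return reg_val * 64;	/* linear: 64-byte units */
	}
}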
+ (_PRI_PLANE_STRIDE_MASK >> 6) + : _PRI_PLANE_STRIDE_MASK, plane->bpp); + + plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >> + _PIPE_H_SRCSZ_SHIFT; + plane->width += 1; + plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) & + _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT; + plane->height += 1; /* raw height is one minus the real value */ + + val = vgpu_vreg(vgpu, DSPTILEOFF(pipe)); + plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >> + _PRI_PLANE_X_OFF_SHIFT; + plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >> + _PRI_PLANE_Y_OFF_SHIFT; + + return 0; +} + +#define CURSOR_MODE_NUM (1 << 6) +struct cursor_mode_format { + int drm_format; /* Pixel format in DRM definition */ + u8 bpp; /* Bits per pixel; 0 indicates invalid */ + u32 width; /* In pixel */ + u32 height; /* In lines */ + char *desc; /* The description */ +}; + +/* non-supported format has bpp default to 0 */ +static struct cursor_mode_format cursor_pixel_formats[CURSOR_MODE_NUM] = { + [0b100010] = {DRM_FORMAT_ARGB8888, 32, 128, 128,"128x128 32bpp ARGB"}, + [0b100011] = {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"}, + [0b100111] = {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, + [0b000111] = {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},//actually inverted... figure this out later +}; + +static int gvt_decode_cursor_plane_format(struct intel_vgpu *vgpu, + int pipe, struct gvt_cursor_plane_format *plane) +{ + u32 val, mode; + u32 alpha_plane, alpha_force; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + val = vgpu_vreg(vgpu, CURCNTR(pipe)); + mode = val & CURSOR_MODE; + plane->enabled = (mode != CURSOR_MODE_DISABLE); + if (!plane->enabled) + return 0; + + if (!cursor_pixel_formats[mode].bpp) { + gvt_err("Non-supported cursor mode (0x%x)\n", mode); + return -EINVAL; + } + plane->mode = mode; + plane->bpp = cursor_pixel_formats[mode].bpp; + plane->drm_format = cursor_pixel_formats[mode].drm_format; + plane->width = cursor_pixel_formats[mode].width; + plane->height = cursor_pixel_formats[mode].height; + + alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >> + _CURSOR_ALPHA_PLANE_SHIFT; + alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >> + _CURSOR_ALPHA_FORCE_SHIFT; + if (alpha_plane || alpha_force) + gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n", + alpha_plane, alpha_force); + + plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & GTT_PAGE_MASK; + + val = vgpu_vreg(vgpu, CURPOS(pipe)); + plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT; + plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT; + plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT; + plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT; + + return 0; +} + +#define FORMAT_NUM_SRRITE (1 << 3) + +static struct pixel_format sprite_pixel_formats[FORMAT_NUM_SRRITE] = { + [0b000] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"}, + [0b001] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"}, + [0b010] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"}, + [0b011] = {DRM_FORMAT_XRGB161616_VGT, 64, + "RGB 64-bit 16:16:16:16 Floating Point"}, + [0b100] = {DRM_FORMAT_AYUV, 32, "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"}, +}; + +static int gvt_decode_sprite_plane_format(struct intel_vgpu *vgpu, + int pipe, struct gvt_sprite_plane_format *plane) +{ + u32 val, fmt; + u32 width; + u32 color_order, yuv_order; + int drm_format; + + val = vgpu_vreg(vgpu, SPRCTL(pipe)); + plane->enabled = !!(val & SPRITE_ENABLE); + if (!plane->enabled) + return 0; + + plane->tiled = 
!!(val & SPRITE_TILED); + color_order = !!(val & SPRITE_RGB_ORDER_RGBX); + yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >> + _SPRITE_YUV_ORDER_SHIFT; + + fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT; + if (!sprite_pixel_formats[fmt].bpp) { + gvt_err("Non-supported pixel format (0x%x)\n", fmt); + return -EINVAL; + } + plane->hw_format = fmt; + plane->bpp = sprite_pixel_formats[fmt].bpp; + drm_format = sprite_pixel_formats[fmt].drm_format; + + /* Order of RGB values in an RGBxxx buffer may be ordered RGB or + * BGR depending on the state of the color_order field + */ + if (!color_order) { + if (drm_format == DRM_FORMAT_XRGB2101010) + drm_format = DRM_FORMAT_XBGR2101010; + else if (drm_format == DRM_FORMAT_XRGB8888) + drm_format = DRM_FORMAT_XBGR8888; + } + + if (drm_format == DRM_FORMAT_YUV422) { + switch (yuv_order){ + case 0: + drm_format = DRM_FORMAT_YUYV; + break; + case 1: + drm_format = DRM_FORMAT_UYVY; + break; + case 2: + drm_format = DRM_FORMAT_YVYU; + break; + case 3: + drm_format = DRM_FORMAT_VYUY; + break; + default: + /* yuv_order has only 2 bits */ + BUG(); + break; + } + } + + plane->drm_format = drm_format; + + plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & GTT_PAGE_MASK; + plane->width = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) & + _SPRITE_STRIDE_MASK; + plane->width /= plane->bpp / 8; /* raw width in bytes */ + + val = vgpu_vreg(vgpu, SPRSIZE(pipe)); + plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >> + _SPRITE_SIZE_HEIGHT_SHIFT; + width = (val & _SPRITE_SIZE_WIDTH_MASK) >> _SPRITE_SIZE_WIDTH_SHIFT; + plane->height += 1; /* raw height is one minus the real value */ + width += 1; /* raw width is one minus the real value */ + if (plane->width != width) + gvt_dbg_core("sprite_plane: plane->width=%d, width=%d\n", + plane->width, width); + + val = vgpu_vreg(vgpu, SPRPOS(pipe)); + plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT; + plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT; + + val = vgpu_vreg(vgpu, SPROFFSET(pipe)); + plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >> + _SPRITE_OFFSET_START_X_SHIFT; + plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >> + _SPRITE_OFFSET_START_Y_SHIFT; + return 0; +} + +/** + * gvt_decode_fb_format - Decode framebuffer information from raw vMMIO + * @gvt: GVT device + * @vmid: guest domain ID + * @fb: frame buffer infomation of guest. + * This function is called for query frame buffer format, so that gl can + * display guest fb in Dom0 + * + * Returns: + * Zero on success, negative error code if failed. 
+ */ +int gvt_decode_fb_format(struct intel_gvt *gvt, int id, struct gvt_fb_format *fb) +{ + int i; + struct intel_vgpu *vgpu = NULL; + int ret = 0; + struct drm_i915_private *dev_priv = gvt->dev_priv; + + if (!fb) + return -EINVAL; + + /* TODO: use fine-grained refcnt later */ + mutex_lock(&gvt->lock); + + for_each_active_vgpu(gvt, vgpu, i) + if (vgpu->id == id) + break; + + if (!vgpu) { + gvt_err("Invalid vgpu ID (%d)\n", id); + mutex_unlock(&gvt->lock); + return -ENODEV; + } + + for (i = 0; i < I915_MAX_PIPES; i++) { + struct gvt_pipe_format *pipe = &fb->pipes[i]; + u32 ddi_func_ctl = vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(i)); + + if (!(ddi_func_ctl & TRANS_DDI_FUNC_ENABLE)) { + pipe->ddi_port = DDI_PORT_NONE; + } else { + u32 port = (ddi_func_ctl & TRANS_DDI_PORT_MASK) >> + TRANS_DDI_PORT_SHIFT; + if (port <= DDI_PORT_E) + pipe->ddi_port = port; + else + pipe->ddi_port = DDI_PORT_NONE; + } + + ret |= gvt_decode_primary_plane_format(vgpu, i, &pipe->primary); + ret |= gvt_decode_sprite_plane_format(vgpu, i, &pipe->sprite); + ret |= gvt_decode_cursor_plane_format(vgpu, i, &pipe->cursor); + + if (ret) { + gvt_err("Decode format error for pipe(%d)\n", i); + ret = -EINVAL; + break; + } + } + + mutex_unlock(&gvt->lock); + + return ret; +} diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h new file mode 100644 index 000000000000..180d392a96d9 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -0,0 +1,155 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _GVT_FB_DECODER_H_ +#define _GVT_FB_DECODER_H_ + +#define _PLANE_CTL_FORMAT_SHIFT 24 +#define _PLANE_CTL_TILED_SHIFT 10 +#define _PIPE_V_SRCSZ_SHIFT 0 +#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT) +#define _PIPE_H_SRCSZ_SHIFT 16 +#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT) + +#define _PRI_PLANE_FMT_SHIFT 26 +#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6) +#define _PRI_PLANE_X_OFF_SHIFT 0 +#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT) +#define _PRI_PLANE_Y_OFF_SHIFT 16 +#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT) + +#define _CURSOR_MODE 0x3f +#define _CURSOR_ALPHA_FORCE_SHIFT 8 +#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT) +#define _CURSOR_ALPHA_PLANE_SHIFT 10 +#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT) +#define _CURSOR_POS_X_SHIFT 0 +#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT) +#define _CURSOR_SIGN_X_SHIFT 15 +#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT) +#define _CURSOR_POS_Y_SHIFT 16 +#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT) +#define _CURSOR_SIGN_Y_SHIFT 31 +#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT) + +#define _SPRITE_FMT_SHIFT 25 +#define _SPRITE_COLOR_ORDER_SHIFT 20 +#define _SPRITE_YUV_ORDER_SHIFT 16 +#define _SPRITE_STRIDE_SHIFT 6 +#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT) +#define _SPRITE_SIZE_WIDTH_SHIFT 0 +#define _SPRITE_SIZE_HEIGHT_SHIFT 16 +#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT) +#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT) +#define _SPRITE_POS_X_SHIFT 0 +#define _SPRITE_POS_Y_SHIFT 16 +#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT) +#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT) +#define _SPRITE_OFFSET_START_X_SHIFT 0 +#define _SPRITE_OFFSET_START_Y_SHIFT 16 +#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT) +#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT) + +typedef enum { + FB_MODE_SET_START = 1, + FB_MODE_SET_END, + FB_DISPLAY_FLIP, +}gvt_fb_event_t; + +typedef enum { + DDI_PORT_NONE = 0, + DDI_PORT_B = 1, + DDI_PORT_C = 2, + DDI_PORT_D = 3, + DDI_PORT_E = 4 +} ddi_port_t; + +struct intel_gvt; + +struct gvt_fb_notify_msg { + unsigned vm_id; + unsigned pipe_id; /* id starting from 0 */ + unsigned plane_id; /* primary, cursor, or sprite */ +}; + +/* color space conversion and gamma correction are not included */ +struct gvt_primary_plane_format { + u8 enabled; /* plane is enabled */ + u8 tiled; /* X-tiled */ + u8 bpp; /* bits per pixel */ + u32 hw_format; /* format field in the PRI_CTL register */ + u32 drm_format; /* format in DRM definition */ + u32 base; /* framebuffer base in graphics memory */ + u32 x_offset; /* in pixels */ + u32 y_offset; /* in lines */ + u32 width; /* in pixels */ + u32 height; /* in lines */ + u32 stride; /* in bytes */ +}; + +struct gvt_sprite_plane_format { + u8 enabled; /* plane is enabled */ + u8 tiled; /* X-tiled */ + u8 bpp; /* bits per pixel */ + u32 hw_format; /* format field in the SPR_CTL register */ + u32 drm_format; /* format in DRM definition */ + u32 base; /* sprite base in graphics memory */ + u32 x_pos; /* in pixels */ + u32 y_pos; /* in lines */ + u32 x_offset; /* in pixels */ + u32 y_offset; /* in lines */ + u32 width; /* in pixels */ + u32 height; /* in lines */ +}; + +struct gvt_cursor_plane_format { + u8 enabled; + u8 mode; /* cursor mode select */ + u8 bpp; /* bits per pixel */ + u32 drm_format; /* 
format in DRM definition */ + u32 base; /* cursor base in graphics memory */ + u32 x_pos; /* in pixels */ + u32 y_pos; /* in lines */ + u8 x_sign; /* X Position Sign */ + u8 y_sign; /* Y Position Sign */ + u32 width; /* in pixels */ + u32 height; /* in lines */ + u32 x_hot; /* in pixels */ + u32 y_hot; /* in pixels */ +}; + +struct gvt_pipe_format { + struct gvt_primary_plane_format primary; + struct gvt_sprite_plane_format sprite; + struct gvt_cursor_plane_format cursor; + ddi_port_t ddi_port; /* the DDI port that the pipe is connected to */ +}; + +struct gvt_fb_format{ + struct gvt_pipe_format pipes[4]; +}; + +extern int gvt_decode_fb_format(struct intel_gvt *pdev, int vmid, + struct gvt_fb_format *fb); + +#endif diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index a26c1705430e..be2868867280 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -220,27 +220,27 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) void *mem; int ret; - path = kmalloc(PATH_MAX, GFP_KERNEL); - if (!path) - return -ENOMEM; - mem = kmalloc(info->cfg_space_size, GFP_KERNEL); - if (!mem) { - kfree(path); + if (!mem) return -ENOMEM; - } firmware->cfg_space = mem; mem = kmalloc(info->mmio_size, GFP_KERNEL); if (!mem) { - kfree(path); kfree(firmware->cfg_space); return -ENOMEM; } firmware->mmio = mem; + if (i915_modparams.disable_gvt_fw_loading) + goto expose_firmware; + + path = kmalloc(PATH_MAX, GFP_KERNEL); + if (!path) + return -ENOMEM; + sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); @@ -248,6 +248,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) gvt_dbg_core("request hw state firmware %s...\n", path); ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev); + kfree(path); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index e6dfc3331f4b..31f9cf2a4a46 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -33,12 +33,12 @@ * */ +#include #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" -static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; /* @@ -284,6 +284,50 @@ static inline int gtt_get_entry64(void *pt, return 0; } +struct ggtt_entry64 { + void *pt; + struct intel_gvt_gtt_entry *e; + unsigned long index; + bool hypervisor_access; + unsigned long gpa; + struct intel_vgpu *vgpu; +}; + +#ifdef CONFIG_INTEL_IOMMU +static int gtt_get_entry64__cb(void *_arg) +{ + struct ggtt_entry64 *arg = _arg; + int ret = 0; + + gvt_pause_user_domains(arg->vgpu->gvt->dev_priv); + ret = gtt_get_entry64(arg->pt, arg->e, arg->index, + arg->hypervisor_access, arg->gpa, arg->vgpu); + gvt_unpause_user_domains(arg->vgpu->gvt->dev_priv); + + return ret; +} +#endif + +static inline int gtt_get_entry64__BKL(void *pt, + struct intel_gvt_gtt_entry *e, + unsigned long index, bool hypervisor_access, unsigned long gpa, + struct intel_vgpu *vgpu) +{ +#ifdef CONFIG_INTEL_IOMMU + struct ggtt_entry64 arg = { pt, e, index, hypervisor_access, gpa, vgpu }; + + if (!intel_iommu_gfx_mapped || !IS_BROXTON(vgpu->gvt->dev_priv) || + hypervisor_access || pt) { + return gtt_get_entry64(pt, e, index, hypervisor_access, gpa, vgpu); + } else { + stop_machine(gtt_get_entry64__cb, &arg, NULL); + return 0; + } +#else + return gtt_get_entry64(pt, e, index, hypervisor_access, gpa, vgpu); +#endif +} + static inline int gtt_set_entry64(void *pt, struct 
intel_gvt_gtt_entry *e, unsigned long index, bool hypervisor_access, unsigned long gpa, @@ -309,11 +353,46 @@ static inline int gtt_set_entry64(void *pt, return 0; } +#ifdef CONFIG_INTEL_IOMMU +static int gtt_set_entry64__cb(void *_arg) +{ + struct ggtt_entry64 *arg = _arg; + int ret; + + gvt_pause_user_domains(arg->vgpu->gvt->dev_priv); + ret = gtt_set_entry64(arg->pt, arg->e, arg->index, arg->hypervisor_access, + arg->gpa, arg->vgpu); + gvt_unpause_user_domains(arg->vgpu->gvt->dev_priv); + + return ret; +} +#endif + +static inline int gtt_set_entry64__BKL(void *pt, + struct intel_gvt_gtt_entry *e, + unsigned long index, bool hypervisor_access, unsigned long gpa, + struct intel_vgpu *vgpu) +{ +#ifdef CONFIG_INTEL_IOMMU + struct ggtt_entry64 arg = { pt, e, index, hypervisor_access, gpa, vgpu }; + + if (!intel_iommu_gfx_mapped || !IS_BROXTON(vgpu->gvt->dev_priv) || + hypervisor_access || pt) { + return gtt_set_entry64(pt, e, index, hypervisor_access, gpa, vgpu); + } else { + stop_machine(gtt_set_entry64__cb, &arg, NULL); + return 0; + } +#else + return gtt_set_entry64(pt, e, index, hypervisor_access, gpa, vgpu); +#endif +} + #define GTT_HAW 46 -#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30) -#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21) -#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12) +#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30) +#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21) +#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12) static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) { @@ -403,8 +482,8 @@ DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { - .get_entry = gtt_get_entry64, - .set_entry = gtt_set_entry64, + .get_entry = gtt_get_entry64__BKL, + .set_entry = gtt_set_entry64__BKL, .clear_present = gtt_entry_clear_present, .test_present = gen8_gtt_test_present, .test_pse = gen8_gtt_test_pse, @@ -1219,7 +1298,7 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) struct intel_vgpu_oos_page *oos_page; int ret; - if (!enable_out_of_sync) + if (!i915_modparams.enable_gvt_oos) return 0; list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { @@ -1281,7 +1360,7 @@ static int ppgtt_handle_guest_write_page_table( static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt) { - return enable_out_of_sync + return i915_modparams.enable_gvt_oos && gtt_type_is_pte_pt( guest_page_to_ppgtt_spt(gpt)->guest_page_type) && gpt->write_cnt >= 2; @@ -1349,6 +1428,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; + /* Set guest ppgtt entry. Optional for KVMGT, but MUST for XENGT. 
*/ + intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); ppgtt_get_guest_entry(spt, &we, index); ops->test_pse(&we); @@ -1359,16 +1440,19 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, return ret; } else { if (!test_bit(index, spt->post_shadow_bitmap)) { + int type = spt->shadow_page.type; + ppgtt_get_shadow_entry(spt, &se, index); ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); if (ret) return ret; + ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &se, index); } - ppgtt_set_post_shadow(spt, index); } - if (!enable_out_of_sync) + if (!i915_modparams.enable_gvt_oos) return 0; gpt->write_cnt++; @@ -2264,7 +2348,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_dbg_core("init gtt\n"); if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { + || IS_KABYLAKE(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv)) { gvt->gtt.pte_ops = &gen8_gtt_pte_ops; gvt->gtt.gma_ops = &gen8_gtt_gma_ops; gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table; @@ -2289,7 +2373,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt->gtt.scratch_ggtt_page = virt_to_page(page); gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT); - if (enable_out_of_sync) { + if (i915_modparams.enable_gvt_oos) { ret = setup_spt_oos(gvt); if (ret) { gvt_err("fail to initialize SPT oos\n"); @@ -2320,7 +2404,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) __free_page(gvt->gtt.scratch_ggtt_page); - if (enable_out_of_sync) + if (i915_modparams.enable_gvt_oos) clean_spt_oos(gvt); } diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index c27c6838eaca..e1ab43c6c957 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -42,6 +42,7 @@ struct intel_gvt_host intel_gvt_host; static const char * const supported_hypervisors[] = { [INTEL_GVT_HYPERVISOR_XEN] = "XEN", [INTEL_GVT_HYPERVISOR_KVM] = "KVM", + [INTEL_GVT_HYPERVISOR_ACRN] = "ACRN", }; static const struct intel_gvt_ops intel_gvt_ops = { @@ -90,6 +91,11 @@ int intel_gvt_init_host(void) symbol_get(kvmgt_mpt), "kvmgt"); intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; #endif + /* not in Xen. Try ACRN */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(acrn_gvt_mpt), "acrn-gvt"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_ACRN; + printk("acrn-gvt %s\n", intel_gvt_host.mpt?"found":"not found"); } /* Fail to load MPT modules - bail out */ @@ -109,10 +115,13 @@ static void init_device_info(struct intel_gvt *gvt) struct pci_dev *pdev = gvt->dev_priv->drm.pdev; if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { info->max_support_vgpus = 8; info->cfg_space_size = 256; info->mmio_size = 2 * 1024 * 1024; + /* order of mmio size. assert(2^order == mmio_size) */ + info->mmio_size_order = 9; info->mmio_bar = 0; info->gtt_start_offset = 8 * 1024 * 1024; info->gtt_entry_size = 8; @@ -176,6 +185,46 @@ static int init_service_thread(struct intel_gvt *gvt) return 0; } +void intel_gvt_init_pipe_info(struct intel_gvt *gvt); + +/* + * When enabling multi-plane in DomU, an issue is that the PLANE_BUF_CFG + * register cannot be updated dynamically, since Dom0 has no idea of the + * plane information of DomU's planes, so here we statically allocate the + * ddb entries for all the possible enabled planes. 
+ */ +static void intel_gvt_init_ddb(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct skl_ddb_allocation *ddb = &gvt->ddb; + unsigned int pipe_size, ddb_size, plane_size, plane_cnt; + u16 start, end; + enum pipe pipe; + enum plane_id plane; + + ddb_size = INTEL_INFO(dev_priv)->ddb_size; + ddb_size -= 4; /* 4 blocks for bypass path allocation */ + pipe_size = ddb_size / INTEL_INFO(dev_priv)->num_pipes; + + memset(ddb, 0, sizeof(*ddb)); + for_each_pipe(dev_priv, pipe) { + start = pipe * ddb_size / INTEL_INFO(dev_priv)->num_pipes; + end = start + pipe_size; + ddb->plane[pipe][PLANE_CURSOR].start = end - 8; + ddb->plane[pipe][PLANE_CURSOR].end = end; + + plane_cnt = (INTEL_INFO(dev_priv)->num_sprites[pipe] + 1); + plane_size = (pipe_size - 8) / plane_cnt; + + for_each_universal_plane(dev_priv, pipe, plane) { + ddb->plane[pipe][plane].start = start + + (plane * (pipe_size - 8) / plane_cnt); + ddb->plane[pipe][plane].end = + ddb->plane[pipe][plane].start + plane_size; + } + } +} + /** * intel_gvt_clean_device - clean a GVT device * @gvt: intel gvt device @@ -248,6 +297,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) idr_init(&gvt->vgpu_idr); spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); gvt->dev_priv = dev_priv; init_device_info(gvt); @@ -292,6 +342,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) if (ret) goto out_clean_thread; + intel_gvt_init_pipe_info(gvt); + intel_gvt_init_ddb(gvt); + ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, &intel_gvt_ops); if (ret) { @@ -336,3 +389,41 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) kfree(gvt); return ret; } + +int gvt_pause_user_domains(struct drm_i915_private *dev_priv) +{ + struct intel_vgpu *vgpu; + int id, ret = 0; + + if (!intel_gvt_active(dev_priv)) + return 0; + + for_each_active_vgpu(dev_priv->gvt, vgpu, id) { + ret = intel_gvt_hypervisor_pause_domain(vgpu); + } + + return ret; +} + +int gvt_unpause_user_domains(struct drm_i915_private *dev_priv) +{ + struct intel_vgpu *vgpu; + int id, ret = 0; + + if (!intel_gvt_active(dev_priv)) + return 0; + + for_each_active_vgpu(dev_priv->gvt, vgpu, id) { + ret = intel_gvt_hypervisor_unpause_domain(vgpu); + } + + return ret; +} + +int gvt_dom0_ready(struct drm_i915_private *dev_priv) +{ + if (!intel_gvt_active(dev_priv)) + return 0; + + return intel_gvt_hypervisor_dom0_ready(); +} diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 44b719eda8c4..91b85397c59f 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -52,6 +52,7 @@ enum { INTEL_GVT_HYPERVISOR_XEN = 0, INTEL_GVT_HYPERVISOR_KVM, + INTEL_GVT_HYPERVISOR_ACRN, }; struct intel_gvt_host { @@ -67,6 +68,7 @@ struct intel_gvt_device_info { u32 max_support_vgpus; u32 cfg_space_size; u32 mmio_size; + u32 mmio_size_order; u32 mmio_bar; unsigned long msi_cap_offset; u32 gtt_start_offset; @@ -96,6 +98,7 @@ struct intel_vgpu_fence { struct intel_vgpu_mmio { void *vreg; void *sreg; + struct gvt_shared_page *shared_page; bool disable_warn_untrack; }; @@ -150,7 +153,7 @@ struct intel_vgpu { bool pv_notified; bool failsafe; unsigned int resetting_eng; - void *sched_data; + void *sched_data[I915_NUM_ENGINES]; struct vgpu_sched_ctl sched_ctl; struct intel_vgpu_fence fence; @@ -165,6 +168,9 @@ struct intel_vgpu { struct list_head workload_q_head[I915_NUM_ENGINES]; struct kmem_cache *workloads; atomic_t running_workload_num; + /* 1/2K 
for each reserve ring buffer */ + void *reserve_ring_buffer_va[I915_NUM_ENGINES]; + int reserve_ring_buffer_size[I915_NUM_ENGINES]; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); @@ -185,6 +191,8 @@ struct intel_vgpu { atomic_t released; } vdev; #endif + + bool entire_nonctxmmio_checked; }; struct intel_gvt_gm { @@ -223,10 +231,15 @@ struct intel_gvt_mmio { #define F_CMD_ACCESSED (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) +/* This reg is not in the context */ +#define F_NON_CONTEXT (1 << 7) + struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; + void *mmio_host_cache; + bool host_cache_initialized; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); unsigned int num_tracked_mmio; }; @@ -243,6 +256,7 @@ struct intel_gvt_opregion { }; #define NR_MAX_INTEL_VGPU_TYPES 20 + struct intel_vgpu_type { char name[16]; unsigned int avail_instance; @@ -253,8 +267,34 @@ struct intel_vgpu_type { enum intel_vgpu_edid resolution; }; +struct intel_dom0_plane_regs { + u32 plane_ctl; + u32 plane_stride; + u32 plane_pos; + u32 plane_size; + u32 plane_keyval; + u32 plane_keymsk; + u32 plane_keymax; + u32 plane_offset; + u32 plane_aux_dist; + u32 plane_aux_offset; + u32 plane_surf; + u32 plane_wm[8]; + u32 plane_wm_trans; +}; + +struct intel_gvt_pipe_info { + enum pipe pipe_num; + int owner; + struct intel_gvt *gvt; + struct work_struct vblank_work; + struct intel_dom0_plane_regs dom0_regs[I915_MAX_PLANES - 1]; + int plane_owner[I915_MAX_PLANES - 1]; +}; + struct intel_gvt { struct mutex lock; + struct mutex sched_lock; struct drm_i915_private *dev_priv; struct idr vgpu_idr; /* vGPU IDR pool */ @@ -276,6 +316,9 @@ struct intel_gvt { struct task_struct *service_thread; wait_queue_head_t service_thread_wq; unsigned long service_request; + struct intel_gvt_pipe_info pipe_info[I915_MAX_PIPES]; + + struct skl_ddb_allocation ddb; }; static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) @@ -482,6 +525,9 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); void populate_pvinfo_page(struct intel_vgpu *vgpu); +int gvt_pause_user_domains(struct drm_i915_private *dev_priv); +int gvt_unpause_user_domains(struct drm_i915_private *dev_priv); +int gvt_dom0_ready(struct drm_i915_private *dev_priv); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); @@ -581,7 +627,35 @@ static inline bool intel_gvt_mmio_has_mode_mask( return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; } + /** + * intel_gvt_mmio_is_non_context - check a MMIO is non-context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline bool intel_gvt_mmio_is_non_context( + struct intel_gvt *gvt, unsigned int offset) +{ + return gvt->mmio.mmio_attribute[offset >> 2] & F_NON_CONTEXT; +} + +/** + * intel_gvt_mmio_set_non_context - mark a MMIO is non-context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline void intel_gvt_mmio_set_non_context( + struct intel_gvt *gvt, unsigned int offset) +{ + gvt->mmio.mmio_attribute[offset >> 2] |= F_NON_CONTEXT; +} + #include "trace.h" + +void intel_gvt_mark_noncontext_mmios(struct intel_gvt *gvt); +bool is_force_nonpriv_mmio(unsigned int offset); + #include "mpt.h" #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 
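/*
 * Illustrative sketch, not part of the patch: how the per-register attribute
 * flags above (F_NON_CONTEXT and friends) are kept.  MMIO offsets are 4-byte
 * aligned, so offset >> 2 indexes one attribute byte per register dword, and
 * testing or marking a register is a single bit operation.  The demo_* names
 * and the 2 MB MMIO size are taken as assumptions for this sketch.
 */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_F_NON_CONTEXT	(1 << 7)
#define DEMO_MMIO_SIZE		(2 * 1024 * 1024)

static uint8_t demo_mmio_attribute[DEMO_MMIO_SIZE >> 2];

static bool demo_mmio_is_non_context(unsigned int offset)
{
	return demo_mmio_attribute[offset >> 2] & DEMO_F_NON_CONTEXT;
}

static void demo_mmio_set_non_context(unsigned int offset)
{
	demo_mmio_attribute[offset >> 2] |= DEMO_F_NON_CONTEXT;
}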
a5bed2e71b92..7897a9e76392 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_SKL; else if (IS_KABYLAKE(gvt->dev_priv)) return D_KBL; + else if (IS_BROXTON(gvt->dev_priv)) + return D_BXT; return 0; } @@ -243,6 +245,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv) || IS_KABYLAKE(vgpu->gvt->dev_priv)) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: @@ -304,7 +307,11 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } } + mutex_unlock(&vgpu->gvt->lock); + mutex_lock(&vgpu->gvt->sched_lock); + mutex_lock(&vgpu->gvt->lock); intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); + mutex_unlock(&vgpu->gvt->sched_lock); /* sw will wait for the device to ack the reset request */ vgpu_vreg(vgpu, offset) = 0; @@ -372,27 +379,9 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, +static int mmio_write_empty(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - switch (offset) { - case 0xe651c: - case 0xe661c: - case 0xe671c: - case 0xe681c: - vgpu_vreg(vgpu, offset) = 1 << 17; - break; - case 0xe6c04: - vgpu_vreg(vgpu, offset) = 0x3; - break; - case 0xe6e1c: - vgpu_vreg(vgpu, offset) = 0x2f << 16; - break; - default: - return -EINVAL; - } - - read_vreg(vgpu, offset, p_data, bytes); return 0; } @@ -400,14 +389,18 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; + struct drm_device *dev = &vgpu->gvt->dev_priv->drm; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) + if (data & PIPECONF_ENABLE) { vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; - else + dev->driver->enable_vblank(dev, pipe); + } else { vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; + } intel_gvt_check_vblank_emulation(vgpu->gvt); return 0; } @@ -480,6 +473,14 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, return ret; } +static int pipe_dsl_mmio_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -735,6 +736,42 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int skl_plane_surf_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + i915_reg_t reg_1ac = _MMIO(_REG_701AC(pipe, plane)); + int flip_event = SKL_FLIP_EVENT(pipe, plane); + + write_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg(vgpu, reg_1ac) = vgpu_vreg(vgpu, offset); + + if ((vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) && + (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + + 
set_bit(flip_event, vgpu->irq.flip_done_event[pipe]); + return 0; +} + +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + + write_vreg(vgpu, offset, p_data, bytes); + if ((vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) && + (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + return 0; +} + static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, unsigned int reg) { @@ -839,6 +876,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, data = vgpu_vreg(vgpu, offset); if ((IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv) || IS_KABYLAKE(vgpu->gvt->dev_priv)) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ @@ -1096,6 +1134,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { bool invalid_read = false; + int ret = 0; read_vreg(vgpu, offset, p_data, bytes); @@ -1110,6 +1149,23 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, _vgtif_reg(avail_rs.fence_num) + 4) invalid_read = true; break; + case _vgtif_reg(pv_mmio): + /* a remap happens from guest mmio read operation, the target reg offset + * is in the first DWORD of shared_page. + */ + { + u32 reg = vgpu->mmio.shared_page->reg_addr; + struct intel_gvt_mmio_info *mmio; + + mmio = find_mmio_info(vgpu->gvt, rounddown(reg, 4)); + if (mmio) + ret = mmio->read(vgpu, reg, p_data, bytes); + else + ret = intel_vgpu_default_mmio_read(vgpu, reg, p_data, + bytes); + break; + } + case 0x78010: /* vgt_caps */ case 0x7881c: break; @@ -1121,7 +1177,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); vgpu->pv_notified = true; - return 0; + return ret; } static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) @@ -1168,6 +1224,26 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) return kobject_uevent_env(kobj, KOBJ_ADD, env); } +#define INTEL_GVT_PCI_BAR_GTTMMIO 0 +static int set_pvmmio(struct intel_vgpu *vgpu, bool map) +{ + u64 start, end; + u64 val; + int ret; + + val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; + if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) + start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + else + start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + + start &= ~GENMASK(3, 0); + end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; + + ret = intel_gvt_hypervisor_set_pvmmio(vgpu, start, end, map); + return ret; +} + static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1184,6 +1260,17 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, case _vgtif_reg(g2v_notify): ret = handle_g2v_notification(vgpu, data); break; + case _vgtif_reg(enable_pvmmio): + if (i915_modparams.enable_pvmmio) { + if (set_pvmmio(vgpu, !!data)) { + vgpu_vreg(vgpu, offset) = 0; + break; + } + vgpu_vreg(vgpu, offset) = !!data; + } else { + vgpu_vreg(vgpu, offset) = 0; + } + break; /* add xhot and yhot to handled list to avoid error log */ case 0x78830: case 0x78834: @@ -1345,6 +1432,16 @@ static int 
mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, *data0 = 0x1e1a1100; else *data0 = 0x61514b3d; + } else if(IS_BROXTON(vgpu->gvt->dev_priv)) { + /** + * "Read memory latency" command on gen9. + * Below memory latency values are read + * from broxton MRB. + */ + if (!*data0) + *data0 = 0x16080707; + else + *data0 = 0x16161616; } break; case SKL_PCODE_CDCLK_CONTROL: @@ -1374,10 +1471,18 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, { u32 v = *(u32 *)p_data; - v &= (1 << 31) | (1 << 29) | (1 << 9) | - (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); + if (IS_BROXTON(vgpu->gvt->dev_priv)) + v &= (1 << 31) | (1 << 29); + else + v &= (1 << 31) | (1 << 29) | (1 << 9) | + (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); v |= (v >> 1); + vgpu_vreg(vgpu, i915_mmio_reg_offset(SKL_FUSE_STATUS)) = + (SKL_FUSE_PG_DIST_STATUS(0) + | SKL_FUSE_PG_DIST_STATUS(1) + | SKL_FUSE_PG_DIST_STATUS(2)); + return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); } @@ -1429,6 +1534,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + if (v & BXT_DE_PLL_PLL_ENABLE) + v |= BXT_DE_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + if (v & PORT_PLL_ENABLE) + v |= PORT_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_dbuf_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + if (v & DBUF_POWER_REQUEST) + v |= DBUF_POWER_STATE; + else + v &= ~DBUF_POWER_STATE; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + u32 data = v & COMMON_RESET_DIS ? 
BXT_PHY_LANE_ENABLED : 0; + + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = vgpu_vreg(vgpu, offset); + v &= ~UNIQUE_TRANGE_EN_METHOD; + + vgpu_vreg(vgpu, offset) = v; + + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + +static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) { + vgpu_vreg(vgpu, offset - 0x600) = v; + vgpu_vreg(vgpu, offset - 0x800) = v; + } else { + vgpu_vreg(vgpu, offset - 0x400) = v; + vgpu_vreg(vgpu, offset - 0x600) = v; + } + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & BIT(0)) { + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ~PHY_RESERVED; + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= PHY_POWER_GOOD; + } + + if (v & BIT(1)) { + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= ~PHY_RESERVED; + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= PHY_POWER_GOOD; + } + + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1446,6 +1654,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; + u32 *elsp_data = vgpu->mmio.shared_page->elsp_data; int ret = 0; if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1)) @@ -1453,16 +1662,23 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist = &vgpu->execlist[ring_id]; - execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; - if (execlist->elsp_dwords.index == 3) { + if (VGPU_PVMMIO(vgpu)) { + execlist->elsp_dwords.data[0] = elsp_data[0]; + execlist->elsp_dwords.data[1] = elsp_data[1]; + execlist->elsp_dwords.data[2] = elsp_data[2]; + execlist->elsp_dwords.data[3] = data; ret = intel_vgpu_submit_execlist(vgpu, ring_id); - if(ret) - gvt_vgpu_err("fail submit workload on ring %d\n", - ring_id); + } else { + execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; + if (execlist->elsp_dwords.index == 3) + ret = intel_vgpu_submit_execlist(vgpu, ring_id); + ++execlist->elsp_dwords.index; + execlist->elsp_dwords.index &= 0x3; } - ++execlist->elsp_dwords.index; - execlist->elsp_dwords.index &= 0x3; + if (ret) + gvt_vgpu_err("fail submit workload on ring %d\n", ring_id); + return ret; } @@ -1493,8 +1709,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, (enable_execlist ? 
"enabling" : "disabling"), ring_id); - if (enable_execlist) + if (enable_execlist) { + mutex_unlock(&vgpu->gvt->lock); + mutex_lock(&vgpu->gvt->sched_lock); + mutex_lock(&vgpu->gvt->lock); intel_vgpu_start_schedule(vgpu); + mutex_unlock(&vgpu->gvt->sched_lock); + } } return 0; } @@ -1597,6 +1818,60 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) +#define MMIO_PIPES_SDH(prefix, plane, s, d, r, w) do { \ + int pipe; \ + for_each_pipe(dev_priv, pipe) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_SDH(prefix, s, d, r, w) do { \ + int pipe, plane; \ + for_each_pipe(dev_priv, pipe) \ + for_each_universal_plane(dev_priv, pipe, plane) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_DH(prefix, d, r, w) \ + MMIO_PLANES_SDH(prefix, 4, d, r, w) + +#define MMIO_PORT_CL_REF(phy) \ + MMIO_D(BXT_PORT_CL1CM_DW0(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW9(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW10(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW28(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW30(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL2CM_DW6(phy), D_BXT); \ + MMIO_D(BXT_PORT_REF_DW3(phy), D_BXT); \ + MMIO_D(BXT_PORT_REF_DW6(phy), D_BXT); \ + MMIO_D(BXT_PORT_REF_DW8(phy), D_BXT) + +#define MMIO_PORT_PCS_TX(phy, ch) \ + MMIO_D(BXT_PORT_PLL_EBB_0(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PLL_EBB_4(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW10_LN01(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW10_GRP(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW12_LN01(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW12_LN23(phy, ch), D_BXT); \ + MMIO_DH(BXT_PORT_PCS_DW12_GRP(phy, ch), D_BXT, NULL, bxt_pcs_dw12_grp_write); \ + MMIO_D(BXT_PORT_TX_DW2_LN0(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW2_GRP(phy, ch), D_BXT); \ + MMIO_DH(BXT_PORT_TX_DW3_LN0(phy, ch), D_BXT, bxt_port_tx_dw3_read, NULL); \ + MMIO_D(BXT_PORT_TX_DW3_GRP(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW4_LN0(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW4_GRP(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 0), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 1), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 2), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 3), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 0), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 1), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 2), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 3), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 6), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 8), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 9), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 10), D_BXT) + static int init_generic_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->dev_priv; @@ -1694,9 +1969,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0xc4040, D_ALL); MMIO_D(DERRMR, D_ALL); - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); + MMIO_DH(PIPEDSL(PIPE_A), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_B), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_C), D_ALL, pipe_dsl_mmio_read, NULL); MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); @@ -1740,71 +2015,71 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0x70098, D_ALL); MMIO_D(0x7009c, D_ALL); - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), 
D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); - MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); - MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); - MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); - MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); - MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); - MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); + MMIO_D(DSPCNTR(PIPE_A), D_BDW); + MMIO_D(DSPADDR(PIPE_A), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_A), D_BDW); + MMIO_D(DSPPOS(PIPE_A), D_BDW); + MMIO_D(DSPSIZE(PIPE_A), D_BDW); + MMIO_DH(DSPSURF(PIPE_A), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_A), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_A), D_BDW); + + MMIO_D(DSPCNTR(PIPE_B), D_BDW); + MMIO_D(DSPADDR(PIPE_B), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_B), D_BDW); + MMIO_D(DSPPOS(PIPE_B), D_BDW); + MMIO_D(DSPSIZE(PIPE_B), D_BDW); + MMIO_DH(DSPSURF(PIPE_B), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_B), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(DSPCNTR(PIPE_C), D_BDW); + MMIO_D(DSPADDR(PIPE_C), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_C), D_BDW); + MMIO_D(DSPPOS(PIPE_C), D_BDW); + MMIO_D(DSPSIZE(PIPE_C), D_BDW); + MMIO_DH(DSPSURF(PIPE_C), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_C), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_C), D_BDW); + + MMIO_D(SPRCTL(PIPE_A), D_BDW); + MMIO_D(SPRLINOFF(PIPE_A), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_A), D_BDW); + MMIO_D(SPRPOS(PIPE_A), D_BDW); + MMIO_D(SPRSIZE(PIPE_A), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_A), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_A), D_BDW); + MMIO_DH(SPRSURF(PIPE_A), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_A), D_BDW); + MMIO_D(SPROFFSET(PIPE_A), D_BDW); + MMIO_D(SPRSCALE(PIPE_A), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_A), D_BDW); + + 
MMIO_D(SPRCTL(PIPE_B), D_BDW); + MMIO_D(SPRLINOFF(PIPE_B), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_B), D_BDW); + MMIO_D(SPRPOS(PIPE_B), D_BDW); + MMIO_D(SPRSIZE(PIPE_B), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_B), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_B), D_BDW); + MMIO_DH(SPRSURF(PIPE_B), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_B), D_BDW); + MMIO_D(SPROFFSET(PIPE_B), D_BDW); + MMIO_D(SPRSCALE(PIPE_B), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(SPRCTL(PIPE_C), D_BDW); + MMIO_D(SPRLINOFF(PIPE_C), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_C), D_BDW); + MMIO_D(SPRPOS(PIPE_C), D_BDW); + MMIO_D(SPRSIZE(PIPE_C), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_C), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_C), D_BDW); + MMIO_DH(SPRSURF(PIPE_C), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_C), D_BDW); + MMIO_D(SPROFFSET(PIPE_C), D_BDW); + MMIO_D(SPRSCALE(PIPE_C), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_C), D_BDW); MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); MMIO_D(HBLANK(TRANSCODER_A), D_ALL); @@ -1911,8 +2186,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(BLC_PWM_CPU_CTL2, D_ALL); MMIO_D(BLC_PWM_CPU_CTL, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL1, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL2, D_ALL); + MMIO_D(BLC_PWM_PCH_CTL1, D_ALL & ~D_BXT); + MMIO_D(BLC_PWM_PCH_CTL2, D_ALL & ~D_BXT); MMIO_D(0x48268, D_ALL); @@ -2010,12 +2285,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(PCH_PP_ON_DELAYS, D_ALL); MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); - MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL); + MMIO_DH(0xe651c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe661c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe671c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe681c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe6c04, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe6e1c, D_ALL, NULL, mmio_write_empty); MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0, PORTA_HOTPLUG_STATUS_MASK @@ -2653,106 +2928,37 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 
1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_D(0x70380, D_SKL_PLUS); - MMIO_D(0x71380, D_SKL_PLUS); - 
MMIO_D(0x72380, D_SKL_PLUS); - MMIO_D(0x7039c, D_SKL_PLUS); +// MMIO_PLANES_DH(PLANE_COLOR_CTL, D_SKL, NULL, NULL); + MMIO_PLANES_DH(PLANE_CTL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_STRIDE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_POS, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_SIZE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYVAL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYMSK, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + MMIO_PLANES_DH(PLANE_SURF, D_SKL_PLUS, NULL, skl_plane_surf_write); + + MMIO_PLANES_DH(PLANE_KEYMAX, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_OFFSET, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C0, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C4, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_BUF_CFG, D_SKL_PLUS, NULL, NULL); MMIO_D(0x8f074, D_SKL | D_KBL); MMIO_D(0x8f004, D_SKL | D_KBL); @@ -2804,14 +3010,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x71034, D_SKL_PLUS); MMIO_D(0x72034, D_SKL_PLUS); - MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS); - MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS); - MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS); - MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS); - MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS); - MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS); - MMIO_D(0x44500, D_SKL_PLUS); + MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -2819,6 +3019,235 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x4ab8, D_KBL); MMIO_D(0x2248, D_SKL_PLUS | D_KBL); + MMIO_D(HUC_STATUS2, D_GEN9PLUS); + + return 0; +} + +static int init_bxt_mmio_info(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + int ret; + + MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL); + MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL); + MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); + + MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); + + MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS); + MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, + skl_power_well_ctl_write); + + MMIO_D(0xa210, D_SKL_PLUS); + MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); + MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); + MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DH(0x4ddc, D_BXT, NULL, skl_misc_ctl_write); + MMIO_DH(0x42080, D_BXT, NULL, skl_misc_ctl_write); + MMIO_D(0x45504, D_BXT); + MMIO_D(0x45520, D_BXT); + MMIO_D(0x46000, D_BXT); + MMIO_DH(0x46010, D_BXT, NULL, skl_lcpll_write); + MMIO_DH(0x46014, D_BXT, NULL, skl_lcpll_write); + MMIO_D(0x6C040, D_BXT); + MMIO_D(0x6C048, D_BXT); + MMIO_D(0x6C050, D_BXT); + 
MMIO_D(0x6C044, D_BXT); + MMIO_D(0x6C04C, D_BXT); + MMIO_D(0x6C054, D_BXT); + MMIO_D(0x6c058, D_BXT); + MMIO_D(0x6c05c, D_BXT); + MMIO_DH(0X6c060, D_BXT, dpll_status_read, NULL); + + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_BXT, NULL, pf_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_BXT, NULL, pf_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_BXT, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_BXT, NULL, pf_write); + + MMIO_DH(CUR_BUF_CFG(PIPE_A), D_BXT, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_B), D_BXT, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_C), D_BXT, NULL, NULL); + + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); + + MMIO_DH(CUR_WM_TRANS(PIPE_A), D_BXT, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_B), D_BXT, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_C), D_BXT, NULL, NULL); + + MMIO_PLANES_DH(PLANE_CTL, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_STRIDE, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_POS, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_SIZE, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYVAL, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYMSK, D_BXT, NULL, skl_plane_mmio_write); + + MMIO_PLANES_DH(PLANE_SURF, D_BXT, NULL, skl_plane_surf_write); + + MMIO_PLANES_DH(PLANE_KEYMAX, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_OFFSET, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C0, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C4, D_BXT, NULL, skl_plane_mmio_write); + + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_BXT, NULL, NULL); + MMIO_PLANES_DH(PLANE_BUF_CFG, D_BXT, NULL, NULL); + + MMIO_F(0x80000, 0x3000, 0, 0, 0, D_BXT, NULL, NULL); + MMIO_D(0x8f074, D_BXT); + MMIO_D(0x8f004, D_BXT); + MMIO_D(0x8f034, D_BXT); + + MMIO_D(0xb11c, D_BXT); + + MMIO_D(0x51000, D_BXT); + MMIO_D(0x6c00c, D_BXT); + + MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + + MMIO_D(0xd08, D_BXT); + MMIO_D(0x20e0, D_BXT); + MMIO_D(0x20ec, D_BXT); + + /* TRTT */ + MMIO_D(0x4de0, D_BXT); + MMIO_D(0x4de4, D_BXT); + MMIO_D(0x4de8, D_BXT); + MMIO_D(0x4dec, D_BXT); + MMIO_D(0x4df0, D_BXT); + MMIO_DH(0x4df4, D_BXT, NULL, gen9_trtte_write); + MMIO_DH(0x4dfc, D_BXT, NULL, gen9_trtt_chicken_write); + + MMIO_DH(0x45008, D_BXT, NULL, bxt_dbuf_ctl_write); + + MMIO_D(0x46430, D_BXT); + + MMIO_D(0x46520, D_BXT); + + MMIO_D(0xc403c, 
D_BXT); + MMIO_D(0xb004, D_BXT); + MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); + + MMIO_D(0x65900, D_BXT); + MMIO_D(0x1082c0, D_BXT); + MMIO_D(0x4068, D_BXT); + MMIO_D(0x67054, D_BXT); + MMIO_D(0x6e560, D_BXT); + MMIO_D(0x6e554, D_BXT); + MMIO_D(0x2b20, D_BXT); + MMIO_D(0x65f00, D_BXT); + MMIO_D(0x65f08, D_BXT); + MMIO_D(0x320f0, D_BXT); + + MMIO_D(0x70034, D_BXT); + MMIO_D(0x71034, D_BXT); + MMIO_D(0x72034, D_BXT); + + MMIO_D(0x44500, D_BXT); + + MMIO_D(GEN8_GTCR, D_SKL_PLUS); + + MMIO_D(GEN7_SAMPLER_INSTDONE, D_SKL_PLUS); + MMIO_D(GEN7_ROW_INSTDONE, D_SKL_PLUS); + MMIO_D(GEN8_FAULT_TLB_DATA0, D_SKL_PLUS); + MMIO_D(GEN8_FAULT_TLB_DATA1, D_SKL_PLUS); + MMIO_D(ERROR_GEN6, D_SKL_PLUS); + MMIO_D(DONE_REG, D_SKL_PLUS); + MMIO_D(EIR, D_SKL_PLUS); + MMIO_D(PGTBL_ER, D_SKL_PLUS); + MMIO_D(0x4194, D_SKL_PLUS); + MMIO_D(0x4294, D_SKL_PLUS); + MMIO_D(0x4494, D_SKL_PLUS); + + MMIO_RING_D(RING_PSMI_CTL, D_SKL_PLUS); + MMIO_RING_D(RING_DMA_FADD, D_SKL_PLUS); + MMIO_RING_D(RING_DMA_FADD_UDW, D_SKL_PLUS); + MMIO_RING_D(RING_IPEHR, D_SKL_PLUS); + MMIO_RING_D(RING_INSTPS, D_SKL_PLUS); + MMIO_RING_D(RING_BBADDR_UDW, D_SKL_PLUS); + MMIO_RING_D(RING_BBSTATE, D_SKL_PLUS); + MMIO_RING_D(RING_IPEIR, D_SKL_PLUS); + + MMIO_D(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS); + MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_D(0xc4c8, D_SKL_PLUS); + MMIO_D(GUC_BCS_RCS_IER, D_SKL_PLUS); + MMIO_D(GUC_VCS2_VCS1_IER, D_SKL_PLUS); + MMIO_D(GUC_WD_VECS_IER, D_SKL_PLUS); + MMIO_D(GUC_MAX_IDLE_COUNT, D_SKL_PLUS); + + MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); + MMIO_D(BXT_RP_STATE_CAP, D_BXT); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); + MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); + + MMIO_PORT_CL_REF(DPIO_PHY0); + MMIO_PORT_PCS_TX(DPIO_PHY0, DPIO_CH0); + MMIO_PORT_PCS_TX(DPIO_PHY0, DPIO_CH1); + MMIO_PORT_CL_REF(DPIO_PHY1); + MMIO_PORT_PCS_TX(DPIO_PHY1, DPIO_CH0); + + MMIO_D(BXT_DE_PLL_CTL, D_BXT); + MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); + MMIO_D(BXT_DSI_PLL_CTL, D_BXT); + MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); + + MMIO_D(BXT_BLC_PWM_CTL(0), D_BXT); + MMIO_D(BXT_BLC_PWM_FREQ(0), D_BXT); + MMIO_D(BXT_BLC_PWM_DUTY(0), D_BXT); + MMIO_D(BXT_BLC_PWM_CTL(1), D_BXT); + MMIO_D(BXT_BLC_PWM_FREQ(1), D_BXT); + MMIO_D(BXT_BLC_PWM_DUTY(1), D_BXT); + + MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); + + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); + + MMIO_D(RC6_LOCATION, D_BXT); + MMIO_D(RC6_CTX_BASE, D_BXT); + + MMIO_D(0xA248, D_SKL_PLUS); + MMIO_D(0xA250, D_SKL_PLUS); + MMIO_D(0xA25C, D_SKL_PLUS); + MMIO_D(0xA000, D_SKL_PLUS); + MMIO_D(0xB100, D_SKL_PLUS); + MMIO_D(0xD00, D_SKL_PLUS); + + MMIO_D(HUC_STATUS2, D_GEN9PLUS); + return 0; } @@ -2859,6 +3288,9 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; + + vfree(gvt->mmio.mmio_host_cache); + gvt->mmio.mmio_host_cache = NULL; } /* Special MMIO blocks. 
*/ @@ -2893,6 +3325,12 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) if (!gvt->mmio.mmio_attribute) return -ENOMEM; + gvt->mmio.mmio_host_cache = vzalloc(info->mmio_size); + if (!gvt->mmio.mmio_host_cache) { + vfree(gvt->mmio.mmio_attribute); + return -ENOMEM; + } + ret = init_generic_mmio_info(gvt); if (ret) goto err; @@ -2909,6 +3347,13 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ret = init_skl_mmio_info(gvt); if (ret) goto err; + } else if (IS_BROXTON(dev_priv)) { + ret = init_broadwell_mmio_info(gvt); + if (ret) + goto err; + ret = init_bxt_mmio_info(gvt); + if (ret) + goto err; } gvt->mmio.mmio_block = mmio_blocks; @@ -2916,13 +3361,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) gvt_dbg_mmio("traced %u virtual mmio registers\n", gvt->mmio.num_tracked_mmio); + + intel_gvt_mark_noncontext_mmios(gvt); + return 0; err: intel_gvt_clean_mmio_info(gvt); return ret; } - /** * intel_vgpu_default_mmio_read - default MMIO read handler * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index df7f33abd393..25cd19b712db 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -55,9 +55,15 @@ struct intel_gvt_mpt { unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, bool map); + int (*set_pvmmio)(unsigned long handle, u64 start, u64 end, + bool map); + int (*pause_domain)(unsigned long handle); + int (*unpause_domain)(unsigned long handle); + int (*dom0_ready)(void); }; extern struct intel_gvt_mpt xengt_mpt; extern struct intel_gvt_mpt kvmgt_mpt; +extern struct intel_gvt_mpt acrn_gvt_mpt; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 7a041b368f68..0918d913dbcb 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -69,6 +69,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = { [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults", [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt", [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT", + [VCS2_CMD_STREAMER_ERR] = "VCS2 Video CS error interrupt", [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify", [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt", @@ -523,21 +524,26 @@ static void gen8_init_irq( /* GEN8 interrupt GT0 events */ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 3, RCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 19, BCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); /* GEN8 interrupt GT1 events */ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 3, VCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); if (HAS_BSD2(gvt->dev_priv)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 19, VCS2_CMD_STREAMER_ERR, + INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); 
SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH, @@ -546,6 +552,7 @@ static void gen8_init_irq( /* GEN8 interrupt GT3 events */ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3); + SET_BIT_INFO(irq, 3, VECS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3); @@ -580,7 +587,8 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { + } else if (IS_SKYLAKE(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); @@ -592,6 +600,10 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); + + SET_BIT_INFO(irq, 5, PLANE_3_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); + SET_BIT_INFO(irq, 5, PLANE_3_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); + SET_BIT_INFO(irq, 5, PLANE_3_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } /* GEN8 interrupt PCU events */ @@ -651,7 +663,7 @@ static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data) irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer); gvt = container_of(irq, struct intel_gvt, irq); - intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK); +// intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK); hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period); return HRTIMER_RESTART; } @@ -691,6 +703,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) gvt_dbg_core("init irq framework\n"); if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { irq->ops = &gen8_irq_ops; irq->irq_map = gen8_irq_map; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 5313fb1b33e1..6ec761a84557 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -53,6 +53,7 @@ enum intel_gvt_event_type { VCS_AS_CONTEXT_SWITCH, VCS2_MI_USER_INTERRUPT, + VCS2_CMD_STREAMER_ERR, VCS2_MI_FLUSH_DW, VCS2_AS_CONTEXT_SWITCH, @@ -64,6 +65,7 @@ enum intel_gvt_event_type { BCS_AS_CONTEXT_SWITCH, VECS_MI_USER_INTERRUPT, + VECS_CMD_STREAMER_ERR, VECS_MI_FLUSH_DW, VECS_AS_CONTEXT_SWITCH, @@ -92,6 +94,9 @@ enum intel_gvt_event_type { SPRITE_A_FLIP_DONE, SPRITE_B_FLIP_DONE, SPRITE_C_FLIP_DONE, + PLANE_3_A_FLIP_DONE, + PLANE_3_B_FLIP_DONE, + PLANE_3_C_FLIP_DONE, PCU_THERMAL, PCU_PCODE2DRIVER_MAILBOX, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 980ec8906b1e..6726df75ffc8 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -68,7 +68,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, return; gvt = vgpu->gvt; - mutex_lock(&gvt->lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (reg_is_mmio(gvt, offset)) { if (read) @@ -106,7 +105,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, p_data, bytes); } } - mutex_unlock(&gvt->lock); } /** @@ -119,19 
+117,17 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_read_locked(struct intel_vgpu *vgpu, uint64_t pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; unsigned int offset = 0; int ret = -EINVAL; - if (vgpu->failsafe) { failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); return 0; } - mutex_lock(&gvt->lock); if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { struct intel_vgpu_guest_page *gp; @@ -146,7 +142,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ret, gp->gfn, pa, *(u32 *)p_data, bytes); } - mutex_unlock(&gvt->lock); return ret; } } @@ -168,13 +163,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, p_data, bytes); if (ret) goto err; - mutex_unlock(&gvt->lock); return ret; } if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) { ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); - mutex_unlock(&gvt->lock); return ret; } @@ -191,12 +184,22 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; intel_gvt_mmio_set_accessed(gvt, offset); - mutex_unlock(&gvt->lock); return 0; err: gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", offset, bytes); - mutex_unlock(&gvt->lock); + return ret; +} + +int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, + void *p_data, unsigned int bytes) +{ + int ret; + + mutex_lock(&vgpu->gvt->lock); + ret = intel_vgpu_emulate_mmio_read_locked(vgpu, pa, p_data, bytes); + mutex_unlock(&vgpu->gvt->lock); + return ret; } @@ -210,7 +213,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_write_locked(struct intel_vgpu *vgpu, uint64_t pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; @@ -222,8 +225,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, return 0; } - mutex_lock(&gvt->lock); - if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { struct intel_vgpu_guest_page *gp; @@ -237,7 +238,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, ret, gp->gfn, pa, *(u32 *)p_data, bytes); } - mutex_unlock(&gvt->lock); return ret; } } @@ -259,13 +259,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, p_data, bytes); if (ret) goto err; - mutex_unlock(&gvt->lock); return ret; } if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) { ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); - mutex_unlock(&gvt->lock); return ret; } @@ -273,16 +271,34 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, if (ret < 0) goto err; + if (vgpu->entire_nonctxmmio_checked + && intel_gvt_mmio_is_non_context(vgpu->gvt, offset) + && vgpu_vreg(vgpu, offset) + != *(u32 *)(vgpu->gvt->mmio.mmio_host_cache + offset)) { + gvt_err("vgpu%d unexpected non-context MMIO change at 0x%x:0x%x,0x%x\n", + vgpu->id, offset, vgpu_vreg(vgpu, offset), + *(u32 *)(vgpu->gvt->mmio.mmio_host_cache + offset)); + } + intel_gvt_mmio_set_accessed(gvt, offset); - mutex_unlock(&gvt->lock); return 0; err: gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, bytes); - mutex_unlock(&gvt->lock); return ret; } +int intel_vgpu_emulate_mmio_write(struct intel_vgpu 
*vgpu, uint64_t pa, + void *p_data, unsigned int bytes) +{ + int ret; + + mutex_lock(&vgpu->gvt->lock); + ret = intel_vgpu_emulate_mmio_write_locked(vgpu, pa, p_data, bytes); + mutex_unlock(&vgpu->gvt->lock); + + return ret; +} /** * intel_vgpu_reset_mmio - reset virtual MMIO space @@ -294,6 +310,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; void *mmio = gvt->firmware.mmio; + struct drm_i915_private *dev_priv = gvt->dev_priv; if (dmlr) { memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); @@ -315,6 +332,24 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); } + /* the below vreg init values are taken from handler.c + * and won't change during the vgpu life cycle + */ + vgpu_vreg(vgpu, 0xe651c) = 1 << 17; + vgpu_vreg(vgpu, 0xe661c) = 1 << 17; + vgpu_vreg(vgpu, 0xe671c) = 1 << 17; + vgpu_vreg(vgpu, 0xe681c) = 1 << 17; + vgpu_vreg(vgpu, 0xe6c04) = 3; + vgpu_vreg(vgpu, 0xe6e1c) = 0x2f << 16; + + if (HAS_HUC_UCODE(dev_priv)) { + mmio_hw_access_pre(dev_priv); + vgpu_vreg(vgpu, HUC_STATUS2) = I915_READ(HUC_STATUS2); + mmio_hw_access_post(dev_priv); + } + + /* Non-context MMIOs need a full check again after an mmio/vgpu reset */ + vgpu->entire_nonctxmmio_checked = false; } /** @@ -328,12 +363,14 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) + vgpu->mmio.sreg = vzalloc(info->mmio_size); + vgpu->mmio.vreg = (void *)__get_free_pages(GFP_KERNEL, + info->mmio_size_order); + vgpu->mmio.shared_page = (struct gvt_shared_page *) __get_free_pages( + GFP_KERNEL, 0); + if (!vgpu->mmio.vreg || !vgpu->mmio.sreg || !vgpu->mmio.shared_page) return -ENOMEM; - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; - intel_vgpu_reset_mmio(vgpu, true); return 0; @@ -346,6 +383,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) */ void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) { - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vfree(vgpu->mmio.sreg); + free_pages((unsigned long) vgpu->mmio.vreg, info->mmio_size_order); + free_pages((unsigned long) vgpu->mmio.shared_page, 0); + vgpu->mmio.vreg = vgpu->mmio.sreg = vgpu->mmio.shared_page = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 32cd64ddad26..ad21866e4ffb 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -42,15 +42,16 @@ struct intel_vgpu; #define D_BDW (1 << 0) #define D_SKL (1 << 1) #define D_KBL (1 << 2) +#define D_BXT (1 << 3) -#define D_GEN9PLUS (D_SKL | D_KBL) -#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) +#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT) -#define D_SKL_PLUS (D_SKL | D_KBL) -#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) +#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT) #define D_PRE_SKL (D_BDW) -#define D_ALL (D_BDW | D_SKL | D_KBL) +#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT) typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *, unsigned int); @@ -85,9 +86,12 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes); +int
intel_vgpu_emulate_mmio_read_locked(struct intel_vgpu *vgpu, u64 pa, + void *p_data, unsigned int bytes); int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes); - +int intel_vgpu_emulate_mmio_write_locked(struct intel_vgpu *vgpu, u64 pa, + void *p_data, unsigned int bytes); int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index f0e5487e6688..95b806c49397 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -292,4 +292,48 @@ static inline int intel_gvt_hypervisor_set_trap_area( return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); } +/** + * intel_gvt_hypervisor_set_pvmmio - Set the pvmmio area + * @vgpu: a vGPU + * @start: the beginning of the guest physical address region + * @end: the end of the guest physical address region + * @map: map or unmap + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_hypervisor_set_pvmmio( + struct intel_vgpu *vgpu, u64 start, u64 end, bool map) +{ + /* a MPT implementation could have MMIO trapped elsewhere */ + if (!intel_gvt_host.mpt->set_pvmmio) + return -ENOENT; + + return intel_gvt_host.mpt->set_pvmmio(vgpu->handle, start, end, map); +} + + +static inline int intel_gvt_hypervisor_pause_domain(struct intel_vgpu *vgpu) +{ + if (!intel_gvt_host.mpt || !intel_gvt_host.mpt->pause_domain) + return 0; + + return intel_gvt_host.mpt->pause_domain(vgpu->handle); +} + +static inline int intel_gvt_hypervisor_unpause_domain(struct intel_vgpu *vgpu) +{ + if (!intel_gvt_host.mpt || !intel_gvt_host.mpt->unpause_domain) + return 0; + + return intel_gvt_host.mpt->unpause_domain(vgpu->handle); +} + +static inline int intel_gvt_hypervisor_dom0_ready(void) +{ + if (!intel_gvt_host.mpt->dom0_ready) + return 0; + + return intel_gvt_host.mpt->dom0_ready(); +} #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 311799136d7f..68484b002ce9 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -27,6 +27,7 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) { + void __iomem *host_va = vgpu->gvt->opregion.opregion_va; u8 *buf; int i; @@ -42,19 +43,20 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) if (!vgpu_opregion(vgpu)->va) return -ENOMEM; - memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va, - INTEL_GVT_OPREGION_SIZE); + if (vgpu->gvt->opregion.opregion_va) { + memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, + INTEL_GVT_OPREGION_SIZE); + + /* for unknown reason, the value in LID field is incorrect + * which block the windows guest, so workaround it by force + * setting it to "OPEN" + */ + buf = (u8 *)vgpu_opregion(vgpu)->va; + buf[INTEL_GVT_OPREGION_CLID] = 0x3; + } for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - - /* for unknown reason, the value in LID field is incorrect - * which block the windows guest, so workaround it by force - * setting it to "OPEN" - */ - buf = (u8 *)vgpu_opregion(vgpu)->va; - buf[INTEL_GVT_OPREGION_CLID] = 0x3; - return 0; } @@ -120,6 +122,12 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 
gvt_dbg_core("emulate opregion from kernel\n"); + /* clean opregion content before it could be reused. */ + if (gpa == 0) + return 0; + if (vgpu_opregion(vgpu)->va) + intel_vgpu_clean_opregion(vgpu); + ret = init_vgpu_opregion(vgpu, gpa); if (ret) return ret; @@ -139,6 +147,9 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) */ void intel_gvt_clean_opregion(struct intel_gvt *gvt) { + if (!gvt->opregion.opregion_va) + return; + memunmap(gvt->opregion.opregion_va); gvt->opregion.opregion_va = NULL; } @@ -157,6 +168,11 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt) pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION, &gvt->opregion.opregion_pa); + if (gvt->opregion.opregion_pa == 0) { + gvt_err("host opregion doesn't exist\n"); + return 0; + } + gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa, INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB); if (!gvt->opregion.opregion_va) { diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 7d01c77a0f7a..0beec530aabc 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -54,8 +54,13 @@ #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) -#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) -#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) +#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + plane * 0x100) +#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + plane * 0x100) +#define _REG_701AC(pipe, plane) (0x701ac + pipe * 0x1000 + plane * 0x100) + +#define SKL_PLANE_REG_TO_PIPE(reg) (((reg) >> 12) & 0x3) +#define SKL_PLANE_REG_TO_PLANE(reg) ((((reg) & 0xFFF) - 0x180) >> 8) +#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane)*3 + pipe) #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16)))) diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 2ea542257f03..1fc332fa088b 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -141,6 +141,27 @@ static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = { {RCS, _MMIO(0x20e4), 0xffff, false}, }; +void intel_gvt_mark_noncontext_mmios(struct intel_gvt *gvt) +{ + struct render_mmio *mmio; + int i, array_size; + + if (IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv)) { + mmio = gen9_render_mmio_list; + array_size = ARRAY_SIZE(gen9_render_mmio_list); + } else { + mmio = gen8_render_mmio_list; + array_size = ARRAY_SIZE(gen8_render_mmio_list); + } + + for (i = 0; i < array_size; i++, mmio++) { + if (mmio->in_context) + continue; + intel_gvt_mmio_set_non_context(gvt, mmio->reg.reg); + } +} + static u32 gen9_render_mocs[I915_NUM_ENGINES][64]; static u32 gen9_render_mocs_L3[32]; @@ -172,7 +193,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) + if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv))) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -271,6 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id) i915_reg_t last_reg = _MMIO(0); if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(dev_priv) || IS_KABYLAKE(vgpu->gvt->dev_priv)) { mmio = gen9_render_mmio_list; array_size = 
ARRAY_SIZE(gen9_render_mmio_list); @@ -293,7 +316,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id) */ if (mmio->in_context && ((ctx_ctrl & inhibit_mask) != inhibit_mask) && - i915.enable_execlists) + i915_modparams.enable_execlists) continue; if (mmio->mask) @@ -325,7 +348,8 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id) u32 v; int i, array_size; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); restore_mocs(vgpu, ring_id); @@ -403,3 +427,108 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } + +#define gvt_host_reg(gvt, reg) \ + (*(u32 *)(gvt->mmio.mmio_host_cache + reg)) \ + +#define MMIO_COMPARE(vgpu, reg, mask) ({ \ + int ret; \ + u32 value = vgpu_vreg(vgpu, reg); \ + u32 host_value = gvt_host_reg(vgpu->gvt, reg); \ + \ + if (mask) { \ + value &= mask; \ + host_value &= mask; \ + } \ + if (host_value == value) { \ + ret = 0; \ + } else { \ + gvt_err("vgpu%d unconformance mmio 0x%x:0x%x,0x%x\n", \ + vgpu->id, reg, \ + vgpu_vreg(vgpu, reg), \ + gvt_host_reg(vgpu->gvt, reg)); \ + ret = -EINVAL; \ + } \ + ret; \ + }) + +static int noncontext_mmio_compare(struct intel_vgpu *vgpu, int ring_id) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct render_mmio *mmio, *mmio_list; + int i, array_size; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { + mmio_list = gen9_render_mmio_list; + array_size = ARRAY_SIZE(gen9_render_mmio_list); + } else { + mmio_list = gen8_render_mmio_list; + array_size = ARRAY_SIZE(gen8_render_mmio_list); + } + + for (i = 0, mmio = mmio_list; i < array_size; i++, mmio++) { + if (mmio->ring_id != ring_id || mmio->in_context + || is_force_nonpriv_mmio(mmio->reg.reg) + || mmio->reg.reg == RING_MODE_GEN7(engine).reg) + continue; + + if (MMIO_COMPARE(vgpu, mmio->reg.reg, mmio->mask)) + return -EINVAL; + } + + return 0; +} + +static void get_host_mmio_snapshot(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct render_mmio *mmio, *mmio_list; + int i, array_size; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { + mmio_list = gen9_render_mmio_list; + array_size = ARRAY_SIZE(gen9_render_mmio_list); + } else { + mmio_list = gen8_render_mmio_list; + array_size = ARRAY_SIZE(gen8_render_mmio_list); + } + + if (!gvt->mmio.host_cache_initialized) { + /* Snapshot all the non-context MMIOs */ + for (i = 0, mmio = mmio_list; i < array_size; i++, mmio++) { + + if (mmio->in_context) + continue; + + gvt_host_reg(gvt, mmio->reg.reg) = + I915_READ_FW(mmio->reg); + if (mmio->mask) + gvt_host_reg(gvt, mmio->reg.reg) &= mmio->mask; + } + + gvt->mmio.host_cache_initialized = true; + } +} + +int intel_gvt_vgpu_conformance_check(struct intel_vgpu *vgpu, int ring_id) +{ + + int ret; + + /* Entire non-context MMIOs check only need once */ + if (!vgpu->entire_nonctxmmio_checked) + vgpu->entire_nonctxmmio_checked = true; + else + return 0; + + get_host_mmio_snapshot(vgpu->gvt); + + ret = noncontext_mmio_compare(vgpu, ring_id); + if (ret) + goto err; + + return 0; +err: + return ret; +} diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h index 91db1d39d28f..51471fd063a5 100644 --- a/drivers/gpu/drm/i915/gvt/render.h +++ b/drivers/gpu/drm/i915/gvt/render.h @@ -40,4 +40,6 @@ void 
intel_gvt_switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, int ring_id); +int intel_gvt_vgpu_conformance_check(struct intel_vgpu *vgpu, int ring_id); + #endif diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 03532dfc0cd5..253589f334ad 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -34,15 +34,11 @@ #include "i915_drv.h" #include "gvt.h" -static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) +static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu, + enum intel_engine_id ring_id) { - enum intel_engine_id i; - struct intel_engine_cs *engine; - - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (!list_empty(workload_q_head(vgpu, i))) - return true; - } + if (!list_empty(workload_q_head(vgpu, ring_id))) + return true; return false; } @@ -64,29 +60,35 @@ struct gvt_sched_data { struct intel_gvt *gvt; struct hrtimer timer; unsigned long period; - struct list_head lru_runq_head; + struct list_head lru_runq_head[I915_NUM_ENGINES]; }; -static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu) +static void vgpu_update_timeslice(struct intel_vgpu *vgpu, + enum intel_engine_id ring_id, ktime_t cur_time) { ktime_t delta_ts; - struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data; + struct vgpu_sched_data *vgpu_data; - delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time; + if (vgpu == NULL || vgpu == vgpu->gvt->idle_vgpu) + return; - vgpu_data->sched_time += delta_ts; - vgpu_data->left_ts -= delta_ts; + vgpu_data = vgpu->sched_data[ring_id]; + delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time); + vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts); + vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts); + vgpu_data->sched_in_time = cur_time; } #define GVT_TS_BALANCE_PERIOD_MS 100 #define GVT_TS_BALANCE_STAGE_NUM 10 -static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) +static void gvt_balance_timeslice(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct list_head *pos; - static uint64_t stage_check; - int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; + static uint64_t stage_check[I915_NUM_ENGINES]; + int stage = stage_check[ring_id]++ % GVT_TS_BALANCE_STAGE_NUM; /* The timeslice accumulation reset at stage 0, which is * allocated again without adding previous debt. 
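The per-engine scheduler rework above gives every vGPU one time-slice account per ring: on each tick the ring's current vGPU is charged the wall-clock delta since it was last scheduled in on that ring, and its remaining budget shrinks by the same amount. The bookkeeping can be modeled in isolation as below; the structure and helper names are invented for the example and are not the driver's own.

#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES 5

struct vgpu_ts {
	int64_t sched_in_time;	/* ns, last time this vGPU got the engine */
	int64_t sched_time;	/* ns, total time consumed on this engine */
	int64_t left_ts;	/* ns, budget left in the current period */
};

struct vgpu_model {
	struct vgpu_ts ts[NUM_ENGINES];	/* one accounting slot per engine */
};

/* charge the time since the vGPU was scheduled in, on one engine only */
static void update_timeslice(struct vgpu_model *v, int ring, int64_t now)
{
	struct vgpu_ts *t = &v->ts[ring];
	int64_t delta = now - t->sched_in_time;

	t->sched_time += delta;
	t->left_ts   -= delta;
	t->sched_in_time = now;	/* restart the measurement window */
}

int main(void)
{
	struct vgpu_model v = { .ts = { [0] = { .sched_in_time = 0, .left_ts = 1000000 } } };

	update_timeslice(&v, 0, 250000);	/* 250 us consumed on ring 0 */
	printf("used=%lld left=%lld\n",
	       (long long)v.ts[0].sched_time, (long long)v.ts[0].left_ts);
	return 0;
}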
@@ -95,12 +97,12 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) int total_weight = 0; ktime_t fair_timeslice; - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); total_weight += vgpu_data->sched_ctl.weight; } - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) * vgpu_data->sched_ctl.weight / @@ -110,7 +112,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) vgpu_data->left_ts = vgpu_data->allocated_ts; } } else { - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); /* timeslice for next 100ms should add the left/debt @@ -121,73 +123,68 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) } } -static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) +static void try_to_schedule_next_vgpu(struct intel_gvt *gvt, + enum intel_engine_id ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - enum intel_engine_id i; - struct intel_engine_cs *engine; struct vgpu_sched_data *vgpu_data; ktime_t cur_time; /* no need to schedule if next_vgpu is the same with current_vgpu, * let scheduler chose next_vgpu again by setting it to NULL. */ - if (scheduler->next_vgpu == scheduler->current_vgpu) { - scheduler->next_vgpu = NULL; + if (scheduler->next_vgpu[ring_id] == scheduler->current_vgpu[ring_id]) { + scheduler->next_vgpu[ring_id] = NULL; return; } + /* no target to schedule */ + if (!scheduler->next_vgpu[ring_id]) + return; + /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu */ - scheduler->need_reschedule = true; + scheduler->need_reschedule[ring_id] = true; /* still have uncompleted workload? 
*/ - for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) - return; - } + if (scheduler->current_workload[ring_id]) + return; cur_time = ktime_get(); - if (scheduler->current_vgpu) { - vgpu_data = scheduler->current_vgpu->sched_data; - vgpu_data->sched_out_time = cur_time; - vgpu_update_timeslice(scheduler->current_vgpu); - } - vgpu_data = scheduler->next_vgpu->sched_data; + vgpu_update_timeslice(scheduler->current_vgpu[ring_id], ring_id, cur_time); + vgpu_data = scheduler->next_vgpu[ring_id]->sched_data[ring_id]; vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ - scheduler->current_vgpu = scheduler->next_vgpu; - scheduler->next_vgpu = NULL; + scheduler->current_vgpu[ring_id] = scheduler->next_vgpu[ring_id]; + scheduler->next_vgpu[ring_id] = NULL; - scheduler->need_reschedule = false; + scheduler->need_reschedule[ring_id] = false; /* wake up workload dispatch thread */ - for_each_engine(engine, gvt->dev_priv, i) - wake_up(&scheduler->waitq[i]); + wake_up(&scheduler->waitq[ring_id]); } -static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) +static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; - struct list_head *head = &sched_data->lru_runq_head; + struct list_head *head = &sched_data->lru_runq_head[ring_id]; struct list_head *pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { - vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); - if (!vgpu_has_pending_workload(vgpu_data->vgpu)) + if (!vgpu_has_pending_workload(vgpu_data->vgpu, ring_id)) continue; - - /* Return the vGPU only if it has time slice left */ - if (vgpu_data->left_ts > 0) { - vgpu = vgpu_data->vgpu; - break; - } + /* Return the vGPU only if it has time slice left */ + if (vgpu_data->left_ts > 0) { + vgpu = vgpu_data->vgpu; + break; + } } return vgpu; @@ -196,50 +193,67 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) /* in nanosecond */ #define GVT_DEFAULT_TIME_SLICE 1000000 -static void tbs_sched_func(struct gvt_sched_data *sched_data) +static void tbs_sched_func(struct gvt_sched_data *sched_data, enum intel_engine_id ring_id) { struct intel_gvt *gvt = sched_data->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; /* no active vgpu or has already had a target */ - if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) + if (list_empty(&sched_data->lru_runq_head[ring_id]) || scheduler->next_vgpu[ring_id]) goto out; - vgpu = find_busy_vgpu(sched_data); + vgpu = find_busy_vgpu(sched_data, ring_id); if (vgpu) { - scheduler->next_vgpu = vgpu; + scheduler->next_vgpu[ring_id] = vgpu; /* Move the last used vGPU to the tail of lru_list */ - vgpu_data = vgpu->sched_data; + vgpu_data = vgpu->sched_data[ring_id]; list_del_init(&vgpu_data->lru_list); list_add_tail(&vgpu_data->lru_list, - &sched_data->lru_runq_head); + &sched_data->lru_runq_head[ring_id]); + + gvt_dbg_sched("pick next vgpu %d\n", vgpu->id); } else { - scheduler->next_vgpu = gvt->idle_vgpu; + scheduler->next_vgpu[ring_id] = gvt->idle_vgpu; } out: - if (scheduler->next_vgpu) - try_to_schedule_next_vgpu(gvt); + if (scheduler->next_vgpu[ring_id]) { + gvt_dbg_sched("try to schedule next vgpu %d\n", + scheduler->next_vgpu[ring_id]->id); + try_to_schedule_next_vgpu(gvt, ring_id); + } } void intel_gvt_schedule(struct intel_gvt *gvt) { 
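The per-ring balancing pass divides each 100 ms period among the vGPUs on an engine in proportion to their configured weights; a vGPU is then only picked from the engine's LRU run queue if it has pending work and budget left. The proportional split itself reduces to the arithmetic below (stand-alone sketch, invented names, integer nanoseconds standing in for ktime_t).

#include <stdint.h>
#include <stdio.h>

#define BALANCE_PERIOD_NS 100000000LL	/* 100 ms, matching GVT_TS_BALANCE_PERIOD_MS */

/* hand each active vGPU a weight-proportional share of the period;
 * run once per balancing period per engine */
static void balance_timeslices(int64_t *allocated, const int *weight, int n)
{
	int64_t total_weight = 0;
	int i;

	for (i = 0; i < n; i++)
		total_weight += weight[i];

	for (i = 0; i < n; i++)
		allocated[i] = BALANCE_PERIOD_NS * weight[i] / total_weight;
}

int main(void)
{
	int weight[3] = { 2, 4, 8 };	/* e.g. three vGPUs with different weights */
	int64_t ts[3];
	int i;

	balance_timeslices(ts, weight, 3);
	for (i = 0; i < 3; i++)
		printf("vgpu%d: %lld ns per 100 ms\n", i, (long long)ts[i]);
	return 0;
}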
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; - static uint64_t timer_check; + static ktime_t check_time; + enum intel_engine_id i; + struct intel_engine_cs *engine; + ktime_t cur_time; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); + cur_time = ktime_get(); if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request)) { - if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS)) - gvt_balance_timeslice(sched_data); + + if (ktime_sub(cur_time, check_time) >= + GVT_TS_BALANCE_PERIOD_MS * NSEC_PER_MSEC) { + check_time = cur_time; + for_each_engine(engine, gvt->dev_priv, i) + gvt_balance_timeslice(sched_data, i); + } } clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request); - tbs_sched_func(sched_data); + for_each_engine(engine, gvt->dev_priv, i) { + vgpu_update_timeslice(gvt->scheduler.current_vgpu[i], i, cur_time); + tbs_sched_func(sched_data, i); + } - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); } static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) @@ -257,6 +271,9 @@ static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) static int tbs_sched_init(struct intel_gvt *gvt) { + enum intel_engine_id i; + struct intel_engine_cs *engine; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -266,7 +283,9 @@ static int tbs_sched_init(struct intel_gvt *gvt) if (!data) return -ENOMEM; - INIT_LIST_HEAD(&data->lru_runq_head); + for_each_engine(engine, gvt->dev_priv, i) + INIT_LIST_HEAD(&data->lru_runq_head[i]); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; @@ -292,35 +311,54 @@ static void tbs_sched_clean(struct intel_gvt *gvt) static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { struct vgpu_sched_data *data; + enum intel_engine_id i; + struct intel_engine_cs *engine; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->sched_ctl.weight = vgpu->sched_ctl.weight; - data->vgpu = vgpu; - INIT_LIST_HEAD(&data->lru_list); + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto err; - vgpu->sched_data = data; + data->sched_ctl.weight = vgpu->sched_ctl.weight; + data->vgpu = vgpu; + INIT_LIST_HEAD(&data->lru_list); + vgpu->sched_data[i] = data; + } return 0; + +err: + for (; i >= 0; i--) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } + return -ENOMEM; } static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) { - kfree(vgpu->sched_data); - vgpu->sched_data = NULL; + enum intel_engine_id i; + struct intel_engine_cs *engine; + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } } static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - - if (!list_empty(&vgpu_data->lru_list)) - return; + struct vgpu_sched_data *vgpu_data[I915_NUM_ENGINES]; + enum intel_engine_id i; + struct intel_engine_cs *engine; - list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data[i] = vgpu->sched_data[i]; + if (!list_empty(&vgpu_data[i]->lru_list)) + continue; + list_add_tail(&vgpu_data[i]->lru_list, &sched_data->lru_runq_head[i]); + } if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), @@ 
-329,9 +367,14 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data[I915_NUM_ENGINES]; + enum intel_engine_id i; + struct intel_engine_cs *engine; - list_del_init(&vgpu_data->lru_list); + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data[i] = vgpu->sched_data[i]; + list_del_init(&vgpu_data[i]->lru_list); + } } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { @@ -377,18 +420,21 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; int ring_id; + enum intel_engine_id i; + struct intel_engine_cs *engine; gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); scheduler->sched_ops->stop_schedule(vgpu); - if (scheduler->next_vgpu == vgpu) - scheduler->next_vgpu = NULL; - - if (scheduler->current_vgpu == vgpu) { - /* stop workload dispatching */ - scheduler->need_reschedule = true; - scheduler->current_vgpu = NULL; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + if (scheduler->next_vgpu[i] == vgpu) + scheduler->next_vgpu[i] = NULL; + if (scheduler->current_vgpu[i] == vgpu) { + /* stop workload dispatching */ + scheduler->need_reschedule[i] = true; + scheduler->current_vgpu[i] = NULL; + } } spin_lock_bh(&scheduler->mmio_context_lock); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 391800d2067b..7932e55b7f25 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -52,6 +52,55 @@ static void set_context_pdp_root_pointer( pdp_pair[i].val = pdp[7 - i]; } +/* + * when populating shadow ctx from guest, we should not overrride oa related + * registers, so that they will not be overlapped by guest oa configs. Thus + * made it possible to capture oa data from host for both host and guests. 
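The idea behind the OA save/restore added to the shadow-context path is simple: before the guest's context image is copied in, the host's OA control and flex-EU values are captured from the register-state area (which stores offset/value pairs), and they are written back afterwards so guest OA configuration never clobbers host profiling. A simplified stand-alone model of just the value slots follows; the offsets and sizes are made up for the example, and the real code also rewrites the register-offset slots, which this model omits.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define N_FLEX 7

struct oa_snapshot {
	uint32_t oactxctrl;
	uint32_t flex[N_FLEX];
};

/* reg_state[] models the context image: slot i holds a register offset,
 * slot i+1 holds its value, so values live at even offsets plus one */
static void sr_oa(uint32_t *reg_state, unsigned int ctrl_off,
		  unsigned int flex0_off, struct oa_snapshot *s, bool save)
{
	int i;

	if (save) {
		s->oactxctrl = reg_state[ctrl_off + 1];
		for (i = 0; i < N_FLEX; i++)
			s->flex[i] = reg_state[flex0_off + i * 2 + 1];
	} else {
		reg_state[ctrl_off + 1] = s->oactxctrl;
		for (i = 0; i < N_FLEX; i++)
			reg_state[flex0_off + i * 2 + 1] = s->flex[i];
	}
}

int main(void)
{
	uint32_t reg_state[64] = { 0 };
	struct oa_snapshot snap;

	reg_state[0x11] = 0xabcd;			/* pretend host OA control value */
	sr_oa(reg_state, 0x10, 0x20, &snap, true);	/* save before guest copy-in */
	memset(reg_state, 0, sizeof(reg_state));	/* guest image overwrites everything */
	sr_oa(reg_state, 0x10, 0x20, &snap, false);	/* restore host OA configuration */
	printf("restored 0x%x\n", reg_state[0x11]);
	return 0;
}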
+ */ +static void sr_oa_regs(struct intel_vgpu_workload *workload, + u32 *reg_state, bool save) +{ + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; + u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; + int i = 0; + u32 flex_mmio[] = { + i915_mmio_reg_offset(EU_PERF_CNTL0), + i915_mmio_reg_offset(EU_PERF_CNTL1), + i915_mmio_reg_offset(EU_PERF_CNTL2), + i915_mmio_reg_offset(EU_PERF_CNTL3), + i915_mmio_reg_offset(EU_PERF_CNTL4), + i915_mmio_reg_offset(EU_PERF_CNTL5), + i915_mmio_reg_offset(EU_PERF_CNTL6), + }; + + if (!workload || !reg_state || workload->ring_id != RCS) + return; + + if (save) { + workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; + + for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { + u32 state_offset = ctx_flexeu0 + i * 2; + + workload->flex_mmio[i] = reg_state[state_offset + 1]; + } + } else { + reg_state[ctx_oactxctrl] = + i915_mmio_reg_offset(GEN8_OACTXCONTROL); + reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; + + for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { + u32 state_offset = ctx_flexeu0 + i * 2; + u32 mmio = flex_mmio[i]; + + reg_state[state_offset] = mmio; + reg_state[state_offset + 1] = workload->flex_mmio[i]; + } + } +} + +static bool enable_lazy_shadow_ctx = true; static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -64,6 +113,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct page *page; void *dst; unsigned long context_gpa, context_page_num; + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct i915_ggtt *ggtt = &gvt->dev_priv->ggtt; + dma_addr_t addr; + gen8_pte_t __iomem *pte; int i; gvt_dbg_sched("ring id %d workload lrca %x", ring_id, @@ -77,6 +130,18 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = 19; i = 2; +#ifdef CONFIG_INTEL_IOMMU + /* + * In case IOMMU for graphics is turned on, we don't want to + * turn on lazy shadow context feature because it will touch + * GGTT entries which require a BKL and since this is a + * performance enhancement feature, we will end up negating + * the performance. 
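The lazy shadow-context path that follows avoids copying guest context pages: it translates the guest frame number to a machine frame number and writes that directly into the shadow context's GGTT entry, so the GPU reads the guest page in place. The packing step is sketched below; the bit positions are placeholders chosen for the example, not the hardware's actual gen8 PTE or PPAT encoding, and the flush that makes the GPU observe the new entry is only noted in a comment.

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT	(1ULL << 0)
#define PTE_RW		(1ULL << 1)
#define PTE_CACHED	(1ULL << 7)	/* stands in for the PPAT cache index bit */

/* build a PTE that aliases the guest page instead of copying it */
static uint64_t make_pte(uint64_t mfn)
{
	return (mfn << 12) | PTE_PRESENT | PTE_RW | PTE_CACHED;
}

int main(void)
{
	uint64_t pte = make_pte(0x12345);	/* made-up machine frame number */

	printf("pte = 0x%llx\n", (unsigned long long)pte);
	/* the driver then writes the value into the GGTT entry and flushes
	 * the GTT caches so the aliased page becomes visible to the GPU */
	return 0;
}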
+ */ + if(intel_iommu_gfx_mapped) { + enable_lazy_shadow_ctx = false; + } +#endif while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, @@ -87,17 +152,44 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EINVAL; } - page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); - dst = kmap(page); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, + if (!enable_lazy_shadow_ctx) { + page = i915_gem_object_get_page(ctx_obj, + LRC_HEADER_PAGES + i); + dst = kmap(page); + intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, GTT_PAGE_SIZE); - kunmap(page); + kunmap(page); + } else { + unsigned long mfn; + + addr = i915_ggtt_offset( + shadow_ctx->engine[ring_id].state) + + (LRC_PPHWSP_PN + i) * PAGE_SIZE; + pte = (gen8_pte_t __iomem *)ggtt->gsm + + (addr >> PAGE_SHIFT); + + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, + context_gpa >> 12); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn during context shadow\n"); + return -ENXIO; + } + + mfn <<= 12; + mfn |= _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED_INDEX; + writeq(mfn, pte); + } + i++; } + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); shadow_ring_context = kmap(page); + sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) @@ -122,6 +214,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sizeof(*shadow_ring_context), GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); + sr_oa_regs(workload, (u32 *)shadow_ring_context, false); kunmap(page); return 0; } @@ -141,19 +234,8 @@ static int shadow_context_status_change(struct notifier_block *nb, enum intel_engine_id ring_id = req->engine->id; struct intel_vgpu_workload *workload; - if (!is_gvt_request(req)) { - spin_lock_bh(&scheduler->mmio_context_lock); - if (action == INTEL_CONTEXT_SCHEDULE_IN && - scheduler->engine_owner[ring_id]) { - /* Switch ring from vGPU to host. */ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; - } - spin_unlock_bh(&scheduler->mmio_context_lock); - + if (!is_gvt_request(req)) return NOTIFY_OK; - } workload = scheduler->current_workload[ring_id]; if (unlikely(!workload)) @@ -161,23 +243,12 @@ static int shadow_context_status_change(struct notifier_block *nb, switch (action) { case INTEL_CONTEXT_SCHEDULE_IN: - spin_lock_bh(&scheduler->mmio_context_lock); - if (workload->vgpu != scheduler->engine_owner[ring_id]) { - /* Switch ring from host to vGPU or vGPU to vGPU. 
*/ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - workload->vgpu, ring_id); - scheduler->engine_owner[ring_id] = workload->vgpu; - } else - gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", - ring_id, workload->vgpu->id); - spin_unlock_bh(&scheduler->mmio_context_lock); atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: atomic_set(&workload->shadow_ctx_active, 0); break; default: - WARN_ON(1); return NOTIFY_OK; } wake_up(&workload->shadow_ctx_status_wq); @@ -201,6 +272,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx, ce->lrc_desc = desc; } +static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) +{ + struct intel_vgpu *vgpu = workload->vgpu; + void *shadow_ring_buffer_va; + u32 *cs; + + /* allocate shadow ring buffer */ + cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); + if (IS_ERR(cs)) { + gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n", + workload->rb_len); + return PTR_ERR(cs); + } + + shadow_ring_buffer_va = workload->shadow_ring_buffer_va; + + /* get shadow ring buffer va */ + workload->shadow_ring_buffer_va = cs; + + memcpy(cs, shadow_ring_buffer_va, + workload->rb_len); + + cs += workload->rb_len / sizeof(u32); + intel_ring_advance(workload->req, cs); + + return 0; +} + +void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +{ + if (!wa_ctx->indirect_ctx.obj) + return; + + i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); + i915_gem_object_put(wa_ctx->indirect_ctx.obj); +} + /** * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and * shadow it as well, include ringbuffer,wa_ctx and ctx. @@ -214,8 +322,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; struct drm_i915_gem_request *rq; struct intel_vgpu *vgpu = workload->vgpu; + struct intel_ring *ring; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); @@ -231,46 +341,121 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) shadow_context_descriptor_update(shadow_ctx, dev_priv->engine[ring_id]); + ret = intel_gvt_scan_and_shadow_ringbuffer(workload); + if (ret) + goto err_scan; + + if ((workload->ring_id == RCS) && + (workload->wa_ctx.indirect_ctx.size != 0) + && gvt_shadow_wa_ctx) { + ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); + if (ret) + goto err_scan; + } + + /* pin shadow context by gvt even the shadow context will be pinned + * when i915 alloc request. That is because gvt will update the guest + * context from shadow context when workload is completed, and at that + * moment, i915 may already unpined the shadow context to make the + * shadow_ctx pages invalid. So gvt need to pin itself. After update + * the guest context, gvt can unpin the shadow_ctx safely. 
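The reordered shadow setup acquires its resources step by step (scan the ring buffer and wa_ctx, pin the shadow context, populate it, then allocate the request) and unwinds them in reverse through the err_unpin/err_shadow/err_scan labels, so a failure at any step releases exactly what was taken before it. Reduced to a stand-alone sketch with placeholder steps:

#include <stdio.h>

static int scan_step(void)     { return 0; }	/* scan + shadow ring buffer / wa_ctx */
static int pin_step(void)      { return 0; }	/* pin the shadow context */
static int populate_step(void) { return -1; }	/* pretend this step fails */
static void unpin(void)        { puts("unpin context"); }
static void release_wa(void)   { puts("release wa_ctx"); }

static int shadow_workload(void)
{
	int ret;

	ret = scan_step();
	if (ret)
		goto err_scan;
	ret = pin_step();
	if (ret)
		goto err_shadow;
	ret = populate_step();
	if (ret)
		goto err_unpin;
	return 0;

err_unpin:
	unpin();
err_shadow:
	release_wa();
err_scan:
	return ret;
}

int main(void)
{
	printf("shadow_workload() = %d\n", shadow_workload());
	return 0;
}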
+ */ + ring = engine->context_pin(engine, shadow_ctx); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); + gvt_vgpu_err("fail to pin shadow context\n"); + goto err_shadow; + } + + ret = populate_shadow_context(workload); + if (ret) + goto err_unpin; + rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if (IS_ERR(rq)) { gvt_vgpu_err("fail to allocate gem request\n"); ret = PTR_ERR(rq); - goto out; + goto err_unpin; } gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq); workload->req = i915_gem_request_get(rq); - ret = intel_gvt_scan_and_shadow_ringbuffer(workload); - if (ret) - goto out; - - if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { - ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); - if (ret) - goto out; + /* we consider this as an workaround to avoid the situation that + * PDP's not updated, and right now we only limit it to BXT platform + * since it's not reported on the other platforms + */ + if (IS_BROXTON(vgpu->gvt->dev_priv)) { + ret = gvt_emit_pdps(workload); + if (ret) { + i915_gem_request_put(rq); + workload->req = NULL; + goto err_unpin; + } } - ret = populate_shadow_context(workload); + ret = copy_workload_to_ring_buffer(workload); if (ret) - goto out; - + goto err_unpin; workload->shadowed = true; + return 0; -out: +err_unpin: + engine->context_unpin(engine, shadow_ctx); +err_shadow: + release_shadow_wa_ctx(&workload->wa_ctx); +err_scan: return ret; } +static void gen8_shadow_pid_cid(struct intel_vgpu_workload *workload) +{ + int ring_id = workload->ring_id; + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + u32 *cs; + + /* Copy the PID and CID from the guest's HWS page to the host's one */ + cs = intel_ring_begin(workload->req, 16); + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << GTT_PAGE_SHIFT) + I915_GEM_HWS_PID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_PID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << GTT_PAGE_SHIFT) + I915_GEM_HWS_CID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_CID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + intel_ring_advance(workload->req, cs); +} + +static int sanitize_priority(int priority) +{ + if (priority > I915_CONTEXT_MAX_USER_PRIORITY) + return I915_CONTEXT_MAX_USER_PRIORITY; + else if (priority < I915_CONTEXT_MIN_USER_PRIORITY) + return I915_CONTEXT_MIN_USER_PRIORITY; + return priority; +} + static int dispatch_workload(struct intel_vgpu_workload *workload) { int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - struct intel_vgpu *vgpu = workload->vgpu; - struct intel_ring *ring; + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_ring *ring; int ret = 0; gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", @@ -279,11 +464,22 @@ static int 
dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); + + if (i915_modparams.enable_conformance_check + && intel_gvt_vgpu_conformance_check(vgpu, ring_id)) + gvt_err("vgpu%d unconformance guest detected\n", vgpu->id); + if (ret) goto out; + gen8_shadow_pid_cid(workload); + if (workload->prepare) { + mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_lock(&vgpu->gvt->lock); + mutex_lock(&dev_priv->drm.struct_mutex); ret = workload->prepare(workload); + mutex_unlock(&vgpu->gvt->lock); if (ret) goto out; } @@ -302,6 +498,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) goto out; } + workload->guilty_count = atomic_read(&workload->req->ctx->guilty_count); out: if (ret) workload->status = ret; @@ -309,6 +506,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); + shadow_ctx->priority = i915_modparams.gvt_workload_priority = + sanitize_priority(i915_modparams.gvt_workload_priority); i915_add_request(workload->req); workload->dispatched = true; } @@ -323,24 +522,27 @@ static struct intel_vgpu_workload *pick_next_workload( struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); /* * no current vgpu / will be scheduled out / no workload * bail out */ - if (!scheduler->current_vgpu) { + if (!scheduler->current_vgpu[ring_id]) { gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id); goto out; } - if (scheduler->need_reschedule) { + if (scheduler->need_reschedule[ring_id]) { gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id); goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) + if (list_empty(workload_q_head(scheduler->current_vgpu[ring_id], ring_id))) { + gvt_dbg_sched("ring id %d stop - no available workload\n", + ring_id); goto out; + } /* * still have current workload, maybe the workload disptacher @@ -360,7 +562,7 @@ static struct intel_vgpu_workload *pick_next_workload( * schedule out a vgpu. 
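With the scheduler state split per engine, the workload pick for a ring now depends only on that ring's own current vGPU, reschedule flag and queue, so a stalled or reschedule-pending ring no longer blocks dispatch on the others. A stand-alone model of the guard conditions (names invented for the example):

#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 5

struct sched_model {
	int  current_vgpu[NUM_RINGS];	/* -1 means no vGPU owns the ring */
	bool need_reschedule[NUM_RINGS];
	int  queued[NUM_RINGS];		/* pending workloads per ring */
};

static int pick_next(struct sched_model *s, int ring)
{
	if (s->current_vgpu[ring] < 0)
		return -1;		/* no current vgpu on this ring */
	if (s->need_reschedule[ring])
		return -1;		/* will reschedule, do not dispatch */
	if (!s->queued[ring])
		return -1;		/* no available workload on this ring */
	return s->current_vgpu[ring];
}

int main(void)
{
	struct sched_model s = {
		.current_vgpu = { 1, -1, 1, 1, 1 },
		.need_reschedule = { false, false, true, false, false },
		.queued = { 2, 0, 1, 0, 3 },
	};
	int ring;

	for (ring = 0; ring < NUM_RINGS; ring++)
		printf("ring %d -> vgpu %d\n", ring, pick_next(&s, ring));
	return 0;
}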
*/ scheduler->current_workload[ring_id] = container_of( - workload_q_head(scheduler->current_vgpu, ring_id)->next, + workload_q_head(scheduler->current_vgpu[ring_id], ring_id)->next, struct intel_vgpu_workload, list); workload = scheduler->current_workload[ring_id]; @@ -369,7 +571,7 @@ static struct intel_vgpu_workload *pick_next_workload( atomic_inc(&workload->vgpu->running_workload_num); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); return workload; } @@ -390,32 +592,33 @@ static void update_guest_context(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id, workload->ctx_desc.lrca); - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + if (!enable_lazy_shadow_ctx) { + context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + context_page_num = context_page_num >> PAGE_SHIFT; - context_page_num = context_page_num >> PAGE_SHIFT; + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + context_page_num = 19; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) - context_page_num = 19; - - i = 2; + i = 2; - while (i < context_page_num) { - context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + while (i < context_page_num) { + context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << GTT_PAGE_SHIFT)); - if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid guest context descriptor\n"); - return; + if (context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("invalid guest context descriptor\n"); + return; + } + + page = i915_gem_object_get_page(ctx_obj, + LRC_HEADER_PAGES + i); + src = kmap(page); + intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, + GTT_PAGE_SIZE); + kunmap(page); + i++; } - - page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); - src = kmap(page); - intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, - GTT_PAGE_SIZE); - kunmap(page); - i++; } - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); @@ -448,7 +651,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) struct intel_vgpu *vgpu; int event; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); workload = scheduler->current_workload[ring_id]; vgpu = workload->vgpu; @@ -483,9 +686,11 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) ENGINE_MASK(ring_id))) { update_guest_context(workload); + mutex_lock(&gvt->lock); for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) intel_vgpu_trigger_virtual_event(vgpu, event); + mutex_unlock(&gvt->lock); } mutex_lock(&dev_priv->drm.struct_mutex); /* unpin shadow ctx as the shadow_ctx update is done */ @@ -498,7 +703,11 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; + mutex_lock(&gvt->lock); list_del_init(&workload->list); + if (workload->status == -EIO) + intel_vgpu_reset_execlist(vgpu, 1 << ring_id); + workload->complete(workload); atomic_dec(&vgpu->running_workload_num); @@ -508,6 +717,19 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); +} + +static void inject_error_cs_irq(struct intel_vgpu *vgpu, int ring_id) +{ + enum intel_gvt_event_type events[] = { + RCS_CMD_STREAMER_ERR, + BCS_CMD_STREAMER_ERR, + VCS_CMD_STREAMER_ERR, + VCS2_CMD_STREAMER_ERR, + 
VECS_CMD_STREAMER_ERR, + }; + intel_vgpu_trigger_virtual_event(vgpu, events[ring_id]); } struct workload_thread_param { @@ -524,8 +746,11 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; + long lret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv); + DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); @@ -559,9 +784,9 @@ static int workload_thread(void *priv) intel_uncore_forcewake_get(gvt->dev_priv, FORCEWAKE_ALL); - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); ret = dispatch_workload(workload); - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); if (ret) { vgpu = workload->vgpu; @@ -571,7 +796,24 @@ static int workload_thread(void *priv) gvt_dbg_sched("ring id %d wait workload %p\n", workload->ring_id, workload); - i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); + lret = i915_wait_request(workload->req, 0, + MAX_SCHEDULE_TIMEOUT); + + gvt_dbg_sched("i915_wait_request %p returns %ld\n", + workload, lret); + if (lret >= 0 && workload->status == -EINPROGRESS) + workload->status = 0; + + /* + * increased guilty_count means that this request triggerred + * a GPU reset, so we need to notify the guest about the + * hang. + */ + if (workload->guilty_count < + atomic_read(&workload->req->ctx->guilty_count)) { + workload->status = -EIO; + inject_error_cs_irq(workload->vgpu, ring_id); + } complete: gvt_dbg_sched("will complete workload %p, status: %d\n", @@ -610,9 +852,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) gvt_dbg_core("clean workload scheduler\n"); for_each_engine(engine, gvt->dev_priv, i) { - atomic_notifier_chain_unregister( - &engine->context_status_notifier, - &gvt->shadow_ctx_notifier_block[i]); kthread_stop(scheduler->thread[i]); } } @@ -649,11 +888,13 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) goto err; } - gvt->shadow_ctx_notifier_block[i].notifier_call = - shadow_context_status_change; - atomic_notifier_chain_register(&engine->context_status_notifier, - &gvt->shadow_ctx_notifier_block[i]); + + gvt->shadow_ctx_notifier_block[i].notifier_call = + shadow_context_status_change; + atomic_notifier_chain_register(&engine->context_status_notifier, + &gvt->shadow_ctx_notifier_block[i]); } + return 0; err: intel_gvt_clean_workload_scheduler(gvt); @@ -676,6 +917,10 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) if (IS_ERR(vgpu->shadow_ctx)) return PTR_ERR(vgpu->shadow_ctx); + if (!vgpu->shadow_ctx->name) { + vgpu->shadow_ctx->name = kasprintf(GFP_KERNEL, "Shadow Context %d", vgpu->id); + } + vgpu->shadow_ctx->engine[RCS].initialised = true; bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 93a49eb0209e..d506e3acd08f 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -37,10 +37,10 @@ #define _GVT_SCHEDULER_H_ struct intel_gvt_workload_scheduler { - struct intel_vgpu *current_vgpu; - struct intel_vgpu *next_vgpu; + struct intel_vgpu *current_vgpu[I915_NUM_ENGINES]; + struct intel_vgpu *next_vgpu[I915_NUM_ENGINES]; struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES]; - bool need_reschedule; + bool need_reschedule[I915_NUM_ENGINES]; spinlock_t mmio_context_lock; /* can be null when owner is host */ @@ -85,6 +85,7 @@ struct intel_vgpu_workload { bool dispatched; bool shadowed; int status; + unsigned int 
guilty_count; struct intel_vgpu_mm *shadow_mm; @@ -110,6 +111,10 @@ struct intel_vgpu_workload { /* shadow batch buffer */ struct list_head shadow_bb; struct intel_shadow_wa_ctx wa_ctx; + + /* oa registers */ + u32 oactxctrl; + u32 flex_mmio[7]; }; /* Intel shadow batch buffer is a i915 gem object */ @@ -141,4 +146,8 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu); void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); +void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); + +extern bool gvt_shadow_wa_ctx; + #endif diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 02c61a1ad56a..b6c9bbf55240 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -55,6 +55,8 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); + vgpu_vreg(vgpu, vgtif_reg(enable_pvmmio)) = 0; + gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); @@ -222,19 +224,25 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&gvt->lock); vgpu->active = false; + idr_remove(&gvt->vgpu_idr, vgpu->id); + if (atomic_read(&vgpu->running_workload_num)) { mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&gvt->lock); } intel_vgpu_stop_schedule(vgpu); mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); } /** @@ -384,6 +392,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_shadow_ctx; + vgpu->active = true; + mutex_unlock(&gvt->lock); return vgpu; @@ -448,6 +458,8 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, return vgpu; } +#define _vgtif_reg(x) \ + (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)) /** * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset * @vgpu: virtual GPU @@ -482,6 +494,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; + bool enable_pvmmio = vgpu_vreg(vgpu, _vgtif_reg(enable_pvmmio)); gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", @@ -494,9 +507,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, * The current_vgpu will set to NULL after stopping the * scheduler when the reset is triggered by current vgpu. 
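The guilty_count field added to the workload backs the hang propagation in workload_thread(): the context's guilty count is snapshotted at dispatch, and if it has grown by the time the request completes, the workload was involved in a GPU reset, so its status becomes -EIO and a command-streamer error interrupt is raised towards the guest. A minimal model of that comparison (invented names, -5 standing in for -EIO):

#include <stdio.h>

struct workload_model {
	unsigned int guilty_at_dispatch;
	int status;
};

static void notify_guest_hang(void)
{
	puts("inject CS error interrupt into guest");
}

static void complete(struct workload_model *w, unsigned int guilty_now)
{
	if (w->status == 0 && guilty_now > w->guilty_at_dispatch) {
		w->status = -5;		/* -EIO in the driver */
		notify_guest_hang();
	}
}

int main(void)
{
	struct workload_model w = { .guilty_at_dispatch = 0, .status = 0 };

	complete(&w, 1);	/* guilty count grew while the request ran */
	printf("status = %d\n", w.status);
	return 0;
}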
*/ - if (scheduler->current_vgpu == NULL) { + if (scheduler->current_vgpu[0] == NULL) { mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&gvt->lock); } @@ -513,6 +528,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); + vgpu_vreg(vgpu, _vgtif_reg(enable_pvmmio)) = enable_pvmmio; intel_vgpu_reset_display(vgpu); if (dmlr) { @@ -537,7 +553,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&vgpu->gvt->lock); intel_gvt_reset_vgpu_locked(vgpu, true, 0); mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e4d4b6b41e26..bfacdc93c9f0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -42,13 +42,13 @@ static __always_inline void seq_print_param(struct seq_file *m, const void *x) { if (!__builtin_strcmp(type, "bool")) - seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x)); + seq_printf(m, "i915_modparams.%s=%s\n", name, yesno(*(const bool *)x)); else if (!__builtin_strcmp(type, "int")) - seq_printf(m, "i915.%s=%d\n", name, *(const int *)x); + seq_printf(m, "i915_modparams.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) - seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x); + seq_printf(m, "i915_modparams.%s=%u\n", name, *(const unsigned int *)x); else if (!__builtin_strcmp(type, "char *")) - seq_printf(m, "i915.%s=%s\n", name, *(const char **)x); + seq_printf(m, "i915_modparams.%s=%s\n", name, *(const char **)x); else BUILD_BUG(); } @@ -67,7 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data) #undef PRINT_FLAG kernel_param_lock(THIS_MODULE); -#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x); +#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915_modparams.x); I915_PARAMS_FOR_EACH(PRINT_PARAM); #undef PRINT_PARAM kernel_param_unlock(THIS_MODULE); @@ -1043,7 +1043,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 freq_sts; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); @@ -1066,7 +1066,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "efficient (RPe) frequency: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } else if (INTEL_GEN(dev_priv) >= 6) { u32 rp_state_limits; u32 gt_perf_status; @@ -1267,7 +1267,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) seq_puts(m, "struct_mutex blocked for reset\n"); - if (!i915.enable_hangcheck) { + if (!i915_modparams.enable_hangcheck) { seq_puts(m, "Hangcheck disabled\n"); return 0; } @@ -1422,6 +1422,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data) struct intel_uncore_forcewake_domain *fw_domain; unsigned int tmp; + seq_printf(m, "user.bypass_count = %u\n", + i915->uncore.user_forcewake.count); + 
for_each_fw_domain(fw_domain, i915, tmp) seq_printf(m, "%s.wake_count = %u\n", intel_uncore_forcewake_domain_to_str(fw_domain->id), @@ -1502,9 +1505,9 @@ static int gen6_drpc_info(struct seq_file *m) gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); } - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); @@ -1699,7 +1702,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); seq_printf(m, "Enabled by kernel parameter: %s\n", - yesno(i915.enable_ips)); + yesno(i915_modparams.enable_ips)); if (INTEL_GEN(dev_priv) >= 8) { seq_puts(m, "Currently: unknown\n"); @@ -1786,7 +1789,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) goto out; @@ -1817,7 +1820,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ((ia_freq >> 8) & 0xff) * 100); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); out: intel_runtime_pm_put(dev_priv); @@ -1910,6 +1913,49 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) ring->space, ring->head, ring->tail); } +bool is_shadow_context(struct i915_gem_context *ctx) +{ + if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) + return true; + + return false; +} + +int get_vgt_id(struct i915_gem_context *ctx) +{ + int vgt_id; + + vgt_id = 0; + + if (is_shadow_context(ctx)) + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + + return vgt_id; +} + +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + int pid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + return pid; +} + +static void describe_ctx_ring_shadowed(struct seq_file *m, + struct i915_gem_context *ctx, struct intel_ring *ring, + struct intel_engine_cs *engine) +{ + int pid, cid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + cid = intel_read_status_page(engine, I915_GEM_HWS_CID_INDEX + vgt_id); + seq_printf(m, " (Current DomU Process PID: %d, CID: %d)", + pid, cid); +} + static int i915_context_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1924,6 +1970,7 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + bool is_shadow_context = false; seq_printf(m, "HW context %u ", ctx->hw_id); if (ctx->pid) { struct task_struct *task; @@ -1934,6 +1981,9 @@ static int i915_context_status(struct seq_file *m, void *unused) task->comm, task->pid); put_task_struct(task); } + } else if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) { + seq_puts(m, "DomU Shadow Context "); + is_shadow_context = true; } else if (IS_ERR(ctx->file_priv)) { seq_puts(m, "(deleted) "); } else { @@ -1945,13 +1995,22 @@ static int i915_context_status(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) { struct intel_context *ce = &ctx->engine[engine->id]; + u64 lrc_desc = intel_lr_context_descriptor(ctx, + engine); seq_printf(m, 
"%s: ", engine->name); + seq_printf(m, "ctx id 0x%x ", (uint32_t)((lrc_desc >> 12) & + 0xFFFFF)); seq_putc(m, ce->initialised ? 'I' : 'i'); if (ce->state) describe_obj(m, ce->state->obj); - if (ce->ring) + if (ce->ring) { describe_ctx_ring(m, ce->ring); + if(is_shadow_context) + describe_ctx_ring_shadowed(m, ctx, + ce->ring, engine); + } + seq_putc(m, '\n'); } @@ -2014,7 +2073,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) enum intel_engine_id id; int ret; - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { seq_printf(m, "Logical Ring Contexts are disabled\n"); return 0; } @@ -2443,12 +2502,8 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", client->priority, client->stage_id, client->proc_desc_offset); - seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n", - client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); - seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", - client->wq_size, client->wq_offset, client->wq_tail); - - seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); + seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", + client->doorbell_id, client->doorbell_offset); for_each_engine(engine, dev_priv, id) { u64 submissions = client->submissions[id]; @@ -2594,7 +2649,7 @@ static int i915_guc_log_control_get(void *data, u64 *val) if (!dev_priv->guc.log.vma) return -EINVAL; - *val = i915.guc_log_level; + *val = i915_modparams.guc_log_level; return 0; } @@ -3312,7 +3367,9 @@ static int i915_engine_info(struct seq_file *m, void *unused) seq_printf(m, "\tBBADDR: 0x%08x_%08x\n", upper_32_bits(addr), lower_32_bits(addr)); - if (i915.enable_execlists) { + if (i915_modparams.enable_execlists) { + const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + struct intel_engine_execlists * const execlists = &engine->execlists; u32 ptr, read, write; unsigned int idx; @@ -3323,8 +3380,10 @@ static int i915_engine_info(struct seq_file *m, void *unused) ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); read = GEN8_CSB_READ_PTR(ptr); write = GEN8_CSB_WRITE_PTR(ptr); - seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n", - read, write, + seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? 
%s\n", + read, execlists->csb_head, + write, + intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), yesno(test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))); if (read >= GEN8_CSB_ENTRIES) @@ -3335,18 +3394,19 @@ static int i915_engine_info(struct seq_file *m, void *unused) write += GEN8_CSB_ENTRIES; while (read < write) { idx = ++read % GEN8_CSB_ENTRIES; - seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", + seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", idx, I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), - I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); + hws[idx * 2], + I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), + hws[idx * 2 + 1]); } rcu_read_lock(); - for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) { + for (idx = 0; idx < execlists_num_ports(execlists); idx++) { unsigned int count; - rq = port_unpack(&engine->execlist_port[idx], - &count); + rq = port_unpack(&execlists->port[idx], &count); if (rq) { seq_printf(m, "\t\tELSP[%d] count=%d, ", idx, count); @@ -3359,7 +3419,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) rcu_read_unlock(); spin_lock_irq(&engine->timeline->lock); - for (rb = engine->execlist_first; rb; rb = rb_next(rb)){ + for (rb = execlists->first; rb; rb = rb_next(rb)) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); @@ -3403,7 +3463,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) enum intel_engine_id id; int j, ret; - if (!i915.semaphores) { + if (!i915_modparams.semaphores) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -3549,11 +3609,18 @@ static int i915_ddb_info(struct seq_file *m, void *unused) seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, entry->start, entry->end, skl_ddb_entry_size(entry)); + entry = &ddb->y_plane[pipe][plane]; + seq_printf(m, " YPlane%-8d%8u%8u%8u\n", plane + 1, + entry->start, entry->end, + skl_ddb_entry_size(entry)); } entry = &ddb->plane[pipe][PLANE_CURSOR]; seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, entry->end, skl_ddb_entry_size(entry)); + entry = &ddb->y_plane[pipe][PLANE_CURSOR]; + seq_printf(m, " Y%-13s%8u%8u%8u\n", "Cursor", entry->start, + entry->end, skl_ddb_entry_size(entry)); } drm_modeset_unlock_all(dev); @@ -4153,6 +4220,9 @@ i915_wedged_set(void *data, u64 val) struct intel_engine_cs *engine; unsigned int tmp; + if (intel_vgpu_active(i915)) + return -EINVAL; + /* * There is no safeguard against this debugfs entry colliding * with the hangcheck calling same i915_handle_error() in @@ -4353,7 +4423,7 @@ i915_max_freq_set(void *data, u64 val) DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) return ret; @@ -4366,7 +4436,7 @@ i915_max_freq_set(void *data, u64 val) hw_min = dev_priv->rps.min_freq; if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return -EINVAL; } @@ -4375,7 +4445,7 @@ i915_max_freq_set(void *data, u64 val) if (intel_set_rps(dev_priv, val)) DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return 0; } @@ -4408,7 +4478,7 @@ i915_min_freq_set(void *data, u64 val) DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = 
mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) return ret; @@ -4422,7 +4492,7 @@ i915_min_freq_set(void *data, u64 val) if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return -EINVAL; } @@ -4431,7 +4501,7 @@ i915_min_freq_set(void *data, u64 val) if (intel_set_rps(dev_priv, val)) DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return 0; } @@ -4674,26 +4744,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { - struct drm_i915_private *dev_priv = inode->i_private; + struct drm_i915_private *i915 = inode->i_private; - if (INTEL_GEN(dev_priv) < 6) + if (INTEL_GEN(i915) < 6) return 0; - intel_runtime_pm_get(dev_priv); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_get(i915); + intel_uncore_forcewake_user_get(i915); return 0; } static int i915_forcewake_release(struct inode *inode, struct file *file) { - struct drm_i915_private *dev_priv = inode->i_private; + struct drm_i915_private *i915 = inode->i_private; - if (INTEL_GEN(dev_priv) < 6) + if (INTEL_GEN(i915) < 6) return 0; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_uncore_forcewake_user_put(i915); + intel_runtime_pm_put(i915); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9f45cfeae775..2edeea2584f1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -49,6 +49,7 @@ #include "i915_drv.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_uc.h" #include "intel_drv.h" #include "intel_uc.h" @@ -58,12 +59,12 @@ static unsigned int i915_load_fail_count; bool __i915_inject_load_failure(const char *func, int line) { - if (i915_load_fail_count >= i915.inject_load_failure) + if (i915_load_fail_count >= i915_modparams.inject_load_failure) return false; - if (++i915_load_fail_count == i915.inject_load_failure) { + if (++i915_load_fail_count == i915_modparams.inject_load_failure) { DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", - i915.inject_load_failure, func, line); + i915_modparams.inject_load_failure, func, line); return true; } @@ -106,8 +107,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, static bool i915_error_injected(struct drm_i915_private *dev_priv) { - return i915.inject_load_failure && - i915_load_fail_count == i915.inject_load_failure; + return i915_modparams.inject_load_failure && + i915_load_fail_count == i915_modparams.inject_load_failure; } #define i915_load_error(dev_priv, fmt, ...) 
\ @@ -146,6 +147,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { ret = PCH_CNP; DRM_DEBUG_KMS("Assuming CannonPoint PCH\n"); + } else if (IS_BROXTON(dev_priv)) { + ret = PCH_NONE; + DRM_DEBUG_KMS("Assuming None PCH for BXT\n"); } return ret; @@ -320,7 +324,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = USES_PPGTT(dev_priv); break; case I915_PARAM_HAS_SEMAPHORES: - value = i915.semaphores; + value = i915_modparams.semaphores; break; case I915_PARAM_HAS_SECURE_BATCHES: value = capable(CAP_SYS_ADMIN); @@ -339,7 +343,8 @@ static int i915_getparam(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_HAS_GPU_RESET: - value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); + value = i915_modparams.enable_hangcheck && + intel_has_gpu_reset(dev_priv); if (value && intel_has_reset_engine(dev_priv)) value = 2; break; @@ -365,9 +370,18 @@ static int i915_getparam(struct drm_device *dev, void *data, value = i915_gem_mmap_gtt_version(); break; case I915_PARAM_HAS_SCHEDULER: - value = dev_priv->engine[RCS] && - dev_priv->engine[RCS]->schedule; + value = 0; + if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) { + value |= I915_SCHEDULER_CAP_ENABLED; + value |= I915_SCHEDULER_CAP_PRIORITY; + + if (INTEL_INFO(dev_priv)->has_logical_ring_preemption && + i915_modparams.enable_execlists && + !i915_modparams.enable_guc_submission) + value |= I915_SCHEDULER_CAP_PREEMPTION; + } break; + case I915_PARAM_MMAP_VERSION: /* Remember to bump this if the version changes! */ case I915_PARAM_HAS_GEM: @@ -872,6 +886,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, device_info->gen_mask = BIT(device_info->gen - 1); spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->shared_page_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); @@ -890,6 +905,14 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, if (ret < 0) goto err_engines; + if (IS_BROXTON(dev_priv)) { + struct intel_device_info *info = mkwrite_device_info(dev_priv); + + info->num_sprites[PIPE_A] = 2; + info->num_sprites[PIPE_B] = 2; + info->num_sprites[PIPE_C] = 1; + } + /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); @@ -974,6 +997,9 @@ static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) intel_teardown_mchbar(dev_priv); pci_iounmap(pdev, dev_priv->regs); + if (intel_vgpu_active(dev_priv) && dev_priv->shared_page) + pci_iounmap(pdev, dev_priv->shared_page); + } /** @@ -1001,6 +1027,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) intel_uncore_init(dev_priv); + if (intel_vgpu_active(dev_priv) && i915_modparams.enable_pvmmio) { + u32 bar = 0; + u32 mmio_size = 2 * 1024 * 1024; + + /* Map a share page from the end of 2M mmio region in bar0. 
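The PV MMIO shared page sits one page past the 2 MB register block in BAR0; the driver maps it from the kernel with pci_iomap_range() at that offset. As an illustration of the same layout from userspace, the snippet below mmaps the equivalent range through the PCI sysfs resource file; the device path is only an example, and this is not how the driver itself accesses the page.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define GVT_MMIO_SIZE	(2 * 1024 * 1024)	/* register block, shared page follows */

int main(void)
{
	int fd = open("/sys/bus/pci/devices/0000:00:02.0/resource0", O_RDWR);
	void *page;

	if (fd < 0) {
		perror("open BAR0");
		return 1;
	}
	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, GVT_MMIO_SIZE);	/* one page right after the 2 MB window */
	if (page == MAP_FAILED) {
		perror("mmap shared page");
		close(fd);
		return 1;
	}
	printf("shared page mapped at %p\n", page);
	munmap(page, 4096);
	close(fd);
	return 0;
}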
*/ + dev_priv->shared_page = (struct gvt_shared_page *) + pci_iomap_range(dev_priv->drm.pdev, bar, + mmio_size, PAGE_SIZE); + if (dev_priv->shared_page == NULL) { + ret = -EIO; + DRM_ERROR("ivi: failed to map share page.\n"); + goto err_uncore; + } + } + ret = intel_engines_init_mmio(dev_priv); if (ret) goto err_uncore; @@ -1010,6 +1051,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) return 0; err_uncore: + if (intel_vgpu_active(dev_priv) && dev_priv->shared_page) + pci_iounmap(dev_priv->drm.pdev, dev_priv->shared_page); intel_uncore_fini(dev_priv); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1030,22 +1073,25 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) static void intel_sanitize_options(struct drm_i915_private *dev_priv) { - i915.enable_execlists = + i915_modparams.enable_execlists = intel_sanitize_enable_execlists(dev_priv, - i915.enable_execlists); + i915_modparams.enable_execlists); /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the + * i915_modparams.enable_ppgtt is read-only, so do an early pass to validate the * user's requested state against the hardware/driver capabilities. We * do this now so that we can print out any log messages once rather * than every time we check intel_enable_ppgtt(). */ - i915.enable_ppgtt = - intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); + i915_modparams.enable_ppgtt = + intel_sanitize_enable_ppgtt(dev_priv, + i915_modparams.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt); - i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); - DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores)); + i915_modparams.semaphores = + intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores); + DRM_DEBUG_DRIVER("use GPU semaphores? 
%s\n", + yesno(i915_modparams.semaphores)); intel_uc_sanitize_options(dev_priv); @@ -1257,6 +1303,41 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_gem_shrinker_cleanup(dev_priv); } +#ifdef CONFIG_DRM_I915_LOAD_ASYNC_SUPPORT + +static int i915_load_finished; +static DECLARE_WAIT_QUEUE_HEAD(i915_load_queue); + +#include +struct drm_i915_load_para { + struct pci_dev *dev; + struct pci_device_id *ent; +}; + +static int drm_i915_load_fn(void *arg) +{ + struct drm_i915_load_para *para = (struct drm_i915_load_para *)arg; + struct pci_dev *dev = para->dev; + struct pci_device_id *ent = para->ent; + int ret = i915_driver_load(dev, ent); + i915_load_finished = 1; + wake_up(&i915_load_queue); + return ret; +} + +int i915_driver_load_async(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_i915_load_para *para; + para = (struct drm_i915_load_para *)kmalloc(sizeof(struct drm_i915_load_para), GFP_KERNEL); + if (para == NULL) + return ERR_PTR(-ENOMEM); + para->dev = pdev; + para->ent = ent; + kthread_run(drm_i915_load_fn, (void *)para, "drm_i915_load_thread"); + return 0; +} +#endif + /** * i915_driver_load - setup chip and create an initial config * @pdev: PCI device @@ -1276,7 +1357,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) int ret; /* Enable nuclear pageflip on ILK+ */ - if (!i915.nuclear_pageflip && match_info->gen < 5) + if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) driver.driver_features &= ~DRIVER_ATOMIC; ret = -ENOMEM; @@ -1340,7 +1421,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) intel_runtime_pm_enable(dev_priv); - dev_priv->ipc_enabled = false; + intel_init_ipc(dev_priv); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) DRM_INFO("DRM_I915_DEBUG enabled\n"); @@ -1435,7 +1516,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); int ret; - +#ifdef CONFIG_DRM_I915_LOAD_ASYNC_SUPPORT + wait_event_interruptible(i915_load_queue, i915_load_finished == 1); +#endif ret = i915_gem_open(i915, file); if (ret) return ret; @@ -1470,6 +1553,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); + kfree(file_priv->process_name); kfree(file_priv); } @@ -1693,6 +1777,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_guc_resume(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) @@ -1805,6 +1890,8 @@ static int i915_drm_resume_early(struct drm_device *dev) if (IS_GEN9_LP(dev_priv) || !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) intel_power_domains_init_hw(dev_priv, true); + else + intel_display_set_init_power(dev_priv, true); i915_gem_sanitize(dev_priv); @@ -2591,6 +2678,8 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); } + intel_uncore_runtime_resume(dev_priv); + /* * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). 
@@ -2608,6 +2697,8 @@ static int intel_runtime_resume(struct device *kdev) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); + intel_enable_ipc(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); if (ret) @@ -2738,6 +2829,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GVTBUFFER, i915_gem_gvtbuffer_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_ACCESS_USERDATA, i915_gem_access_userdata_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver driver = { @@ -2752,6 +2845,7 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, + .gem_open_object = i915_gem_open_object, .gem_close_object = i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 18d9da53282b..7dd9df515c43 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -53,6 +53,7 @@ #include "i915_params.h" #include "i915_reg.h" +#include "i915_pvinfo.h" #include "i915_utils.h" #include "intel_uncore.h" @@ -93,7 +94,7 @@ #define I915_STATE_WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - if (!WARN(i915.verbose_state_checks, format)) \ + if (!WARN(i915_modparams.verbose_state_checks, format)) \ DRM_ERROR(format); \ unlikely(__ret_warn_on); \ }) @@ -576,6 +577,8 @@ struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; struct drm_file *file; + char *process_name; + struct pid *tgid; struct { spinlock_t lock; @@ -595,6 +598,8 @@ struct drm_i915_file_private { unsigned int bsd_engine; + struct bin_attribute *obj_attr; + /* Client can have a maximum of 3 contexts banned before * it is denied of creating new contexts. As one context * ban needs 4 consecutive hangs, and more if there is @@ -767,6 +772,7 @@ struct intel_csr { func(has_l3_dpf); \ func(has_llc); \ func(has_logical_ring_contexts); \ + func(has_logical_ring_preemption); \ func(has_overlay); \ func(has_pipe_cxsr); \ func(has_pooled_eu); \ @@ -780,7 +786,8 @@ struct intel_csr { func(cursor_needs_physical); \ func(hws_needs_physical); \ func(overlay_needs_physical); \ - func(supports_tv); + func(supports_tv); \ + func(has_ipc); struct sseu_dev_info { u8 slice_mask; @@ -842,6 +849,7 @@ struct intel_device_info { u8 gen; u16 gen_mask; enum intel_platform platform; + u8 gt; /* GT number, 0 if undefined */ u8 ring_mask; /* Rings supported by the HW */ u8 num_rings; #define DEFINE_FLAG(name) u8 name:1 @@ -956,6 +964,7 @@ struct i915_gpu_state { pid_t pid; u32 handle; u32 hw_id; + int priority; int ban_score; int active; int guilty; @@ -978,11 +987,13 @@ struct i915_gpu_state { long jiffies; pid_t pid; u32 context; + int priority; int ban_score; u32 seqno; u32 head; u32 tail; - } *requests, execlist[2]; + } *requests, execlist[EXECLIST_MAX_PORTS]; + unsigned int num_ports; struct drm_i915_error_waiter { char comm[TASK_COMM_LEN]; @@ -1324,14 +1335,6 @@ struct intel_gen6_power_mgmt { /* manual wa residency calculations */ struct intel_rps_ei ei; - - /* - * Protects RPS/RC6 register access and PCU communication. 
- * Must be taken after struct_mutex if nested. Note that - * this lock may be held for long periods of time when - * talking to hw - so only take it when talking to hw! - */ - struct mutex hw_lock; }; /* defined intel_pm.c */ @@ -1499,6 +1502,8 @@ struct i915_gem_mm { spinlock_t object_stat_lock; u64 object_memory; u32 object_count; + + size_t phys_mem_total; }; struct drm_i915_error_state_buf { @@ -2164,6 +2169,8 @@ struct drm_i915_private { const struct intel_device_info info; void __iomem *regs; + struct gvt_shared_page *shared_page; + spinlock_t shared_page_lock; struct intel_uncore uncore; @@ -2197,8 +2204,11 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; - struct i915_gem_context *kernel_context; struct intel_engine_cs *engine[I915_NUM_ENGINES]; + /* Context used internally to idle the GPU and setup initial state */ + struct i915_gem_context *kernel_context; + /* Context only to be used for injecting preemption commands */ + struct i915_gem_context *preempt_context; struct i915_vma *semaphore; struct drm_dma_handle *status_page_dmah; @@ -2235,6 +2245,8 @@ struct drm_i915_private { bool preserve_bios_swizzle; + struct kobject memtrack_kobj; + /* overlay */ struct intel_overlay *overlay; @@ -2351,6 +2363,14 @@ struct drm_i915_private { /* Cannot be determined by PCIID. You must always read a register. */ u32 edram_cap; + /* + * Protects RPS/RC6 register access and PCU communication. + * Must be taken after struct_mutex if nested. Note that + * this lock may be held for long periods of time when + * talking to hw - so only take it when talking to hw! + */ + struct mutex pcu_lock; + /* gen6+ rps state */ struct intel_gen6_power_mgmt rps; @@ -2392,7 +2412,13 @@ struct drm_i915_private { * This is limited in execlists to 21 bits. */ struct ida hw_ida; -#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ +#ifdef CONFIG_DRM_I915_GVT + /* In case of virtualization, 3-bits of vgt-id will be added to hw_id */ +#define SIZE_CONTEXT_HW_ID (18) +#else +#define SIZE_CONTEXT_HW_ID (21) +#endif +#define MAX_CONTEXT_HW_ID (1<drm.pdev->revision) #define GEN_FOREVER (0) + +#define INTEL_GEN_MASK(s, e) ( \ + BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ + BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ + GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ + (s) != GEN_FOREVER ? (s) - 1 : 0) \ +) + /* * Returns true if Gen is in inclusive range [Start, End]. * * Use GEN_FOREVER for unbound start and or end. */ -#define IS_GEN(dev_priv, s, e) ({ \ - unsigned int __s = (s), __e = (e); \ - BUILD_BUG_ON(!__builtin_constant_p(s)); \ - BUILD_BUG_ON(!__builtin_constant_p(e)); \ - if ((__s) != GEN_FOREVER) \ - __s = (s) - 1; \ - if ((__e) == GEN_FOREVER) \ - __e = BITS_PER_LONG - 1; \ - else \ - __e = (e) - 1; \ - !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \ -}) +#define IS_GEN(dev_priv, s, e) \ + (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) /* * Return true if revision is in range [since,until] inclusive. 
@@ -3012,9 +3036,9 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ ((dev_priv)->info.has_logical_ring_contexts) -#define USES_PPGTT(dev_priv) (i915.enable_ppgtt) -#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) -#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) +#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) +#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) +#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ @@ -3065,6 +3089,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) +#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc) + /* * For now, anything with a GuC requires uCode loading, and then supports * command submission once loaded. But these are logically independent @@ -3172,6 +3198,8 @@ extern const struct dev_pm_ops i915_pm_ops; extern int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); +extern int i915_driver_load_async(struct pci_dev *pdev, + const struct pci_device_id *ent); extern void i915_driver_unload(struct drm_device *dev); extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); @@ -3210,7 +3238,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) { unsigned long delay; - if (unlikely(!i915.enable_hangcheck)) + if (unlikely(!i915_modparams.enable_hangcheck)) return; /* Don't continually defer the hangcheck so that it is always run at @@ -3238,7 +3266,7 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) return dev_priv->gvt; } -static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) +static inline bool intel_vgpu_active(const struct drm_i915_private *dev_priv) { return dev_priv->vgpu.active; } @@ -3356,6 +3384,11 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, const void *data, size_t size); void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); +int i915_gem_open_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv); +void i915_gem_close_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv); + static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { @@ -3630,6 +3663,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); +int i915_gem_access_userdata_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); @@ -3697,6 +3733,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, void i915_oa_init_reg_state(struct intel_engine_cs *engine, struct i915_gem_context *ctx, uint32_t *reg_state); +int i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, @@ -3771,6 +3809,18 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, u32 
i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, unsigned int tiling, unsigned int stride); +int i915_get_pid_cmdline(struct task_struct *task, char *buffer); +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj); +void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj); +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj); +int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, bool is_map_gtt, + bool is_mutex_locked); +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev); +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid); + /* i915_debugfs.c */ #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); @@ -3788,11 +3838,16 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); +void i915_error_puts(struct drm_i915_error_state_buf *e, + const char *str); +bool i915_error_ok(struct drm_i915_error_state_buf *e); int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, const struct i915_gpu_state *gpu); int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, struct drm_i915_private *i915, size_t count, loff_t pos); +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *eb, + size_t count); static inline void i915_error_state_buf_release( struct drm_i915_error_state_buf *eb) { @@ -3867,6 +3922,10 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv); /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_i915_private *dev_priv); void i915_teardown_sysfs(struct drm_i915_private *dev_priv); +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); /* intel_lpe_audio.c */ int intel_lpe_audio_init(struct drm_i915_private *dev_priv); @@ -3881,6 +3940,7 @@ extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, unsigned int pin); +extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter); extern struct i2c_adapter * intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); @@ -4105,7 +4165,11 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ i915_reg_t reg) \ { \ - return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + if (!intel_vgpu_active(dev_priv) || !i915_modparams.enable_pvmmio || \ + likely(!in_mmio_read_trap_list((reg).reg))) \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + dev_priv->shared_page->reg_addr = i915_mmio_reg_offset(reg); \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(vgtif_reg(pv_mmio))); \ } #define __raw_write(x, s) \ @@ -4333,11 +4397,12 @@ int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap); -static inline bool -intel_engine_can_store_dword(struct intel_engine_cs *engine) +static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { - return __intel_engine_can_store_dword(INTEL_GEN(engine->i915), - engine->class); + if (INTEL_GEN(i915) >= 10) + return CNL_HWS_CSB_WRITE_INDEX; + else + return 
I915_HWS_CSB_WRITE_INDEX; } #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dc1faa49687d..8f7db477b1e2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -44,8 +44,964 @@ #include #include #include +#include +#include +#include +#include "../drm_internal.h" static void i915_gem_flush_free_objects(struct drm_i915_private *i915); +static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); + +struct per_file_obj_mem_info { + int num_obj; + int num_obj_shared; + int num_obj_private; + int num_obj_gtt_bound; + int num_obj_purged; + int num_obj_purgeable; + int num_obj_allocated; + int num_obj_fault_mappable; + int num_obj_stolen; + size_t gtt_space_allocated_shared; + size_t gtt_space_allocated_priv; + size_t phys_space_allocated_shared; + size_t phys_space_allocated_priv; + size_t phys_space_purgeable; + size_t phys_space_shared_proportion; + size_t fault_mappable_size; + size_t stolen_space_allocated; + char *process_name; +}; + +struct name_entry { + struct list_head head; + struct drm_hash_item hash_item; +}; + +struct pid_stat_entry { + struct list_head head; + struct list_head namefree; + struct drm_open_hash namelist; + struct per_file_obj_mem_info stats; + struct pid *tgid; + int pid_num; +}; + +struct drm_i915_obj_virt_addr { + struct list_head head; + unsigned long user_virt_addr; +}; + +struct drm_i915_obj_pid_info { + struct list_head head; + pid_t tgid; + int open_handle_count; + struct list_head virt_addr_head; +}; + +struct get_obj_stats_buf { + struct pid_stat_entry *entry; + struct drm_i915_error_state_buf *m; +}; + +#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) +#define err_puts(e, s) i915_error_puts(e, s) + +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) +{ + switch (i915_gem_object_get_tiling(obj)) { + default: + case I915_TILING_NONE: return " "; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + } +} + +/* + * If this mmput call is the last one, it will tear down the mmaps of the + * process and calls drm_gem_vm_close(), which leads deadlock on i915 mutex. + * Instead, asynchronously schedule mmput function here, to avoid recursive + * calls to acquire i915_mutex. 
+ */ +static void async_mmput_func(void *data, async_cookie_t cookie) +{ + struct mm_struct *mm = data; + mmput(mm); +} + +static void async_mmput(struct mm_struct *mm) +{ + async_schedule(async_mmput_func, mm); +} + +int i915_get_pid_cmdline(struct task_struct *task, char *buffer) +{ + int res = 0; + unsigned int len; + struct mm_struct *mm = get_task_mm(task); + + if (!mm) + goto out; + if (!mm->arg_end) + goto out_mm; + + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + res = access_process_vm(task, mm->arg_start, buffer, len, 0); + if (res < 0) { + async_mmput(mm); + return res; + } + + if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) + buffer[res-1] = '\0'; +out_mm: + async_mmput(mm); +out: + return 0; +} + +static int i915_obj_get_shmem_pages_alloced(struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->base.filp) { + struct inode *inode = file_inode(obj->base.filp); + struct shmem_inode_info *info = SHMEM_I(inode); + + if (!inode) + return 0; + spin_lock(&info->lock); + ret = inode->i_mapping->nrpages; + spin_unlock(&info->lock); + return ret; + } + return 0; +} + +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj) +{ + int found = 0; + struct drm_i915_obj_pid_info *entry; + pid_t current_tgid = task_tgid_nr(current); + + if (!i915_modparams.memtrack_debug) + return 0; + + mutex_lock(&obj->base.dev->struct_mutex); + + list_for_each_entry(entry, &obj->pid_info, head) { + if (entry->tgid == current_tgid) { + entry->open_handle_count++; + found = 1; + break; + } + } + if (found == 0) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + mutex_unlock(&obj->base.dev->struct_mutex); + return -ENOMEM; + } + entry->tgid = current_tgid; + entry->open_handle_count = 1; + INIT_LIST_HEAD(&entry->virt_addr_head); + list_add_tail(&entry->head, &obj->pid_info); + } + + mutex_unlock(&obj->base.dev->struct_mutex); + return 0; +} + +void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj) +{ + pid_t current_tgid = task_tgid_nr(current); + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + int found = 0; + + if (!i915_modparams.memtrack_debug) + return; + + mutex_lock(&obj->base.dev->struct_mutex); + + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + if (pid_entry->tgid == current_tgid) { + pid_entry->open_handle_count--; + found = 1; + if (pid_entry->open_handle_count == 0) { + list_for_each_entry_safe(virt_entry, + virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } + break; + } + } + mutex_unlock(&obj->base.dev->struct_mutex); + + if (found == 0) + DRM_DEBUG("Couldn't find matching tgid %d for obj %p\n", + current_tgid, obj); +} + +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj) +{ + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + list_for_each_entry_safe(virt_entry, + virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } +} + + int +i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, + bool is_map_gtt, + bool is_mutex_locked) +{ + struct drm_i915_obj_pid_info *pid_entry; + pid_t current_tgid = 
task_tgid_nr(current); + int ret = 0, found = 0; + + if (!i915_modparams.memtrack_debug) + return 0; + + if (is_map_gtt) + addr |= 1; + + if (!is_mutex_locked) { + ret = i915_mutex_lock_interruptible(obj->base.dev); + if (ret) + return ret; + } + + list_for_each_entry(pid_entry, &obj->pid_info, head) { + if (pid_entry->tgid == current_tgid) { + struct drm_i915_obj_virt_addr *virt_entry, *new_entry; + + list_for_each_entry(virt_entry, + &pid_entry->virt_addr_head, + head) { + if (virt_entry->user_virt_addr == addr) { + found = 1; + break; + } + } + if (found) + break; + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + goto out; + } + new_entry->user_virt_addr = addr; + list_add_tail(&new_entry->head, + &pid_entry->virt_addr_head); + break; + } + } + +out: + if (!is_mutex_locked) + mutex_unlock(&obj->base.dev->struct_mutex); + + return ret; +} + +static int i915_obj_virt_addr_is_invalid(struct drm_gem_object *obj, + struct pid *tgid, unsigned long addr) +{ + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + int locked, ret = 0; + + task = get_pid_task(tgid, PIDTYPE_PID); + if (task == NULL) { + DRM_DEBUG("null task for tgid=%d\n", pid_nr(tgid)); + return -EINVAL; + } + + mm = get_task_mm(task); + if (mm == NULL) { + DRM_DEBUG("null mm for tgid=%d\n", pid_nr(tgid)); + ret = -EINVAL; + goto out_task; + } + + locked = down_read_trylock(&mm->mmap_sem); + if (!locked) + goto out_mm; + + vma = find_vma(mm, addr); + if (vma) { + if (addr & 1) { /* mmap_gtt case */ + if (vma->vm_pgoff*PAGE_SIZE == (unsigned long) + drm_vma_node_offset_addr(&obj->vma_node)) + ret = 0; + else + ret = -EINVAL; + } else { /* mmap case */ + if (vma->vm_file == obj->filp) + ret = 0; + else + ret = -EINVAL; + } + } else + ret = -EINVAL; + + up_read(&mm->mmap_sem); + +out_mm: + async_mmput(mm); +out_task: + put_task_struct(task); + return ret; +} + +static void i915_obj_pidarray_validate(struct drm_gem_object *gem_obj) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = gem_obj->dev; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_file *file; + struct drm_i915_file_private *file_priv; + struct pid *tgid; + int pid_num, present; + + /* + * Run a sanity check on pid_array. All entries in pid_array should + * be subset of the the drm filelist pid entries. + */ + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + if (pid_next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? 
"Y" : "N", obj->base.name, + obj->base.handle_count); + break; + } + + present = 0; + list_for_each_entry(file, &dev->filelist, lhead) { + file_priv = file->driver_priv; + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + if (pid_num == pid_entry->tgid) { + present = 1; + break; + } + } + if (present == 0) { + DRM_DEBUG("stale_tgid=%d\n", pid_entry->tgid); + list_for_each_entry_safe(virt_entry, virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } else { + /* Validate the virtual address list */ + struct task_struct *task = + get_pid_task(tgid, PIDTYPE_PID); + if (task == NULL) + continue; + + list_for_each_entry_safe(virt_entry, virt_next, + &pid_entry->virt_addr_head, + head) { + if (i915_obj_virt_addr_is_invalid(gem_obj, tgid, + virt_entry->user_virt_addr)) { + DRM_DEBUG("stale_addr=%ld\n", + virt_entry->user_virt_addr); + list_del(&virt_entry->head); + kfree(virt_entry); + } + } + put_task_struct(task); + } + } +} + +static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj, + struct pid_stat_entry *pid_entry, + bool *found) +{ + struct drm_hash_item *hash_item; + int ret; + + ret = drm_ht_find_item(&pid_entry->namelist, + (unsigned long)&obj->base, &hash_item); + /* Not found, insert in hash */ + if (ret) { + struct name_entry *entry = + kzalloc(sizeof(*entry), GFP_NOWAIT); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + return -ENOMEM; + } + entry->hash_item.key = (unsigned long)&obj->base; + drm_ht_insert_item(&pid_entry->namelist, + &entry->hash_item); + list_add_tail(&entry->head, &pid_entry->namefree); + *found = false; + } else + *found = true; + + return 0; +} + +static int i915_obj_shared_count(struct drm_i915_gem_object *obj, + struct pid_stat_entry *pid_entry, + bool *discard) +{ + struct drm_i915_obj_pid_info *pid_info_entry; + int ret, obj_shared_count = 0; + + /* + * The object can be shared among different processes by either flink + * or dma-buf mechanism, leading to shared count more than 1. For the + * objects not shared , return the shared count as 1. + * In case of shared dma-buf objects, there's a possibility that these + * may be external to i915. Detect this condition through + * 'import_attach' field. 
+ */ + if (!obj->base.name && !obj->base.dma_buf) + return 1; + else if(obj->base.import_attach) { + /* not our GEM obj */ + *discard = true; + return 0; + } + + ret = i915_obj_find_insert_in_hash(obj, pid_entry, discard); + if (ret) + return ret; + + list_for_each_entry(pid_info_entry, &obj->pid_info, head) + obj_shared_count++; + + if (WARN_ON(obj_shared_count == 0)) + return -EINVAL; + + return obj_shared_count; +} + + static int +i915_describe_obj(struct get_obj_stats_buf *obj_stat_buf, + struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + struct drm_i915_obj_pid_info *pid_info_entry; + struct drm_i915_obj_virt_addr *virt_entry; + struct drm_i915_error_state_buf *m = obj_stat_buf->m; + struct pid_stat_entry *pid_entry = obj_stat_buf->entry; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + + bool discard = false; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (!discard && !obj->stolen && + (obj->mm.madv != __I915_MADV_PURGED) && + (i915_obj_get_shmem_pages_alloced(obj) != 0)) { + if (obj_shared_count > 1) + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + else + stats->phys_space_allocated_priv += + obj->base.size; + } + + err_printf(m, + "%p: %7zdK %s %s %s %s %s %s ", + &obj->base, + obj->base.size / 1024, + get_tiling_flag(obj), + obj->mm.dirty ? "Y" : "N", + (obj_shared_count > 1) ? "Y" : "N", + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", + (obj->pin_display) ? "Y" : "N"); + + if (obj->mm.madv == __I915_MADV_PURGED) + err_puts(m, " purged "); + else if (obj->mm.madv == I915_MADV_DONTNEED) + err_puts(m, " purgeable "); + else if (i915_obj_get_shmem_pages_alloced(obj) != 0) + err_puts(m, " allocated "); + else + err_puts(m, " "); + + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!i915_is_ggtt(vma->vm)) + err_puts(m, " PP "); + else + err_puts(m, " G "); + err_printf(m, " %08llx ", vma->node.start); + } + if (list_empty(&obj->vma_list)) + err_puts(m, " "); + + list_for_each_entry(pid_info_entry, &obj->pid_info, head) { + err_printf(m, " (%d: %d:", + pid_info_entry->tgid, + pid_info_entry->open_handle_count); + list_for_each_entry(virt_entry, + &pid_info_entry->virt_addr_head, head) { + if (virt_entry->user_virt_addr & 1) + err_printf(m, " %p", + (void *)(virt_entry->user_virt_addr & ~1)); + else + err_printf(m, " %p*", + (void *)virt_entry->user_virt_addr); + } + err_puts(m, ") "); + } + + err_puts(m, "\n"); + + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + + static int +i915_drm_gem_obj_info(int id, void *ptr, void *data) +{ + struct drm_i915_gem_object *obj = ptr; + struct get_obj_stats_buf *obj_stat_buf = data; + int ret; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? 
"Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + i915_obj_pidarray_validate(&obj->base); + ret = i915_describe_obj(obj_stat_buf, obj); + + return ret; +} + +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &o->vma_list, obj_link) + if (drm_mm_node_allocated(&vma->node)) + return true; + + return false; +} + + static int +i915_drm_gem_object_per_file_summary(int id, void *ptr, void *data) +{ + struct pid_stat_entry *pid_entry = data; + struct drm_i915_gem_object *obj = ptr; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + bool discard = false; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + + i915_obj_pidarray_validate(&obj->base); + + stats->num_obj++; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (discard) + return 0; + + if (obj_shared_count > 1) + stats->num_obj_shared++; + else + stats->num_obj_private++; + + if (i915_gem_obj_bound_any(obj)) { + stats->num_obj_gtt_bound++; + if (obj_shared_count > 1) + stats->gtt_space_allocated_shared += obj->base.size; + else + stats->gtt_space_allocated_priv += obj->base.size; + } + + if (obj->stolen) { + stats->num_obj_stolen++; + stats->stolen_space_allocated += obj->base.size; + } else if (obj->mm.madv == __I915_MADV_PURGED) { + stats->num_obj_purged++; + } else if (obj->mm.madv == I915_MADV_DONTNEED) { + stats->num_obj_purgeable++; + stats->num_obj_allocated++; + if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->phys_space_purgeable += obj->base.size; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } else + stats->phys_space_allocated_priv += + obj->base.size; + } else + WARN_ON(1); + } else if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->num_obj_allocated++; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } + else + stats->phys_space_allocated_priv += obj->base.size; + } + return 0; +} + + static int +__i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + struct drm_file *file; + struct drm_i915_private *dev_priv = dev->dev_private; + + struct name_entry *entry, *next; + struct pid_stat_entry *pid_entry, *temp_entry; + struct pid_stat_entry *new_pid_entry, *new_temp_entry; + struct list_head per_pid_stats, sorted_pid_stats; + int ret = 0; + size_t total_shared_prop_space = 0, total_priv_space = 0; + + INIT_LIST_HEAD(&per_pid_stats); + INIT_LIST_HEAD(&sorted_pid_stats); + + err_puts(m, + "\n\n pid Total Shared Priv Purgeable Alloced SharedPHYsize SharedPHYprop PrivPHYsize PurgeablePHYsize process\n"); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct pid *tgid; + struct drm_i915_file_private *file_priv = file->driver_priv; + int pid_num, found = 0; + + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + list_for_each_entry(pid_entry, &per_pid_stats, head) { + if (pid_entry->pid_num == pid_num) { + found = 1; + break; + } + } + + if (!found) 
{ + struct pid_stat_entry *new_entry = + kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + break; + } + new_entry->tgid = tgid; + new_entry->pid_num = pid_num; + ret = drm_ht_create(&new_entry->namelist, + DRM_MAGIC_HASH_ORDER); + if (ret) { + kfree(new_entry); + break; + } + + list_add_tail(&new_entry->head, &per_pid_stats); + INIT_LIST_HEAD(&new_entry->namefree); + new_entry->stats.process_name = file_priv->process_name; + pid_entry = new_entry; + } + + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_object_per_file_summary, pid_entry); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) { + if (list_empty(&sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, &sorted_pid_stats); + continue; + } + + list_for_each_entry_safe(new_pid_entry, new_temp_entry, + &sorted_pid_stats, head) { + int prev_space = + pid_entry->stats.phys_space_shared_proportion + + pid_entry->stats.phys_space_allocated_priv; + int new_space = + new_pid_entry-> + stats.phys_space_shared_proportion + + new_pid_entry->stats.phys_space_allocated_priv; + if (prev_space > new_space) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &new_pid_entry->head); + break; + } + if (list_is_last(&new_pid_entry->head, + &sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &sorted_pid_stats); + } + } + } + + list_for_each_entry_safe(pid_entry, temp_entry, + &sorted_pid_stats, head) { + struct task_struct *task = get_pid_task(pid_entry->tgid, + PIDTYPE_PID); + err_printf(m, + "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK %14zdK %s", + pid_entry->pid_num, + pid_entry->stats.num_obj, + pid_entry->stats.num_obj_shared, + pid_entry->stats.num_obj_private, + pid_entry->stats.num_obj_purgeable, + pid_entry->stats.num_obj_allocated, + pid_entry->stats.phys_space_allocated_shared/1024, + pid_entry->stats.phys_space_shared_proportion/1024, + pid_entry->stats.phys_space_allocated_priv/1024, + pid_entry->stats.phys_space_purgeable/1024, + pid_entry->stats.process_name); + + if (task == NULL) + err_puts(m, "*\n"); + else + err_puts(m, "\n"); + + total_shared_prop_space += + pid_entry->stats.phys_space_shared_proportion/1024; + total_priv_space += + pid_entry->stats.phys_space_allocated_priv/1024; + list_del(&pid_entry->head); + + list_for_each_entry_safe(entry, next, + &pid_entry->namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry->namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry->namelist); + kfree(pid_entry); + if (task) + put_task_struct(task); + } + + err_puts(m, + "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n"); + err_printf(m, + "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n", + total_shared_prop_space, total_priv_space); + + err_printf(m, "\nTotal used GFX Shmem Physical space %8zdK\n", + dev_priv->mm.phys_mem_total/1024); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + +#define NUM_SPACES 100 +#define INITIAL_SPACES_STR(x) #x +#define SPACES_STR(x) INITIAL_SPACES_STR(x) + + static int +__i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + struct drm_file *file; + struct drm_i915_file_private *file_priv_reqd = NULL; + int bytes_copy, ret = 0; + struct pid_stat_entry pid_entry; + struct name_entry *entry, *next; + + 
pid_entry.stats.phys_space_shared_proportion = 0; + pid_entry.stats.phys_space_allocated_priv = 0; + pid_entry.tgid = tgid; + pid_entry.pid_num = pid_nr(tgid); + ret = drm_ht_create(&pid_entry.namelist, DRM_MAGIC_HASH_ORDER); + if (ret) + return ret; + + INIT_LIST_HEAD(&pid_entry.namefree); + + /* + * Fill up initial few bytes with spaces, to insert summary data later + * on + */ + err_printf(m, "%"SPACES_STR(NUM_SPACES)"s\n", " "); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv = file->driver_priv; + struct get_obj_stats_buf obj_stat_buf; + + obj_stat_buf.entry = &pid_entry; + obj_stat_buf.m = m; + + if (file_priv->tgid != tgid) + continue; + + file_priv_reqd = file_priv; + err_puts(m, + "\n Obj Identifier Size Tiling Dirty Shared Vmap Stolen Mappable AllocState Global/PP GttOffset (PID: handle count: user virt addrs)\n"); + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_obj_info, &obj_stat_buf); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + if (file_priv_reqd) { + int space_remaining; + + /* Reset the bytes counter to buffer beginning */ + bytes_copy = m->bytes; + m->bytes = 0; + + err_printf(m, "\n PID GfxMem Process\n"); + err_printf(m, "%5d %8zdK ", pid_nr(file_priv_reqd->tgid), + (pid_entry.stats.phys_space_shared_proportion + + pid_entry.stats.phys_space_allocated_priv)/1024); + + space_remaining = NUM_SPACES - m->bytes - 1; + if (strlen(file_priv_reqd->process_name) > space_remaining) + file_priv_reqd->process_name[space_remaining] = '\0'; + + err_printf(m, "%s\n", file_priv_reqd->process_name); + + /* Reinstate the previous saved value of bytes counter */ + m->bytes = bytes_copy; + } else + WARN(1, "drm file corresponding to tgid:%d not found\n", + pid_nr(tgid)); + + list_for_each_entry_safe(entry, next, + &pid_entry.namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry.namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry.namelist); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + return 0; +} + +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + int ret = 0; + + /* + * Protect the access to global drm resources such as filelist. Protect + * against their removal under our noses, while in use. + */ + mutex_lock(&drm_global_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_unlock(&drm_global_mutex); + return ret; + } + + ret = __i915_get_drm_clients_info(m, dev); + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&drm_global_mutex); + + return ret; +} + +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + int ret = 0; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = __i915_gem_get_obj_info(m, dev, tgid); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static bool cpu_cache_is_coherent(struct drm_device *dev, + enum i915_cache_level level) +{ + return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE; +} static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { @@ -325,17 +1281,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) * must wait for all rendering to complete to the object (as unbinding * must anyway), and retire the requests. 
*/ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) return ret; - i915_gem_retire_requests(to_i915(obj->base.dev)); - while ((vma = list_first_entry_or_null(&obj->vma_list, struct i915_vma, obj_link))) { @@ -1675,6 +2624,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_object *obj; unsigned long addr; + int ret; if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; @@ -1720,6 +2670,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (IS_ERR((void *)addr)) return addr; + ret = i915_obj_insert_virt_addr(to_intel_bo(obj), addr, false, false); + if (ret) + return ret; + args->addr_ptr = (uint64_t) addr; return 0; @@ -1917,6 +2871,7 @@ int i915_gem_fault(struct vm_fault *vmf) (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, min_t(u64, vma->size, area->vm_end - area->vm_start), &ggtt->mappable); + ret = i915_obj_insert_virt_addr(obj, (unsigned long)area->vm_start, true, true); err_unpin: __i915_vma_unpin(vma); @@ -2156,6 +3111,17 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + + /* + * Mark the object as not having backing pages, as physical space + * returned back to kernel + */ + if (obj->has_backing_pages == 1) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } } /* Try to discard unwanted pages */ @@ -2432,6 +3398,13 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); + if (obj->has_backing_pages == 0) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total += obj->base.size; + obj->has_backing_pages = 1; + } + return st; err_sg: @@ -2779,13 +3752,15 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) static bool engine_stalled(struct intel_engine_cs *engine) { - if (!engine->hangcheck.stalled) - return false; + if (!intel_vgpu_active(engine->i915)) { + if (!engine->hangcheck.stalled) + return false; - /* Check for possible seqno movement after hang declaration */ - if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { - DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); - return false; + /* Check for possible seqno movement after hang declaration */ + if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { + DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); + return false; + } } return true; @@ -2819,8 +3794,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) * Turning off the engine->irq_tasklet until the reset is over * prevents the race. 
*/ - tasklet_kill(&engine->irq_tasklet); - tasklet_disable(&engine->irq_tasklet); + tasklet_kill(&engine->execlists.irq_tasklet); + tasklet_disable(&engine->execlists.irq_tasklet); if (engine->irq_seqno_barrier) engine->irq_seqno_barrier(engine); @@ -2999,7 +3974,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) { - tasklet_enable(&engine->irq_tasklet); + tasklet_enable(&engine->execlists.irq_tasklet); kthread_unpark(engine->breadcrumbs.signaler); } @@ -3031,9 +4006,6 @@ static void nop_submit_request(struct drm_i915_gem_request *request) static void engine_set_wedged(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *request; - unsigned long flags; - /* We need to be sure that no thread is running the old callback as * we install the nop handler (otherwise we would submit a request * to hardware that will never complete). In order to prevent this @@ -3043,40 +4015,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine) engine->submit_request = nop_submit_request; /* Mark all executing requests as skipped */ - spin_lock_irqsave(&engine->timeline->lock, flags); - list_for_each_entry(request, &engine->timeline->requests, link) - if (!i915_gem_request_completed(request)) - dma_fence_set_error(&request->fence, -EIO); - spin_unlock_irqrestore(&engine->timeline->lock, flags); - - /* - * Clear the execlists queue up before freeing the requests, as those - * are the ones that keep the context and ringbuffer backing objects - * pinned in place. - */ - - if (i915.enable_execlists) { - struct execlist_port *port = engine->execlist_port; - unsigned long flags; - unsigned int n; - - spin_lock_irqsave(&engine->timeline->lock, flags); - - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) - i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; - - spin_unlock_irqrestore(&engine->timeline->lock, flags); - - /* The port is checked prior to scheduling a tasklet, but - * just in case we have suspended the tasklet to do the - * wedging make sure that when it wakes, it decides there - * is no work to do by clearing the irq_posted bit. 
- */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - } + engine->cancel_requests(engine); /* Mark all pending requests as complete so that any concurrent * (lockless) lookup doesn't try and wait upon the request as we @@ -4295,6 +5234,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); + /* + * Mark the object as not having backing pages, as no allocation + * for it yet + */ + obj->has_backing_pages = 0; + INIT_LIST_HEAD(&obj->pid_info); + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } @@ -4404,6 +5350,26 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj) return atomic_long_read(&obj->base.filp->f_count) == 1; } +int +i915_gem_open_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + return i915_gem_obj_insert_pid(obj); +} + +#if 0 +void +i915_gem_close_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + i915_gem_obj_remove_pid(obj); +} +#endif + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { @@ -4448,6 +5414,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); + if (!obj->stolen && (obj->has_backing_pages == 1)) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } + i915_gem_obj_remove_all_pids(obj); + reservation_object_fini(&obj->__builtin_resv); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(i915, obj->base.size); @@ -4551,7 +5525,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * it may impact the display and we are uncertain about the stability * of the reset, so this could be applied to even earlier gen. */ - if (INTEL_GEN(i915) >= 5) { + if (INTEL_GEN(i915) >= 5 && !intel_vgpu_active(i915)) { int reset = intel_gpu_reset(i915, ALL_ENGINES); WARN_ON(reset && reset != -ENODEV); } @@ -4786,7 +5760,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) return false; /* TODO: make semaphores and Execlists play nicely together */ - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return false; if (value >= 0) @@ -4807,7 +5781,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { dev_priv->gt.resume = intel_legacy_submission_resume; dev_priv->gt.cleanup_engine = intel_engine_cleanup; } else { @@ -5053,6 +6027,9 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_request *request; + i915_gem_remove_sysfs_file_entry(dev, file); + put_pid(file_priv->tgid); + /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
@@ -5068,8 +6045,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) struct drm_i915_file_private *file_priv; int ret; - DRM_DEBUG("\n"); - file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; @@ -5078,14 +6053,47 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) file_priv->dev_priv = i915; file_priv->file = file; + rcu_read_lock(); + file_priv->tgid = get_pid(find_vpid(task_tgid_nr(current))); + rcu_read_unlock(); + + file_priv->process_name = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!file_priv->process_name) { + ret = -ENOMEM; + goto out_free_file; + } + + ret = i915_get_pid_cmdline(current, file_priv->process_name); + if (ret) + goto out_free_name; + spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); file_priv->bsd_engine = -1; + intel_runtime_pm_get(i915); ret = i915_gem_context_open(i915, file); - if (ret) - kfree(file_priv); + + if (ret) { + intel_runtime_pm_put(i915); + goto out_free_name; + } + intel_runtime_pm_put(i915); + + ret = i915_gem_create_sysfs_file_entry(&i915->drm, file); + if (ret) { + i915_gem_context_close(file); + goto out_free_name; + } + + return 0; + +out_free_name: + kfree(file_priv->process_name); +out_free_file: + put_pid(file_priv->tgid); + kfree(file_priv); return ret; } @@ -5174,6 +6182,39 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, return ERR_PTR(err); } +/** + * i915_gem_access_userdata_ioctl -Reads/writes userdata for the object + * @dev: DRM device + * @data: struct drm_i915_gem_access_userdata + * @file: GEM object info + * + * Set/Get 32-bit private user defined data stored with a given GEM object. + */ +int +i915_gem_access_userdata_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_access_userdata *args = data; + struct drm_i915_gem_object *obj; + + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + mutex_lock(&dev->struct_mutex); + + if (args->write) + obj->userdata = args->userdata; + else + args->userdata = obj->userdata; + + mutex_unlock(&dev->struct_mutex); + + i915_gem_object_put(obj); + + return 0; +} + struct scatterlist * i915_gem_object_get_sg(struct drm_i915_gem_object *obj, unsigned int n, diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8afd2ce59b8d..1895687ca16d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -107,14 +107,9 @@ static void lut_close(struct i915_gem_context *ctx) rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); - struct drm_i915_gem_object *obj = vma->obj; radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); - - if (!i915_vma_is_ggtt(vma)) - i915_vma_close(vma); - - __i915_gem_object_release_unless_active(obj); + __i915_gem_object_release_unless_active(vma->obj); } rcu_read_unlock(); } @@ -146,7 +141,11 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + if (intel_vgpu_active(ctx->i915)) + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id & ~(0x7 << SIZE_CONTEXT_HW_ID)); + else + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + kfree_rcu(ctx, rcu); } @@ -200,6 +199,11 @@ static void context_close(struct i915_gem_context *ctx) { i915_gem_context_set_closed(ctx); + /* + * The LUT uses the VMA as a backpointer 
to unref the object, + * so we need to clear the LUT before we close all the VMA (inside + * the ppgtt). + */ lut_close(ctx); if (ctx->ppgtt) i915_ppgtt_close(&ctx->ppgtt->base); @@ -316,7 +320,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, * present or not in use we still need a small bias as ring wraparound * at offset 0 sometimes hangs. No idea why. */ - if (HAS_GUC(dev_priv) && i915.enable_guc_loading) + if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) ctx->ggtt_offset_bias = GUC_WOPCM_TOP; else ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; @@ -409,7 +413,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) i915_gem_context_set_closed(ctx); /* not user accessible */ i915_gem_context_clear_bannable(ctx); i915_gem_context_set_force_single_submission(ctx); - if (!i915.enable_guc_submission) + if (!i915_modparams.enable_guc_submission) ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); @@ -418,14 +422,43 @@ i915_gem_context_create_gvt(struct drm_device *dev) return ctx; } +static struct i915_gem_context * +create_kernel_context(struct drm_i915_private *i915, int prio) +{ + struct i915_gem_context *ctx; + + ctx = i915_gem_create_context(i915, NULL); + if (IS_ERR(ctx)) + return ctx; + + i915_gem_context_clear_bannable(ctx); + ctx->priority = prio; + ctx->ring_size = PAGE_SIZE; + + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + return ctx; +} + +static void +destroy_kernel_context(struct i915_gem_context **ctxp) +{ + struct i915_gem_context *ctx; + + /* Keep the context ref so that we can free it immediately ourselves */ + ctx = i915_gem_context_get(fetch_and_zero(ctxp)); + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + context_close(ctx); + i915_gem_context_free(ctx); +} + int i915_gem_contexts_init(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; + int err; - /* Init should only be called once per module load. Eventually the - * restriction on the context_disabled check can be loosened. */ - if (WARN_ON(dev_priv->kernel_context)) - return 0; + GEM_BUG_ON(dev_priv->kernel_context); INIT_LIST_HEAD(&dev_priv->contexts.list); INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); @@ -433,7 +466,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv) && HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; } @@ -443,28 +476,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); ida_init(&dev_priv->contexts.hw_ida); - ctx = i915_gem_create_context(dev_priv, NULL); + /* lowest priority; idle task */ + ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN); if (IS_ERR(ctx)) { - DRM_ERROR("Failed to create default global context (error %ld)\n", - PTR_ERR(ctx)); - return PTR_ERR(ctx); + DRM_ERROR("Failed to create default global context\n"); + err = PTR_ERR(ctx); + goto err; } - - /* For easy recognisablity, we want the kernel context to be 0 and then + /* + * For easy recognisablity, we want the kernel context to be 0 and then * all user contexts will have non-zero hw_id. 
*/ GEM_BUG_ON(ctx->hw_id); - - i915_gem_context_clear_bannable(ctx); - ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */ dev_priv->kernel_context = ctx; - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + /* highest priority; preempting task */ + ctx = create_kernel_context(dev_priv, INT_MAX); + if (IS_ERR(ctx)) { + DRM_ERROR("Failed to create default preempt context\n"); + err = PTR_ERR(ctx); + goto err_kernel_context; + } + dev_priv->preempt_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->engine[RCS]->context_size ? "logical" : "fake"); return 0; + +err_kernel_context: + destroy_kernel_context(&dev_priv->kernel_context); +err: + return err; } void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) @@ -485,7 +528,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) } /* Force the GPU state to be restored on enabling */ - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { struct i915_gem_context *ctx; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { @@ -509,15 +552,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) void i915_gem_contexts_fini(struct drm_i915_private *i915) { - struct i915_gem_context *ctx; - lockdep_assert_held(&i915->drm.struct_mutex); - /* Keep the context so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - context_close(ctx); - i915_gem_context_free(ctx); + destroy_kernel_context(&i915->preempt_context); + destroy_kernel_context(&i915->kernel_context); /* Must free all deferred contexts (via flush_workqueue) first */ ida_destroy(&i915->contexts.hw_ida); @@ -570,7 +608,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags) enum intel_engine_id id; const int num_rings = /* Use an extended w/a on gen7 if signalling from other rings */ - (i915.semaphores && INTEL_GEN(dev_priv) == 7) ? + (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ? 
INTEL_INFO(dev_priv)->num_rings - 1 : 0; int len; @@ -839,7 +877,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; lockdep_assert_held(&req->i915->drm.struct_mutex); - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return 0; if (!req->ctx->engine[engine->id].state) { @@ -1038,6 +1076,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_BANNABLE: args->value = i915_gem_context_is_bannable(ctx); break; + case I915_CONTEXT_PARAM_PRIORITY: + args->value = ctx->priority; + break; default: ret = -EINVAL; break; @@ -1093,6 +1134,26 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, else i915_gem_context_clear_bannable(ctx); break; + + case I915_CONTEXT_PARAM_PRIORITY: + { + int priority = args->value; + + if (args->size) + ret = -EINVAL; + else if (!to_i915(dev)->engine[RCS]->schedule) + ret = -ENODEV; + else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || + priority < I915_CONTEXT_MIN_USER_PRIORITY) + ret = -EINVAL; + else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && + !capable(CAP_SYS_NICE)) + ret = -EPERM; + else + ctx->priority = priority; + } + break; + default: ret = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 83876a1c8d98..e344eac61933 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -268,6 +268,11 @@ static inline u64 gen8_noncanonical_addr(u64 address) return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); } +static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) +{ + return eb->engine->needs_cmd_parser && eb->batch_len; +} + static int eb_create(struct i915_execbuffer *eb) { if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { @@ -499,6 +504,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) list_add_tail(&vma->exec_link, &eb->unbound); if (drm_mm_node_allocated(&vma->node)) err = i915_vma_unbind(vma); + if (unlikely(err)) + vma->exec_flags = NULL; } return err; } @@ -1163,6 +1170,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (unlikely(!cache->rq)) { int err; + /* If we need to copy for the cmdparser, we will stall anyway */ + if (eb_use_cmdparser(eb)) + return ERR_PTR(-EWOULDBLOCK); + + if (!intel_engine_can_store_dword(eb->engine)) + return ERR_PTR(-ENODEV); + err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); @@ -1187,9 +1201,7 @@ relocate_entry(struct i915_vma *vma, if (!eb->reloc_cache.vaddr && (DBG_FORCE_RELOC == FORCE_GPU_RELOC || - !reservation_object_test_signaled_rcu(vma->resv, true)) && - __intel_engine_can_store_dword(eb->reloc_cache.gen, - eb->engine->class)) { + !reservation_object_test_signaled_rcu(vma->resv, true))) { const unsigned int gen = eb->reloc_cache.gen; unsigned int len; u32 *batch; @@ -1581,7 +1593,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb) const unsigned int count = eb->buffer_count; unsigned int i; - if (unlikely(i915.prefault_disable)) + if (unlikely(i915_modparams.prefault_disable)) return 0; for (i = 0; i < count; i++) { @@ -2303,7 +2315,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if (eb.engine->needs_cmd_parser && eb.batch_len) { + if (eb_use_cmdparser(&eb)) { struct i915_vma *vma; vma = eb_parse(&eb, drm_is_current_master(file)); @@ -2408,7 +2420,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (out_fence) { if (err == 0) 
{ fd_install(out_fence_fd, out_fence->file); - args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ + args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ args->rsvd2 |= (u64)out_fence_fd << 32; out_fence_fd = -1; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ad524cb0f6fc..9111019698c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -42,6 +42,10 @@ #include "intel_drv.h" #include "intel_frontbuffer.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM) /** @@ -180,7 +184,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, return 0; } - if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) { + if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) { if (has_full_48bit_ppgtt) return 3; @@ -1878,12 +1882,12 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv) * called on driver load and after a GPU reset, so you can place * workarounds here even if they get overwritten by GPU reset. */ - /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */ if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); else if (IS_CHERRYVIEW(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_BC(dev_priv)) + else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); else if (IS_GEN9_LP(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); @@ -1896,7 +1900,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) /* In the case of execlists, PPGTT is enabled by the context descriptor * and the PDPs are contained within the context itself. We don't * need to do anything here. 
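The I915_CONTEXT_PARAM_PRIORITY handling added to the getparam/setparam ioctls above rejects a non-zero args->size, requires a scheduler-capable engine, bounds the value by the user priority limits, and demands CAP_SYS_NICE to raise a context above the default. A minimal userspace sketch, assuming the patched include/uapi/drm/i915_drm.h exports I915_CONTEXT_PARAM_PRIORITY and the I915_CONTEXT_*_USER_PRIORITY bounds:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Set the scheduling priority of an existing GEM context.  'prio' must lie
 * within [I915_CONTEXT_MIN_USER_PRIORITY, I915_CONTEXT_MAX_USER_PRIORITY];
 * anything above I915_CONTEXT_DEFAULT_PRIORITY needs CAP_SYS_NICE. */
static int gem_context_set_priority(int drm_fd, uint32_t ctx_id, int prio)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,				/* must be 0 for this param */
		.param = I915_CONTEXT_PARAM_PRIORITY,
		.value = (uint64_t)(int64_t)prio,	/* sign-extended into the u64 */
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}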
*/ - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return 0; if (!USES_PPGTT(dev_priv)) @@ -2202,7 +2206,15 @@ static int bxt_vtd_ggtt_insert_page__cb(void *_arg) { struct insert_page *arg = _arg; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_pause_user_domains(arg->vm->i915); +#endif gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_unpause_user_domains(arg->vm->i915); +#endif bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2229,7 +2241,15 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_pause_user_domains(arg->vm->i915); +#endif gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_unpause_user_domains(arg->vm->i915); +#endif bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2255,7 +2275,15 @@ static int bxt_vtd_ggtt_clear_range__cb(void *_arg) { struct clear_range *arg = _arg; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_pause_user_domains(arg->vm->i915); +#endif gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_unpause_user_domains(arg->vm->i915); +#endif bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2558,17 +2586,20 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) if (ret) return ret; - /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); - ggtt->base.clear_range(&ggtt->base, hole_start, - hole_end - hole_start); - } + if (!intel_vgpu_active(dev_priv)) { + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, + hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); + ggtt->base.clear_range(&ggtt->base, hole_start, + hole_end - hole_start); + } - /* And finally clear the reserved guard page */ - ggtt->base.clear_range(&ggtt->base, - ggtt->base.total - PAGE_SIZE, PAGE_SIZE); + /* And finally clear the reserved guard page */ + ggtt->base.clear_range(&ggtt->base, + ggtt->base.total - PAGE_SIZE, PAGE_SIZE); + } if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { ret = i915_gem_init_aliasing_ppgtt(dev_priv); @@ -3014,7 +3045,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) * currently don't have any bits spare to pass in this upper * restriction! 
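The three bxt_vtd_ggtt_*__cb hunks above repeat the same CONFIG_DRM_I915_GVT pause/unpause guard around each GGTT operation. The snippet below is not part of the patch, only a sketch of how that guard could be kept in one place, reusing intel_gvt_active() and the gvt_pause_user_domains()/gvt_unpause_user_domains() calls the hunks already rely on:

static inline void gvt_ggtt_pause(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
	if (intel_gvt_active(i915))
		gvt_pause_user_domains(i915);
#endif
}

static inline void gvt_ggtt_unpause(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
	if (intel_gvt_active(i915))
		gvt_unpause_user_domains(i915);
#endif
}

/* e.g. bxt_vtd_ggtt_insert_page__cb() would then read:
 *	gvt_ggtt_pause(arg->vm->i915);
 *	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
 *	gvt_ggtt_unpause(arg->vm->i915);
 *	bxt_vtd_ggtt_wa(arg->vm);
 */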
*/ - if (HAS_GUC(dev_priv) && i915.enable_guc_loading) { + if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) { ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); } diff --git a/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c new file mode 100644 index 000000000000..05af0a0a74f1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c @@ -0,0 +1,291 @@ +/* + * Copyright © 2012 - 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include + +#include "gvt/fb_decoder.h" + +static struct sg_table * +i915_gem_gvtbuffer_get_pages(struct drm_i915_gem_object *obj) +{ + BUG(); + return ERR_PTR(-EINVAL); +} + +static void i915_gem_gvtbuffer_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + /* like stolen memory, this should only be called during free + * after clearing pin count. 
+ */ + sg_free_table(pages); + kfree(pages); +} + +static void +i915_gem_gvtbuffer_release(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + +static const struct drm_i915_gem_object_ops i915_gem_gvtbuffer_ops = { + .get_pages = i915_gem_gvtbuffer_get_pages, + .put_pages = i915_gem_gvtbuffer_put_pages, + .release = i915_gem_gvtbuffer_release, +}; + +#define GEN8_DECODE_PTE(pte) \ + ((dma_addr_t)(((((u64)pte) >> 12) & 0x7ffffffULL) << 12)) + +#define GEN7_DECODE_PTE(pte) \ + ((dma_addr_t)(((((u64)pte) & 0x7f0) << 28) | (u64)(pte & 0xfffff000))) + +static struct sg_table * +i915_create_sg_pages_for_gvtbuffer(struct drm_device *dev, + u32 start, u32 num_pages) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct sg_table *st; + struct scatterlist *sg; + int i; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return NULL; + + if (sg_alloc_table(st, num_pages, GFP_KERNEL)) { + kfree(st); + return NULL; + } + + if (INTEL_INFO(dev_priv)->gen >= 8) { + gen8_pte_t __iomem *gtt_entries = + (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + + (start >> PAGE_SHIFT); + for_each_sg(st->sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = + GEN8_DECODE_PTE(readq(&gtt_entries[i])); + sg_dma_len(sg) = PAGE_SIZE; + } + } else { + gen6_pte_t __iomem *gtt_entries = + (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + + (start >> PAGE_SHIFT); + for_each_sg(st->sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = + GEN7_DECODE_PTE(readq(&gtt_entries[i])); + sg_dma_len(sg) = PAGE_SIZE; + } + } + + return st; +} + +struct drm_i915_gem_object * +i915_gem_object_create_gvtbuffer(struct drm_device *dev, + u32 start, u32 num_pages) +{ + struct drm_i915_gem_object *obj; + obj = i915_gem_object_alloc(to_i915(dev)); + if (obj == NULL) + return NULL; + + drm_gem_private_object_init(dev, &obj->base, num_pages << PAGE_SHIFT); + i915_gem_object_init(obj, &i915_gem_gvtbuffer_ops); + + obj->mm.pages = i915_create_sg_pages_for_gvtbuffer(dev, start, num_pages); + if (obj->mm.pages == NULL) { + i915_gem_object_free(obj); + return NULL; + } + + if (i915_gem_object_pin_pages(obj)) + printk(KERN_ERR "%s:%d> Pin pages failed!\n", __func__, __LINE__); + obj->cache_level = I915_CACHE_L3_LLC; + + DRM_DEBUG_DRIVER("GVT_GEM: backing store base = 0x%x pages = 0x%x\n", + start, num_pages); + return obj; +} + +static int gvt_decode_information(struct drm_device *dev, + struct drm_i915_gem_gvtbuffer *args) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct gvt_fb_format fb; + struct gvt_primary_plane_format *p; + struct gvt_cursor_plane_format *c; + struct gvt_pipe_format *pipe; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + u32 id = args->id; + + if (gvt_decode_fb_format(dev_priv->gvt, id, &fb)) + return -EINVAL; +#else + return -EINVAL; +#endif + + pipe = ((args->pipe_id >= I915_MAX_PIPES) ? 
+ NULL : &fb.pipes[args->pipe_id]); + + if (!pipe || !pipe->primary.enabled) { + DRM_DEBUG_DRIVER("GVT_GEM: Invalid pipe_id: %d\n", + args->pipe_id); + return -EINVAL; + } + + if ((args->plane_id) == I915_GVT_PLANE_PRIMARY) { + p = &pipe->primary; + args->enabled = p->enabled; + args->x_offset = p->x_offset; + args->y_offset = p->y_offset; + args->start = p->base; + args->width = p->width; + args->height = p->height; + args->stride = p->stride; + args->bpp = p->bpp; + args->hw_format = p->hw_format; + args->drm_format = p->drm_format; + args->tiled = p->tiled; + } else if ((args->plane_id) == I915_GVT_PLANE_CURSOR) { + c = &pipe->cursor; + args->enabled = c->enabled; + args->x_offset = c->x_hot; + args->y_offset = c->y_hot; + args->x_pos = c->x_pos; + args->y_pos = c->y_pos; + args->start = c->base; + args->width = c->width; + args->height = c->height; + args->stride = c->width * (c->bpp / 8); + args->bpp = c->bpp; + args->tiled = 0; + } else { + DRM_DEBUG_DRIVER("GVT_GEM: Invalid plaine_id: %d\n", + args->plane_id); + return -EINVAL; + } + + args->size = (((args->width * args->height * args->bpp) / 8) + + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + + if (args->start & (PAGE_SIZE - 1)) { + DRM_DEBUG_DRIVER("GVT_GEM: Not aligned fb start address: " + "0x%x\n", args->start); + return -EINVAL; + } + + if (((args->start >> PAGE_SHIFT) + args->size) > + ggtt_total_entries(&dev_priv->ggtt)) { + DRM_DEBUG_DRIVER("GVT: Invalid GTT offset or size\n"); + return -EINVAL; + } + return 0; +} + +/** + * Creates a new mm object that wraps some user memory. + */ +int +i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_gvtbuffer *args = data; + struct drm_i915_gem_object *obj; + u32 handle; + int ret = 0; + + if (INTEL_INFO(dev_priv)->gen < 7) + return -EPERM; + + if (args->flags & I915_GVTBUFFER_CHECK_CAPABILITY) + return 0; +#if 0 + if (!gvt_check_host()) + return -EPERM; +#endif + /* if args->start != 0 do not decode, but use it as ggtt offset*/ + if (args->start == 0) { + ret = gvt_decode_information(dev, args); + if (ret) + return ret; + } + + if (ret) + return ret; + + if (args->flags & I915_GVTBUFFER_QUERY_ONLY) + return 0; + + obj = i915_gem_object_create_gvtbuffer(dev, args->start, args->size); + if (!obj) { + DRM_DEBUG_DRIVER("GVT_GEM: Failed to create gem object" + " for VM FB!\n"); + return -EINVAL; + } + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { + unsigned int tiling_mode = I915_TILING_NONE; + unsigned int stride = 0; + + switch (args->tiled << 10) { + case PLANE_CTL_TILED_LINEAR: + /* Default valid value */ + break; + case PLANE_CTL_TILED_X: + tiling_mode = I915_TILING_X; + stride = args->stride; + break; + case PLANE_CTL_TILED_Y: + tiling_mode = I915_TILING_Y; + stride = args->stride; + break; + default: + DRM_ERROR("gvt: tiling mode %d not supported\n", args->tiled); + } + obj->tiling_and_stride = tiling_mode | stride; + } else { + obj->tiling_and_stride = (args->tiled ? I915_TILING_X : I915_TILING_NONE) | + (args->tiled ? 
args->stride : 0); + } + + ret = drm_gem_handle_create(file, &obj->base, &handle); + + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + + if (ret) + return ret; + + args->handle = handle; + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index c30d8f808185..367a578b83b6 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -147,6 +147,8 @@ struct drm_i915_gem_object { #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) unsigned int cache_dirty:1; + unsigned int has_backing_pages:1; + atomic_t frontbuffer_bits; unsigned int frontbuffer_ggtt_origin; /* write once */ struct i915_gem_active frontbuffer_write; @@ -213,6 +215,9 @@ struct drm_i915_gem_object { /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; + /** Object userdata */ + u32 userdata; + union { struct i915_gem_userptr { uintptr_t ptr; @@ -226,6 +231,8 @@ struct drm_i915_gem_object { unsigned long scratch; }; + struct list_head pid_info; + /** for phys allocated objects */ struct drm_dma_handle *phys_handle; diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 813a3b546d6e..b7dce47d6ed9 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -186,7 +186,7 @@ i915_priotree_init(struct i915_priotree *pt) INIT_LIST_HEAD(&pt->signalers_list); INIT_LIST_HEAD(&pt->waiters_list); INIT_LIST_HEAD(&pt->link); - pt->priority = INT_MIN; + pt->priority = I915_PRIORITY_INVALID; } static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) @@ -587,6 +587,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, lockdep_assert_held(&dev_priv->drm.struct_mutex); + /* + * Preempt contexts are reserved for exclusive use to inject a + * preemption context switch. They are never to be used for any trivial + * request! + */ + GEM_BUG_ON(ctx == dev_priv->preempt_context); + /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. 
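The GEN8_DECODE_PTE()/GEN7_DECODE_PTE() helpers in i915_gem_gvtbuffer.c above turn a GGTT PTE that the guest has already programmed back into a DMA address: on gen8 the page frame sits in bits 38:12, so the macro masks 27 bits after shifting the flag bits away. A small stand-alone worked example with an illustrative PTE value:

#include <stdint.h>

/* Gen8 GGTT PTE layout: flag bits in the low 12 bits, page frame in
 * bits 38:12 (27 bits, hence the 0x7ffffff mask after the shift). */
static uint64_t gen8_decode_pte(uint64_t pte)
{
	return ((pte >> 12) & 0x7ffffffULL) << 12;	/* same as GEN8_DECODE_PTE() */
}

/* Example: gen8_decode_pte(0x0000000123456007ULL) == 0x123456000, i.e. the
 * original 4 KiB-aligned address with the valid/cache flag bits stripped. */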
*/ @@ -906,6 +913,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) lockdep_assert_held(&request->i915->drm.struct_mutex); trace_i915_gem_request_add(request); + trace_i915_gem_request_add_domain(request); /* Make sure that no request gazumped us - if it was allocated after * our i915_gem_request_alloc() and called __i915_add_request() before diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 49a4c8994ff0..e1292025b501 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -30,6 +30,8 @@ #include "i915_gem.h" #include "i915_sw_fence.h" +#include + struct drm_file; struct drm_i915_gem_object; struct drm_i915_gem_request; @@ -69,9 +71,14 @@ struct i915_priotree { struct list_head waiters_list; /* those after us, they depend upon us */ struct list_head link; int priority; -#define I915_PRIORITY_MAX 1024 -#define I915_PRIORITY_NORMAL 0 -#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX) +}; + +enum { + I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, + I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, + I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + + I915_PRIORITY_INVALID = INT_MIN }; struct i915_gem_capture_list { diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0c779671fe2d..a653d573cbd6 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -78,6 +78,11 @@ static bool __i915_error_ok(struct drm_i915_error_state_buf *e) return true; } +bool i915_error_ok(struct drm_i915_error_state_buf *e) +{ + return __i915_error_ok(e); +} + static bool __i915_error_seek(struct drm_i915_error_state_buf *e, unsigned len) { @@ -149,7 +154,7 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, __i915_error_advance(e, len); } -static void i915_error_puts(struct drm_i915_error_state_buf *e, +void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) { unsigned len; @@ -377,9 +382,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n", + err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n", prefix, erq->pid, erq->ban_score, - erq->context, erq->seqno, + erq->context, erq->seqno, erq->priority, jiffies_to_msecs(jiffies - erq->jiffies), erq->head, erq->tail); } @@ -388,14 +393,16 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct drm_i915_error_context *ctx) { - err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n", + err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n", header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id, - ctx->ban_score, ctx->guilty, ctx->active); + ctx->priority, ctx->ban_score, ctx->guilty, ctx->active); } static void error_print_engine(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { + int n; + err_printf(m, "%s command stream:\n", engine_str(ee->engine_id)); err_printf(m, " START: 0x%08x\n", ee->start); err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); @@ -465,8 +472,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); err_printf(m, " engine reset count: %u\n", ee->reset_count); - 
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); - error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); + for (n = 0; n < ee->num_ports; n++) { + err_printf(m, " ELSP[%d]:", n); + error_print_request(m, " ", &ee->execlist[n]); + } + error_print_context(m, " Active context: ", &ee->context); } @@ -553,13 +563,13 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m, const void *x) { if (!__builtin_strcmp(type, "bool")) - err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x)); + err_printf(m, "i915_modparams.%s=%s\n", name, yesno(*(const bool *)x)); else if (!__builtin_strcmp(type, "int")) - err_printf(m, "i915.%s=%d\n", name, *(const int *)x); + err_printf(m, "i915_modparams.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) - err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x); + err_printf(m, "i915_modparams.%s=%u\n", name, *(const unsigned int *)x); else if (!__builtin_strcmp(type, "char *")) - err_printf(m, "i915.%s=%s\n", name, *(const char **)x); + err_printf(m, "i915_modparams.%s=%s\n", name, *(const char **)x); else BUILD_BUG(); } @@ -807,6 +817,20 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, return 0; } +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count) +{ + memset(ebuf, 0, sizeof(*ebuf)); + + ebuf->buf = kmalloc(count, GFP_KERNEL); + + if (ebuf->buf == NULL) + return -ENOMEM; + + ebuf->size = count; + return 0; +} + static void i915_error_object_free(struct drm_i915_error_object *obj) { int page; @@ -1266,6 +1290,7 @@ static void record_request(struct drm_i915_gem_request *request, struct drm_i915_error_request *erq) { erq->context = request->ctx->hw_id; + erq->priority = request->priotree.priority; erq->ban_score = atomic_read(&request->ctx->ban_score); erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; @@ -1327,17 +1352,19 @@ static void engine_record_requests(struct intel_engine_cs *engine, static void error_record_engine_execlists(struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) { - const struct execlist_port *port = engine->execlist_port; + const struct intel_engine_execlists * const execlists = &engine->execlists; unsigned int n; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { - struct drm_i915_gem_request *rq = port_request(&port[n]); + for (n = 0; n < execlists_num_ports(execlists); n++) { + struct drm_i915_gem_request *rq = port_request(&execlists->port[n]); if (!rq) break; record_request(rq, &ee->execlist[n]); } + + ee->num_ports = n; } static void record_context(struct drm_i915_error_context *e, @@ -1357,6 +1384,7 @@ static void record_context(struct drm_i915_error_context *e, e->handle = ctx->user_handle; e->hw_id = ctx->hw_id; + e->priority = ctx->priority; e->ban_score = atomic_read(&ctx->ban_score); e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); @@ -1554,7 +1582,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv, struct i915_gpu_state *error) { /* Capturing log buf contents won't be useful if logging was disabled */ - if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0)) + if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0)) return; error->guc_log = i915_error_object_create(dev_priv, @@ -1696,7 +1724,7 @@ static int capture(void *data) ktime_to_timeval(ktime_sub(ktime_get(), error->i915->gt.last_init_time)); - error->params = i915; + error->params = i915_modparams; #define DUP(T, 
x) dup_param(#T, &error->params.x); I915_PARAMS_FOR_EACH(DUP); #undef DUP @@ -1751,7 +1779,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, struct i915_gpu_state *error; unsigned long flags; - if (!i915.error_capture) + if (!i915_modparams.error_capture) return; if (READ_ONCE(dev_priv->gpu_error.first_error)) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 48a1e9349a2c..3afb22c72158 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -192,13 +192,12 @@ static int __create_doorbell(struct i915_guc_client *client) doorbell = __get_doorbell(client); doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = client->doorbell_cookie; + doorbell->cookie = 0; err = __guc_allocate_doorbell(client->guc, client->stage_id); - if (err) { + if (err) doorbell->db_status = GUC_DOORBELL_DISABLED; - doorbell->cookie = 0; - } + return err; } @@ -306,7 +305,7 @@ static void guc_proc_desc_init(struct intel_guc *guc, desc->db_base_addr = 0; desc->stage_id = client->stage_id; - desc->wq_size_bytes = client->wq_size; + desc->wq_size_bytes = GUC_WQ_SIZE; desc->wq_status = WQ_STATUS_ACTIVE; desc->priority = client->priority; } @@ -391,8 +390,8 @@ static void guc_stage_desc_init(struct intel_guc *guc, desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client); desc->db_trigger_uk = gfx_addr + client->doorbell_offset; desc->process_desc = gfx_addr + client->proc_desc_offset; - desc->wq_addr = gfx_addr + client->wq_offset; - desc->wq_size = client->wq_size; + desc->wq_addr = gfx_addr + GUC_DB_SIZE; + desc->wq_size = GUC_WQ_SIZE; desc->desc_private = (uintptr_t)client; } @@ -406,82 +405,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc, memset(desc, 0, sizeof(*desc)); } -/** - * i915_guc_wq_reserve() - reserve space in the GuC's workqueue - * @request: request associated with the commands - * - * Return: 0 if space is available - * -EAGAIN if space is not currently available - * - * This function must be called (and must return 0) before a request - * is submitted to the GuC via i915_guc_submit() below. Once a result - * of 0 has been returned, it must be balanced by a corresponding - * call to submit(). - * - * Reservation allows the caller to determine in advance that space - * will be available for the next submission before committing resources - * to it, and helps avoid late failures with complicated recovery paths. 
- */ -int i915_guc_wq_reserve(struct drm_i915_gem_request *request) -{ - const size_t wqi_size = sizeof(struct guc_wq_item); - struct i915_guc_client *client = request->i915->guc.execbuf_client; - struct guc_process_desc *desc = __get_process_desc(client); - u32 freespace; - int ret; - - spin_lock_irq(&client->wq_lock); - freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); - freespace -= client->wq_rsvd; - if (likely(freespace >= wqi_size)) { - client->wq_rsvd += wqi_size; - ret = 0; - } else { - client->no_wq_space++; - ret = -EAGAIN; - } - spin_unlock_irq(&client->wq_lock); - - return ret; -} - -static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size) -{ - unsigned long flags; - - spin_lock_irqsave(&client->wq_lock, flags); - client->wq_rsvd += size; - spin_unlock_irqrestore(&client->wq_lock, flags); -} - -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request) -{ - const int wqi_size = sizeof(struct guc_wq_item); - struct i915_guc_client *client = request->i915->guc.execbuf_client; - - GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size); - guc_client_update_wq_rsvd(client, -wqi_size); -} - /* Construct a Work Item and append it to the GuC's Work Queue */ static void guc_wq_item_append(struct i915_guc_client *client, struct drm_i915_gem_request *rq) { /* wqi_len is in DWords, and does not include the one-word header */ const size_t wqi_size = sizeof(struct guc_wq_item); - const u32 wqi_len = wqi_size/sizeof(u32) - 1; + const u32 wqi_len = wqi_size / sizeof(u32) - 1; struct intel_engine_cs *engine = rq->engine; + struct i915_gem_context *ctx = rq->ctx; struct guc_process_desc *desc = __get_process_desc(client); struct guc_wq_item *wqi; - u32 freespace, tail, wq_off; + u32 ring_tail, wq_off; - /* Free space is guaranteed, see i915_guc_wq_reserve() above */ - freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); - GEM_BUG_ON(freespace < wqi_size); + lockdep_assert_held(&client->wq_lock); - /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; - GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); + ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -491,29 +431,29 @@ static void guc_wq_item_append(struct i915_guc_client *client, * workqueue buffer dw by dw. */ BUILD_BUG_ON(wqi_size != 16); - GEM_BUG_ON(client->wq_rsvd < wqi_size); - /* postincrement WQ tail for next time */ - wq_off = client->wq_tail; + /* Free space is guaranteed. 
*/ + wq_off = READ_ONCE(desc->tail); + GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), + GUC_WQ_SIZE) < wqi_size); GEM_BUG_ON(wq_off & (wqi_size - 1)); - client->wq_tail += wqi_size; - client->wq_tail &= client->wq_size - 1; - client->wq_rsvd -= wqi_size; /* WQ starts from the page after doorbell / process_desc */ wqi = client->vaddr + wq_off + GUC_DB_SIZE; /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (engine->guc_id << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; + (wqi_len << WQ_LEN_SHIFT) | + (engine->guc_id << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; - /* The GuC wants only the low-order word of the context descriptor */ - wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); + wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine)); - wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; wqi->fence_id = rq->global_seqno; + + /* Postincrement WQ tail for next time. */ + WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); } static void guc_reset_wq(struct i915_guc_client *client) @@ -522,106 +462,64 @@ static void guc_reset_wq(struct i915_guc_client *client) desc->head = 0; desc->tail = 0; - - client->wq_tail = 0; } -static int guc_ring_doorbell(struct i915_guc_client *client) +static void guc_ring_doorbell(struct i915_guc_client *client) { - struct guc_process_desc *desc = __get_process_desc(client); - union guc_doorbell_qw db_cmp, db_exc, db_ret; - union guc_doorbell_qw *db; - int attempt = 2, ret = -EAGAIN; - - /* Update the tail so it is visible to GuC */ - desc->tail = client->wq_tail; - - /* current cookie */ - db_cmp.db_status = GUC_DOORBELL_ENABLED; - db_cmp.cookie = client->doorbell_cookie; + struct guc_doorbell_info *db; + u32 cookie; - /* cookie to be updated */ - db_exc.db_status = GUC_DOORBELL_ENABLED; - db_exc.cookie = client->doorbell_cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; + lockdep_assert_held(&client->wq_lock); /* pointer of current doorbell cacheline */ - db = (union guc_doorbell_qw *)__get_doorbell(client); - - while (attempt--) { - /* lets ring the doorbell */ - db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, - db_cmp.value_qw, db_exc.value_qw); - - /* if the exchange was successfully executed */ - if (db_ret.value_qw == db_cmp.value_qw) { - /* db was successfully rung */ - client->doorbell_cookie = db_exc.cookie; - ret = 0; - break; - } - - /* XXX: doorbell was lost and need to acquire it again */ - if (db_ret.db_status == GUC_DOORBELL_DISABLED) - break; + db = __get_doorbell(client); - DRM_WARN("Cookie mismatch. Expected %d, found %d\n", - db_cmp.cookie, db_ret.cookie); - - /* update the cookie to newly read cookie from GuC */ - db_cmp.cookie = db_ret.cookie; - db_exc.cookie = db_ret.cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - } + /* we're not expecting the doorbell cookie to change behind our back */ + cookie = READ_ONCE(db->cookie); + WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie); - return ret; + /* XXX: doorbell was lost and need to acquire it again */ + GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); } /** - * __i915_guc_submit() - Submit commands through GuC - * @rq: request associated with the commands - * - * The caller must have already called i915_guc_wq_reserve() above with - * a result of 0 (success), guaranteeing that there is space in the work - * queue for the new request, so enqueuing the item cannot fail. 
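With the reservation bookkeeping removed, guc_wq_item_append() above works directly on the head and tail offsets in the shared process descriptor: free space is checked with CIRC_SPACE() against GUC_WQ_SIZE and the tail is post-incremented with a power-of-two mask. A stand-alone model of the same circular arithmetic (local names, not the driver's):

/* One 4-dword work item is 16 bytes; the ring size must be a power of
 * two so the wrap can be a simple mask, as with GUC_WQ_SIZE. */
#define EX_WQ_SIZE	4096u
#define EX_WQI_SIZE	16u

/* Bytes the producer may still write, given the consumer's head and the
 * producer's tail; same result as the driver's
 * CIRC_SPACE(wq_off, desc->head, GUC_WQ_SIZE) check. */
static unsigned int ex_wq_space(unsigned int head, unsigned int tail)
{
	return (head - tail - 1) & (EX_WQ_SIZE - 1);
}

/* Advance the tail past one work item, wrapping at the ring size. */
static unsigned int ex_wq_advance(unsigned int tail)
{
	return (tail + EX_WQI_SIZE) & (EX_WQ_SIZE - 1);
}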
- * - * Bad Things Will Happen if the caller violates this protocol e.g. calls - * submit() when _reserve() says there's no space, or calls _submit() - * a different number of times from (successful) calls to _reserve(). + * i915_guc_submit() - Submit commands through GuC + * @engine: engine associated with the commands * * The only error here arises if the doorbell hardware isn't functioning * as expected, which really shouln't happen. */ -static void __i915_guc_submit(struct drm_i915_gem_request *rq) +static void i915_guc_submit(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = rq->i915; - struct intel_engine_cs *engine = rq->engine; - unsigned int engine_id = engine->id; - struct intel_guc *guc = &rq->i915->guc; + struct drm_i915_private *dev_priv = engine->i915; + struct intel_guc *guc = &dev_priv->guc; struct i915_guc_client *client = guc->execbuf_client; - unsigned long flags; - int b_ret; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const unsigned int engine_id = engine->id; + unsigned int n; - /* WA to flush out the pending GMADR writes to ring buffer. */ - if (i915_vma_is_map_and_fenceable(rq->ring->vma)) - POSTING_READ_FW(GUC_STATUS); + for (n = 0; n < ARRAY_SIZE(execlists->port); n++) { + struct drm_i915_gem_request *rq; + unsigned int count; - spin_lock_irqsave(&client->wq_lock, flags); + rq = port_unpack(&port[n], &count); + if (rq && count == 0) { + port_set(&port[n], port_pack(rq, ++count)); - guc_wq_item_append(client, rq); - b_ret = guc_ring_doorbell(client); + if (i915_vma_is_map_and_fenceable(rq->ring->vma)) + POSTING_READ_FW(GUC_STATUS); - client->submissions[engine_id] += 1; + spin_lock(&client->wq_lock); - spin_unlock_irqrestore(&client->wq_lock, flags); -} + guc_wq_item_append(client, rq); + guc_ring_doorbell(client); -static void i915_guc_submit(struct drm_i915_gem_request *rq) -{ - __i915_gem_request_submit(rq); - __i915_guc_submit(rq); + client->submissions[engine_id] += 1; + + spin_unlock(&client->wq_lock); + } + } } static void nested_enable_signaling(struct drm_i915_gem_request *rq) @@ -655,27 +553,33 @@ static void port_assign(struct execlist_port *port, if (port_isset(port)) i915_gem_request_put(port_request(port)); - port_set(port, i915_gem_request_get(rq)); + port_set(port, port_pack(i915_gem_request_get(rq), port_count(port))); nested_enable_signaling(rq); } -static bool i915_guc_dequeue(struct intel_engine_cs *engine) +static void i915_guc_dequeue(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; - struct drm_i915_gem_request *last = port_request(port); - struct rb_node *rb; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + struct drm_i915_gem_request *last = NULL; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; bool submit = false; + struct rb_node *rb; + + if (port_isset(port)) + port++; spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); + rb = execlists->first; + GEM_BUG_ON(rb_first(&execlists->queue) != rb); while (rb) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { if (last && rq->ctx != last->ctx) { - if (port != engine->execlist_port) { + if (port == last_port) { __list_del_many(&p->requests, &rq->priotree.link); 
goto done; @@ -689,50 +593,51 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine) INIT_LIST_HEAD(&rq->priotree.link); rq->priotree.priority = INT_MAX; - i915_guc_submit(rq); - trace_i915_gem_request_in(rq, port_index(port, engine)); + __i915_gem_request_submit(rq); + trace_i915_gem_request_in(rq, port_index(port, execlists)); last = rq; submit = true; } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: - engine->execlist_first = rb; - if (submit) + execlists->first = rb; + if (submit) { port_assign(port, last); + execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); + i915_guc_submit(engine); + } spin_unlock_irq(&engine->timeline->lock); - - return submit; } static void i915_guc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; struct drm_i915_gem_request *rq; - bool submit; - do { - rq = port_request(&port[0]); - while (rq && i915_gem_request_completed(rq)) { - trace_i915_gem_request_out(rq); - i915_gem_request_put(rq); + rq = port_request(&port[0]); + while (rq && i915_gem_request_completed(rq)) { + trace_i915_gem_request_out(rq); + i915_gem_request_put(rq); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); + execlists_port_complete(execlists, port); - rq = port_request(&port[0]); - } + rq = port_request(&port[0]); + } + if (!rq) + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); - submit = false; - if (!port_count(&port[1])) - submit = i915_guc_dequeue(engine); - } while (submit); + if (!port_isset(last_port)) + i915_guc_dequeue(engine); } /* @@ -913,8 +818,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, client->engines = engines; client->priority = priority; client->doorbell_id = GUC_DOORBELL_INVALID; - client->wq_offset = GUC_DB_SIZE; - client->wq_size = GUC_WQ_SIZE; spin_lock_init(&client->wq_lock); ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, @@ -996,28 +899,39 @@ static void guc_client_free(struct i915_guc_client *client) kfree(client); } +static void guc_policy_init(struct guc_policy *policy) +{ + policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US; + policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US; + policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US; + policy->policy_flags = 0; +} + static void guc_policies_init(struct guc_policies *policies) { struct guc_policy *policy; u32 p, i; - policies->dpc_promote_time = 500000; + policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US; policies->max_num_work_items = POLICY_MAX_NUM_WI; for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { policy = &policies->policy[p][i]; - policy->execution_quantum = 1000000; - policy->preemption_time = 500000; - policy->fault_time = 250000; - policy->policy_flags = 0; + guc_policy_init(policy); } } policies->is_valid = 1; } +/* + * The first 80 dwords of the register state context, containing the + * execlists and ppgtt registers. 
+ */ +#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) + static int guc_ads_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -1032,6 +946,8 @@ static int guc_ads_create(struct intel_guc *guc) } __packed *blob; struct intel_engine_cs *engine; enum intel_engine_id id; + const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE; + const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; GEM_BUG_ON(guc->ads_vma); @@ -1062,13 +978,20 @@ static int guc_ads_create(struct intel_guc *guc) * engines after a reset. Here we use the Render ring default * context, which must already exist and be pinned in the GGTT, * so its address won't change after we've told the GuC where - * to find it. + * to find it. Note that we have to skip our header (1 page), + * because our GuC shared data is there. */ blob->ads.golden_context_lrca = - dev_priv->engine[RCS]->status_page.ggtt_offset; + guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset; + /* + * The GuC expects us to exclude the portion of the context image that + * it skips from the size it is to read. It starts reading from after + * the execlist context (so skipping the first page [PPHWSP] and 80 + * dwords). Weird guc is weird. + */ for_each_engine(engine, dev_priv, id) - blob->ads.eng_state_size[engine->guc_id] = engine->context_size; + blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size; base = guc_ggtt_offset(vma); blob->ads.scheduler_policies = base + ptr_offset(blob, policies); @@ -1221,6 +1144,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) enum intel_engine_id id; int err; + /* + * We're using GuC work items for submitting work through GuC. Since + * we're coalescing multiple requests from a single context into a + * single work item prior to assigning it to execlist_port, we can + * never have more work items than the total number of ports (for all + * engines). The GuC firmware is controlling the HEAD of work queue, + * and it is guaranteed that it will remove the work item from the + * queue before our request is completed. + */ + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) * + sizeof(struct guc_wq_item) * + I915_NUM_ENGINES > GUC_WQ_SIZE); + if (!client) { client = guc_client_alloc(dev_priv, INTEL_INFO(dev_priv)->ring_mask, @@ -1248,24 +1184,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) guc_interrupts_capture(dev_priv); for_each_engine(engine, dev_priv, id) { - const int wqi_size = sizeof(struct guc_wq_item); - struct drm_i915_gem_request *rq; - + struct intel_engine_execlists * const execlists = &engine->execlists; /* The tasklet was initialised by execlists, and may be in * a state of flux (across a reset) and so we just want to * take over the callback without changing any other state * in the tasklet. 
*/ - engine->irq_tasklet.func = i915_guc_irq_handler; + execlists->irq_tasklet.func = i915_guc_irq_handler; clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - - /* Replay the current set of previously submitted requests */ - spin_lock_irq(&engine->timeline->lock); - list_for_each_entry(rq, &engine->timeline->requests, link) { - guc_client_update_wq_rsvd(client, wqi_size); - __i915_guc_submit(rq); - } - spin_unlock_irq(&engine->timeline->lock); + tasklet_schedule(&execlists->irq_tasklet); } return 0; @@ -1310,7 +1237,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv) /* any value greater than GUC_POWER_D0 */ data[1] = GUC_POWER_D1; /* first page is shared data with GuC */ - data[2] = guc_ggtt_offset(ctx->engine[RCS].state); + data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE; return intel_guc_send(guc, data, ARRAY_SIZE(data)); } @@ -1328,7 +1255,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv) if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) return 0; - if (i915.guc_log_level >= 0) + if (i915_modparams.guc_log_level >= 0) gen9_enable_guc_interrupts(dev_priv); ctx = dev_priv->kernel_context; @@ -1336,7 +1263,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv) data[0] = INTEL_GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; /* first page is shared data with GuC */ - data[2] = guc_ggtt_offset(ctx->engine[RCS].state); + data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE; return intel_guc_send(guc, data, ARRAY_SIZE(data)); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b63893eeca73..849c120397a8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -37,6 +37,10 @@ #include "i915_trace.h" #include "intel_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: interrupt handling * @@ -1005,6 +1009,8 @@ static void notify_ring(struct intel_engine_cs *engine) spin_lock(&engine->breadcrumbs.irq_lock); wait = engine->breadcrumbs.irq_wait; if (wait) { + bool wakeup = engine->irq_seqno_barrier; + /* We use a callback from the dma-fence to submit * requests after waiting on our own requests. To * ensure minimum delay in queuing the next request to @@ -1017,12 +1023,18 @@ static void notify_ring(struct intel_engine_cs *engine) * and many waiters. 
*/ if (i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &wait->request->fence.flags)) - rq = i915_gem_request_get(wait->request); + wait->seqno)) { + struct drm_i915_gem_request *waiter = wait->request; + + wakeup = true; + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &waiter->fence.flags) && + intel_wait_check_request(wait, waiter)) + rq = i915_gem_request_get(waiter); + } - wake_up_process(wait->tsk); + if (wakeup) + wake_up_process(wait->tsk); } else { __intel_engine_disarm_breadcrumbs(engine); } @@ -1108,7 +1120,7 @@ static void gen6_pm_rps_work(struct work_struct *work) if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) goto out; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); @@ -1162,7 +1174,7 @@ static void gen6_pm_rps_work(struct work_struct *work) dev_priv->rps.last_adj = 0; } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); out: /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ @@ -1305,10 +1317,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, static void gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) { + struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { - if (port_count(&engine->execlist_port[0])) { + if (READ_ONCE(engine->execlists.active)) { __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); tasklet = true; } @@ -1316,11 +1329,17 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { notify_ring(engine); - tasklet |= i915.enable_guc_submission; + tasklet |= i915_modparams.enable_guc_submission; + } + + if ((iir & (GT_RENDER_CS_MASTER_ERROR_INTERRUPT << test_shift)) && + intel_vgpu_active(engine->i915)) { + queue_work(system_highpri_wq, &engine->reset_work); + return; } if (tasklet) - tasklet_hi_schedule(&engine->irq_tasklet); + tasklet_hi_schedule(&execlists->irq_tasklet); } static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, @@ -1706,6 +1725,17 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) } } + +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +static inline void gvt_notify_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + if (dev_priv->gvt) + queue_work(system_unbound_wq, + &dev_priv->gvt->pipe_info[pipe].vblank_work); +} +#endif + static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { @@ -2465,8 +2495,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); - if (iir & GEN8_PIPE_VBLANK) + if (iir & GEN8_PIPE_VBLANK) { drm_handle_vblank(&dev_priv->drm, pipe); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + gvt_notify_vblank(dev_priv, pipe); +#endif + } if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -2851,7 +2885,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + /*since guest will see all the pipes, we don't want it disable vblank*/ + if (!dev_priv->gvt) + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -3407,6 
+3443,19 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) if (HAS_L3_DPF(dev_priv)) gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + if (intel_vgpu_active(dev_priv)) { + gt_interrupts[0] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_RCS_IRQ_SHIFT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_BCS_IRQ_SHIFT; + gt_interrupts[1] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VCS2_IRQ_SHIFT; + gt_interrupts[3] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VECS_IRQ_SHIFT; + } + dev_priv->pm_ier = 0x0; dev_priv->pm_imr = ~dev_priv->pm_ier; GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); @@ -3429,8 +3478,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) enum pipe pipe; if (INTEL_GEN(dev_priv) >= 9) { - de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | - GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE; de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; if (IS_GEN9_LP(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 8ab003dca113..3262d8d0fe84 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -25,7 +25,14 @@ #include "i915_params.h" #include "i915_drv.h" -struct i915_params i915 __read_mostly = { +#define i915_param_named(name, T, perm, desc) \ + module_param_named(name, i915_modparams.name, T, perm); \ + MODULE_PARM_DESC(name, desc) +#define i915_param_named_unsafe(name, T, perm, desc) \ + module_param_named_unsafe(name, i915_modparams.name, T, perm); \ + MODULE_PARM_DESC(name, desc) + +struct i915_params i915_modparams __read_mostly = { .modeset = -1, .panel_ignore_lid = 1, .semaphores = -1, @@ -65,195 +72,188 @@ struct i915_params i915 __read_mostly = { .inject_load_failure = 0, .enable_dpcd_backlight = false, .enable_gvt = false, + .enable_pvmmio = 1, + .enable_gvt_oos = 1, + .enable_conformance_check = true, + .disable_gvt_fw_loading = true, + .gvt_workload_priority = 0, + .memtrack_debug = 1, }; -module_param_named(modeset, i915.modeset, int, 0400); -MODULE_PARM_DESC(modeset, +i915_param_named(modeset, int, 0400, "Use kernel modesetting [KMS] (0=disable, " "1=on, -1=force vga console preference [default])"); -module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); -MODULE_PARM_DESC(panel_ignore_lid, +i915_param_named_unsafe(panel_ignore_lid, int, 0600, "Override lid status (0=autodetect, 1=autodetect disabled [default], " "-1=force lid closed, -2=force lid open)"); -module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); -MODULE_PARM_DESC(semaphores, +i915_param_named_unsafe(semaphores, int, 0400, "Use semaphores for inter-ring sync " "(default: -1 (use per-chip defaults))"); -module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400); -MODULE_PARM_DESC(enable_rc6, +i915_param_named_unsafe(enable_rc6, int, 0400, "Enable power-saving render C-state 6. " "Different stages can be selected via bitmask values " "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "default: -1 (use per-chip default)"); -module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400); -MODULE_PARM_DESC(enable_dc, +i915_param_named_unsafe(enable_dc, int, 0400, "Enable power-saving display C-states. 
" "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); -module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); -MODULE_PARM_DESC(enable_fbc, +i915_param_named_unsafe(enable_fbc, int, 0600, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); -module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400); -MODULE_PARM_DESC(lvds_channel_mode, +i915_param_named_unsafe(lvds_channel_mode, int, 0400, "Specify LVDS channel mode " "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); -module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600); -MODULE_PARM_DESC(lvds_use_ssc, +i915_param_named_unsafe(panel_use_ssc, int, 0600, "Use Spread Spectrum Clock with panels [LVDS/eDP] " "(default: auto from VBT)"); -module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400); -MODULE_PARM_DESC(vbt_sdvo_panel_type, +i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, "Override/Ignore selection of SDVO panel mode in the VBT " "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); -module_param_named_unsafe(reset, i915.reset, int, 0600); -MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); +i915_param_named_unsafe(reset, int, 0600, + "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); -module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400); -MODULE_PARM_DESC(vbt_firmware, - "Load VBT from specified file under /lib/firmware"); +i915_param_named_unsafe(vbt_firmware, charp, 0400, + "Load VBT from specified file under /lib/firmware"); #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -module_param_named(error_capture, i915.error_capture, bool, 0600); -MODULE_PARM_DESC(error_capture, +i915_param_named(error_capture, bool, 0600, "Record the GPU state following a hang. " "This information in /sys/class/drm/card/error is vital for " "triaging and debugging hangs."); #endif -module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644); -MODULE_PARM_DESC(enable_hangcheck, +i915_param_named_unsafe(enable_hangcheck, bool, 0644, "Periodically check GPU activity for detecting hangs. " "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); -module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); -MODULE_PARM_DESC(enable_ppgtt, +i915_param_named_unsafe(enable_ppgtt, int, 0400, "Override PPGTT usage. " "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); -module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); -MODULE_PARM_DESC(enable_execlists, +i915_param_named_unsafe(enable_execlists, int, 0400, "Override execlists usage. " "(-1=auto [default], 0=disabled, 1=enabled)"); -module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR " - "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " - "Default: -1 (use per-chip default)"); +i915_param_named_unsafe(enable_psr, int, 0600, + "Enable PSR " + "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " + "Default: -1 (use per-chip default)"); -module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400); -MODULE_PARM_DESC(alpha_support, +i915_param_named_unsafe(alpha_support, bool, 0400, "Enable alpha quality driver support for latest hardware. 
" "See also CONFIG_DRM_I915_ALPHA_SUPPORT."); -module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400); -MODULE_PARM_DESC(disable_power_well, +i915_param_named_unsafe(disable_power_well, int, 0400, "Disable display power wells when possible " "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); -module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); -MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); +i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)"); -module_param_named(fastboot, i915.fastboot, bool, 0600); -MODULE_PARM_DESC(fastboot, +i915_param_named(fastboot, bool, 0600, "Try to skip unnecessary mode sets at boot time (default: false)"); -module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); -MODULE_PARM_DESC(prefault_disable, +i915_param_named_unsafe(prefault_disable, bool, 0600, "Disable page prefaulting for pread/pwrite/reloc (default:false). " "For developers only."); -module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600); -MODULE_PARM_DESC(load_detect_test, +i915_param_named_unsafe(load_detect_test, bool, 0600, "Force-enable the VGA load detect code for testing (default:false). " "For developers only."); -module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600); -MODULE_PARM_DESC(force_reset_modeset_test, +i915_param_named_unsafe(force_reset_modeset_test, bool, 0600, "Force a modeset during gpu reset for testing (default:false). " "For developers only."); -module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600); -MODULE_PARM_DESC(invert_brightness, +i915_param_named_unsafe(invert_brightness, int, 0600, "Invert backlight brightness " "(-1 force normal, 0 machine defaults, 1 force inversion), please " "report PCI device ID, subsystem vendor and subsystem device ID " "to dri-devel@lists.freedesktop.org, if your machine needs it. " "It will then be included in an upcoming module version."); -module_param_named(disable_display, i915.disable_display, bool, 0400); -MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); +i915_param_named(disable_display, bool, 0400, + "Disable display (default: false)"); -module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); -MODULE_PARM_DESC(enable_cmd_parser, - "Enable command parsing (true=enabled [default], false=disabled)"); +i915_param_named_unsafe(enable_cmd_parser, bool, 0400, + "Enable command parsing (true=enabled [default], false=disabled)"); -module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); -MODULE_PARM_DESC(use_mmio_flip, - "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); +i915_param_named_unsafe(use_mmio_flip, int, 0600, + "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); -module_param_named(mmio_debug, i915.mmio_debug, int, 0600); -MODULE_PARM_DESC(mmio_debug, +i915_param_named(mmio_debug, int, 0600, "Enable the MMIO debug code for the first N failures (default: off). " "This may negatively affect performance."); -module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); -MODULE_PARM_DESC(verbose_state_checks, +i915_param_named(verbose_state_checks, bool, 0600, "Enable verbose logs (ie. 
WARN_ON()) in case of unexpected hw state conditions."); -module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400); -MODULE_PARM_DESC(nuclear_pageflip, - "Force enable atomic functionality on platforms that don't have full support yet."); +i915_param_named_unsafe(nuclear_pageflip, bool, 0400, + "Force enable atomic functionality on platforms that don't have full support yet."); /* WA to get away with the default setting in VBT for early platforms.Will be removed */ -module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); -MODULE_PARM_DESC(edp_vswing, - "Ignore/Override vswing pre-emph table selection from VBT " - "(0=use value from vbt [default], 1=low power swing(200mV)," - "2=default swing(400mV))"); - -module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); -MODULE_PARM_DESC(enable_guc_loading, - "Enable GuC firmware loading " - "(-1=auto, 0=never [default], 1=if available, 2=required)"); - -module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); -MODULE_PARM_DESC(enable_guc_submission, - "Enable GuC submission " - "(-1=auto, 0=never [default], 1=if available, 2=required)"); - -module_param_named(guc_log_level, i915.guc_log_level, int, 0400); -MODULE_PARM_DESC(guc_log_level, +i915_param_named_unsafe(edp_vswing, int, 0400, + "Ignore/Override vswing pre-emph table selection from VBT " + "(0=use value from vbt [default], 1=low power swing(200mV)," + "2=default swing(400mV))"); + +i915_param_named_unsafe(enable_guc_loading, int, 0400, + "Enable GuC firmware loading " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); + +i915_param_named_unsafe(enable_guc_submission, int, 0400, + "Enable GuC submission " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); + +i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level (-1:disabled (default), 0-3:enabled)"); -module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400); -MODULE_PARM_DESC(guc_firmware_path, +i915_param_named_unsafe(guc_firmware_path, charp, 0400, "GuC firmware path to use instead of the default one"); -module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400); -MODULE_PARM_DESC(huc_firmware_path, +i915_param_named_unsafe(huc_firmware_path, charp, 0400, "HuC firmware path to use instead of the default one"); -module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600); -MODULE_PARM_DESC(enable_dp_mst, +i915_param_named_unsafe(enable_dp_mst, bool, 0600, "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)"); -module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); -MODULE_PARM_DESC(inject_load_failure, + +i915_param_named_unsafe(inject_load_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); -module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); -MODULE_PARM_DESC(enable_dpcd_backlight, + +i915_param_named(enable_dpcd_backlight, bool, 0600, "Enable support for DPCD backlight control (default:false)"); -module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); -MODULE_PARM_DESC(enable_gvt, +i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); + +module_param_named(enable_pvmmio, i915_modparams.enable_pvmmio, bool, 0400); +MODULE_PARM_DESC(enable_pvmmio, + "Enable pv mmio feature, default TRUE. 
This parameter " + "could only set from host, guest value is set through vgt_if"); + +module_param_named(enable_gvt_oos, i915_modparams.enable_gvt_oos, bool, 0400); +MODULE_PARM_DESC(enable_gvt_oos, "To toggle the gvt ppgtt page table OOS (Out of Sync) feature."); + +module_param_named(enable_conformance_check, i915_modparams.enable_conformance_check, bool, 0400); +MODULE_PARM_DESC(enable_conformance_check, "To toggle the GVT guest conformance feature."); + +module_param_named(disable_gvt_fw_loading, i915_modparams.disable_gvt_fw_loading, bool, 0400); +MODULE_PARM_DESC(disable_gvt_fw_loading, "Disable GVT-g fw loading."); + +module_param_named(gvt_workload_priority, i915_modparams.gvt_workload_priority, int, 0600); +MODULE_PARM_DESC(gvt_workload_priority, + "Set GVT-g workload priority, (range: (-1023, 1023), default: 0, " + "more positive value means higher priority)."); +module_param_named(memtrack_debug, i915_modparams.memtrack_debug, int, 0600); +MODULE_PARM_DESC(memtrack_debug, + "use Memtrack debug capability (0=never, 1=always)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index ac844709c97e..0b72a0485e1b 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -68,7 +68,13 @@ func(bool, nuclear_pageflip); \ func(bool, enable_dp_mst); \ func(bool, enable_dpcd_backlight); \ - func(bool, enable_gvt) + func(bool, enable_gvt); \ + func(bool, enable_pvmmio); \ + func(bool, enable_gvt_oos); \ + func(bool, enable_conformance_check); \ + func(bool, disable_gvt_fw_loading); \ + func(int, gvt_workload_priority);\ + func(int, memtrack_debug); #define MEMBER(T, member) T member struct i915_params { @@ -76,7 +82,7 @@ struct i915_params { }; #undef MEMBER -extern struct i915_params i915 __read_mostly; +extern struct i915_params i915_modparams __read_mostly; #endif diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 09d97e0990b7..58dc44941a32 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -54,6 +54,8 @@ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } #define CHV_COLORS \ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } +#define GLK_COLORS \ + .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } /* Keep in gen based order, and chronological order within a gen */ #define GEN2_FEATURES \ @@ -66,19 +68,19 @@ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i830_info = { +static const struct intel_device_info intel_i830_info __initconst = { GEN2_FEATURES, .platform = INTEL_I830, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, /* legal, last one wins */ }; -static const struct intel_device_info intel_i845g_info = { +static const struct intel_device_info intel_i845g_info __initconst = { GEN2_FEATURES, .platform = INTEL_I845G, }; -static const struct intel_device_info intel_i85x_info = { +static const struct intel_device_info intel_i85x_info __initconst = { GEN2_FEATURES, .platform = INTEL_I85X, .is_mobile = 1, .num_pipes = 2, /* legal, last one wins */ @@ -86,7 +88,7 @@ static const struct intel_device_info intel_i85x_info = { .has_fbc = 1, }; -static const struct intel_device_info intel_i865g_info = { +static const struct intel_device_info intel_i865g_info __initconst = { GEN2_FEATURES, .platform = INTEL_I865G, }; @@ -98,7 +100,7 @@ static const struct intel_device_info intel_i865g_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info 
intel_i915g_info = { +static const struct intel_device_info intel_i915g_info __initconst = { GEN3_FEATURES, .platform = INTEL_I915G, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, @@ -106,7 +108,7 @@ static const struct intel_device_info intel_i915g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i915gm_info = { +static const struct intel_device_info intel_i915gm_info __initconst = { GEN3_FEATURES, .platform = INTEL_I915GM, .is_mobile = 1, @@ -118,7 +120,7 @@ static const struct intel_device_info intel_i915gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945g_info = { +static const struct intel_device_info intel_i945g_info __initconst = { GEN3_FEATURES, .platform = INTEL_I945G, .has_hotplug = 1, .cursor_needs_physical = 1, @@ -127,7 +129,7 @@ static const struct intel_device_info intel_i945g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945gm_info = { +static const struct intel_device_info intel_i945gm_info __initconst = { GEN3_FEATURES, .platform = INTEL_I945GM, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, @@ -138,14 +140,14 @@ static const struct intel_device_info intel_i945gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_g33_info = { +static const struct intel_device_info intel_g33_info __initconst = { GEN3_FEATURES, .platform = INTEL_G33, .has_hotplug = 1, .has_overlay = 1, }; -static const struct intel_device_info intel_pineview_info = { +static const struct intel_device_info intel_pineview_info __initconst = { GEN3_FEATURES, .platform = INTEL_PINEVIEW, .is_mobile = 1, .has_hotplug = 1, @@ -160,14 +162,14 @@ static const struct intel_device_info intel_pineview_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i965g_info = { +static const struct intel_device_info intel_i965g_info __initconst = { GEN4_FEATURES, .platform = INTEL_I965G, .has_overlay = 1, .hws_needs_physical = 1, }; -static const struct intel_device_info intel_i965gm_info = { +static const struct intel_device_info intel_i965gm_info __initconst = { GEN4_FEATURES, .platform = INTEL_I965GM, .is_mobile = 1, .has_fbc = 1, @@ -176,14 +178,14 @@ static const struct intel_device_info intel_i965gm_info = { .hws_needs_physical = 1, }; -static const struct intel_device_info intel_g45_info = { +static const struct intel_device_info intel_g45_info __initconst = { GEN4_FEATURES, .platform = INTEL_G45, .has_pipe_cxsr = 1, .ring_mask = RENDER_RING | BSD_RING, }; -static const struct intel_device_info intel_gm45_info = { +static const struct intel_device_info intel_gm45_info __initconst = { GEN4_FEATURES, .platform = INTEL_GM45, .is_mobile = 1, .has_fbc = 1, @@ -200,12 +202,12 @@ static const struct intel_device_info intel_gm45_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_ironlake_d_info = { +static const struct intel_device_info intel_ironlake_d_info __initconst = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, }; -static const struct intel_device_info intel_ironlake_m_info = { +static const struct intel_device_info intel_ironlake_m_info __initconst = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, .is_mobile = 1, .has_fbc = 1, @@ -224,15 +226,34 @@ static const struct intel_device_info intel_ironlake_m_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_sandybridge_d_info = { - GEN6_FEATURES, - 
.platform = INTEL_SANDYBRIDGE, +#define SNB_D_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE + +static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = { + SNB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, - .is_mobile = 1, +static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = { + SNB_D_PLATFORM, + .gt = 2, +}; + +#define SNB_M_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE, \ + .is_mobile = 1 + + +static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = { + SNB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = { + SNB_M_PLATFORM, + .gt = 2, }; #define GEN7_FEATURES \ @@ -249,27 +270,46 @@ static const struct intel_device_info intel_sandybridge_m_info = { GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS -static const struct intel_device_info intel_ivybridge_d_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .has_l3_dpf = 1, +#define IVB_D_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = { + IVB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .is_mobile = 1, - .has_l3_dpf = 1, +static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = { + IVB_D_PLATFORM, + .gt = 2, +}; + +#define IVB_M_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .is_mobile = 1, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = { + IVB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = { + IVB_M_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_ivybridge_q_info = { +static const struct intel_device_info intel_ivybridge_q_info __initconst = { GEN7_FEATURES, .platform = INTEL_IVYBRIDGE, + .gt = 2, .num_pipes = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; -static const struct intel_device_info intel_valleyview_info = { +static const struct intel_device_info intel_valleyview_info __initconst = { .platform = INTEL_VALLEYVIEW, .gen = 7, .is_lp = 1, @@ -288,7 +328,7 @@ static const struct intel_device_info intel_valleyview_info = { CURSOR_OFFSETS }; -#define HSW_FEATURES \ +#define G75_FEATURES \ GEN7_FEATURES, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ .has_ddi = 1, \ @@ -299,14 +339,28 @@ static const struct intel_device_info intel_valleyview_info = { .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 -static const struct intel_device_info intel_haswell_info = { - HSW_FEATURES, - .platform = INTEL_HASWELL, - .has_l3_dpf = 1, +#define HSW_PLATFORM \ + G75_FEATURES, \ + .platform = INTEL_HASWELL, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_haswell_gt1_info __initconst = { + HSW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_haswell_gt2_info __initconst = { + HSW_PLATFORM, + .gt = 2, +}; + +static const struct intel_device_info intel_haswell_gt3_info __initconst = { + HSW_PLATFORM, + .gt = 3, }; -#define BDW_FEATURES \ - HSW_FEATURES, \ +#define GEN8_FEATURES \ + G75_FEATURES, \ BDW_COLORS, \ .has_logical_ring_contexts = 1, \ .has_full_48bit_ppgtt = 1, \ @@ -314,20 +368,35 @@ static const struct 
intel_device_info intel_haswell_info = { .has_reset_engine = 1 #define BDW_PLATFORM \ - BDW_FEATURES, \ + GEN8_FEATURES, \ .gen = 8, \ .platform = INTEL_BROADWELL -static const struct intel_device_info intel_broadwell_info = { +static const struct intel_device_info intel_broadwell_gt1_info __initconst = { + BDW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_broadwell_gt2_info __initconst = { BDW_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_broadwell_gt3_info = { +static const struct intel_device_info intel_broadwell_rsvd_info __initconst = { BDW_PLATFORM, + .gt = 3, + /* According to the device ID those devices are GT3, they were + * previously treated as not GT3, keep it like that. + */ +}; + +static const struct intel_device_info intel_broadwell_gt3_info __initconst = { + BDW_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cherryview_info = { +static const struct intel_device_info intel_cherryview_info __initconst = { .gen = 8, .num_pipes = 3, .has_hotplug = 1, .is_lp = 1, @@ -350,21 +419,42 @@ static const struct intel_device_info intel_cherryview_info = { CHV_COLORS, }; -#define SKL_PLATFORM \ - BDW_FEATURES, \ - .gen = 9, \ - .platform = INTEL_SKYLAKE, \ +#define GEN9_FEATURES \ + GEN8_FEATURES, \ + .has_logical_ring_preemption = 1, \ .has_csr = 1, \ .has_guc = 1, \ + .has_ipc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_skylake_info = { +#define SKL_PLATFORM \ + GEN9_FEATURES, \ + .gen = 9, \ + .platform = INTEL_SKYLAKE + +static const struct intel_device_info intel_skylake_gt1_info __initconst = { SKL_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_skylake_gt3_info = { +static const struct intel_device_info intel_skylake_gt2_info __initconst = { SKL_PLATFORM, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .gt = 2, +}; + +#define SKL_GT3_PLUS_PLATFORM \ + SKL_PLATFORM, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING + + +static const struct intel_device_info intel_skylake_gt3_info __initconst = { + SKL_GT3_PLUS_PLATFORM, + .gt = 3, +}; + +static const struct intel_device_info intel_skylake_gt4_info __initconst = { + SKL_GT3_PLUS_PLATFORM, + .gt = 4, }; #define GEN9_LP_FEATURES \ @@ -385,72 +475,85 @@ static const struct intel_device_info intel_skylake_gt3_info = { .has_dp_mst = 1, \ .has_gmbus_irq = 1, \ .has_logical_ring_contexts = 1, \ + .has_logical_ring_preemption = 1, \ .has_guc = 1, \ .has_aliasing_ppgtt = 1, \ .has_full_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \ .has_reset_engine = 1, \ + .has_snoop = true, \ + .has_ipc = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS, \ BDW_COLORS -static const struct intel_device_info intel_broxton_info = { +static const struct intel_device_info intel_broxton_info __initconst = { GEN9_LP_FEATURES, .platform = INTEL_BROXTON, .ddb_size = 512, .has_reset_engine = false, }; -static const struct intel_device_info intel_geminilake_info = { +static const struct intel_device_info intel_geminilake_info __initconst = { GEN9_LP_FEATURES, .platform = INTEL_GEMINILAKE, .ddb_size = 1024, - .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } + GLK_COLORS, }; #define KBL_PLATFORM \ - BDW_FEATURES, \ + GEN9_FEATURES, \ .gen = 9, \ - .platform = INTEL_KABYLAKE, \ - .has_csr = 1, \ - .has_guc = 1, \ - .ddb_size = 896 + .platform = INTEL_KABYLAKE + +static const struct intel_device_info intel_kabylake_gt1_info 
__initconst = { + KBL_PLATFORM, + .gt = 1, +}; -static const struct intel_device_info intel_kabylake_info = { +static const struct intel_device_info intel_kabylake_gt2_info __initconst = { KBL_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_kabylake_gt3_info = { +static const struct intel_device_info intel_kabylake_gt3_info __initconst = { KBL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; #define CFL_PLATFORM \ - .is_alpha_support = 1, \ - BDW_FEATURES, \ + GEN9_FEATURES, \ .gen = 9, \ - .platform = INTEL_COFFEELAKE, \ - .has_csr = 1, \ - .has_guc = 1, \ - .ddb_size = 896 + .platform = INTEL_COFFEELAKE + +static const struct intel_device_info intel_coffeelake_gt1_info __initconst = { + CFL_PLATFORM, + .gt = 1, +}; -static const struct intel_device_info intel_coffeelake_info = { +static const struct intel_device_info intel_coffeelake_gt2_info __initconst = { CFL_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_coffeelake_gt3_info = { +static const struct intel_device_info intel_coffeelake_gt3_info __initconst = { CFL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cannonlake_info = { - BDW_FEATURES, +#define GEN10_FEATURES \ + GEN9_FEATURES, \ + .ddb_size = 1024, \ + GLK_COLORS + +static const struct intel_device_info intel_cannonlake_gt2_info __initconst = { + GEN10_FEATURES, .is_alpha_support = 1, .platform = INTEL_CANNONLAKE, .gen = 10, - .ddb_size = 1024, - .has_csr = 1, - .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } + .gt = 2, }; /* @@ -476,31 +579,40 @@ static const struct pci_device_id pciidlist[] = { INTEL_PINEVIEW_IDS(&intel_pineview_info), INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), + INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info), + INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info), + INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info), INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), - INTEL_HSW_IDS(&intel_haswell_info), + INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info), + INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info), + INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info), + INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info), + INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info), + INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info), + INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info), INTEL_VLV_IDS(&intel_valleyview_info), - INTEL_BDW_GT12_IDS(&intel_broadwell_info), + INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info), + INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info), INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), - INTEL_BDW_RSVD_IDS(&intel_broadwell_info), + INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info), INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_info), - INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info), + INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info), INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info), INTEL_BXT_IDS(&intel_broxton_info), INTEL_GLK_IDS(&intel_geminilake_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_info), + 
INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_CFL_S_IDS(&intel_coffeelake_info), - INTEL_CFL_H_IDS(&intel_coffeelake_info), - INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info), - INTEL_CNL_IDS(&intel_cannonlake_info), + INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info), + INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -519,9 +631,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (struct intel_device_info *) ent->driver_data; int err; - if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) { + if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) { DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n" - "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n" + "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915_modparams.alpha_support module parameter\n" "to enable support in this kernel version, or check for kernel updates.\n"); return -ENODEV; } @@ -541,6 +653,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; +#ifdef CONFIG_DRM_I915_LOAD_ASYNC_SUPPORT + i915_driver_load_async(pdev, ent); +#else err = i915_driver_load(pdev, ent); if (err) return err; @@ -550,7 +665,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915_pci_remove(pdev); return err > 0 ? -ENOTTY : err; } - +#endif return 0; } @@ -577,10 +692,10 @@ static int __init i915_init(void) * vga_text_mode_force boot option. */ - if (i915.modeset == 0) + if (i915_modparams.modeset == 0) use_kms = false; - if (vgacon_text_force() && i915.modeset == -1) + if (vgacon_text_force() && i915_modparams.modeset == -1) use_kms = false; if (!use_kms) { diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 370b9d248fed..2017ec404827 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -42,7 +42,7 @@ * Streams representing a single context are accessible to applications with a * corresponding drm file descriptor, such that OpenGL can use the interface * without special privileges. Access to system-wide metrics requires root - * privileges by default, unless changed via the dev.i915.perf_event_paranoid + * privileges by default, unless changed via the dev.i915_modparams.perf_event_paranoid * sysctl option. 
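A clarification on the dev.i915.* comment edits in this file: the sysctl path seen by userspace comes from the ctl_table .procname strings that i915_perf registers, not from the renamed C struct, so the knobs are still reached as dev.i915.perf_stream_paranoid and dev.i915.oa_max_sample_rate. A hedged sketch of that registration shape (field values illustrative, pre-5.17 register_sysctl_table() style, not copied verbatim from the driver):

	static struct ctl_table i915_oa_table[] = {
		{
		 .procname	= "perf_stream_paranoid",
		 .data		= &i915_perf_stream_paranoid,
		 .maxlen	= sizeof(i915_perf_stream_paranoid),
		 .mode		= 0644,
		 .proc_handler	= proc_dointvec_minmax,
		 .extra1	= &zero,
		 .extra2	= &one,
		},
		{}
	};

	static struct ctl_table i915_dir_table[] = {
		{
		 .procname	= "i915",	/* this string, not the C symbol, names the sysctl */
		 .mode		= 0555,
		 .child		= i915_oa_table,
		},
		{}
	};
	/* ...registered under a "dev" root via register_sysctl_table() */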
* */ @@ -265,7 +265,7 @@ #define POLL_FREQUENCY 200 #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) -/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ +/* for sysctl proc_dointvec_minmax of dev.i915_modparams.perf_stream_paranoid */ static int zero; static int one = 1; static u32 i915_perf_stream_paranoid = true; @@ -1213,7 +1213,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; @@ -1259,7 +1259,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - if (i915.enable_execlists) { + if (i915_modparams.enable_execlists) { dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; } else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; @@ -1300,9 +1300,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) */ mutex_lock(&dev_priv->drm.struct_mutex); dev_priv->perf.oa.exclusive_stream = NULL; - mutex_unlock(&dev_priv->drm.struct_mutex); - dev_priv->perf.oa.ops.disable_metric_set(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); free_oa_buffer(dev_priv); @@ -1754,22 +1753,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr * Note: it's only the RCS/Render context that has any OA state. */ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, - const struct i915_oa_config *oa_config, - bool interruptible) + const struct i915_oa_config *oa_config) { struct i915_gem_context *ctx; int ret; unsigned int wait_flags = I915_WAIT_LOCKED; - if (interruptible) { - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - return ret; - - wait_flags |= I915_WAIT_INTERRUPTIBLE; - } else { - mutex_lock(&dev_priv->drm.struct_mutex); - } + lockdep_assert_held(&dev_priv->drm.struct_mutex); /* Switch away from any user context. */ ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); @@ -1817,8 +1807,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, } out: - mutex_unlock(&dev_priv->drm.struct_mutex); - return ret; } @@ -1862,7 +1850,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, * to make sure all slices/subslices are ON before writing to NOA * registers. */ - ret = gen8_configure_all_contexts(dev_priv, oa_config, true); + ret = gen8_configure_all_contexts(dev_priv, oa_config); if (ret) return ret; @@ -1877,7 +1865,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) { /* Reset all contexts' slices/subslices configurations. */ - gen8_configure_all_contexts(dev_priv, NULL, false); + gen8_configure_all_contexts(dev_priv, NULL); I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & ~GT_NOA_ENABLE)); @@ -2127,6 +2115,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, if (ret) goto err_oa_buf_alloc; + ret = i915_mutex_lock_interruptible(&dev_priv->drm); + if (ret) + goto err_lock; + ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, stream->oa_config); if (ret) @@ -2134,23 +2126,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, stream->ops = &i915_oa_stream_ops; - /* Lock device for exclusive_stream access late because - * enable_metric_set() might lock as well on gen8+. 
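To keep the struct_mutex rework in these i915_perf.c hunks readable, here is the resulting order of operations in i915_oa_stream_init() once gen8_configure_all_contexts() merely asserts the lock instead of taking it (editorial sketch, error unwinding abbreviated):

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	/* enable_metric_set() -> gen8_configure_all_contexts() now runs with
	 * struct_mutex already held (see the lockdep_assert_held() above).
	 */
	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, stream->oa_config);
	if (ret)
		goto err_enable;

	dev_priv->perf.oa.exclusive_stream = stream;
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return 0;

with err_enable disabling the metric set and dropping the lock before err_lock frees the OA buffer, matching the relabelled error paths in the hunk that follows.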
- */ - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - goto err_lock; - dev_priv->perf.oa.exclusive_stream = stream; mutex_unlock(&dev_priv->drm.struct_mutex); return 0; -err_lock: +err_enable: dev_priv->perf.oa.ops.disable_metric_set(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); -err_enable: +err_lock: free_oa_buffer(dev_priv); err_oa_buf_alloc: @@ -2612,7 +2598,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, privileged_op = false; /* Similar to perf's kernel.perf_paranoid_cpu sysctl option - * we check a dev.i915.perf_stream_paranoid sysctl option + * we check a dev.i915_modparams.perf_stream_paranoid sysctl option * to determine if it's ok to access system wide OA counters * without CAP_SYS_ADMIN privileges. */ @@ -2801,7 +2787,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, if (oa_freq_hz > i915_oa_max_sample_rate && !capable(CAP_SYS_ADMIN)) { - DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", + DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915_modparams.oa_max_sample_rate) %uHz without root privileges\n", i915_oa_max_sample_rate); return -EACCES; } @@ -3409,7 +3395,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.timestamp_frequency = 12500000; dev_priv->perf.oa.oa_formats = hsw_oa_formats; - } else if (i915.enable_execlists) { + } else if (i915_modparams.enable_execlists) { /* Note: that although we could theoretically also support the * legacy ringbuffer mode on BDW (and earlier iterations of * this driver, before upstreaming did this) it didn't seem diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index 0679a58cdbae..0b2e3035f37c 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -28,6 +28,12 @@ #define VGT_PVINFO_PAGE 0x78000 #define VGT_PVINFO_SIZE 0x1000 +/* Scratch reg used for redirecting command access to registers, any + * command access to PVINFO page would be discarded, so it has no HW + * impact. + */ +#define VGT_SCRATCH_REG VGT_PVINFO_PAGE + /* * The following structure pages are defined in GEN MMIO space * for virtualization. (One page for now) @@ -49,6 +55,17 @@ enum vgt_g2v_type { VGT_G2V_MAX, }; +/* shared page(4KB) between gvt and VM, located at the first page next + * to MMIO region(2MB size normally). 
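A sizing note for the gvt_shared_page structure defined just below: a 4 KiB page is 0x400 32-bit words, elsp_data[4] plus reg_addr occupy the first five, so rsvd2[0x400 - 5] pads the structure to exactly one page. Likewise the vgt_if change further down shrinks rsv7 from [0x200 - 24] to [0x200 - 26] to absorb the two new u32 fields without growing that layout. The intent could be documented with a compile-time check (editorial sketch, assuming it is placed inside some init function):

	BUILD_BUG_ON(sizeof(struct gvt_shared_page) != PAGE_SIZE);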
+ */ +struct gvt_shared_page { + u32 elsp_data[4]; + u32 reg_addr; + u32 rsvd2[0x400 - 5]; +}; + +#define VGPU_PVMMIO(vgpu) vgpu_vreg(vgpu, vgtif_reg(enable_pvmmio)) + /* * VGT capabilities type */ @@ -101,8 +118,10 @@ struct vgt_if { u32 execlist_context_descriptor_lo; u32 execlist_context_descriptor_hi; + u32 enable_pvmmio; + u32 pv_mmio; /* vgpu trapped mmio read will be redirected here */ - u32 rsv7[0x200 - 24]; /* pad to one page */ + u32 rsv7[0x200 - 26]; /* pad to one page */ } __packed; #define vgtif_reg(x) \ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c9bcc6c45012..4eb65dbcbb5a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2373,6 +2373,7 @@ enum i915_power_well_id { #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) +#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24) #if 0 #define PRB0_TAIL _MMIO(0x2030) @@ -2491,6 +2492,7 @@ enum i915_power_well_id { # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 _MMIO(0x2090) #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) +#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ @@ -2993,6 +2995,7 @@ enum i915_power_well_id { # define GPIO_DATA_PULLUP_DISABLE (1 << 13) #define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ +#define GMBUS_AKSV_SELECT (1<<11) #define GMBUS_RATE_100KHZ (0<<8) #define GMBUS_RATE_50KHZ (1<<8) #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ @@ -3806,6 +3809,12 @@ enum { #define PWM2_GATING_DIS (1 << 14) #define PWM1_GATING_DIS (1 << 13) +/* + * GEN10 clock gating regs + */ +#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) +#define SARBUNIT_CLKGATE_DIS (1 << 5) + /* * Display engine regs */ @@ -6902,7 +6911,7 @@ enum { # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 _MMIO(0x42080) -#define SKL_RC_HASH_OUTSIDE (1 << 15) +#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) #define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) @@ -6934,6 +6943,7 @@ enum { #define DISP_FBC_WM_DIS (1<<15) #define DISP_ARB_CTL2 _MMIO(0x45004) #define DISP_DATA_PARTITION_5_6 (1<<6) +#define DISP_IPC_ENABLE (1<<3) #define DBUF_CTL _MMIO(0x45008) #define DBUF_POWER_REQUEST (1<<31) #define DBUF_POWER_STATE (1<<30) @@ -6944,6 +6954,7 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30) #define MASK_WAKEMEM (1<<13) #define SKL_DFSM _MMIO(0x51000) @@ -6969,12 +6980,19 @@ enum { #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN8_CS_CHICKEN1 _MMIO(0x2580) +#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0) +#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) +#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0) +#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1) +#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0) +#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1) /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define 
COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13) # define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12) # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) @@ -6986,6 +7004,8 @@ enum { #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) #define DISABLE_PIXEL_MASK_CAMMING (1<<14) +#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) + #define GEN7_L3SQCREG1 _MMIO(0xB010) #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 @@ -7018,6 +7038,7 @@ enum { /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) +#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0) #define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15) #define HDC_FENCE_DEST_SLM_DISABLE (1<<14) #define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) @@ -7947,6 +7968,7 @@ enum { #define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 #define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16 #define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24 +#define SKL_PCODE_LOAD_HDCP_KEYS 0x5 #define SKL_PCODE_CDCLK_CONTROL 0x7 #define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3 #define SKL_CDCLK_READY_FOR_CHANGE 0x1 @@ -8049,6 +8071,7 @@ enum { #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1<<0) +#define PUSH_CONSTANT_DEREF_DISABLE (1<<8) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -8060,9 +8083,11 @@ enum { #define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) +#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8) #define GEN9_ENABLE_YV12_BUGFIX (1<<4) #define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2) @@ -8235,6 +8260,89 @@ enum skl_power_gate { #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) + +/* HDCP Key Registers */ +#define HDCP_KEY_CONF _MMIO(0x66c00) +#define HDCP_AKSV_SEND_TRIGGER BIT(31) +#define HDCP_CLEAR_KEYS_TRIGGER BIT(30) +#define HDCP_KEY_LOAD_TRIGGER BIT(8) +#define HDCP_KEY_STATUS _MMIO(0x66c04) +#define HDCP_FUSE_IN_PROGRESS BIT(7) +#define HDCP_FUSE_ERROR BIT(6) +#define HDCP_FUSE_DONE BIT(5) +#define HDCP_KEY_LOAD_STATUS BIT(1) +#define HDCP_KEY_LOAD_DONE BIT(0) +#define HDCP_AKSV_LO _MMIO(0x66c10) +#define HDCP_AKSV_HI _MMIO(0x66c14) + +/* HDCP Repeater Registers */ +#define HDCP_REP_CTL _MMIO(0x66d00) +#define HDCP_DDIB_REP_PRESENT BIT(30) +#define HDCP_DDIA_REP_PRESENT BIT(29) +#define HDCP_DDIC_REP_PRESENT BIT(28) +#define HDCP_DDID_REP_PRESENT BIT(27) +#define HDCP_DDIF_REP_PRESENT BIT(26) +#define HDCP_DDIE_REP_PRESENT BIT(25) +#define HDCP_DDIB_SHA1_M0 (1 << 20) +#define HDCP_DDIA_SHA1_M0 (2 << 20) +#define HDCP_DDIC_SHA1_M0 (3 << 20) +#define HDCP_DDID_SHA1_M0 (4 << 20) +#define HDCP_DDIF_SHA1_M0 (5 << 20) +#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? 
*/ +#define HDCP_SHA1_BUSY BIT(16) +#define HDCP_SHA1_READY BIT(17) +#define HDCP_SHA1_COMPLETE BIT(18) +#define HDCP_SHA1_V_MATCH BIT(19) +#define HDCP_SHA1_TEXT_32 (1 << 1) +#define HDCP_SHA1_COMPLETE_HASH (2 << 1) +#define HDCP_SHA1_TEXT_24 (4 << 1) +#define HDCP_SHA1_TEXT_16 (5 << 1) +#define HDCP_SHA1_TEXT_8 (6 << 1) +#define HDCP_SHA1_TEXT_0 (7 << 1) +#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04) +#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08) +#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) +#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) +#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) +#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4)) +#define HDCP_SHA_TEXT _MMIO(0x66d18) + +/* HDCP Auth Registers */ +#define _PORTA_HDCP_AUTHENC 0x66800 +#define _PORTB_HDCP_AUTHENC 0x66500 +#define _PORTC_HDCP_AUTHENC 0x66600 +#define _PORTD_HDCP_AUTHENC 0x66700 +#define _PORTE_HDCP_AUTHENC 0x66A00 +#define _PORTF_HDCP_AUTHENC 0x66900 +#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \ + _PORTA_HDCP_AUTHENC, \ + _PORTB_HDCP_AUTHENC, \ + _PORTC_HDCP_AUTHENC, \ + _PORTD_HDCP_AUTHENC, \ + _PORTE_HDCP_AUTHENC, \ + _PORTF_HDCP_AUTHENC) + x) +#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) +#define HDCP_CONF_CAPTURE_AN BIT(0) +#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) +#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) +#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) +#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) +#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) +#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) +#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) +#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) +#define HDCP_STATUS_STREAM_A_ENC BIT(31) +#define HDCP_STATUS_STREAM_B_ENC BIT(30) +#define HDCP_STATUS_STREAM_C_ENC BIT(29) +#define HDCP_STATUS_STREAM_D_ENC BIT(28) +#define HDCP_STATUS_AUTH BIT(21) +#define HDCP_STATUS_ENC BIT(20) +#define HDCP_STATUS_RI_MATCH BIT(19) +#define HDCP_STATUS_R0_READY BIT(18) +#define HDCP_STATUS_AN_READY BIT(17) +#define HDCP_STATUS_CIPHER BIT(16) +#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff) + /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 @@ -8266,6 +8374,7 @@ enum skl_power_gate { #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) +#define TRANS_DDI_HDCP_SIGNALLING (1<<9) #define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) #define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7) #define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6) @@ -8475,6 +8584,7 @@ enum skl_power_gate { #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) @@ -9363,4 +9473,35 @@ enum skl_power_gate { #define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ #define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ +#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */ +#define MMCD_PCLA (1 << 31) +#define MMCD_HOTSPOT_EN (1 << 27) + +/* GVT has special read process from some MMIO register, + * which so that should be trapped to GVT to make a + * complete emulation. Such MMIO is not too much, now using + * a static list to cover them. 
+ */ +static inline bool in_mmio_read_trap_list(u32 reg) +{ + if (unlikely(reg >= PCH_GMBUS0.reg && reg <= PCH_GMBUS5.reg)) + return true; + + if (unlikely(reg == RING_TIMESTAMP(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP(VEBOX_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN8_BSD2_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(VEBOX_RING_BASE).reg)) + return true; + + if (unlikely(reg == SBI_DATA.reg || reg == 0x6c060 || reg == 0x206c)) + return true; + + return false; +} + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index d61c8727f756..c7c4ff34b279 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -31,6 +31,7 @@ #include #include "intel_drv.h" #include "i915_drv.h" +#include "../drm_internal.h" static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { @@ -246,7 +247,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 freq; freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); @@ -261,7 +262,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; ret = intel_gpu_freq(dev_priv, ret); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); @@ -304,9 +305,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq) return -EINVAL; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); dev_priv->rps.boost_freq = val; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return count; } @@ -344,14 +345,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = intel_freq_opcode(dev_priv, val); if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq || val < dev_priv->rps.min_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); return -EINVAL; } @@ -371,7 +372,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, * frequency request may be unchanged. */ ret = intel_set_rps(dev_priv, val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); @@ -401,14 +402,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = intel_freq_opcode(dev_priv, val); if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq || val > dev_priv->rps.max_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); return -EINVAL; } @@ -424,7 +425,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, * frequency request may be unchanged. 
*/ ret = intel_set_rps(dev_priv, val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); @@ -532,7 +533,274 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, return count; } -static const struct bin_attribute error_state_attr = { +#define dev_to_drm_minor(d) dev_get_drvdata((d)) + +static ssize_t i915_gem_clients_state_read(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + ret = i915_error_state_buf_init(&error_str, dev_priv, count, off); + if (ret) + return ret; + + ret = i915_get_drm_clients_info(&error_str, dev); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +#define GEM_OBJ_STAT_BUF_SIZE (4*1024) /* 4KB */ +#define GEM_OBJ_STAT_BUF_SIZE_MAX (1024*1024) /* 1MB */ + +struct i915_gem_file_attr_priv { + char tgid_str[16]; + struct pid *tgid; + struct drm_i915_error_state_buf buf; +}; + +static ssize_t i915_gem_read_objects(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct i915_gem_file_attr_priv *attr_priv; + struct pid *tgid; + ssize_t ret_count = 0; + long bytes_available; + int ret = 0, buf_size = GEM_OBJ_STAT_BUF_SIZE; + unsigned long timeout = msecs_to_jiffies(500) + 1; + + /* + * There may arise a scenario where syfs file entry is being removed, + * and may race against sysfs read. Sysfs file remove function would + * have taken the drm_global_mutex and would wait for read to finish, + * which is again waiting to acquire drm_global_mutex, leading to + * deadlock. To avoid this, use mutex_trylock here with a timeout. + */ + while (!mutex_trylock(&drm_global_mutex) && --timeout) + schedule_timeout_killable(1); + if (timeout == 0) { + DRM_DEBUG_DRIVER("Unable to acquire drm global mutex.\n"); + return -EBUSY; + } + + if (!attr || !attr->private) { + ret = -EINVAL; + DRM_ERROR("attr | attr->private pointer is NULL\n"); + goto out; + } + attr_priv = attr->private; + tgid = attr_priv->tgid; + + if (off && !attr_priv->buf.buf) { + ret = -EINVAL; + DRM_ERROR( + "Buf not allocated during read with non-zero offset\n"); + goto out; + } + + if (off == 0) { +retry: + if (!attr_priv->buf.buf) { + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj state buf init failed. 
buf_size=%d\n", + buf_size); + goto out; + } + } else { + /* Reset the buf parameters before filling data */ + attr_priv->buf.pos = 0; + attr_priv->buf.bytes = 0; + } + + /* Read the gfx device stats */ + ret = i915_gem_get_obj_info(&attr_priv->buf, dev, tgid); + if (ret) + goto out; + + ret = i915_error_ok(&attr_priv->buf); + if (ret) { + ret = 0; + goto copy_data; + } + if (buf_size >= GEM_OBJ_STAT_BUF_SIZE_MAX) { + DRM_DEBUG_DRIVER("obj stat buf size limit reached\n"); + ret = -ENOMEM; + goto out; + } else { + /* Try to reallocate buf of larger size */ + i915_error_state_buf_release(&attr_priv->buf); + buf_size *= 2; + + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj stat buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + goto retry; + } + } +copy_data: + + bytes_available = (long)attr_priv->buf.bytes - (long)off; + + if (bytes_available > 0) { + ret_count = count < bytes_available ? count : bytes_available; + memcpy(buf, attr_priv->buf.buf + off, ret_count); + } else + ret_count = 0; + +out: + mutex_unlock(&drm_global_mutex); + + return ret ?: ret_count; +} + +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gem_file_attr_priv *attr_priv; + struct bin_attribute *obj_attr; + struct drm_file *file_local; + int ret; + + if (!i915_modparams.memtrack_debug) + return 0; + + /* + * Check for multiple drm files having same tgid. If found, copy the + * bin attribute into the new file priv. Otherwise allocate a new + * copy of bin attribute, and create its corresponding sysfs file. + */ + mutex_lock(&dev->struct_mutex); + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (file_priv->tgid == file_priv_local->tgid) { + file_priv->obj_attr = file_priv_local->obj_attr; + mutex_unlock(&dev->struct_mutex); + return 0; + } + } + mutex_unlock(&dev->struct_mutex); + + obj_attr = kzalloc(sizeof(*obj_attr), GFP_KERNEL); + if (!obj_attr) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out; + } + + attr_priv = kzalloc(sizeof(*attr_priv), GFP_KERNEL); + if (!attr_priv) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out_obj_attr; + } + + snprintf(attr_priv->tgid_str, 16, "%d", task_tgid_nr(current)); + obj_attr->attr.name = attr_priv->tgid_str; + obj_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + obj_attr->size = 0; + obj_attr->read = i915_gem_read_objects; + + attr_priv->tgid = file_priv->tgid; + obj_attr->private = attr_priv; + + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + obj_attr); + if (ret) { + DRM_ERROR( + "sysfs tgid file setup failed. tgid=%d, process:%s, ret:%d\n", + pid_nr(file_priv->tgid), file_priv->process_name, ret); + + goto out_attr_priv; + } + + file_priv->obj_attr = obj_attr; + return 0; + +out_attr_priv: + kfree(attr_priv); +out_obj_attr: + kfree(obj_attr); +out: + return ret; +} + +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_file *file_local; + int open_count = 1; + + if (!i915_modparams.memtrack_debug) + return; + + /* + * The current drm file instance is already removed from filelist at + * this point. 
+ * Check if this particular drm file being removed is the last one for + * that particular tgid, and no other instances for this tgid exist in + * the filelist. If so, remove the corresponding sysfs file entry also. + */ + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (pid_nr(file_priv->tgid) == pid_nr(file_priv_local->tgid)) + open_count++; + } + + if (open_count == 1) { + struct i915_gem_file_attr_priv *attr_priv; + + if (WARN_ON(file_priv->obj_attr == NULL)) + return; + attr_priv = file_priv->obj_attr->private; + + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + file_priv->obj_attr); + + i915_error_state_buf_release(&attr_priv->buf); + kfree(file_priv->obj_attr->private); + kfree(file_priv->obj_attr); + } +} + +static struct bin_attribute error_state_attr = { .attr.name = "error", .attr.mode = S_IRUSR | S_IWUSR, .size = 0, @@ -540,6 +808,21 @@ static const struct bin_attribute error_state_attr = { .write = error_state_write, }; +static struct bin_attribute i915_gem_client_state_attr = { + .attr.name = "i915_gem_meminfo", + .attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, + .size = 0, + .read = i915_gem_clients_state_read, +}; + +static struct attribute *memtrack_kobj_attrs[] = {NULL}; + +static struct kobj_type memtrack_kobj_type = { + .release = NULL, + .sysfs_ops = NULL, + .default_attrs = memtrack_kobj_attrs, +}; + static void i915_setup_error_capture(struct device *kdev) { if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr)) @@ -602,6 +885,28 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) DRM_ERROR("RPS sysfs setup failed\n"); i915_setup_error_capture(kdev); + + if (i915_modparams.memtrack_debug) { + /* + * Create the gfx_memtrack directory for memtrack sysfs files + */ + ret = kobject_init_and_add( + &dev_priv->memtrack_kobj, &memtrack_kobj_type, + &kdev->kobj, "gfx_memtrack"); + if (unlikely(ret != 0)) { + DRM_ERROR( + "i915 sysfs setup memtrack directory failed\n" + ); + kobject_put(&dev_priv->memtrack_kobj); + } else { + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + if (ret) + DRM_ERROR( + "i915_gem_client_state sysfs setup failed\n" + ); + } + } } void i915_teardown_sysfs(struct drm_i915_private *dev_priv) @@ -620,4 +925,10 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group); sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group); #endif + if (i915_modparams.memtrack_debug) { + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + kobject_del(&dev_priv->memtrack_kobj); + kobject_put(&dev_priv->memtrack_kobj); + } } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index ef72da74b87f..281521f0fcfa 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -673,6 +673,54 @@ TRACE_EVENT(i915_gem_ring_flush, __entry->invalidate, __entry->flush) ); +TRACE_EVENT(i915_gem_multi_domains, + TP_PROTO(struct drm_i915_gem_request *req), + TP_ARGS(req), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ctx) + __field(u32, ring) + __field(u32, seqno) + __field(u32, global) + __field(int, prio_req) + __field(int, prio_ctx) + __field(bool, shadow_ctx) + __field(u32, hw_id) + __field(int, vgt_id) + __field(u32, pid) + ), + + TP_fast_assign( + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->ctx = req->fence.context; + __entry->seqno = 
req->fence.seqno; + __entry->global = req->global_seqno; + __entry->prio_req = req->priotree.priority; + __entry->prio_ctx = req->ctx->priority; + __entry->shadow_ctx = is_shadow_context(req->ctx); + __entry->hw_id = req->ctx->hw_id; + __entry->vgt_id = get_vgt_id(req->ctx); + __entry->pid = is_shadow_context(req->ctx) ? + get_pid_shadowed(req->ctx, req->engine) : + pid_nr(req->ctx->pid); + ), + + TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, " + "priority=%d (%d), is_shadow_ctx=%u, hw_id=%u, " + "vgt_id=%u, pid=%u", __entry->dev, __entry->ring, + __entry->ctx, __entry->seqno, __entry->global, + __entry->prio_req, __entry->prio_ctx, __entry->shadow_ctx, + __entry->hw_id, __entry->vgt_id, __entry->pid) +); + +DEFINE_EVENT(i915_gem_multi_domains, i915_gem_request_add_domain, + TP_PROTO(struct drm_i915_gem_request *req), + TP_ARGS(req) +); + + DECLARE_EVENT_CLASS(i915_gem_request, TP_PROTO(struct drm_i915_gem_request *req), TP_ARGS(req), diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 5fe9f3f39467..6a153f40551e 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -77,6 +77,15 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps)); + /* If guest wants to enable pvmmio, it needs to enable it explicitly + * through vgt_if interface, and then read back the enable state from + * gvt layer. + */ + __raw_i915_write32(dev_priv, vgtif_reg(enable_pvmmio), + i915_modparams.enable_pvmmio); + i915_modparams.enable_pvmmio = __raw_i915_read16(dev_priv, + vgtif_reg(enable_pvmmio)); + dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); } diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 36d4e635e4ce..d452c327dc1d 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -110,6 +110,8 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, to_intel_digital_connector_state(old_state); struct drm_crtc_state *crtc_state; + intel_hdcp_atomic_check(conn, old_state, new_state); + if (!new_state->crtc) return 0; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 5d4cd3d00564..b852befa7d5a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct drm_display_mode *panel_fixed_mode; int index; - index = i915.vbt_sdvo_panel_type; + index = i915_modparams.vbt_sdvo_panel_type; if (index == -2) { DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); return; @@ -676,8 +676,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) uint8_t vswing; /* Don't read from VBT if module parameter has valid value*/ - if (i915.edp_vswing) { - dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1; + if (i915_modparams.edp_vswing) { + dev_priv->vbt.edp.low_vswing = + i915_modparams.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; dev_priv->vbt.edp.low_vswing = vswing == 0; diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 4e00e5cb9fa1..5095c095da04 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -541,29 +541,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, spin_unlock_irq(&b->rb_lock); } -static bool 
signal_valid(const struct drm_i915_gem_request *request) -{ - return intel_wait_check_request(&request->signaling.wait, request); -} - static bool signal_complete(const struct drm_i915_gem_request *request) { if (!request) return false; - /* If another process served as the bottom-half it may have already - * signalled that this wait is already completed. - */ - if (intel_wait_complete(&request->signaling.wait)) - return signal_valid(request); - - /* Carefully check if the request is complete, giving time for the + /* + * Carefully check if the request is complete, giving time for the * seqno to be visible or if the GPU hung. */ - if (__i915_request_irq_complete(request)) - return true; - - return false; + return __i915_request_irq_complete(request); } static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) @@ -606,9 +593,13 @@ static int intel_breadcrumbs_signaler(void *arg) request = i915_gem_request_get_rcu(request); rcu_read_unlock(); if (signal_complete(request)) { - local_bh_disable(); - dma_fence_signal(&request->fence); - local_bh_enable(); /* kick start the tasklets */ + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &request->fence.flags)) { + local_bh_disable(); + dma_fence_signal(&request->fence); + GEM_BUG_ON(!i915_gem_request_completed(request)); + local_bh_enable(); /* kick start the tasklets */ + } spin_lock_irq(&b->rb_lock); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 1241e5891b29..67bf84987dd9 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -506,7 +506,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, else cmd = 0; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); @@ -516,7 +516,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); mutex_lock(&dev_priv->sb_lock); @@ -593,7 +593,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, */ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); @@ -603,7 +603,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); @@ -659,10 +659,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, "trying to change cdclk frequency with cdclk not enabled\n")) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("failed to inform pcode about cdclk change\n"); return; @@ -711,9 +711,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); 
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); @@ -859,16 +859,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { - int min_cdclk = skl_calc_cdclk(0, vco); u32 val; WARN_ON(vco != 8100000 && vco != 8640000); - /* select the minimum CDCLK before enabling DPLL 0 */ - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); - I915_WRITE(CDCLK_CTL, val); - POSTING_READ(CDCLK_CTL); - /* * We always enable DPLL0 with the lowest link rate possible, but still * taking into account the VCO required to operate the eDP panel at the @@ -922,24 +916,24 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; - u32 freq_select, pcu_ack; + u32 freq_select, pcu_ack, cdclk_ctl; int ret; WARN_ON((cdclk == 24000) != (vco == 0)); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret); return; } - /* set CDCLK_CTL */ + /* Choose frequency for this cdclk */ switch (cdclk) { case 450000: case 432000: @@ -967,16 +961,39 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); + cdclk_ctl = I915_READ(CDCLK_CTL); + + if (dev_priv->cdclk.hw.vco != vco) { + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + } + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); + POSTING_READ(CDCLK_CTL); + if (dev_priv->cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); } @@ -1273,10 +1290,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, } /* Inform power controller of upcoming frequency change */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", @@ -1305,10 +1322,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; I915_WRITE(CDCLK_CTL, val); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, DIV_ROUND_UP(cdclk, 25000)); - 
mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", @@ -1523,12 +1540,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, u32 val, divider, pcu_ack; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret); @@ -1580,9 +1597,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, I915_WRITE(CDCLK_CTL, val); /* inform PCU of the change */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index b8315bca852b..d95aa81304cf 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -91,19 +91,16 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input) { int i; - for (i = 0; i < 9; i++) - result[i] = 0; - - for (i = 0; i < 3; i++) { - int64_t user_coeff = input[i * 3 + i]; + for (i = 0; i < 9; i++) { + int64_t user_coeff = input[i]; uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2; uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0, CTM_COEFF_4_0 - 1) >> 2; - result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27; + result[i] = (limited_coeff * abs_coeff) >> 28; if (CTM_COEFF_NEGATIVE(user_coeff)) - result[i * 3 + i] |= CTM_COEFF_SIGN; + result[i] |= CTM_COEFF_SIGN; } } @@ -224,13 +221,34 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) > 6) { uint16_t postoff = 0; + uint16_t postoff_red = 0; + uint16_t postoff_green = 0; + uint16_t postoff_blue = 0; - if (intel_crtc_state->limited_color_range) + if (intel_crtc_state->limited_color_range) { postoff = (16 * (1 << 12) / 255) & 0x1fff; + postoff_red = postoff; + postoff_green = postoff; + postoff_blue = postoff; + } + + if (crtc_state->ctm_post_offset) { + struct drm_color_ctm_post_offset *ctm_post_offset = + (struct drm_color_ctm_post_offset *)crtc_state->ctm_post_offset->data; + + /* Convert to U0.12 format. 
*/ + postoff_red = ctm_post_offset->red >> 4; + postoff_green = ctm_post_offset->green >> 4; + postoff_blue = ctm_post_offset->blue >> 4; + + postoff_red = clamp_val(postoff_red, postoff, 0xfff); + postoff_green = clamp_val(postoff_green, postoff, 0xfff); + postoff_blue = clamp_val(postoff_blue, postoff, 0xfff); + } - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff_red); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff_green); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff_blue); I915_WRITE(PIPE_CSC_MODE(pipe), 0); } else { diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 70e0ff41070c..70ccd79ec766 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -712,7 +712,7 @@ intel_crt_detect(struct drm_connector *connector, * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) { + if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) { status = connector_status_disconnected; goto out; } @@ -730,7 +730,7 @@ intel_crt_detect(struct drm_connector *connector, else if (INTEL_GEN(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); - else if (i915.load_detect_test) + else if (i915_modparams.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 92c1f8e166dc..c83a2ff47ea5 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -440,7 +440,13 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) INIT_WORK(&dev_priv->csr.work, csr_load_work_fn); - if (!HAS_CSR(dev_priv)) + /* + * In a GVTg enabled environment, loading the CSR firmware for DomU doesn't + * make much sense since we don't allow it to control display power + * management settings. Furthermore, we can save some time for DomU bootup + * by skipping CSR loading. 
+ */ + if (!HAS_CSR(dev_priv) || intel_vgpu_active(dev_priv)) return; if (IS_CANNONLAKE(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 5e5fe03b638c..74dd2b2af9ab 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1533,6 +1533,35 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, I915_WRITE(reg, val); } +int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + bool enable) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe = 0; + int ret = 0; + uint32_t tmp; + + if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv, + intel_encoder->power_domain))) + return -ENXIO; + + if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) { + ret = -EIO; + goto out; + } + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe)); + if (enable) + tmp |= TRANS_DDI_HDCP_SIGNALLING; + else + tmp &= ~TRANS_DDI_HDCP_SIGNALLING; + I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp); +out: + intel_display_power_put(dev_priv, intel_encoder->power_domain); + return ret; +} + bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; @@ -1554,7 +1583,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (port == PORT_A) + if (port == PORT_A && !intel_vgpu_active(dev_priv)) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; @@ -2334,6 +2363,12 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, if (pipe_config->has_audio) intel_audio_codec_enable(intel_encoder, pipe_config, conn_state); + + + /* Enable hdcp if it's desired */ + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector)); } static void intel_disable_ddi(struct intel_encoder *intel_encoder, @@ -2343,6 +2378,8 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder, struct drm_encoder *encoder = &intel_encoder->base; int type = intel_encoder->type; + intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); + if (old_crtc_state->has_audio) intel_audio_codec_disable(intel_encoder); @@ -2541,7 +2578,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); - if (port == PORT_A) + if (port == PORT_A && !intel_vgpu_active(dev_priv)) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (type == INTEL_OUTPUT_HDMI) @@ -2632,11 +2669,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) } } - init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || + /* + * For port A check whether vgpu is active and we have a monitor + * attached to port A. + * */ + init_hdmi = (intel_vgpu_active(dev_priv) && port == PORT_A && + (I915_READ(GEN8_DE_PORT_ISR) & BXT_DE_PORT_HP_DDIA)) || + (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi); init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp; - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (!intel_vgpu_active(dev_priv) && + intel_bios_is_lspcon_present(dev_priv, port)) { /* * Lspcon device needs to be driven with DP connector * with special detection sequence. 
So make sure DP @@ -2739,7 +2783,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... */ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if ((intel_vgpu_active(dev_priv) || + (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi))) { if (!intel_ddi_init_hdmi_connector(intel_dig_port)) goto err; } diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 5f91ddc78c7a..870df613ddd9 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -343,7 +343,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->num_sprites[pipe] = 1; } - if (i915.disable_display) { + if (i915_modparams.disable_display) { DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (info->num_pipes > 0 && diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ebdb63330dd..b35aa8c8def7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -49,6 +49,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { DRM_FORMAT_C8, @@ -1000,7 +1004,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, return crtc->config->cpu_transcoder; } -static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) +static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, + enum pipe pipe) { i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; @@ -1015,7 +1020,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) msleep(5); line2 = I915_READ(reg) & line_mask; - return line1 == line2; + return line1 != line2; +} + +static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* Wait for the display line to settle/start moving */ + if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) + DRM_ERROR("pipe %c scanline %s wait timed out\n", + pipe_name(pipe), onoff(state)); +} + +static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) +{ + wait_for_pipe_scanline_moving(crtc, false); +} + +static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) +{ + wait_for_pipe_scanline_moving(crtc, true); } /* @@ -1038,7 +1064,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; - enum pipe pipe = crtc->pipe; if (INTEL_GEN(dev_priv) >= 4) { i915_reg_t reg = PIPECONF(cpu_transcoder); @@ -1049,9 +1074,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 100)) WARN(1, "pipe_off wait timed out\n"); } else { - /* Wait for the display line to settle */ - if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) - WARN(1, "pipe_off wait timed out\n"); + intel_wait_for_pipe_scanline_stopped(crtc); } } @@ -1192,23 +1215,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe)); } -static void assert_cursor(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - bool cur_state; - - if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) - 
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; - else - cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; - - I915_STATE_WARN(cur_state != state, - "cursor on pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), onoff(state), onoff(cur_state)); -} -#define assert_cursor_enabled(d, p) assert_cursor(d, p, true) -#define assert_cursor_disabled(d, p) assert_cursor(d, p, false) - void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { @@ -1236,77 +1242,25 @@ void assert_pipe(struct drm_i915_private *dev_priv, pipe_name(pipe), onoff(state), onoff(cur_state)); } -static void assert_plane(struct drm_i915_private *dev_priv, - enum plane plane, bool state) +static void assert_plane(struct intel_plane *plane, bool state) { - u32 val; - bool cur_state; + bool cur_state = plane->get_hw_state(plane); - val = I915_READ(DSPCNTR(plane)); - cur_state = !!(val & DISPLAY_PLANE_ENABLE); I915_STATE_WARN(cur_state != state, - "plane %c assertion failure (expected %s, current %s)\n", - plane_name(plane), onoff(state), onoff(cur_state)); + "%s assertion failure (expected %s, current %s)\n", + plane->base.name, onoff(state), onoff(cur_state)); } -#define assert_plane_enabled(d, p) assert_plane(d, p, true) -#define assert_plane_disabled(d, p) assert_plane(d, p, false) +#define assert_plane_enabled(p) assert_plane(p, true) +#define assert_plane_disabled(p) assert_plane(p, false) -static void assert_planes_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void assert_planes_disabled(struct intel_crtc *crtc) { - int i; - - /* Primary planes are fixed to pipes on gen4+ */ - if (INTEL_GEN(dev_priv) >= 4) { - u32 val = I915_READ(DSPCNTR(pipe)); - I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, - "plane %c assertion failure, should be disabled but not\n", - plane_name(pipe)); - return; - } + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane; - /* Need to check both planes against the pipe */ - for_each_pipe(dev_priv, i) { - u32 val = I915_READ(DSPCNTR(i)); - enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> - DISPPLANE_SEL_PIPE_SHIFT; - I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, - "plane %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(i), pipe_name(pipe)); - } -} - -static void assert_sprites_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - int sprite; - - if (INTEL_GEN(dev_priv) >= 9) { - for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(PLANE_CTL(pipe, sprite)); - I915_STATE_WARN(val & PLANE_CTL_ENABLE, - "plane %d assertion failure, should be off on pipe %c but is still active\n", - sprite, pipe_name(pipe)); - } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); - I915_STATE_WARN(val & SP_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - sprite_name(pipe, sprite), pipe_name(pipe)); - } - } else if (INTEL_GEN(dev_priv) >= 7) { - u32 val = I915_READ(SPRCTL(pipe)); - I915_STATE_WARN(val & SPRITE_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(pipe), pipe_name(pipe)); - } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { - u32 val = I915_READ(DVSCNTR(pipe)); - I915_STATE_WARN(val & DVS_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(pipe), 
pipe_name(pipe)); - } + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + assert_plane_disabled(plane); } static void assert_vblank_disabled(struct drm_crtc *crtc) @@ -1907,9 +1861,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); - assert_planes_disabled(dev_priv, pipe); - assert_cursor_disabled(dev_priv, pipe); - assert_sprites_disabled(dev_priv, pipe); + assert_planes_disabled(crtc); /* * A pipe without a PLL won't actually be able to drive bits from @@ -1944,15 +1896,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc) POSTING_READ(reg); /* - * Until the pipe starts DSL will read as 0, which would cause - * an apparent vblank timestamp jump, which messes up also the - * frame count when it's derived from the timestamps. So let's - * wait for the pipe to start properly before we call - * drm_crtc_vblank_on() + * Until the pipe starts PIPEDSL reads will return a stale value, + * which causes an apparent vblank timestamp jump when PIPEDSL + * resets to its proper value. That also messes up the frame count + * when it's derived from the timestamps. So let's wait for the + * pipe to start properly before we call drm_crtc_vblank_on() */ - if (dev->max_vblank_count == 0 && - wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) - DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); + if (dev->max_vblank_count == 0) + intel_wait_for_pipe_scanline_moving(crtc); } /** @@ -1979,9 +1930,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc) * Make sure planes won't keep trying to pump pixels to us, * or we might hang the display. */ - assert_planes_disabled(dev_priv, pipe); - assert_cursor_disabled(dev_priv, pipe); - assert_sprites_disabled(dev_priv, pipe); + assert_planes_disabled(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -2811,6 +2760,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, crtc_state->active_planes); } +static void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane) +{ + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + intel_set_plane_visible(crtc_state, plane_state, false); + + if (plane->id == PLANE_PRIMARY) + intel_pre_disable_primary_noatomic(&crtc->base); + + trace_intel_disable_plane(&plane->base, crtc); + plane->disable_plane(plane, crtc); +} + static void intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) @@ -2868,12 +2834,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. 
*/ - intel_set_plane_visible(to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state), - false); - intel_pre_disable_primary_noatomic(&intel_crtc->base); - trace_intel_disable_plane(primary, intel_crtc); - intel_plane->disable_plane(intel_plane, intel_crtc); + intel_plane_disable_noatomic(intel_crtc, intel_plane); return; @@ -3379,6 +3340,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool i9xx_plane_get_hw_state(struct intel_plane *primary) +{ + + struct drm_i915_private *dev_priv = to_i915(primary->base.dev); + enum intel_display_power_domain power_domain; + enum plane plane = primary->plane; + enum pipe pipe = primary->pipe; + bool ret; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-4 which don't have any + * display power wells. + */ + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) { @@ -3577,6 +3563,9 @@ static void skylake_update_primary_plane(struct intel_plane *plane, int dst_w = drm_rect_width(&plane_state->base.dst); int dst_h = drm_rect_height(&plane_state->base.dst); unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; +#endif /* Sizes are 0 based */ src_w--; @@ -3598,6 +3587,30 @@ static void skylake_update_primary_plane(struct intel_plane *plane, PLANE_COLOR_PLANE_GAMMA_DISABLE); } +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; + + dom0_regs->plane_ctl = plane_ctl; + dom0_regs->plane_offset = (src_y << 16) | src_x; + dom0_regs->plane_stride = stride; + dom0_regs->plane_size = (src_h << 16) | src_w; + dom0_regs->plane_aux_dist = + (plane_state->aux.offset - surf_addr) | aux_stride; + dom0_regs->plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + dom0_regs->plane_pos = (dst_y << 16) | dst_x; + dom0_regs->plane_surf = intel_plane_ggtt_offset(plane_state) + + surf_addr; + /* TODO: to support plane scaling in gvt*/ + if (scaler_id >= 0) + DRM_ERROR("GVT not support plane scaling yet\n"); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + return; + } +#endif + I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); @@ -3637,6 +3650,17 @@ static void skylake_disable_primary_plane(struct intel_plane *primary, enum plane_id plane_id = primary->id; enum pipe pipe = primary->pipe; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_ctl = 0; + dom0_regs->plane_surf = 0; + return; + } +#endif spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -3701,7 +3725,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) /* reset doesn't touch the display */ - if (!i915.force_reset_modeset_test && + if (!i915_modparams.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) 
return; @@ -3757,7 +3781,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) int ret; /* reset doesn't touch the display */ - if (!i915.force_reset_modeset_test && + if (!i915_modparams.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; @@ -3782,6 +3806,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) @@ -4954,11 +4979,12 @@ void hsw_enable_ips(struct intel_crtc *crtc) * a vblank wait. */ - assert_plane_enabled(dev_priv, crtc->plane); + assert_plane_enabled(to_intel_plane(crtc->base.primary)); + if (IS_BROADWELL(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the * mailbox." Moreover, the mailbox may return a bogus state, @@ -4986,11 +5012,12 @@ void hsw_disable_ips(struct intel_crtc *crtc) if (!crtc->config->ips_enabled) return; - assert_plane_enabled(dev_priv, crtc->plane); + assert_plane_enabled(to_intel_plane(crtc->base.primary)); + if (IS_BROADWELL(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* wait for pcode to finish disabling IPS, which may take up to 42ms */ if (intel_wait_for_register(dev_priv, IPS_CTL, IPS_ENABLE, 0, @@ -5981,6 +6008,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum intel_display_power_domain domain; + struct intel_plane *plane; u64 domains; struct drm_atomic_state *state; struct intel_crtc_state *crtc_state; @@ -5989,11 +6017,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, if (!intel_crtc->active) return; - if (crtc->primary->state->visible) { - intel_pre_disable_primary_noatomic(crtc); + for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); - crtc->primary->state->visible = false; + if (plane_state->base.visible) + intel_plane_disable_noatomic(intel_crtc, plane); } state = drm_atomic_state_alloc(crtc->dev); @@ -6307,7 +6336,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - pipe_config->ips_enabled = i915.enable_ips && + pipe_config->ips_enabled = i915_modparams.enable_ips && hsw_crtc_supports_ips(crtc) && pipe_config_supports_ips(dev_priv, pipe_config); } @@ -6488,8 +6517,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes, static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { - if (i915.panel_use_ssc >= 0) - return i915.panel_use_ssc != 0; + if (i915_modparams.panel_use_ssc >= 0) + return i915_modparams.panel_use_ssc != 0; return dev_priv->vbt.lvds_use_ssc && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } @@ -8840,11 +8869,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) { if (IS_HASWELL(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) DRM_DEBUG_KMS("Failed to write to D_COMP\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } else { I915_WRITE(D_COMP_BDW, val); POSTING_READ(D_COMP_BDW); @@ -9558,6 +9587,23 @@ static void i845_disable_cursor(struct intel_plane *plane, i845_update_cursor(plane, NULL, NULL); } +static bool i845_cursor_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(PIPE_A); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -9751,6 +9797,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane, i9xx_update_cursor(plane, NULL, NULL); } +static bool i9xx_cursor_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-3 which don't have any + * display power wells. + */ + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} /* VESA 640x480x72Hz mode to set on the pipe */ static struct drm_display_mode load_detect_mode = { @@ -11624,7 +11692,16 @@ verify_crtc_state(struct drm_crtc *crtc, intel_pipe_config_sanity_check(dev_priv, pipe_config); sw_config = to_intel_crtc_state(new_crtc_state); - if (!intel_pipe_config_compare(dev_priv, sw_config, + + /* + * Only check for pipe config if we are not in a GVT guest environment, + * because such a check in a GVT guest environment doesn't make any sense + * as we don't allow the guest to do a mode set, so there can very well + * be a difference between what it has programmed vs. what the host + * truly configured the HW pipe to be in. 
+ */ + if (!intel_vgpu_active(dev_priv) && + !intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(intel_crtc, pipe_config, @@ -12080,7 +12157,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; } - if (i915.fastboot && + if (i915_modparams.fastboot && intel_pipe_config_compare(dev_priv, to_intel_crtc_state(old_crtc_state), pipe_config, true)) { @@ -13214,13 +13291,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 10) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); modifiers = skl_format_modifiers_ccs; primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; + primary->get_hw_state = skl_plane_get_hw_state; } else if (INTEL_GEN(dev_priv) >= 9) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); @@ -13231,6 +13309,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; + primary->get_hw_state = skl_plane_get_hw_state; } else if (INTEL_GEN(dev_priv) >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); @@ -13238,6 +13317,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; + primary->get_hw_state = i9xx_plane_get_hw_state; } else { intel_primary_formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); @@ -13245,6 +13325,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; + primary->get_hw_state = i9xx_plane_get_hw_state; } if (INTEL_GEN(dev_priv) >= 9) @@ -13334,10 +13415,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { cursor->update_plane = i845_update_cursor; cursor->disable_plane = i845_disable_cursor; + cursor->get_hw_state = i845_cursor_get_hw_state; cursor->check_plane = i845_check_cursor; } else { cursor->update_plane = i9xx_update_cursor; cursor->disable_plane = i9xx_disable_cursor; + cursor->get_hw_state = i9xx_cursor_get_hw_state; cursor->check_plane = i9xx_check_cursor; } @@ -13783,6 +13866,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_encoder_clones(encoder); } +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* + * Encoders have been initialized. If we are in VGT mode, + * let's inform the HV that it can start Dom U as Dom 0 + * is ready to accept new Dom Us. 
+ */ + gvt_dom0_ready(dev_priv); +#endif + intel_init_pch_refclk(dev_priv); drm_helper_move_panel_connectors_to_head(&dev_priv->drm); @@ -14388,8 +14480,6 @@ void intel_modeset_init_hw(struct drm_device *dev) intel_update_cdclk(dev_priv); dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; - - intel_init_clock_gating(dev_priv); } /* @@ -14468,6 +14558,8 @@ static void sanitize_watermarks(struct drm_device *dev) cs->wm.need_postvbl_update = true; dev_priv->display.optimize_watermarks(intel_state, cs); + + to_intel_crtc_state(crtc->state)->wm = cs->wm; } put_state: @@ -14682,38 +14774,56 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", pipe_name(pipe)); - assert_plane_disabled(dev_priv, PLANE_A); - assert_plane_disabled(dev_priv, PLANE_B); + WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); + WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); + WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); + WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE); + WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE); I915_WRITE(PIPECONF(pipe), 0); POSTING_READ(PIPECONF(pipe)); - if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) - DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe)); + intel_wait_for_pipe_scanline_stopped(crtc); I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); POSTING_READ(DPLL(pipe)); } -static bool -intel_check_plane_mapping(struct intel_crtc *crtc) +static bool intel_plane_mapping_ok(struct intel_crtc *crtc, + struct intel_plane *primary) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 val; + enum plane plane = primary->plane; + u32 val = I915_READ(DSPCNTR(plane)); - if (INTEL_INFO(dev_priv)->num_pipes == 1) - return true; + return (val & DISPLAY_PLANE_ENABLE) == 0 || + (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe); +} - val = I915_READ(DSPCNTR(!crtc->plane)); +static void +intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; - if ((val & DISPLAY_PLANE_ENABLE) && - (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) - return false; + if (INTEL_GEN(dev_priv) >= 4) + return; - return true; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + + if (intel_plane_mapping_ok(crtc, plane)) + continue; + + DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", + plane->base.name); + intel_plane_disable_noatomic(crtc, plane); + } } static bool intel_crtc_has_encoders(struct intel_crtc *crtc) @@ -14769,33 +14879,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(dev, crtc, plane) { - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) - continue; + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc); + if (plane_state->base.visible && + plane->base.type != DRM_PLANE_TYPE_PRIMARY) + intel_plane_disable_noatomic(crtc, plane); } } - /* We need to sanitize the plane -> pipe mapping first because this will - * disable the crtc (and hence change the state) if it is wrong. Note - * that gen4+ has a fixed plane -> pipe mapping. 
*/ - if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { - bool plane; - - DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", - crtc->base.base.id, crtc->base.name); - - /* Pipe has the wrong plane attached and the plane is active. - * Temporarily change the plane mapping and disable everything - * ... */ - plane = crtc->plane; - crtc->base.primary->state->visible = true; - crtc->plane = !plane; - intel_crtc_disable_noatomic(&crtc->base, ctx); - crtc->plane = plane; - } - /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. */ if (crtc->active && !intel_crtc_has_encoders(crtc)) @@ -14900,24 +14992,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); } -static bool primary_get_hw_state(struct intel_plane *plane) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - - return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; -} - /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct intel_crtc *crtc) { - struct intel_plane *primary = to_intel_plane(crtc->base.primary); - bool visible; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane; - visible = crtc->active && primary_get_hw_state(primary); + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + bool visible = plane->get_hw_state(plane); - intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), - to_intel_plane_state(primary->base.state), - visible); + intel_set_plane_visible(crtc_state, plane_state, visible); + } } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15105,11 +15194,22 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; int i; + if (IS_HASWELL(dev_priv)) { + /* + * WaRsPkgCStateDisplayPMReq:hsw + * System hang if this isn't done before disabling all planes! + */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); + } + intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. 
*/ get_encoder_power_domains(dev_priv); + intel_sanitize_plane_mapping(dev_priv); + for_each_intel_encoder(dev, encoder) { intel_sanitize_encoder(encoder); } @@ -15186,6 +15286,7 @@ void intel_display_resume(struct drm_device *dev) if (!ret) ret = __intel_display_resume(dev, state, &ctx); + intel_enable_ipc(dev_priv); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); @@ -15201,6 +15302,8 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev_priv); + intel_init_clock_gating(dev_priv); + intel_setup_overlay(dev_priv); } @@ -15240,6 +15343,10 @@ static void intel_hpd_poll_fini(struct drm_device *dev) for_each_intel_connector_iter(connector, &conn_iter) { if (connector->modeset_retry_work.func) cancel_work_sync(&connector->modeset_retry_work); + if (connector->hdcp_shim) { + cancel_delayed_work_sync(&connector->hdcp_check_work); + cancel_work_sync(&connector->hdcp_prop_work); + } } drm_connector_list_iter_end(&conn_iter); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09f274419eea..c2315e71bc43 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -36,7 +36,9 @@ #include #include #include +#include #include +#include #include "intel_drv.h" #include #include "i915_drv.h" @@ -428,7 +430,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) return v; } -static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -640,19 +642,15 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + int backlight_controller = dev_priv->vbt.backlight.controller; lockdep_assert_held(&dev_priv->pps_mutex); /* We should never land here with regular DP ports */ WARN_ON(!is_edp(intel_dp)); - /* - * TODO: BXT has 2 PPS instances. The correct port->PPS instance - * mapping needs to be retrieved from VBT, for now just hard-code to - * use instance #0 always. 
- */ if (!intel_dp->pps_reset) - return 0; + return backlight_controller; intel_dp->pps_reset = false; @@ -662,7 +660,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) */ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); - return 0; + return backlight_controller; } typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, @@ -1050,10 +1048,29 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); } +static uint32_t intel_dp_get_aux_send_ctl(struct intel_dp *intel_dp, + bool has_aux_irq, + int send_bytes, + uint32_t aux_clock_divider, + bool aksv_write) +{ + uint32_t val = 0; + + if (aksv_write) { + send_bytes += 5; + val |= DP_AUX_CH_CTL_AUX_AKSV_SELECT; + } + + return val | intel_dp->get_aux_send_ctl(intel_dp, + has_aux_irq, + send_bytes, + aux_clock_divider); +} + static int intel_dp_aux_ch(struct intel_dp *intel_dp, const uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size) + uint8_t *recv, int recv_size, bool aksv_write) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = @@ -1113,10 +1130,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, } while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { - u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, - has_aux_irq, - send_bytes, - aux_clock_divider); + u32 send_ctl = intel_dp_get_aux_send_ctl(intel_dp, + has_aux_irq, + send_bytes, + aux_clock_divider, + aksv_write); /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { @@ -1253,7 +1271,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (msg->buffer) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); - ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); + ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize, + false); if (ret > 0) { msg->reply = rxbuf[0] >> 4; @@ -1275,7 +1294,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (WARN_ON(rxsize > 20)) return -E2BIG; - ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); + ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize, + false); if (ret > 0) { msg->reply = rxbuf[0] >> 4; /* @@ -1977,7 +1997,12 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (intel_wait_for_register(dev_priv, + /* + * Only wait for panel status if we are not in a GVT guest environment, + * because such a wait in a GVT guest environment doesn't make any sense + * as we are exposing virtual DP monitors to the guest. 
+ */ + if (!intel_vgpu_active(dev_priv) && intel_wait_for_register(dev_priv, pp_stat_reg, mask, value, 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", @@ -3835,7 +3860,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) { u8 mstm_cap; - if (!i915.enable_dp_mst) + if (!i915_modparams.enable_dp_mst) return false; if (!intel_dp->can_mst) @@ -3853,7 +3878,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) static void intel_dp_configure_mst(struct intel_dp *intel_dp) { - if (!i915.enable_dp_mst) + if (!i915_modparams.enable_dp_mst) return; if (!intel_dp->can_mst) @@ -4988,6 +5013,234 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) pps_unlock(intel_dp); } +static +int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, + u8 *an) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base); + uint8_t txbuf[4], rxbuf[2], reply = 0; + ssize_t dpcd_ret; + int ret; + + /* Output An first, that's easy */ + dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, + an, DRM_HDCP_AN_LEN); + if (dpcd_ret != DRM_HDCP_AN_LEN) { + DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret); + return dpcd_ret >= 0 ? -EIO : dpcd_ret; + } + + /* + * Since Aksv is Oh-So-Secret, we can't access it in software. So in + * order to get it on the wire, we need to create the AUX header as if + * we were writing the data, and then tickle the hardware to output the + * data once the header is sent out. + */ + txbuf[0] = (DP_AUX_NATIVE_WRITE << 4) | + ((DP_AUX_HDCP_AKSV >> 16) & 0xf); + txbuf[1] = (DP_AUX_HDCP_AKSV >> 8) & 0xff; + txbuf[2] = DP_AUX_HDCP_AKSV & 0xff; + txbuf[3] = DRM_HDCP_KSV_LEN - 1; + + ret = intel_dp_aux_ch(intel_dp, txbuf, sizeof(txbuf), rxbuf, + sizeof(rxbuf), true); + if (ret < 0) { + DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret); + return ret; + } else if (ret == 0) { + DRM_ERROR("Aksv write over DP/AUX was empty\n"); + return -EIO; + } + + reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; + return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO; +} + +static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, + u8 *bksv) +{ + ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret != DRM_HDCP_KSV_LEN) { + DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, + u8 *bstatus) +{ + ssize_t ret; + /* + * For some reason the HDMI and DP HDCP specs call this register + * definition by different names. In the HDMI spec, it's called BSTATUS, + * but in DP it's called BINFO. + */ + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret != DRM_HDCP_BSTATUS_LEN) { + DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, + u8 *bcaps) +{ + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + bcaps, 1); + if (ret != 1) { + DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + + return 0; +} + +static +int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, + bool *repeater_present) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); + if (ret) + return ret; + + *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; + return 0; +} + +static +int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, + u8 *ri_prime) +{ + ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, + ri_prime, DRM_HDCP_RI_LEN); + if (ret != DRM_HDCP_RI_LEN) { + DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, + bool *ksv_ready) +{ + ssize_t ret; + u8 bstatus; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + *ksv_ready = bstatus & DP_BSTATUS_READY; + return 0; +} + +static +int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, + int num_downstream, u8 *ksv_fifo) +{ + ssize_t ret; + int i; + + /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ + for (i = 0; i < num_downstream; i += 3) { + size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_AUX_HDCP_KSV_FIFO, + ksv_fifo + i * DRM_HDCP_KSV_LEN, + len); + if (ret != len) { + DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i, + ret); + return ret >= 0 ? -EIO : ret; + } + } + return 0; +} + +static +int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, + int i, u32 *part) +{ + ssize_t ret; + + if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) + return -EINVAL; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_AUX_HDCP_V_PRIME(i), part, + DRM_HDCP_V_PRIME_PART_LEN); + if (ret != DRM_HDCP_V_PRIME_PART_LEN) { + DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, + bool enable) +{ + /* Not used for single stream DisplayPort setups */ + return 0; +} + +static +bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) +{ + ssize_t ret; + u8 bstatus; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); +} + +static +int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, + bool *hdcp_capable) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); + if (ret) + return ret; + + *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; + return 0; +} + +static const struct intel_hdcp_shim intel_dp_hdcp_shim = { + .write_an_aksv = intel_dp_hdcp_write_an_aksv, + .read_bksv = intel_dp_hdcp_read_bksv, + .read_bstatus = intel_dp_hdcp_read_bstatus, + .repeater_present = intel_dp_hdcp_repeater_present, + .read_ri_prime = intel_dp_hdcp_read_ri_prime, + .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, + .toggle_signalling = intel_dp_hdcp_toggle_signalling, + .check_link = intel_dp_hdcp_check_link, + .hdcp_capable = intel_dp_hdcp_capable, +}; + static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -5112,6 +5365,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + /* Short pulse can signify loss of hdcp authentication */ + intel_hdcp_check_link(intel_dp->attached_connector); + if (intel_dp->is_mst) { if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { /* @@ -5183,7 +5439,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) { - intel_dp->panel_power_off_time = ktime_get_boottime(); + intel_dp->panel_power_off_time = ktime_set(0, 0); intel_dp->last_power_on = jiffies; intel_dp->last_backlight_off = jiffies; } @@ -5340,6 +5596,12 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, */ final->t8 = 1; final->t9 = 1; + + /* + * HW has only a 100msec granularity for t11_t12 so round it up + * accordingly. + */ + final->t11_t12 = roundup(final->t11_t12, 100 * 10); } static void @@ -6108,6 +6370,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_add_properties(intel_dp, connector); + if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(dev_priv, port)) { + int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); + if (ret) + DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + } + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. 
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index d2830ba3162e..2bb2ceb9d463 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; - if (!i915.enable_dpcd_backlight) + if (!i915_modparams.enable_dpcd_backlight) return -ENODEV; if (!intel_dp_aux_display_control_capable(intel_connector)) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 79fbaf78f604..d3b01bab5f79 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -41,22 +41,21 @@ #include /** - * _wait_for - magic (register) wait macro + * __wait_for - magic wait macro * - * Does the right thing for modeset paths when run under kdgb or similar atomic - * contexts. Note that it's important that we check the condition again after - * having timed out, since the timeout could be due to preemption or similar and - * we've never had a chance to check the condition before the timeout. - * - * TODO: When modesetting has fully transitioned to atomic, the below - * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts - * added. + * Macro to help avoid open coding check/wait/timeout patterns. Note that it's + * important that we check the condition again after having timed out, since the + * timeout could be due to preemption or similar and we've never had a chance to + * check the condition before the timeout. */ -#define _wait_for(COND, US, W) ({ \ +#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ + long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ int ret__; \ + might_sleep(); \ for (;;) { \ bool expired__ = time_after(jiffies, timeout__); \ + OP; \ if (COND) { \ ret__ = 0; \ break; \ @@ -65,16 +64,16 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if ((W) && drm_can_sleep()) { \ - usleep_range((W), (W)*2); \ - } else { \ - cpu_relax(); \ - } \ + usleep_range(wait__, wait__ * 2); \ + if (wait__ < (Wmax)) \ + wait__ <<= 1; \ } \ ret__; \ }) -#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) +#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ + (Wmax)) +#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) @@ -123,7 +122,7 @@ int ret__; \ BUILD_BUG_ON(!__builtin_constant_p(US)); \ if ((US) > 10) \ - ret__ = _wait_for((COND), (US), 10); \ + ret__ = _wait_for((COND), (US), 10, 10); \ else \ ret__ = _wait_for_atomic((COND), (US), 0); \ ret__; \ @@ -299,6 +298,80 @@ struct intel_panel { } backlight; }; +/* + * This structure serves as a translation layer between the generic HDCP code + * and the bus-specific code. What that means is that HDCP over HDMI differs + * from HDCP over DP, so to account for these differences, we need to + * communicate with the receiver through this shim. + * + * For completeness, the 2 buses differ in the following ways: + * - DP AUX vs. DDC + * HDCP registers on the receiver are set via DP AUX for DP, and + * they are set via DDC for HDMI. + * - Receiver register offsets + * The offsets of the registers are different for DP vs. 
HDMI + * - Receiver register masks/offsets + * For instance, the ready bit for the KSV fifo is in a different + * place on DP vs HDMI + * - Receiver register names + * Seriously. In the DP spec, the 16-bit register containing + * downstream information is called BINFO, on HDMI it's called + * BSTATUS. To confuse matters further, DP has a BSTATUS register + * with a completely different definition. + * - KSV FIFO + * On HDMI, the ksv fifo is read all at once, whereas on DP it must + * be read 3 keys at a time + * - Aksv output + * Since Aksv is hidden in hardware, there's different procedures + * to send it over DP AUX vs DDC + */ +struct intel_hdcp_shim { + /* Outputs the transmitter's An and Aksv values to the receiver. */ + int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an); + + /* Reads the receiver's key selection vector */ + int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv); + + /* + * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The + * definitions are the same in the respective specs, but the names are + * different. Call it BSTATUS since that's the name the HDMI spec + * uses and it was there first. + */ + int (*read_bstatus)(struct intel_digital_port *intel_dig_port, + u8 *bstatus); + + /* Determines whether a repeater is present downstream */ + int (*repeater_present)(struct intel_digital_port *intel_dig_port, + bool *repeater_present); + + /* Reads the receiver's Ri' value */ + int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri); + + /* Determines if the receiver's KSV FIFO is ready for consumption */ + int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port, + bool *ksv_ready); + + /* Reads the ksv fifo for num_downstream devices */ + int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port, + int num_downstream, u8 *ksv_fifo); + + /* Reads a 32-bit part of V' from the receiver */ + int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port, + int i, u32 *part); + + /* Enables HDCP signalling on the port */ + int (*toggle_signalling)(struct intel_digital_port *intel_dig_port, + bool enable); + + /* Ensures the link is still protected */ + bool (*check_link)(struct intel_digital_port *intel_dig_port); + + /* Detects panel's hdcp capability. This is optional for HDMI. 
*/ + int (*hdcp_capable)(struct intel_digital_port *intel_dig_port, + bool *hdcp_capable); +}; + struct intel_connector { struct drm_connector base; /* @@ -330,6 +403,12 @@ struct intel_connector { /* Work struct to schedule a uevent on link train failure */ struct work_struct modeset_retry_work; + + const struct intel_hdcp_shim *hdcp_shim; + struct mutex hdcp_mutex; + uint64_t hdcp_value; /* protected by hdcp_mutex */ + struct delayed_work hdcp_check_work; + struct work_struct hdcp_prop_work; }; struct intel_digital_connector_state { @@ -495,7 +574,7 @@ struct intel_crtc_scaler_state { #define I915_MODE_FLAG_INHERITED 1 struct intel_pipe_wm { - struct intel_wm_level wm[5]; + struct intel_wm_level wm[7]; uint32_t linetime; bool fbc_wm_enabled; bool pipe_enabled; @@ -863,6 +942,7 @@ struct intel_plane { const struct intel_plane_state *plane_state); void (*disable_plane)(struct intel_plane *plane, struct intel_crtc *crtc); + bool (*get_hw_state)(struct intel_plane *plane); int (*check_plane)(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state); @@ -1230,6 +1310,11 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) return dev_priv->pm.irqs_enabled; } +bool is_shadow_context(struct i915_gem_context *ctx); +int get_vgt_id(struct i915_gem_context *ctx); +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); @@ -1273,6 +1358,8 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); uint32_t ddi_signal_levels(struct intel_dp *intel_dp); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); +int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + bool enable); unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int plane, unsigned int height); @@ -1515,6 +1602,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); @@ -1707,7 +1795,7 @@ extern struct drm_display_mode *intel_find_panel_downclock( int intel_backlight_device_register(struct intel_connector *connector); void intel_backlight_device_unregister(struct intel_connector *connector); #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ -static int intel_backlight_device_register(struct intel_connector *connector) +static inline int intel_backlight_device_register(struct intel_connector *connector) { return 0; } @@ -1716,6 +1804,17 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con } #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +/* intel_hdcp.c */ +void intel_hdcp_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state); +int intel_hdcp_init(struct intel_connector *connector, + const struct intel_hdcp_shim *hdcp_shim); +int intel_hdcp_enable(struct intel_connector *connector); +int intel_hdcp_disable(struct intel_connector *connector); +int intel_hdcp_check_link(struct intel_connector *connector); +bool 
is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); + /* intel_psr.c */ void intel_psr_enable(struct intel_dp *intel_dp); @@ -1866,9 +1965,11 @@ bool ilk_disable_lp_wm(struct drm_device *dev); int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, struct intel_crtc_state *cstate); +void intel_init_ipc(struct drm_i915_private *dev_priv); +void intel_enable_ipc(struct drm_i915_private *dev_priv); static inline int intel_enable_rc6(void) { - return i915.enable_rc6; + return i915_modparams.enable_rc6; } /* intel_sdvo.c */ @@ -1885,6 +1986,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(struct intel_crtc *crtc); void intel_pipe_update_end(struct intel_crtc *crtc); +bool skl_plane_get_hw_state(struct intel_plane *plane); /* intel_tv.c */ void intel_tv_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 3c2d9cf22ed5..c9bf2347f7a4 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -153,7 +153,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) case 9: return GEN9_LR_CONTEXT_RENDER_SIZE; case 8: - return i915.enable_execlists ? + return i915_modparams.enable_execlists ? GEN8_LR_CONTEXT_RENDER_SIZE : GEN8_CXT_TOTAL_SIZE; case 7: @@ -301,7 +301,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv) &intel_engine_classes[engine->class]; int (*init)(struct intel_engine_cs *engine); - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) init = class_info->init_execlists; else init = class_info->init_legacy; @@ -380,6 +380,37 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id]; } +static bool csb_force_mmio(struct drm_i915_private *i915) +{ + /* GVT emulation depends upon intercepting CSB mmio */ + if (intel_vgpu_active(i915)) + return true; + + /* + * IOMMU adds unpredictable latency causing the CSB write (from the + * GPU into the HWSP) to only be visible some time after the interrupt + * (missed breadcrumb syndrome). + */ + if (intel_vtd_active()) + return true; + + return false; +} + +static void intel_engine_init_execlist(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + execlists->csb_use_mmio = csb_force_mmio(engine->i915); + + execlists->port_mask = 1; + BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); + GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); + + execlists->queue = RB_ROOT; + execlists->first = NULL; +} + /** * intel_engines_setup_common - setup engine state not requiring hw access * @engine: Engine to setup. 
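Editor's note on the reworked __wait_for() above: it layers exponential backoff (a sleep that starts at Wmin and doubles up to Wmax) on the usual check/sleep/timeout pattern, and it deliberately re-checks the condition one more time after the deadline has passed, so an oversleep caused by preemption cannot be misreported as a timeout. Below is a minimal userspace sketch of the same shape, with usleep() standing in for usleep_range() and a plain -1 standing in for -ETIMEDOUT; the wait_for_cond() and now_us() names are hypothetical, not part of this patch.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000L;
}

/*
 * Poll cond() until it returns true or timeout_us expires, sleeping
 * wait_min_us at first and doubling the sleep up to wait_max_us.
 * As in __wait_for(), the condition is sampled once more after the
 * deadline, since it may have become true while we were asleep.
 */
static int wait_for_cond(bool (*cond)(void), long timeout_us,
			 long wait_min_us, long wait_max_us)
{
	long deadline = now_us() + timeout_us;
	long wait = wait_min_us;

	for (;;) {
		bool expired = now_us() > deadline;

		if (cond())
			return 0;
		if (expired)
			return -1;		/* -ETIMEDOUT in the kernel */

		usleep(wait);
		if (wait < wait_max_us)
			wait <<= 1;
	}
}

static bool always_true(void)
{
	return true;
}

int main(void)
{
	printf("ret=%d\n", wait_for_cond(always_true, 5000, 10, 1000));
	return 0;
}

The OP hook in the kernel macro (used by intel_hdcp_poll_ksv_fifo() later in this patch) simply lets the caller perform a side-effecting read before each condition check; it has no equivalent in this reduced sketch.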
@@ -391,8 +422,7 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) */ void intel_engine_setup_common(struct intel_engine_cs *engine) { - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; + intel_engine_init_execlist(engine); intel_engine_init_timeline(engine); intel_engine_init_hangcheck(engine); @@ -442,6 +472,116 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) i915_vma_unpin_and_release(&engine->scratch); } +static void cleanup_phys_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + if (!dev_priv->status_page_dmah) + return; + + drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); + engine->status_page.page_addr = NULL; +} + +static void cleanup_status_page(struct intel_engine_cs *engine) +{ + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) + return; + + obj = vma->obj; + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_gem_object_unpin_map(obj); + __i915_gem_object_release_unless_active(obj); +} + +static int init_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int flags; + void *vaddr; + int ret; + + obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate status page\n"); + return PTR_ERR(obj); + } + + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + if (ret) + goto err; + + vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915)) + /* On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). + */ + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; + ret = i915_vma_pin(vma, 0, 4096, flags); + if (ret) + goto err; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_unpin; + } + + engine->status_page.vma = vma; + engine->status_page.ggtt_offset = i915_ggtt_offset(vma); + engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); + + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + engine->name, i915_ggtt_offset(vma)); + return 0; + +err_unpin: + i915_vma_unpin(vma); +err: + i915_gem_object_put(obj); + return ret; +} + +static int init_phys_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + GEM_BUG_ON(engine->id != RCS); + + dev_priv->status_page_dmah = + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); + if (!dev_priv->status_page_dmah) + return -ENOMEM; + + engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(engine->status_page.page_addr, 0, PAGE_SIZE); + + return 0; +} + /** * intel_engines_init_common - initialize cengine state which might require hw access * @engine: Engine to initialize. @@ -471,17 +611,44 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (IS_ERR(ring)) return PTR_ERR(ring); + /* + * Similarly the preempt context must always be available so that + * we can interrupt the engine at any time. 
+ */ + if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) { + ring = engine->context_pin(engine, + engine->i915->preempt_context); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); + goto err_unpin_kernel; + } + } + ret = intel_engine_init_breadcrumbs(engine); if (ret) - goto err_unpin; + goto err_unpin_preempt; ret = i915_gem_render_state_init(engine); if (ret) - goto err_unpin; + goto err_breadcrumbs; + + if (HWS_NEEDS_PHYSICAL(engine->i915)) + ret = init_phys_status_page(engine); + else + ret = init_status_page(engine); + if (ret) + goto err_rs_fini; return 0; -err_unpin: +err_rs_fini: + i915_gem_render_state_fini(engine); +err_breadcrumbs: + intel_engine_fini_breadcrumbs(engine); +err_unpin_preempt: + if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) + engine->context_unpin(engine, engine->i915->preempt_context); +err_unpin_kernel: engine->context_unpin(engine, engine->i915->kernel_context); return ret; } @@ -497,11 +664,18 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { intel_engine_cleanup_scratch(engine); + if (HWS_NEEDS_PHYSICAL(engine->i915)) + cleanup_phys_status_page(engine); + else + cleanup_status_page(engine); + i915_gem_render_state_fini(engine); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); i915_gem_batch_pool_fini(&engine->batch_pool); + if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) + engine->context_unpin(engine, engine->i915->preempt_context); engine->context_unpin(engine, engine->i915->kernel_context); } @@ -672,11 +846,6 @@ static int wa_add(struct drm_i915_private *dev_priv, #define WA_SET_FIELD_MASKED(addr, mask, value) \ WA_REG(addr, mask, _MASKED_FIELD(mask, value)) -#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask)) -#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask)) - -#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) - static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg) { @@ -687,8 +856,8 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS)) return -EINVAL; - WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index), - i915_mmio_reg_offset(reg)); + I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index), + i915_mmio_reg_offset(reg)); wa->hw_whitelist_count[engine->id]++; return 0; @@ -812,6 +981,23 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB); + if (HAS_LLC(dev_priv)) { + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl + * + * Must match Display Engine. See + * WaCompressedResourceDisplayNewHashMode. + */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN9_PBE_COMPRESSED_HASH_SELECTION); + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); + + I915_WRITE(MMCD_MISC_CTRL, + I915_READ(MMCD_MISC_CTRL) | + MMCD_PCLA | + MMCD_HOTSPOT_EN); + } + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, @@ -900,13 +1086,33 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES)); + /* + * Supporting preemption with fine-granularity requires changes in the + * batch buffer programming. Since we can't break old userspace, we + * need to set our default preemption level to safe value. 
Userspace is + * still able to use more fine-grained preemption levels, since in + * WaEnablePreemptionGranularityControlByUMD we're whitelisting the + * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are + * not real HW workarounds, but merely a way to start using preemption + * while maintaining old contract with userspace. + */ + + /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); if (ret) return ret; - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */ - ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ + I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, + _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); if (ret) return ret; @@ -968,25 +1174,19 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) if (ret) return ret; - /* - * Actual WA is to disable percontext preemption granularity control - * until D0 which is the default case so this is equivalent to - * !WaDisablePerCtxtPreemptionGranularityControl:skl - */ - I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, - _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); - /* WaEnableGapsTsvCreditFix:skl */ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); /* WaDisableGafsUnitClkGating:skl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaInPlaceDecompressionHang:skl */ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); @@ -1022,8 +1222,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisablePooledEuLoadBalancingFix:bxt */ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { - WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, - GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + I915_WRITE(FF_SLICE_CS_CHICKEN2, + _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); } /* WaDisableSbeCacheDispatchPortSharing:bxt */ @@ -1062,8 +1262,61 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaInPlaceDecompressionHang:bxt */ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); + + return 0; +} + +static int cnl_init_workarounds(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + int ret; + + /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) + I915_WRITE(GAMT_CHKN_BIT_REG, + (I915_READ(GAMT_CHKN_BIT_REG) | + GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT)); + + /* WaForceContextSaveRestoreNonCoherent:cnl */ + WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, + 
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); + + /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); + + /* WaInPlaceDecompressionHang:cnl */ + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); + + /* WaPushConstantDereferenceHoldDisable:cnl */ + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); + + /* FtrEnableFastAnisoL1BankingFix: cnl */ + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); + + /* WaDisable3DMidCmdPreemption:cnl */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:cnl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + + /* WaEnablePreemptionGranularityControlByUMD:cnl */ + I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, + _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); + if (ret) + return ret; return 0; } @@ -1083,8 +1336,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) /* WaDisableDynamicCreditSharing:kbl */ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) - WA_SET_BIT(GAMT_CHKN_BIT_REG, - GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + I915_WRITE(GAMT_CHKN_BIT_REG, + (I915_READ(GAMT_CHKN_BIT_REG) | + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING)); /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) @@ -1097,7 +1351,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableGafsUnitClkGating:kbl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaDisableSbeCacheDispatchPortSharing:kbl */ WA_SET_BIT_MASKED( @@ -1105,8 +1360,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); /* WaInPlaceDecompressionHang:kbl */ - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); /* WaDisableLSQCROPERFforOCL:kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); @@ -1125,6 +1381,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine) if (ret) return ret; + /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. 
*/ + ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1); + if (ret) + return ret; + /* WaToEnableHwFixForPushConstHWBug:glk */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -1150,7 +1411,8 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableGafsUnitClkGating:cfl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaDisableSbeCacheDispatchPortSharing:cfl */ WA_SET_BIT_MASKED( @@ -1158,8 +1420,9 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); /* WaInPlaceDecompressionHang:cfl */ - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); return 0; } @@ -1188,6 +1451,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine) err = glk_init_workarounds(engine); else if (IS_COFFEELAKE(dev_priv)) err = cfl_init_workarounds(engine); + else if (IS_CANNONLAKE(dev_priv)) + err = cnl_init_workarounds(engine); else err = 0; if (err) @@ -1279,12 +1544,12 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) return false; - /* Both ports drained, no more ELSP submission? */ - if (port_request(&engine->execlist_port[0])) + /* Waiting to drain ELSP? */ + if (READ_ONCE(engine->execlists.active)) return false; /* ELSP is empty, but there are ready requests? */ - if (READ_ONCE(engine->execlist_first)) + if (READ_ONCE(engine->execlists.first)) return false; /* Ring stopped? */ @@ -1333,11 +1598,188 @@ void intel_engines_mark_idle(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { intel_engine_disarm_breadcrumbs(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - tasklet_kill(&engine->irq_tasklet); - engine->no_priolist = false; + tasklet_kill(&engine->execlists.irq_tasklet); + engine->execlists.no_priolist = false; + } +} + +bool intel_engine_can_store_dword(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 2: + return false; /* uses physical not virtual addresses */ + case 3: + /* maybe only uses physical not virtual addresses */ + return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); + case 6: + return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ + default: + return true; } } +static void print_request(struct drm_printer *m, + struct drm_i915_gem_request *rq, + const char *prefix) +{ + drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix, + rq->global_seqno, + i915_gem_request_completed(rq) ? "!" 
: "", + rq->ctx->hw_id, rq->fence.seqno, + rq->priotree.priority, + jiffies_to_msecs(jiffies - rq->emitted_jiffies), + rq->timeline->common->name); +} + +void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) +{ + struct intel_breadcrumbs * const b = &engine->breadcrumbs; + const struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_gpu_error * const error = &engine->i915->gpu_error; + struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_gem_request *rq; + struct rb_node *rb; + u64 addr; + + drm_printf(m, "%s\n", engine->name); + drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", + intel_engine_get_seqno(engine), + intel_engine_last_submit(engine), + engine->hangcheck.seqno, + jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), + engine->timeline->inflight_seqnos); + drm_printf(m, "\tReset count: %d\n", + i915_reset_engine_count(error, engine)); + + rcu_read_lock(); + + drm_printf(m, "\tRequests:\n"); + + rq = list_first_entry(&engine->timeline->requests, + struct drm_i915_gem_request, link); + if (&rq->link != &engine->timeline->requests) + print_request(m, rq, "\t\tfirst "); + + rq = list_last_entry(&engine->timeline->requests, + struct drm_i915_gem_request, link); + if (&rq->link != &engine->timeline->requests) + print_request(m, rq, "\t\tlast "); + + rq = i915_gem_find_active_request(engine); + if (rq) { + print_request(m, rq, "\t\tactive "); + drm_printf(m, + "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", + rq->head, rq->postfix, rq->tail, + rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, + rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + } + + drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n", + I915_READ(RING_START(engine->mmio_base)), + rq ? i915_ggtt_offset(rq->ring->vma) : 0); + drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n", + I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR, + rq ? rq->ring->head : 0); + drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n", + I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR, + rq ? rq->ring->tail : 0); + drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n", + I915_READ(RING_CTL(engine->mmio_base)), + I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : ""); + + rcu_read_unlock(); + + addr = intel_engine_get_active_head(engine); + drm_printf(m, "\tACTHD: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + addr = intel_engine_get_last_batch_head(engine); + drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + + if (i915_modparams.enable_execlists) { + const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + u32 ptr, read, write; + unsigned int idx; + + drm_printf(m, "\tExeclist status: 0x%08x %08x\n", + I915_READ(RING_EXECLIST_STATUS_LO(engine)), + I915_READ(RING_EXECLIST_STATUS_HI(engine))); + + ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); + read = GEN8_CSB_READ_PTR(ptr); + write = GEN8_CSB_WRITE_PTR(ptr); + drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? 
%s\n", + read, execlists->csb_head, + write, + intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), + yesno(test_bit(ENGINE_IRQ_EXECLIST, + &engine->irq_posted))); + if (read >= GEN8_CSB_ENTRIES) + read = 0; + if (write >= GEN8_CSB_ENTRIES) + write = 0; + if (read > write) + write += GEN8_CSB_ENTRIES; + while (read < write) { + idx = ++read % GEN8_CSB_ENTRIES; + drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", + idx, + I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), + hws[idx * 2], + I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), + hws[idx * 2 + 1]); + } + + rcu_read_lock(); + for (idx = 0; idx < execlists_num_ports(execlists); idx++) { + unsigned int count; + + rq = port_unpack(&execlists->port[idx], &count); + if (rq) { + drm_printf(m, "\t\tELSP[%d] count=%d, ", + idx, count); + print_request(m, rq, "rq: "); + } else { + drm_printf(m, "\t\tELSP[%d] idle\n", + idx); + } + } + drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); + rcu_read_unlock(); + } else if (INTEL_GEN(dev_priv) > 6) { + drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", + I915_READ(RING_PP_DIR_BASE(engine))); + drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", + I915_READ(RING_PP_DIR_BASE_READ(engine))); + drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", + I915_READ(RING_PP_DIR_DCLV(engine))); + } + + spin_lock_irq(&engine->timeline->lock); + list_for_each_entry(rq, &engine->timeline->requests, link) + print_request(m, rq, "\t\tE "); + for (rb = execlists->first; rb; rb = rb_next(rb)) { + struct i915_priolist *p = + rb_entry(rb, typeof(*p), node); + + list_for_each_entry(rq, &p->requests, priotree.link) + print_request(m, rq, "\t\tQ "); + } + spin_unlock_irq(&engine->timeline->lock); + + spin_lock_irq(&b->rb_lock); + for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { + struct intel_wait *w = rb_entry(rb, typeof(*w), node); + + drm_printf(m, "\t%s [%d] waiting for %x\n", + w->tsk->comm, w->tsk->pid, w->seqno); + } + spin_unlock_irq(&b->rb_lock); + + drm_printf(m, "\n"); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_engine.c" #endif diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 8c8ead2276e0..9745825b9440 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -846,7 +846,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) return false; } - if (!i915.enable_fbc) { + if (!i915_modparams.enable_fbc) { fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -1293,8 +1293,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) */ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) { - if (i915.enable_fbc >= 0) - return !!i915.enable_fbc; + if (i915_modparams.enable_fbc >= 0) + return !!i915_modparams.enable_fbc; if (!HAS_FBC(dev_priv)) return 0; @@ -1338,8 +1338,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->has_fbc = false; - i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); - DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); + i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); + DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", + i915_modparams.enable_fbc); if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 262e75c00dd2..0a49a5f13af4 100644 --- 
a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -694,10 +694,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) /* Due to peculiar init order wrt to hpd handling this is separate. */ if (drm_fb_helper_initial_config(&ifbdev->helper, - ifbdev->preferred_bpp)) { + ifbdev->preferred_bpp)) intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); - intel_fbdev_fini(to_i915(ifbdev->helper.dev)); - } } void intel_fbdev_initial_config_async(struct drm_device *dev) @@ -775,7 +773,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous WARN_ON(state != FBINFO_STATE_RUNNING); if (!console_trylock()) { /* Don't block our own workqueue as this can - * be run in parallel with other i915.ko tasks. + * be run in parallel with other i915_modparams.ko tasks. */ schedule_work(&dev_priv->fbdev_suspend_work); return; @@ -797,7 +795,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev) + if (!ifbdev) + return; + + intel_fbdev_sync(ifbdev); + if (ifbdev->vma) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 5fa286074811..83bd401196eb 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -388,7 +388,11 @@ struct guc_ct_buffer_desc { /* Preempt to idle on quantum expiry */ #define POLICY_PREEMPT_TO_IDLE (1<<1) -#define POLICY_MAX_NUM_WI 15 +#define POLICY_MAX_NUM_WI 15 +#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 +#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 +#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000 +#define POLICY_DEFAULT_FAULT_TIME_US 250000 struct guc_policy { /* Time for one workload to execute. (in micro seconds) */ diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 8b0ae7fce7f2..e3b21e08fab2 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -61,9 +61,6 @@ #define KBL_FW_MAJOR 9 #define KBL_FW_MINOR 14 -#define GLK_FW_MAJOR 10 -#define GLK_FW_MINOR 56 - #define GUC_FW_PATH(platform, major, minor) \ "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" @@ -76,8 +73,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE); #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) MODULE_FIRMWARE(I915_KBL_GUC_UCODE); -#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR) - static u32 get_gttype(struct drm_i915_private *dev_priv) { @@ -131,14 +126,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv) params[GUC_CTL_LOG_PARAMS] = guc->log.flags; - if (i915.guc_log_level >= 0) { + if (i915_modparams.guc_log_level >= 0) { params[GUC_CTL_DEBUG] = - i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; + i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } else params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; /* If GuC submission is enabled, set up additional parameters here */ - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool); u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; @@ -368,7 +363,8 @@ int intel_guc_init_hw(struct intel_guc *guc) guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS; DRM_INFO("GuC %s (firmware %s [version %u.%u])\n", - i915.enable_guc_submission ? 
"submission enabled" : "loaded", + i915_modparams.enable_guc_submission ? "submission enabled" : + "loaded", guc->fw.path, guc->fw.major_ver_found, guc->fw.minor_ver_found); @@ -390,8 +386,8 @@ int intel_guc_select_fw(struct intel_guc *guc) guc->fw.load_status = INTEL_UC_FIRMWARE_NONE; guc->fw.type = INTEL_UC_FW_TYPE_GUC; - if (i915.guc_firmware_path) { - guc->fw.path = i915.guc_firmware_path; + if (i915_modparams.guc_firmware_path) { + guc->fw.path = i915_modparams.guc_firmware_path; guc->fw.major_ver_wanted = 0; guc->fw.minor_ver_wanted = 0; } else if (IS_SKYLAKE(dev_priv)) { @@ -406,10 +402,6 @@ int intel_guc_select_fw(struct intel_guc *guc) guc->fw.path = I915_KBL_GUC_UCODE; guc->fw.major_ver_wanted = KBL_FW_MAJOR; guc->fw.minor_ver_wanted = KBL_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - guc->fw.path = I915_GLK_GUC_UCODE; - guc->fw.major_ver_wanted = GLK_FW_MAJOR; - guc->fw.minor_ver_wanted = GLK_FW_MINOR; } else { DRM_ERROR("No GuC firmware known for platform with GuC!\n"); return -ENOENT; diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 16d3b8719cab..6571d96704ad 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -144,7 +144,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc) struct dentry *log_dir; int ret; - if (i915.guc_log_level < 0) + if (i915_modparams.guc_log_level < 0) return 0; /* For now create the log file in /sys/kernel/debug/dri/0 dir */ @@ -480,7 +480,7 @@ static int guc_log_late_setup(struct intel_guc *guc) guc_log_runtime_destroy(guc); err: /* logging will remain off */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; } @@ -502,7 +502,8 @@ static void guc_flush_logs(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - if (!i915.enable_guc_submission || (i915.guc_log_level < 0)) + if (!i915_modparams.enable_guc_submission || + (i915_modparams.guc_log_level < 0)) return; /* First disable the interrupts, will be renabled afterwards */ @@ -529,8 +530,8 @@ int intel_guc_log_create(struct intel_guc *guc) GEM_BUG_ON(guc->log.vma); - if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) - i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; + if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX) + i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX; /* The first page is to save log buffer state. 
Allocate one * extra page for others in case for overlap */ @@ -555,7 +556,7 @@ int intel_guc_log_create(struct intel_guc *guc) guc->log.vma = vma; - if (i915.guc_log_level >= 0) { + if (i915_modparams.guc_log_level >= 0) { ret = guc_log_runtime_create(guc); if (ret < 0) goto err_vma; @@ -576,7 +577,7 @@ int intel_guc_log_create(struct intel_guc *guc) i915_vma_unpin_and_release(&guc->log.vma); err: /* logging will be off */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; } @@ -600,7 +601,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) return -EINVAL; /* This combination doesn't make sense & won't have any effect */ - if (!log_param.logging_enabled && (i915.guc_log_level < 0)) + if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0)) return 0; ret = guc_log_control(guc, log_param.value); @@ -610,7 +611,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) } if (log_param.logging_enabled) { - i915.guc_log_level = log_param.verbosity; + i915_modparams.guc_log_level = log_param.verbosity; /* If log_level was set as -1 at boot time, then the relay channel file * wouldn't have been created by now and interrupts also would not have @@ -633,7 +634,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) guc_flush_logs(guc); /* As logging is disabled, update log level to reflect that */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; } return ret; @@ -641,7 +642,8 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) void i915_guc_log_register(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission || i915.guc_log_level < 0) + if (!i915_modparams.enable_guc_submission || + (i915_modparams.guc_log_level < 0)) return; mutex_lock(&dev_priv->drm.struct_mutex); @@ -651,7 +653,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv) void i915_guc_log_unregister(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission) + if (!i915_modparams.enable_guc_submission) return; mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index c17ed0e62b67..f0575d4ca113 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_KABYLAKE(dev_priv)) return true; + if (IS_BROXTON(dev_priv)) + return true; return false; } @@ -58,7 +60,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) */ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) { - if (!i915.enable_gvt) + if (!i915_modparams.enable_gvt) return; if (intel_vgpu_active(dev_priv)) { @@ -73,7 +75,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) return; bail: - i915.enable_gvt = 0; + i915_modparams.enable_gvt = 0; } /** @@ -90,17 +92,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; - if (!i915.enable_gvt) { + if (!i915_modparams.enable_gvt) { DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); return 0; } - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n"); return -EIO; } - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); return -EIO; } @@ -123,7 
+125,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return 0; bail: - i915.enable_gvt = 0; + i915_modparams.enable_gvt = 0; return 0; } diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index d9d87d96fb69..e8522e31fe65 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -428,7 +428,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work) unsigned int hung = 0, stuck = 0; int busy_count = 0; - if (!i915.enable_hangcheck) + if (!i915_modparams.enable_hangcheck) + return; + + if (intel_vgpu_active(dev_priv)) return; if (!READ_ONCE(dev_priv->gt.awake)) diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c new file mode 100644 index 000000000000..36cbee18df37 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. + * + * Authors: + * Sean Paul + */ + +#include +#include +#include +#include + +#include "intel_drv.h" +#include "i915_reg.h" + +#define KEY_LOAD_TRIES 5 + +static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim) +{ + int ret, read_ret; + bool ksv_ready; + + /* Poll for ksv list ready (spec says max time allowed is 5s) */ + ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port, + &ksv_ready), + read_ret || ksv_ready, 5 * 1000 * 1000, 1000, + 100 * 1000); + if (ret) + return ret; + if (read_ret) + return read_ret; + if (!ksv_ready) + return -ETIMEDOUT; + + return 0; +} + +static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + bool enabled = false; + + mutex_lock(&power_domains->lock); + + /* PG1 (power well #1) needs to be enabled */ + for_each_power_well(dev_priv, power_well) { + if (power_well->id == SKL_DISP_PW_1) { + enabled = power_well->ops->is_enabled(dev_priv, + power_well); + break; + } + } + mutex_unlock(&power_domains->lock); + + /* + * Another req for hdcp key loadability is enabled state of pll for + * cdclk. Without active crtc we wont land here. So we are assuming that + * cdclk is already on. + */ + + return enabled; +} + +static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv) +{ + I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); + I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | + HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); +} + +static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) +{ + int ret; + u32 val; + + val = I915_READ(HDCP_KEY_STATUS); + if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) + return 0; + + /* + * On HSW and BDW HW loads the HDCP1.4 Key when Display comes + * out of reset. So if Key is not already loaded, its an error state. + */ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) + return -ENXIO; + + /* + * Initiate loading the HDCP key from fuses. + * + * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL + * differ in the key load trigger process from other platforms. 
+ */ + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + mutex_lock(&dev_priv->pcu_lock); + ret = sandybridge_pcode_write(dev_priv, + SKL_PCODE_LOAD_HDCP_KEYS, 1); + mutex_unlock(&dev_priv->pcu_lock); + if (ret) { + DRM_ERROR("Failed to initiate HDCP key load (%d)\n", + ret); + return ret; + } + } else { + I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); + } + + /* Wait for the keys to load (500us) */ + ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS, + HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, + 10, 1, &val); + if (ret) + return ret; + else if (!(val & HDCP_KEY_LOAD_STATUS)) + return -ENXIO; + + /* Send Aksv over to PCH display for use in authentication */ + I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); + + return 0; +} + +/* Returns updated SHA-1 index */ +static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) +{ + I915_WRITE(HDCP_SHA_TEXT, sha_text); + if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) { + DRM_ERROR("Timed out waiting for SHA1 ready\n"); + return -ETIMEDOUT; + } + return 0; +} + +static +u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + switch (port) { + case PORT_A: + return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; + case PORT_B: + return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; + case PORT_C: + return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; + case PORT_D: + return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; + case PORT_E: + return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; + default: + break; + } + DRM_ERROR("Unknown port %d\n", port); + return -EINVAL; +} + +static +bool intel_hdcp_is_ksv_valid(u8 *ksv) +{ + int i, ones = 0; + /* KSV has 20 1's and 20 0's */ + for (i = 0; i < DRM_HDCP_KSV_LEN; i++) + ones += hweight8(ksv[i]); + if (ones != 20) + return false; + return true; +} + +/* Implements Part 2 of the HDCP authorization procedure */ +static +int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim) +{ + struct drm_i915_private *dev_priv; + u32 vprime, sha_text, sha_leftovers, rep_ctl; + u8 bstatus[2], num_downstream, *ksv_fifo; + int ret, i, j, sha_idx; + + dev_priv = intel_dig_port->base.base.dev->dev_private; + + ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); + if (ret) { + DRM_ERROR("KSV list failed to become ready (%d)\n", ret); + return ret; + } + + ret = shim->read_bstatus(intel_dig_port, bstatus); + if (ret) + return ret; + + if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || + DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { + DRM_ERROR("Max Topology Limit Exceeded\n"); + return -EPERM; + } + + /* + * When repeater reports 0 device count, HDCP1.4 spec allows disabling + * the HDCP encryption. That implies that repeater can't have its own + * display. As there is no consumption of encrypted content in the + * repeater with 0 downstream devices, we are failing the + * authentication. 
+ */ + num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); + if (num_downstream == 0) + return -EINVAL; + + ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL); + if (!ksv_fifo) + return -ENOMEM; + + ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); + if (ret) + return ret; + + /* Process V' values from the receiver */ + for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { + ret = shim->read_v_prime_part(intel_dig_port, i, &vprime); + if (ret) + return ret; + I915_WRITE(HDCP_SHA_V_PRIME(i), vprime); + } + + /* + * We need to write the concatenation of all device KSVs, BINFO (DP) || + * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte + * stream is written via the HDCP_SHA_TEXT register in 32-bit + * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This + * index will keep track of our progress through the 64 bytes as well as + * helping us work the 40-bit KSVs through our 32-bit register. + * + * NOTE: data passed via HDCP_SHA_TEXT should be big-endian + */ + sha_idx = 0; + sha_text = 0; + sha_leftovers = 0; + rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port); + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + for (i = 0; i < num_downstream; i++) { + unsigned int sha_empty; + u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; + + /* Fill up the empty slots in sha_text and write it out */ + sha_empty = sizeof(sha_text) - sha_leftovers; + for (j = 0; j < sha_empty; j++) + sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8); + + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + + /* Programming guide writes this every 64 bytes */ + sha_idx += sizeof(sha_text); + if (!(sha_idx % 64)) + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + + /* Store the leftover bytes from the ksv in sha_text */ + sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; + sha_text = 0; + for (j = 0; j < sha_leftovers; j++) + sha_text |= ksv[sha_empty + j] << + ((sizeof(sha_text) - j - 1) * 8); + + /* + * If we still have room in sha_text for more data, continue. + * Otherwise, write it out immediately. + */ + if (sizeof(sha_text) > sha_leftovers) + continue; + + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + sha_leftovers = 0; + sha_text = 0; + sha_idx += sizeof(sha_text); + } + + /* + * We need to write BINFO/BSTATUS, and M0 now. Depending on how many + * bytes are leftover from the last ksv, we might be able to fit them + * all in sha_text (first 2 cases), or we might need to split them up + * into 2 writes (last 2 cases). 
+ */ + if (sha_leftovers == 0) { + /* Write 16 bits of text, 16 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); + ret = intel_write_sha_text(dev_priv, + bstatus[0] << 8 | bstatus[1]); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 32 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 16 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + } else if (sha_leftovers == 1) { + /* Write 24 bits of text, 8 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); + sha_text |= bstatus[0] << 16 | bstatus[1] << 8; + /* Only 24-bits of data, must be in the LSB */ + sha_text = (sha_text & 0xffffff00) >> 8; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 32 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 24 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + } else if (sha_leftovers == 2) { + /* Write 32 bits of text */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + sha_text |= bstatus[0] << 24 | bstatus[1] << 16; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 64 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + for (i = 0; i < 2; i++) { + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + } + } else if (sha_leftovers == 3) { + /* Write 32 bits of text */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + sha_text |= bstatus[0] << 24; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 8 bits of text, 24 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); + ret = intel_write_sha_text(dev_priv, bstatus[1]); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 32 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + + /* Write 8 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + } else { + DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers); + return -EINVAL; + } + + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ + while ((sha_idx % 64) < (64 - sizeof(sha_text))) { + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); + } + + /* + * Last write gets the length of the concatenation in bits. 
That is: + * - 5 bytes per device + * - 10 bytes for BINFO/BSTATUS(2), M0(8) + */ + sha_text = (num_downstream * 5 + 10) * 8; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + + /* Tell the HW we're done with the hash and wait for it to ACK */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); + if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + HDCP_SHA1_COMPLETE, + HDCP_SHA1_COMPLETE, 1)) { + DRM_ERROR("Timed out waiting for SHA1 complete\n"); + return -ETIMEDOUT; + } + if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { + DRM_ERROR("SHA-1 mismatch, HDCP failed\n"); + return -ENXIO; + } + + DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n", + num_downstream); + return 0; +} + +/* Implements Part 1 of the HDCP authorization procedure */ +static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim) +{ + struct drm_i915_private *dev_priv; + enum port port; + unsigned long r0_prime_gen_start; + int ret, i, tries = 2; + union { + u32 reg[2]; + u8 shim[DRM_HDCP_AN_LEN]; + } an; + union { + u32 reg[2]; + u8 shim[DRM_HDCP_KSV_LEN]; + } bksv; + union { + u32 reg; + u8 shim[DRM_HDCP_RI_LEN]; + } ri; + bool repeater_present, hdcp_capable; + + dev_priv = intel_dig_port->base.base.dev->dev_private; + + port = intel_dig_port->base.port; + + /* + * Detects whether the display is HDCP capable. Although we check for + * valid Bksv below, the HDCP over DP spec requires that we check + * whether the display supports HDCP before we write An. For HDMI + * displays, this is not necessary. + */ + if (shim->hdcp_capable) { + ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable); + if (ret) + return ret; + if (!hdcp_capable) { + DRM_ERROR("Panel is not HDCP capable\n"); + return -EINVAL; + } + } + + /* Initialize An with 2 random values and acquire it */ + for (i = 0; i < 2; i++) + I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32()); + I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN); + + /* Wait for An to be acquired */ + if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + HDCP_STATUS_AN_READY, + HDCP_STATUS_AN_READY, 1)) { + DRM_ERROR("Timed out waiting for An\n"); + return -ETIMEDOUT; + } + + an.reg[0] = I915_READ(PORT_HDCP_ANLO(port)); + an.reg[1] = I915_READ(PORT_HDCP_ANHI(port)); + ret = shim->write_an_aksv(intel_dig_port, an.shim); + if (ret) + return ret; + + r0_prime_gen_start = jiffies; + + memset(&bksv, 0, sizeof(bksv)); + + /* HDCP spec states that we must retry the bksv if it is invalid */ + for (i = 0; i < tries; i++) { + ret = shim->read_bksv(intel_dig_port, bksv.shim); + if (ret) + return ret; + if (intel_hdcp_is_ksv_valid(bksv.shim)) + break; + } + if (i == tries) { + DRM_ERROR("HDCP failed, Bksv is invalid\n"); + return -ENODEV; + } + + I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); + I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); + + ret = shim->repeater_present(intel_dig_port, &repeater_present); + if (ret) + return ret; + if (repeater_present) + I915_WRITE(HDCP_REP_CTL, + intel_hdcp_get_repeater_ctl(intel_dig_port)); + + ret = shim->toggle_signalling(intel_dig_port, true); + if (ret) + return ret; + + I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC); + + /* Wait for R0 ready */ + if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { + DRM_ERROR("Timed out waiting for R0 ready\n"); + return -ETIMEDOUT; + } + + /* + * Wait for R0' to become available. 
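+	 * (R0' is the receiver's 16-bit link-verification value; the hardware
+	 * compares it against our locally generated R0 and reports the result
+	 * through HDCP_STATUS_RI_MATCH.)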
The spec says 100ms from Aksv, but + * some monitors can take longer than this. We'll set the timeout at + * 300ms just to be sure. + * + * On DP, there's an R0_READY bit available but no such bit + * exists on HDMI. Since the upper-bound is the same, we'll just do + * the stupid thing instead of polling on one and not the other. + */ + wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); + + ri.reg = 0; + ret = shim->read_ri_prime(intel_dig_port, ri.shim); + if (ret) + return ret; + I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + + /* Wait for Ri prime match */ + if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { + DRM_ERROR("Timed out waiting for Ri prime match (%x)\n", + I915_READ(PORT_HDCP_STATUS(port))); + return -ETIMEDOUT; + } + + /* Wait for encryption confirmation */ + if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) { + DRM_ERROR("Timed out waiting for encryption\n"); + return -ETIMEDOUT; + } + + /* + * XXX: If we have MST-connected devices, we need to enable encryption + * on those as well. + */ + + if (repeater_present) + return intel_hdcp_auth_downstream(intel_dig_port, shim); + + DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n"); + return 0; +} + +static +struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) +{ + return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); +} + +static int _intel_hdcp_disable(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + enum port port = intel_dig_port->base.port; + int ret; + + DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", + connector->base.name, connector->base.base.id); + + I915_WRITE(PORT_HDCP_CONF(port), 0); + if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0, + 20)) { + DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); + return -ETIMEDOUT; + } + + ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false); + if (ret) { + DRM_ERROR("Failed to disable HDCP signalling\n"); + return ret; + } + + DRM_DEBUG_KMS("HDCP is disabled\n"); + return 0; +} + +static int _intel_hdcp_enable(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + int i, ret, tries = 3; + + DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n", + connector->base.name, connector->base.base.id); + + if (!hdcp_key_loadable(dev_priv)) { + DRM_ERROR("HDCP key Load is not possible\n"); + return -ENXIO; + } + + for (i = 0; i < KEY_LOAD_TRIES; i++) { + ret = intel_hdcp_load_keys(dev_priv); + if (!ret) + break; + intel_hdcp_clear_keys(dev_priv); + } + if (ret) { + DRM_ERROR("Could not load HDCP keys, (%d)\n", ret); + return ret; + } + + /* Incase of authentication failures, HDCP spec expects reauth. */ + for (i = 0; i < tries; i++) { + ret = intel_hdcp_auth(conn_to_dig_port(connector), + connector->hdcp_shim); + if (!ret) + return 0; + + DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret); + + /* Ensuring HDCP encryption and signalling are stopped. 
*/ + _intel_hdcp_disable(connector); + } + + DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); + return ret; +} + +static void intel_hdcp_check_work(struct work_struct *work) +{ + struct intel_connector *connector = container_of(to_delayed_work(work), + struct intel_connector, + hdcp_check_work); + if (!intel_hdcp_check_link(connector)) + schedule_delayed_work(&connector->hdcp_check_work, + DRM_HDCP_CHECK_PERIOD_MS); +} + +static void intel_hdcp_prop_work(struct work_struct *work) +{ + struct intel_connector *connector = container_of(work, + struct intel_connector, + hdcp_prop_work); + struct drm_device *dev = connector->base.dev; + struct drm_connector_state *state; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&connector->hdcp_mutex); + + /* + * This worker is only used to flip between ENABLED/DESIRED. Either of + * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED, + * we're running just after hdcp has been disabled, so just exit + */ + if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + state = connector->base.state; + state->content_protection = connector->hdcp_value; + } + + mutex_unlock(&connector->hdcp_mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) +{ + /* PORT E doesn't have HDCP, and PORT F is disabled */ + return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && + !IS_CHERRYVIEW(dev_priv) && port < PORT_E); +} + +int intel_hdcp_init(struct intel_connector *connector, + const struct intel_hdcp_shim *hdcp_shim) +{ + int ret; + + ret = drm_connector_attach_content_protection_property( + &connector->base); + if (ret) + return ret; + + connector->hdcp_shim = hdcp_shim; + mutex_init(&connector->hdcp_mutex); + INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); + INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); + return 0; +} + +int intel_hdcp_enable(struct intel_connector *connector) +{ + int ret; + + if (!connector->hdcp_shim) + return -ENOENT; + + mutex_lock(&connector->hdcp_mutex); + + ret = _intel_hdcp_enable(connector); + if (ret) + goto out; + + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&connector->hdcp_prop_work); + schedule_delayed_work(&connector->hdcp_check_work, + DRM_HDCP_CHECK_PERIOD_MS); +out: + mutex_unlock(&connector->hdcp_mutex); + return ret; +} + +int intel_hdcp_disable(struct intel_connector *connector) +{ + int ret = 0; + + if (!connector->hdcp_shim) + return -ENOENT; + + mutex_lock(&connector->hdcp_mutex); + + if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; + ret = _intel_hdcp_disable(connector); + } + + mutex_unlock(&connector->hdcp_mutex); + cancel_delayed_work_sync(&connector->hdcp_check_work); + return ret; +} + +void intel_hdcp_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state) +{ + uint64_t old_cp = old_state->content_protection; + uint64_t new_cp = new_state->content_protection; + struct drm_crtc_state *crtc_state; + + if (!new_state->crtc) { + /* + * If the connector is being disabled with CP enabled, mark it + * desired so it's re-enabled when the connector is brought back + */ + if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_state->content_protection = + DRM_MODE_CONTENT_PROTECTION_DESIRED; + return; + } + + /* + * Nothing to do if 
the state didn't change, or HDCP was activated since + * the last commit + */ + if (old_cp == new_cp || + (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && + new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + crtc_state->mode_changed = true; +} + +/* Implements Part 3 of the HDCP authorization procedure */ +int intel_hdcp_check_link(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + enum port port = intel_dig_port->base.port; + int ret = 0; + + if (!connector->hdcp_shim) + return -ENOENT; + + mutex_lock(&connector->hdcp_mutex); + + if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + goto out; + + if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { + DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n", + connector->base.name, connector->base.base.id, + I915_READ(PORT_HDCP_STATUS(port))); + ret = -ENXIO; + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&connector->hdcp_prop_work); + goto out; + } + + if (connector->hdcp_shim->check_link(intel_dig_port)) { + if (connector->hdcp_value != + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + connector->hdcp_value = + DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&connector->hdcp_prop_work); + } + goto out; + } + + DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", + connector->base.name, connector->base.base.id); + + ret = _intel_hdcp_disable(connector); + if (ret) { + DRM_ERROR("Failed to disable hdcp (%d)\n", ret); + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&connector->hdcp_prop_work); + goto out; + } + + ret = _intel_hdcp_enable(connector); + if (ret) { + DRM_ERROR("Failed to enable hdcp (%d)\n", ret); + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&connector->hdcp_prop_work); + goto out; + } + +out: + mutex_unlock(&connector->hdcp_mutex); + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index e8abea7594ec..60fd172eccfb 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include "intel_drv.h" #include @@ -481,7 +482,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, crtc_state->limited_color_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL, - intel_hdmi->rgb_quant_range_selectable); + intel_hdmi->rgb_quant_range_selectable, + is_hdmi2_sink); /* TODO: handle pixel repetition for YCBCR420 outputs */ intel_write_infoframe(encoder, crtc_state, &frame); @@ -867,6 +869,248 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) adapter, enable); } +static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port, + unsigned int offset, void *buffer, size_t size) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + hdmi->ddc_bus); + int ret; + u8 start = offset & 0xff; + struct i2c_msg msgs[] = { + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = 0, + .len = 1, + .buf = &start, + }, + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = I2C_M_RD, + .len = size, + .buf = buffer + } + }; + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (ret == ARRAY_SIZE(msgs)) + return 0; + return ret >= 0 ? -EIO : ret; +} + +static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, + unsigned int offset, void *buffer, size_t size) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + hdmi->ddc_bus); + int ret; + u8 *write_buf; + struct i2c_msg msg; + + write_buf = kzalloc(size + 1, GFP_KERNEL); + if (!write_buf) + return -ENOMEM; + + write_buf[0] = offset & 0xff; + memcpy(&write_buf[1], buffer, size); + + msg.addr = DRM_HDCP_DDC_ADDR; + msg.flags = 0; + msg.len = size + 1; + msg.buf = write_buf; + + ret = i2c_transfer(adapter, &msg, 1); + kfree(write_buf); + if (ret == 1) + return 0; + return ret >= 0 ?
-EIO : ret; +} + +static +int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, + u8 *an) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + hdmi->ddc_bus); + int ret; + + ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, + DRM_HDCP_AN_LEN); + if (ret) { + DRM_ERROR("Write An over DDC failed (%d)\n", ret); + return ret; + } + + ret = intel_gmbus_output_aksv(adapter); + if (ret < 0) { + DRM_ERROR("Failed to output aksv (%d)\n", ret); + return ret; + } + return 0; +} + +static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, + u8 *bksv) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret) + DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret); + return ret; +} + +static +int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, + u8 *bstatus) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret) + DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret); + return ret; +} + +static +int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, + bool *repeater_present) +{ + int ret; + u8 val; + + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); + if (ret) { + DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); + return ret; + } + *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; + return 0; +} + +static +int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, + u8 *ri_prime) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, + ri_prime, DRM_HDCP_RI_LEN); + if (ret) + DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret); + return ret; +} + +static +int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, + bool *ksv_ready) +{ + int ret; + u8 val; + + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); + if (ret) { + DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); + return ret; + } + *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; + return 0; +} + +static +int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, + int num_downstream, u8 *ksv_fifo) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, + ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); + if (ret) { + DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret); + return ret; + } + return 0; +} + +static +int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, + int i, u32 *part) +{ + int ret; + + if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) + return -EINVAL; + + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), + part, DRM_HDCP_V_PRIME_PART_LEN); + if (ret) + DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret); + return ret; +} + +static +int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, + bool enable) +{ + int ret; + + if (!enable) + usleep_range(6, 60); /* Bspec says >= 6us */ + + ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable); + if (ret) { + DRM_ERROR("%s HDCP signalling failed (%d)\n", + enable ? 
"Enable" : "Disable", ret); + return ret; + } + return 0; +} + +static +bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) +{ + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + enum port port = intel_dig_port->base.port; + int ret; + union { + u32 reg; + u8 shim[DRM_HDCP_RI_LEN]; + } ri; + + ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim); + if (ret) + return false; + + I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + + /* Wait for Ri prime match */ + if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { + DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n", + I915_READ(PORT_HDCP_STATUS(port))); + return false; + } + return true; +} + +static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { + .write_an_aksv = intel_hdmi_hdcp_write_an_aksv, + .read_bksv = intel_hdmi_hdcp_read_bksv, + .read_bstatus = intel_hdmi_hdcp_read_bstatus, + .repeater_present = intel_hdmi_hdcp_repeater_present, + .read_ri_prime = intel_hdmi_hdcp_read_ri_prime, + .read_ksv_ready = intel_hdmi_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_hdmi_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part, + .toggle_signalling = intel_hdmi_hdcp_toggle_signalling, + .check_link = intel_hdmi_hdcp_check_link, +}; + static void intel_hdmi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -1562,12 +1806,20 @@ intel_hdmi_set_edid(struct drm_connector *connector) struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct edid *edid; bool connected = false; + struct i2c_adapter *i2c; intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - edid = drm_get_edid(connector, - intel_gmbus_get_adapter(dev_priv, - intel_hdmi->ddc_bus)); + i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); @@ -1602,7 +1854,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (intel_hdmi_set_edid(connector)) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; status = connector_status_connected; } else @@ -1870,6 +2121,14 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) u8 ddc_pin; switch (port) { + case PORT_A: + if ((IS_GEN9_LP(dev_priv)) && (intel_vgpu_active(dev_priv))) + ddc_pin = GMBUS_PIN_3_BXT; + else { + MISSING_CASE(port); + ddc_pin = GMBUS_PIN_DPB; + } + break; case PORT_B: ddc_pin = GMBUS_PIN_1_BXT; break; @@ -1989,7 +2248,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); - if (WARN_ON(port == PORT_A)) + if (!intel_vgpu_active(dev_priv) && + WARN_ON(port == PORT_A)) return; intel_encoder->hpd_pin = intel_hpd_pin(port); @@ -2022,6 +2282,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); + if (is_hdcp_supported(dev_priv, port)) { + int ret = intel_hdcp_init(intel_connector, + &intel_hdmi_hdcp_shim); + if (ret) + DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + } + intel_connector_attach_encoder(intel_connector, 
intel_encoder); intel_hdmi->attached_connector = intel_connector; diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 6145fa0d6773..637c8d5a7ac5 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -52,10 +52,6 @@ #define KBL_HUC_FW_MINOR 00 #define KBL_BLD_NUM 1810 -#define GLK_HUC_FW_MAJOR 02 -#define GLK_HUC_FW_MINOR 00 -#define GLK_BLD_NUM 1748 - #define HUC_FW_PATH(platform, major, minor, bld_num) \ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ __stringify(minor) "_" __stringify(bld_num) ".bin" @@ -72,9 +68,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE); KBL_HUC_FW_MINOR, KBL_BLD_NUM) MODULE_FIRMWARE(I915_KBL_HUC_UCODE); -#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ - GLK_HUC_FW_MINOR, GLK_BLD_NUM) - /** * huc_ucode_xfer() - DMA's the firmware * @dev_priv: the drm_i915_private device @@ -155,8 +148,8 @@ void intel_huc_select_fw(struct intel_huc *huc) huc->fw.load_status = INTEL_UC_FIRMWARE_NONE; huc->fw.type = INTEL_UC_FW_TYPE_HUC; - if (i915.huc_firmware_path) { - huc->fw.path = i915.huc_firmware_path; + if (i915_modparams.huc_firmware_path) { + huc->fw.path = i915_modparams.huc_firmware_path; huc->fw.major_ver_wanted = 0; huc->fw.minor_ver_wanted = 0; } else if (IS_SKYLAKE(dev_priv)) { @@ -171,10 +164,6 @@ void intel_huc_select_fw(struct intel_huc *huc) huc->fw.path = I915_KBL_HUC_UCODE; huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR; huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - huc->fw.path = I915_GLK_HUC_UCODE; - huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR; - huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR; } else { DRM_ERROR("No HuC firmware known for platform with HuC!\n"); return; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index eb5827110d8f..3466c501c9b5 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "intel_drv.h" #include #include "i915_drv.h" @@ -373,7 +374,8 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, static int gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, - unsigned short addr, u8 *buf, unsigned int len) + unsigned short addr, u8 *buf, unsigned int len, + u32 gmbus1_index) { unsigned int chunk_size = len; u32 val, loop; @@ -386,7 +388,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, I915_WRITE_FW(GMBUS3, val); I915_WRITE_FW(GMBUS1, - GMBUS_CYCLE_WAIT | + gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); @@ -409,7 +411,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + u32 gmbus1_index) { u8 *buf = msg->buf; unsigned int tx_size = msg->len; @@ -419,7 +422,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) do { len = min(tx_size, GMBUS_BYTE_COUNT_MAX); - ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len); + ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len, + gmbus1_index); if (ret) return ret; @@ -431,19 +435,21 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) } /* - * The gmbus controller can combine a 1 or 2 byte write with a read that - * immediately follows it by using an "INDEX" cycle. 
+ * The gmbus controller can combine a 1 or 2 byte write with another read/write + * that immediately follows it by using an "INDEX" cycle. */ static bool -gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) +gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && + msgs[i].addr == msgs[i + 1].addr && + !(msgs[i].flags & I2C_M_RD) && + (msgs[i].len == 1 || msgs[i].len == 2) && (msgs[i + 1].flags & I2C_M_RD)); } static int -gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) +gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) { u32 gmbus1_index = 0; u32 gmbus5 = 0; @@ -460,7 +466,10 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) if (gmbus5) I915_WRITE_FW(GMBUS5, gmbus5); - ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + if (msgs[1].flags & I2C_M_RD) + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + else + ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) @@ -470,7 +479,8 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) } static int -do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) +do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, + u32 gmbus0_source) { struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, @@ -480,17 +490,17 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) int ret = 0; retry: - I915_WRITE_FW(GMBUS0, bus->reg0); + I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; - if (gmbus_is_index_read(msgs, i, num)) { - ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); - inc = 2; /* an index read is two msgs */ + if (gmbus_is_index_xfer(msgs, i, num)) { + ret = gmbus_index_xfer(dev_priv, &msgs[i]); + inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { - ret = gmbus_xfer_write(dev_priv, &msgs[i]); + ret = gmbus_xfer_write(dev_priv, &msgs[i], 0); } if (!ret) @@ -598,7 +608,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) if (ret < 0) bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY; } else { - ret = do_gmbus_xfer(adapter, msgs, num); + ret = do_gmbus_xfer(adapter, msgs, num, 0); if (ret == -EAGAIN) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } @@ -608,6 +618,45 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) return ret; } +int intel_gmbus_output_aksv(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + int ret; + u8 cmd = DRM_HDCP_DDC_AKSV; + u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; + struct i2c_msg msgs[] = { + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = 0, + .len = sizeof(cmd), + .buf = &cmd, + }, + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = 0, + .len = sizeof(buf), + .buf = buf, + } + }; + + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + mutex_lock(&dev_priv->gmbus_mutex); + + /* + * In order to output Aksv to the receiver, use an indexed write to + * pass the i2c command, and tell GMBUS to use the HW-provided value + * instead of sourcing GMBUS3 for the data. 
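+	 * The zeroed buf only sets the length of the transfer; the actual Aksv
+	 * bytes are substituted by the hardware and never pass through software.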
+ */ + ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); + + mutex_unlock(&dev_priv->gmbus_mutex); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + + return ret; +} + static u32 gmbus_func(struct i2c_adapter *adapter) { return i2c_bit_algo.functionality(adapter) & diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6f972e6ec663..59e95f6d16bc 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -208,8 +208,9 @@ /* Typical size of the average request (2 pipecontrols and a MI_BB) */ #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ - #define WA_TAIL_DWORDS 2 +#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) +#define PREEMPT_ID 0x1 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); @@ -219,9 +220,9 @@ static void execlists_init_reg_state(u32 *reg_state, struct intel_ring *ring); /** - * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists + * intel_sanitize_enable_execlists() - sanitize i915_modparams.enable_execlists * @dev_priv: i915 device private - * @enable_execlists: value of i915.enable_execlists module parameter. + * @enable_execlists: value of i915_modparams.enable_execlists module parameter. * * Only certain platforms support Execlists (the prerequisites being * support for Logical Ring Contexts and Aliasing PPGTT or better). @@ -244,7 +245,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && USES_PPGTT(dev_priv) && - i915.use_mmio_flip >= 0) + i915_modparams.use_mmio_flip >= 0) return 1; return 0; @@ -279,17 +280,110 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<desc_template; /* bits 0-11 */ - desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; + desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; /* bits 12-31 */ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ ce->lrc_desc = desc; } -uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static struct i915_priolist * +lookup_priolist(struct intel_engine_cs *engine, + struct i915_priotree *pt, + int prio) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_priolist *p; + struct rb_node **parent, *rb; + bool first = true; + + if (unlikely(execlists->no_priolist)) + prio = I915_PRIORITY_NORMAL; + +find_priolist: + /* most positive priority is scheduled first, equal priorities fifo */ + rb = NULL; + parent = &execlists->queue.rb_node; + while (*parent) { + rb = *parent; + p = rb_entry(rb, typeof(*p), node); + if (prio > p->priority) { + parent = &rb->rb_left; + } else if (prio < p->priority) { + parent = &rb->rb_right; + first = false; + } else { + return p; + } + } + + if (prio == I915_PRIORITY_NORMAL) { + p = &execlists->default_priolist; + } else { + p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + /* Convert an allocation failure to a priority bump */ + if (unlikely(!p)) { + prio = I915_PRIORITY_NORMAL; /* recurses just once */ + + /* To maintain ordering with all rendering, after an + * allocation failure we have to disable all scheduling. + * Requests will then be executed in fifo, and schedule + * will ensure that dependencies are emitted in fifo. 
+ * There will be still some reordering with existing + * requests, so if userspace lied about their + * dependencies that reordering may be visible. + */ + execlists->no_priolist = true; + goto find_priolist; + } + } + + p->priority = prio; + INIT_LIST_HEAD(&p->requests); + rb_link_node(&p->node, rb, parent); + rb_insert_color(&p->node, &execlists->queue); + + if (first) + execlists->first = &p->node; + + return ptr_pack_bits(p, first, 1); +} + +static void unwind_wa_tail(struct drm_i915_gem_request *rq) { - return ctx->engine[engine->id].lrc_desc; + rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); + assert_ring_tail_valid(rq->ring, rq->tail); +} + +static void unwind_incomplete_requests(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *rq, *rn; + struct i915_priolist *uninitialized_var(p); + int last_prio = I915_PRIORITY_INVALID; + + lockdep_assert_held(&engine->timeline->lock); + + list_for_each_entry_safe_reverse(rq, rn, + &engine->timeline->requests, + link) { + if (i915_gem_request_completed(rq)) + return; + + __i915_gem_request_unsubmit(rq); + unwind_wa_tail(rq); + + GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID); + if (rq->priotree.priority != last_prio) { + p = lookup_priolist(engine, + &rq->priotree, + rq->priotree.priority); + p = ptr_mask_bits(p, 1); + + last_prio = rq->priotree.priority; + } + + list_add(&rq->priotree.link, &p->requests); + } } static inline void @@ -336,14 +430,22 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) return ce->lrc_desc; } +static inline void elsp_write(u64 desc, u32 __iomem *elsp) +{ + writel(upper_32_bits(desc), elsp); + writel(lower_32_bits(desc), elsp); +} + static void execlists_submit_ports(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = engine->execlists.port; u32 __iomem *elsp = engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); unsigned int n; + u32 descs[4]; + int i = 0; - for (n = ARRAY_SIZE(engine->execlist_port); n--; ) { + for (n = execlists_num_ports(&engine->execlists); n--; ) { struct drm_i915_gem_request *rq; unsigned int count; u64 desc; @@ -360,9 +462,24 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) GEM_BUG_ON(!n); desc = 0; } + if (intel_vgpu_active(engine->i915) && i915_modparams.enable_pvmmio) { + BUG_ON(i >= 4); + descs[i] = upper_32_bits(desc); + descs[i + 1] = lower_32_bits(desc); + i += 2; + } else { + elsp_write(desc, elsp); + } + } - writel(upper_32_bits(desc), elsp); - writel(lower_32_bits(desc), elsp); + if (intel_vgpu_active(engine->i915) && i915_modparams.enable_pvmmio) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + spin_lock(&engine->i915->shared_page_lock); + writel(descs[0], elsp_data); + writel(descs[1], elsp_data + 1); + writel(descs[2], elsp_data + 2); + writel(descs[3], elsp); + spin_unlock(&engine->i915->shared_page_lock); } } @@ -395,25 +512,57 @@ static void port_assign(struct execlist_port *port, port_set(port, port_pack(i915_gem_request_get(rq), port_count(port))); } +static void inject_preempt_context(struct intel_engine_cs *engine) +{ + struct intel_context *ce = + &engine->i915->preempt_context->engine[engine->id]; + u32 __iomem *elsp = + engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); + unsigned int n; + + GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID); + GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES)); + + memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES); + 
ce->ring->tail += WA_TAIL_BYTES; + ce->ring->tail &= (ce->ring->size - 1); + ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail; + + if (intel_vgpu_active(engine->i915) && i915_modparams.enable_pvmmio) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + + spin_lock(&engine->i915->shared_page_lock); + writel(0, elsp_data); + writel(0, elsp_data + 1); + writel(upper_32_bits(ce->lrc_desc), elsp_data + 2); + writel(lower_32_bits(ce->lrc_desc), elsp); + spin_unlock(&engine->i915->shared_page_lock); + + return; + } + + for (n = execlists_num_ports(&engine->execlists); --n; ) + elsp_write(0, elsp); + + elsp_write(ce->lrc_desc, elsp); +} + +static bool can_preempt(struct intel_engine_cs *engine) +{ + return !intel_vgpu_active(engine->i915) && + INTEL_INFO(engine->i915)->has_logical_ring_preemption; +} + static void execlists_dequeue(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *last; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; + struct drm_i915_gem_request *last = port_request(port); struct rb_node *rb; bool submit = false; - last = port_request(port); - if (last) - /* WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL - * as we resubmit the request. See gen8_emit_breadcrumb() - * for where we prepare the padding after the end of the - * request. - */ - last->tail = last->wa_tail; - - GEM_BUG_ON(port_isset(&port[1])); - /* Hardware submission is through 2 ports. Conceptually each port * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is * static for a context, and unique to each, so we only execute @@ -436,9 +585,68 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); - while (rb) { + rb = execlists->first; + GEM_BUG_ON(rb_first(&execlists->queue) != rb); + if (!rb) + goto unlock; + + if (last) { + /* + * Don't resubmit or switch until all outstanding + * preemptions (lite-restore) are seen. Then we + * know the next preemption status we see corresponds + * to this ELSP update. + */ + if (port_count(&port[0]) > 1) + goto unlock; + + if (can_preempt(engine) && + rb_entry(rb, struct i915_priolist, node)->priority > + max(last->priotree.priority, 0)) { + /* + * Switch to our empty preempt context so + * the state of the GPU is known (idle). + */ + inject_preempt_context(engine); + execlists_set_active(execlists, + EXECLISTS_ACTIVE_PREEMPT); + goto unlock; + } else { + /* + * In theory, we could coalesce more requests onto + * the second port (the first port is active, with + * no preemptions pending). However, that means we + * then have to deal with the possible lite-restore + * of the second port (as we submit the ELSP, there + * may be a context-switch) but also we may complete + * the resubmission before the context-switch. Ergo, + * coalescing onto the second port will cause a + * preemption event, but we cannot predict whether + * that will affect port[0] or port[1]. + * + * If the second port is already active, we can wait + * until the next context-switch before contemplating + * new requests. 
The GPU will be busy and we should be + * able to resubmit the new ELSP before it idles, + * avoiding pipeline bubbles (momentary pauses where + * the driver is unable to keep up the supply of new + * work). + */ + if (port_count(&port[1])) + goto unlock; + + /* WaIdleLiteRestore:bdw,skl + * Apply the wa NOOPs to prevent + * ring:HEAD == req:TAIL as we resubmit the + * request. See gen8_emit_breadcrumb() for + * where we prepare the padding after the + * end of the request. + */ + last->tail = last->wa_tail; + } + } + + do { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; @@ -460,7 +668,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * combine this request with the last, then we * are done. */ - if (port != engine->execlist_port) { + if (port == last_port) { __list_del_many(&p->requests, &rq->priotree.link); goto done; @@ -485,38 +693,113 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (submit) port_assign(port, last); port++; + + GEM_BUG_ON(port_isset(port)); } INIT_LIST_HEAD(&rq->priotree.link); - rq->priotree.priority = INT_MAX; - __i915_gem_request_submit(rq); - trace_i915_gem_request_in(rq, port_index(port, engine)); + trace_i915_gem_request_in(rq, port_index(port, execlists)); last = rq; submit = true; } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); - } + } while (rb); done: - engine->execlist_first = rb; + execlists->first = rb; if (submit) port_assign(port, last); +unlock: spin_unlock_irq(&engine->timeline->lock); - if (submit) + if (submit) { + execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); execlists_submit_ports(engine); + } } -static bool execlists_elsp_ready(const struct intel_engine_cs *engine) +static void +execlist_cancel_port_requests(struct intel_engine_execlists *execlists) { - const struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = execlists->port; + unsigned int num_ports = ARRAY_SIZE(execlists->port); + + while (num_ports-- && port_isset(port)) { + struct drm_i915_gem_request *rq = port_request(port); + + GEM_BUG_ON(!execlists->active); + + execlists_context_status_change(rq, + i915_gem_request_completed(rq) ? + INTEL_CONTEXT_SCHEDULE_OUT : + INTEL_CONTEXT_SCHEDULE_PREEMPTED); + + i915_gem_request_put(rq); + + memset(port, 0, sizeof(*port)); + port++; + } +} + +static void execlists_cancel_requests(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_gem_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + + /* Cancel the requests on the HW and clear the ELSP tracker. */ + execlist_cancel_port_requests(execlists); + + /* Mark all executing requests as skipped. */ + list_for_each_entry(rq, &engine->timeline->requests, link) { + GEM_BUG_ON(!rq->global_seqno); + if (!i915_gem_request_completed(rq)) + dma_fence_set_error(&rq->fence, -EIO); + } + + /* Flush the queued requests to the timeline list (for retiring). 
*/ + rb = execlists->first; + while (rb) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + + list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { + INIT_LIST_HEAD(&rq->priotree.link); + + dma_fence_set_error(&rq->fence, -EIO); + __i915_gem_request_submit(rq); + } + + rb = rb_next(rb); + rb_erase(&p->node, &execlists->queue); + INIT_LIST_HEAD(&p->requests); + if (p->priority != I915_PRIORITY_NORMAL) + kmem_cache_free(engine->i915->priorities, p); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + + execlists->queue = RB_ROOT; + execlists->first = NULL; + GEM_BUG_ON(port_isset(execlists->port)); + + /* + * The port is checked prior to scheduling a tasklet, but + * just in case we have suspended the tasklet to do the + * wedging make sure that when it wakes, it decides there + * is no work to do by clearing the irq_posted bit. + */ + clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - return port_count(&port[0]) + port_count(&port[1]) < 2; + spin_unlock_irqrestore(&engine->timeline->lock, flags); } /* @@ -525,8 +808,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine) */ static void intel_lrc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port * const port = execlists->port; struct drm_i915_private *dev_priv = engine->i915; /* We can skip acquiring intel_runtime_pm_get() here as it was taken @@ -538,19 +822,25 @@ static void intel_lrc_irq_handler(unsigned long data) */ GEM_BUG_ON(!dev_priv->gt.awake); - intel_uncore_forcewake_get(dev_priv, engine->fw_domains); + intel_uncore_forcewake_get(dev_priv, execlists->fw_domains); /* Prefer doing test_and_clear_bit() as a two stage operation to avoid * imposing the cost of a locked atomic transaction when submitting a * new request (outside of the context-switch interrupt). */ while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { - u32 __iomem *csb_mmio = - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); - u32 __iomem *buf = - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); + /* The HWSP contains a (cacheable) mirror of the CSB */ + const u32 *buf = + &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; unsigned int head, tail; + /* However GVT emulation depends upon intercepting CSB mmio */ + if (unlikely(execlists->csb_use_mmio)) { + buf = (u32 * __force) + (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + execlists->csb_head = -1; /* force mmio read of CSB ptrs */ + } + /* The write will be ordered by the uncached read (itself * a memory barrier), so we do not need another in the form * of a locked instruction. The race between the interrupt @@ -562,9 +852,20 @@ static void intel_lrc_irq_handler(unsigned long data) * is set and we do a new loop. 
*/ __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - head = readl(csb_mmio); - tail = GEN8_CSB_WRITE_PTR(head); - head = GEN8_CSB_READ_PTR(head); + if (unlikely(execlists->csb_head == -1)) { /* following a reset */ + head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); + tail = GEN8_CSB_WRITE_PTR(head); + head = GEN8_CSB_READ_PTR(head); + execlists->csb_head = head; + } else { + const int write_idx = + intel_hws_csb_write_index(dev_priv) - + I915_HWS_CSB_BUF0_INDEX; + + head = execlists->csb_head; + tail = READ_ONCE(buf[write_idx]); + } + while (head != tail) { struct drm_i915_gem_request *rq; unsigned int status; @@ -590,13 +891,35 @@ static void intel_lrc_irq_handler(unsigned long data) * status notifier. */ - status = readl(buf + 2 * head); + status = READ_ONCE(buf[2 * head]); /* maybe mmio! */ if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) continue; + if (status & GEN8_CTX_STATUS_ACTIVE_IDLE && + buf[2*head + 1] == PREEMPT_ID) { + execlist_cancel_port_requests(execlists); + + spin_lock_irq(&engine->timeline->lock); + unwind_incomplete_requests(engine); + spin_unlock_irq(&engine->timeline->lock); + + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)); + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_PREEMPT); + continue; + } + + if (status & GEN8_CTX_STATUS_PREEMPTED && + execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)) + continue; + + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); + /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) != - port->context_id); + GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); rq = port_unpack(port, &count); GEM_BUG_ON(count == 0); @@ -608,8 +931,7 @@ static void intel_lrc_irq_handler(unsigned long data) trace_i915_gem_request_out(rq); i915_gem_request_put(rq); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); + execlists_port_complete(execlists, port); } else { port_set(port, port_pack(rq, count)); } @@ -617,80 +939,33 @@ static void intel_lrc_irq_handler(unsigned long data) /* After the final element, the hw should be idle */ GEM_BUG_ON(port_count(port) == 0 && !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + if (port_count(port) == 0) + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_USER); } - writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), - csb_mmio); + if (head != execlists->csb_head) { + execlists->csb_head = head; + writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), + dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); + } } - if (execlists_elsp_ready(engine)) + if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) execlists_dequeue(engine); - intel_uncore_forcewake_put(dev_priv, engine->fw_domains); + intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); } -static bool -insert_request(struct intel_engine_cs *engine, - struct i915_priotree *pt, - int prio) +static void insert_request(struct intel_engine_cs *engine, + struct i915_priotree *pt, + int prio) { - struct i915_priolist *p; - struct rb_node **parent, *rb; - bool first = true; - - if (unlikely(engine->no_priolist)) - prio = I915_PRIORITY_NORMAL; - -find_priolist: - /* most positive priority is scheduled first, equal priorities fifo */ - rb = NULL; - parent = &engine->execlist_queue.rb_node; - while (*parent) { - rb = *parent; - p = rb_entry(rb, typeof(*p), node); - if (prio > p->priority) { - parent = &rb->rb_left; - } else if (prio < p->priority) { - parent = 
&rb->rb_right; - first = false; - } else { - list_add_tail(&pt->link, &p->requests); - return false; - } - } + struct i915_priolist *p = lookup_priolist(engine, pt, prio); - if (prio == I915_PRIORITY_NORMAL) { - p = &engine->default_priolist; - } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); - /* Convert an allocation failure to a priority bump */ - if (unlikely(!p)) { - prio = I915_PRIORITY_NORMAL; /* recurses just once */ - - /* To maintain ordering with all rendering, after an - * allocation failure we have to disable all scheduling. - * Requests will then be executed in fifo, and schedule - * will ensure that dependencies are emitted in fifo. - * There will be still some reordering with existing - * requests, so if userspace lied about their - * dependencies that reordering may be visible. - */ - engine->no_priolist = true; - goto find_priolist; - } - } - - p->priority = prio; - rb_link_node(&p->node, rb, parent); - rb_insert_color(&p->node, &engine->execlist_queue); - - INIT_LIST_HEAD(&p->requests); - list_add_tail(&pt->link, &p->requests); - - if (first) - engine->execlist_first = &p->node; - - return first; + list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests); + if (ptr_unmask_bits(p, 1)) + tasklet_hi_schedule(&engine->execlists.irq_tasklet); } static void execlists_submit_request(struct drm_i915_gem_request *request) @@ -701,24 +976,23 @@ static void execlists_submit_request(struct drm_i915_gem_request *request) /* Will be called from irq-context when using foreign fences. */ spin_lock_irqsave(&engine->timeline->lock, flags); - if (insert_request(engine, - &request->priotree, - request->priotree.priority)) { - if (execlists_elsp_ready(engine)) - tasklet_hi_schedule(&engine->irq_tasklet); - } + insert_request(engine, &request->priotree, request->priotree.priority); - GEM_BUG_ON(!engine->execlist_first); + GEM_BUG_ON(!engine->execlists.first); GEM_BUG_ON(list_empty(&request->priotree.link)); spin_unlock_irqrestore(&engine->timeline->lock, flags); } +static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt) +{ + return container_of(pt, struct drm_i915_gem_request, priotree); +} + static struct intel_engine_cs * pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) { - struct intel_engine_cs *engine = - container_of(pt, struct drm_i915_gem_request, priotree)->engine; + struct intel_engine_cs *engine = pt_to_request(pt)->engine; GEM_BUG_ON(!locked); @@ -737,6 +1011,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) struct i915_dependency stack; LIST_HEAD(dfs); + GEM_BUG_ON(prio == I915_PRIORITY_INVALID); + if (prio <= READ_ONCE(request->priotree.priority)) return; @@ -772,6 +1048,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) * engines. */ list_for_each_entry(p, &pt->signalers_list, signal_link) { + if (i915_gem_request_completed(pt_to_request(p->signaler))) + continue; + GEM_BUG_ON(p->signaler->priority < pt->priority); if (prio > READ_ONCE(p->signaler->priority)) list_move_tail(&p->dfs_link, &dfs); @@ -785,7 +1064,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) * execlists_submit_request()), we can set our own priority and skip * acquiring the engine locks. 
*/ - if (request->priotree.priority == INT_MIN) { + if (request->priotree.priority == I915_PRIORITY_INVALID) { GEM_BUG_ON(!list_empty(&request->priotree.link)); request->priotree.priority = prio; if (stack.dfs_link.next == stack.dfs_link.prev) @@ -815,8 +1094,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) } spin_unlock_irq(&engine->timeline->lock); - - /* XXX Do we need to preempt to make room for us and our deps? */ } static struct intel_ring * @@ -914,27 +1191,14 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request) */ request->reserved_space += EXECLISTS_REQUEST_SIZE; - if (i915.enable_guc_submission) { - /* - * Check that the GuC has space for the request before - * going any further, as the i915_add_request() call - * later on mustn't fail ... - */ - ret = i915_guc_wq_reserve(request); - if (ret) - goto err; - } - cs = intel_ring_begin(request, 0); - if (IS_ERR(cs)) { - ret = PTR_ERR(cs); - goto err_unreserve; - } + if (IS_ERR(cs)) + return PTR_ERR(cs); if (!ce->initialised) { ret = engine->init_context(request); if (ret) - goto err_unreserve; + return ret; ce->initialised = true; } @@ -948,12 +1212,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request) request->reserved_space -= EXECLISTS_REQUEST_SIZE; return 0; - -err_unreserve: - if (i915.enable_guc_submission) - i915_guc_wq_unreserve(request); -err: - return ret; } /* @@ -1031,6 +1289,8 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES); + *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + /* Pad to end of cacheline */ while ((unsigned long)batch % CACHELINE_BYTES) *batch++ = MI_NOOP; @@ -1044,26 +1304,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) return batch; } -/* - * This batch is started immediately after indirect_ctx batch. Since we ensure - * that indirect_ctx ends on a cacheline this batch is aligned automatically. - * - * The number of DWORDS written are returned using this field. - * - * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding - * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 
- */ -static u32 *gen8_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch) -{ - /* WaDisableCtxRestoreArbitration:bdw,chv */ - *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - *batch++ = MI_BATCH_BUFFER_END; - - return batch; -} - static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) { + *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); @@ -1109,6 +1353,8 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) *batch++ = 0; } + *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + /* Pad to end of cacheline */ while ((unsigned long)batch % CACHELINE_BYTES) *batch++ = MI_NOOP; @@ -1175,13 +1421,15 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return -EINVAL; switch (INTEL_GEN(engine->i915)) { + case 10: + return 0; case 9: wa_bb_fn[0] = gen9_init_indirectctx_bb; wa_bb_fn[1] = gen9_init_perctx_bb; break; case 8: wa_bb_fn[0] = gen8_init_indirectctx_bb; - wa_bb_fn[1] = gen8_init_perctx_bb; + wa_bb_fn[1] = NULL; break; default: MISSING_CASE(INTEL_GEN(engine->i915)); @@ -1232,9 +1480,7 @@ static u8 gtiir[] = { static int gen8_init_common_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - struct execlist_port *port = engine->execlist_port; - unsigned int n; - bool submit; + struct intel_engine_execlists * const execlists = &engine->execlists; int ret; ret = intel_mocs_init_engine(engine); @@ -1267,24 +1513,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + execlists->csb_head = -1; + execlists->active = 0; /* After a GPU reset, we may have requests to replay */ - submit = false; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { - if (!port_isset(&port[n])) - break; - - DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n", - engine->name, n, - port_request(&port[n])->global_seqno); - - /* Discard the current inflight count */ - port_set(&port[n], port_request(&port[n])); - submit = true; - } - - if (submit && !i915.enable_guc_submission) - execlists_submit_ports(engine); + if (!i915_modparams.enable_guc_submission && execlists->first) + tasklet_schedule(&execlists->irq_tasklet); return 0; } @@ -1325,9 +1559,11 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) static void reset_common_ring(struct intel_engine_cs *engine, struct drm_i915_gem_request *request) { - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_context *ce; - unsigned int n; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); /* * Catch up with any missed context-switch interrupts. @@ -1338,20 +1574,12 @@ static void reset_common_ring(struct intel_engine_cs *engine, * guessing the missed context-switch events by looking at what * requests were completed. 
*/ - if (!request) { - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) - i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - return; - } + execlist_cancel_port_requests(execlists); - if (request->ctx != port_request(port)->ctx) { - i915_gem_request_put(port_request(port)); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); - } + /* Push back any incomplete requests for replay after the reset. */ + unwind_incomplete_requests(engine); - GEM_BUG_ON(request->ctx != port_request(port)->ctx); + spin_unlock_irqrestore(&engine->timeline->lock, flags); /* If the request was innocent, we leave the request in the ELSP * and will try to replay it on restarting. The context image may @@ -1363,7 +1591,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. */ - if (request->fence.error != -EIO) + if (!request || request->fence.error != -EIO) return; /* We want a simple context + ring to execute the breadcrumb update. @@ -1386,10 +1614,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, intel_ring_update_space(request->ring); /* Reset WaIdleLiteRestore:bdw,skl as well */ - request->tail = - intel_ring_wrap(request->ring, - request->wa_tail - WA_TAIL_DWORDS*sizeof(u32)); - assert_ring_tail_valid(request->ring, request->tail); + unwind_wa_tail(request); } static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) @@ -1448,13 +1673,15 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, if (IS_ERR(cs)) return PTR_ERR(cs); + /* WaDisableCtxRestoreArbitration:bdw,chv */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + /* FIXME(BDW): Address space and security selectors. */ *cs++ = MI_BATCH_BUFFER_START_GEN8 | (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) | (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0); *cs++ = lower_32_bits(offset); *cs++ = upper_32_bits(offset); - *cs++ = MI_NOOP; intel_ring_advance(req, cs); return 0; @@ -1583,7 +1810,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, */ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs) { - *cs++ = MI_NOOP; + /* Ensure there's always at least one preemption point per-request. */ + *cs++ = MI_ARB_CHECK; *cs++ = MI_NOOP; request->wa_tail = intel_ring_offset(request, cs); } @@ -1604,7 +1832,6 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs) gen8_emit_wa_tail(request, cs); } - static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request, @@ -1632,7 +1859,6 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request, gen8_emit_wa_tail(request, cs); } - static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS; static int gen8_init_rcs_context(struct drm_i915_gem_request *req) @@ -1666,8 +1892,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) * Tasklet cannot be active at this point due intel_mark_active/idle * so this is just for documentation. 
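[Editor's note] reset_common_ring() above now cancels the in-flight ports and pushes incomplete requests back for replay while holding the timeline lock. The sketch below is a userspace model of that "unwind incomplete work onto a replay list" idea only; the list layout, field names and the absence of locking are simplifications, not the driver's unwind_incomplete_requests().

#include <stdbool.h>
#include <stdio.h>

struct request {
	int seqno;
	bool completed;
	struct request *next;
};

/* Move every incomplete request from the executing list onto a replay
 * list, leaving completed ones alone. Locking is omitted in this model. */
static struct request *unwind_incomplete(struct request **executing)
{
	struct request *replay = NULL, *req = *executing, **prev = executing;

	while (req) {
		struct request *next = req->next;

		if (!req->completed) {
			*prev = next;		/* unlink from executing */
			req->next = replay;	/* push onto replay list */
			replay = req;
		} else {
			prev = &req->next;
		}
		req = next;
	}
	return replay;
}

int main(void)
{
	struct request r2 = { 2, false, NULL };
	struct request r1 = { 1, true, &r2 };
	struct request *executing = &r1, *replay;

	replay = unwind_incomplete(&executing);
	printf("replay head: %d, executing head: %d\n",
	       replay ? replay->seqno : -1,
	       executing ? executing->seqno : -1);
	return 0;
}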
*/ - if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) - tasklet_kill(&engine->irq_tasklet); + if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state))) + tasklet_kill(&engine->execlists.irq_tasklet); dev_priv = engine->i915; @@ -1678,11 +1904,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (engine->status_page.vma) { - i915_gem_object_unpin_map(engine->status_page.vma->obj); - engine->status_page.vma = NULL; - } - intel_engine_cleanup_common(engine); lrc_destroy_wa_ctx(engine); @@ -1694,8 +1915,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) static void execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; + engine->cancel_requests = execlists_cancel_requests; engine->schedule = execlists_schedule; - engine->irq_tasklet.func = intel_lrc_irq_handler; + engine->execlists.irq_tasklet.func = intel_lrc_irq_handler; } static void @@ -1729,22 +1951,13 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } -static int -lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) -{ - const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; - void *hws; - - /* The HWSP is part of the default context object in LRC mode. */ - hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(hws)) - return PTR_ERR(hws); - - engine->status_page.page_addr = hws + hws_offset; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; - engine->status_page.vma = vma; - - return 0; +static void i915_error_reset(struct work_struct *work) { + struct intel_engine_cs *engine = + container_of(work, struct intel_engine_cs, + reset_work); + i915_handle_error(engine->i915, 1 << engine->id, + "Received error interrupt from engine %d", + engine->id); } static void @@ -1770,32 +1983,25 @@ logical_ring_setup(struct intel_engine_cs *engine) RING_CONTEXT_STATUS_BUF_BASE(engine), FW_REG_READ); - engine->fw_domains = fw_domains; + engine->execlists.fw_domains = fw_domains; - tasklet_init(&engine->irq_tasklet, + tasklet_init(&engine->execlists.irq_tasklet, intel_lrc_irq_handler, (unsigned long)engine); logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); + + INIT_WORK(&engine->reset_work, i915_error_reset); } -static int -logical_ring_init(struct intel_engine_cs *engine) +static int logical_ring_init(struct intel_engine_cs *engine) { - struct i915_gem_context *dctx = engine->i915->kernel_context; int ret; ret = intel_engine_init_common(engine); if (ret) goto error; - /* And setup the hardware status page. 
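[Editor's note] The new i915_error_reset() work handler above recovers its engine from the embedded work_struct via container_of() before raising the error. Below is a self-contained model of that embedded-work pattern with a hand-rolled container_of; the struct layout and handler signature are stand-ins, not the kernel's workqueue or i915_handle_error() API.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*func)(struct work_item *work);
};

struct engine {
	int id;
	struct work_item reset_work;	/* embedded, like engine->reset_work */
};

/* The handler only receives the embedded member and recovers its engine. */
static void error_reset(struct work_item *work)
{
	struct engine *engine = container_of(work, struct engine, reset_work);

	printf("resetting engine %d\n", engine->id);
}

int main(void)
{
	struct engine e = { .id = 3, .reset_work = { .func = error_reset } };

	e.reset_work.func(&e.reset_work);	/* what a workqueue would do */
	return 0;
}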
*/ - ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); - if (ret) { - DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); - goto error; - } - return 0; error: @@ -2033,6 +2239,14 @@ populate_lr_context(struct i915_gem_context *ctx, execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, ctx, engine, ring); + /* write the context's pid and hw_id/cid to the per-context HWS page */ + if(intel_vgpu_active(engine->i915) && pid_nr(ctx->pid)) { + *(u32*)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_PID_ADDR) + = pid_nr(ctx->pid) & 0x3fffff; + *(u32*)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_CID_ADDR) + = ctx->hw_id & 0x3fffff; + } + i915_gem_object_unpin_map(ctx_obj); return 0; @@ -2052,8 +2266,11 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); - /* One extra page as the sharing data between driver and GuC */ - context_size += PAGE_SIZE * LRC_PPHWSP_PN; + /* + * Before the actual start of the context image, we insert a few pages + * for our own use and for sharing with the GuC. + */ + context_size += LRC_HEADER_PAGES * PAGE_SIZE; ctx_obj = i915_gem_object_create(ctx->i915, context_size); if (IS_ERR(ctx_obj)) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 57ef5833c427..689fde1a63a9 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,7 @@ #define _INTEL_LRC_H_ #include "intel_ringbuffer.h" +#include "i915_gem_context.h" #define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT @@ -60,6 +61,7 @@ enum { INTEL_CONTEXT_SCHEDULE_IN = 0, INTEL_CONTEXT_SCHEDULE_OUT, + INTEL_CONTEXT_SCHEDULE_PREEMPTED, }; /* Logical Rings */ @@ -69,17 +71,42 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine); /* Logical Ring Contexts */ -/* One extra page is added before LRC for GuC as shared data */ +/* + * We allocate a header at the start of the context image for our own + * use, therefore the actual location of the logical state is offset + * from the start of the VMA. The layout is + * + * | [guc] | [hwsp] [logical state] | + * |<- our header ->|<- context image ->| + * + */ +/* The first page is used for sharing data with the GuC */ #define LRC_GUCSHR_PN (0) -#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) -#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) +#define LRC_GUCSHR_SZ (1) +/* At the start of the context image is its per-process HWS page */ +#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ) +#define LRC_PPHWSP_SZ (1) +/* Finally we have the logical state for the context */ +#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) + +/* + * Currently we include the PPHWSP in __intel_engine_context_size() so + * the size of the header is synonymous with the start of the PPHWSP. 
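[Editor's note] The intel_lrc.h comment above lays out the context image as GuC page, per-process HWSP, then logical state, and populate_lr_context() pokes the masked pid/hw_id into the PPHWSP. A standalone sketch of turning those page-number constants into byte offsets within one mapped image follows; the HWS_PID/CID offsets are illustrative assumptions, only the page defines and the 22-bit mask mirror the hunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define LRC_GUCSHR_PN	0	/* first page is shared with the GuC */
#define LRC_GUCSHR_SZ	1
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
#define LRC_PPHWSP_SZ	1
#define LRC_STATE_PN	(LRC_PPHWSP_PN + LRC_PPHWSP_SZ)

#define HWS_PID_OFFSET	0x100	/* illustrative offsets inside the PPHWSP */
#define HWS_CID_OFFSET	0x120

static void write_hws_ids(uint8_t *ctx_image, uint32_t pid, uint32_t hw_id)
{
	uint8_t *pphwsp = ctx_image + LRC_PPHWSP_PN * PAGE_SIZE;

	/* Both values are truncated to 22 bits, as in the hunk above. */
	memcpy(pphwsp + HWS_PID_OFFSET, &(uint32_t){ pid & 0x3fffff }, 4);
	memcpy(pphwsp + HWS_CID_OFFSET, &(uint32_t){ hw_id & 0x3fffff }, 4);
}

int main(void)
{
	static uint8_t image[LRC_STATE_PN * PAGE_SIZE + PAGE_SIZE];

	write_hws_ids(image, 1234, 7);
	printf("logical state starts at byte %d\n", LRC_STATE_PN * PAGE_SIZE);
	return 0;
}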
+ */ +#define LRC_HEADER_PAGES LRC_PPHWSP_PN struct drm_i915_private; struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); -uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); + +static inline uint64_t +intel_lr_context_descriptor(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return ctx->engine[engine->id].lrc_desc; +} + /* Execlists */ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8e215777c7f4..27a0021c5f2c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -880,8 +880,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) struct drm_i915_private *dev_priv = to_i915(dev); /* use the module option value if specified */ - if (i915.lvds_channel_mode > 0) - return i915.lvds_channel_mode == 2; + if (i915_modparams.lvds_channel_mode > 0) + return i915_modparams.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 98154efcb2f4..1d946240e55f 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; const struct firmware *fw = NULL; - const char *name = i915.vbt_firmware; + const char *name = i915_modparams.vbt_firmware; int ret; if (!name || !*name) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 3b1c5d783ee7..adc51e452e3e 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -379,13 +379,13 @@ enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv) { /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { + if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) { return *dev_priv->opregion.lid_state & 0x1 ? connector_status_connected : connector_status_disconnected; } - switch (i915.panel_ignore_lid) { + switch (i915_modparams.panel_ignore_lid) { case -2: return connector_status_connected; case -1: @@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, WARN_ON(panel->backlight.max == 0); - if (i915.invert_brightness < 0) + if (i915_modparams.invert_brightness < 0) return val; - if (i915.invert_brightness > 0 || + if (i915_modparams.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { return panel->backlight.max - val + panel->backlight.min; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cb950752c346..d5bf2c5e9ed8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -33,6 +33,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: RC6 * @@ -58,24 +62,23 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { + if (HAS_LLC(dev_priv)) { + /* + * WaCompressedResourceDisplayNewHashMode:skl,kbl + * Display WA#0390: skl,kbl + * + * Must match Sampler, Pixel Back End, and Media. See + * WaCompressedResourceSamplerPbeMediaNewHashMode. 
+ */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | + SKL_DE_COMPRESSED_HASH_MODE); + } + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ I915_WRITE(CHICKEN_PAR1_1, I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); - /* - * Display WA#0390: skl,bxt,kbl,glk - * - * Must match Sampler, Pixel Back End, and Media - * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31). - * - * Including bits outside the page in the hash would - * require 2 (or 4?) MiB alignment of resources. Just - * assume the defaul hashing mode which only uses bits - * within the page. - */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE); - I915_WRITE(GEN8_CONFIG0, I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); @@ -317,7 +320,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) { u32 val; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if (enable) @@ -332,14 +335,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) { u32 val; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); if (enable) @@ -348,7 +351,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) val &= ~DSP_MAXFIFO_PM5_ENABLE; vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } #define FW_WM(value, plane) \ @@ -2785,11 +2788,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("SKL Mailbox read error = %d\n", ret); @@ -2806,11 +2809,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("SKL Mailbox read error = %d\n", ret); return; @@ -3594,13 +3597,13 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; DRM_DEBUG_KMS("Enabling the SAGV\n"); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); /* We don't need to wait for the SAGV when enabling */ - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* * Some skl systems, pre-release machines in particular, @@ -3631,14 +3634,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; DRM_DEBUG_KMS("Disabling the SAGV\n"); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(dev_priv, 
GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_DISABLE, GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* * Some skl systems, pre-release machines in particular, @@ -4657,18 +4660,22 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, I915_WRITE(reg, 0); } -static void skl_write_wm_level(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const struct skl_wm_level *level) +static inline uint32_t skl_calc_wm_level(const struct skl_wm_level *level) { uint32_t val = 0; - if (level->plane_en) { val |= PLANE_WM_EN; val |= level->plane_res_b; val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; } + return val; +} +static void skl_write_wm_level(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const struct skl_wm_level *level) +{ + uint32_t val = skl_calc_wm_level(level); I915_WRITE(reg, val); } @@ -4682,13 +4689,41 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev_priv); enum pipe pipe = intel_crtc->pipe; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + struct intel_dom0_plane_regs *dom0_regs = NULL; +#endif for (level = 0; level <= max_level; level++) { +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + dom0_regs = &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_wm[level] = skl_calc_wm_level( + &wm->wm[level]); + } else { + skl_write_wm_level(dev_priv, + PLANE_WM(pipe, plane_id, level), + &wm->wm[level]); + } +#else skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), - &wm->wm[level]); + &wm->wm[level]); +#endif } - skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), - &wm->trans_wm); + +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + dom0_regs = &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_wm_trans = skl_calc_wm_level( + &wm->trans_wm); + } else { + skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), + &wm->trans_wm); + } +#else + skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), + &wm->trans_wm); +#endif skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), &ddb->plane[pipe][plane_id]); @@ -4877,6 +4912,14 @@ skl_compute_ddb(struct drm_atomic_state *state) */ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* In GVT environemnt, we only use the statically allocated ddb */ + if (dev_priv->gvt) { + memcpy(ddb, &dev_priv->gvt->ddb, sizeof(*ddb)); + return 0; + } +#endif + for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { struct intel_crtc_state *cstate; @@ -5535,7 +5578,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) wm->level = VLV_WM_LEVEL_PM2; if (IS_CHERRYVIEW(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); if (val & DSP_MAXFIFO_PM5_ENABLE) @@ -5565,7 +5608,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) wm->level = VLV_WM_LEVEL_DDR_DVFS; } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } for_each_intel_crtc(dev, crtc) { @@ -5669,12 +5712,30 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->wm.wm_mutex); } +/* + * FIXME should probably kill this and improve + * the real watermark readout/sanitation instead + */ +static void ilk_init_lp_watermarks(struct drm_i915_private 
*dev_priv) +{ + I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + + /* + * Don't touch WM1S_LP_EN here. + * Doing so could cause underruns. + */ +} + void ilk_wm_get_hw_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct drm_crtc *crtc; + ilk_init_lp_watermarks(dev_priv); + for_each_crtc(dev, crtc) ilk_pipe_wm_get_hw_state(crtc); @@ -5739,6 +5800,36 @@ void intel_update_watermarks(struct intel_crtc *crtc) dev_priv->display.update_wm(crtc); } +void intel_enable_ipc(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* Display WA #0477 WaDisableIPC: skl */ + if (IS_SKYLAKE(dev_priv)) { + dev_priv->ipc_enabled = false; + return; + } + + val = I915_READ(DISP_ARB_CTL2); + + if (dev_priv->ipc_enabled) + val |= DISP_IPC_ENABLE; + else + val &= ~DISP_IPC_ENABLE; + + I915_WRITE(DISP_ARB_CTL2, val); +} + +void intel_init_ipc(struct drm_i915_private *dev_priv) +{ + dev_priv->ipc_enabled = false; + if (!HAS_IPC(dev_priv)) + return; + + dev_priv->ipc_enabled = true; + intel_enable_ipc(dev_priv); +} + /* * Lock protecting IPS related data structures */ @@ -6108,7 +6199,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) void gen6_rps_busy(struct drm_i915_private *dev_priv) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (dev_priv->rps.enabled) { u8 freq; @@ -6131,7 +6222,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) dev_priv->rps.max_freq_softlimit))) DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } void gen6_rps_idle(struct drm_i915_private *dev_priv) @@ -6143,7 +6234,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) */ gen6_disable_rps_interrupts(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (dev_priv->rps.enabled) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_set_rps_idle(dev_priv); @@ -6153,7 +6244,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } void gen6_rps_boost(struct drm_i915_gem_request *rq, @@ -6189,7 +6280,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) { int err; - lockdep_assert_held(&dev_priv->rps.hw_lock); + lockdep_assert_held(&dev_priv->pcu_lock); GEM_BUG_ON(val > dev_priv->rps.max_freq); GEM_BUG_ON(val < dev_priv->rps.min_freq); @@ -6586,7 +6677,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) int rc6_mode; int ret; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); /* Here begins a magic sequence of register writes to enable * auto-downclocking. 
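[Editor's note] intel_init_ipc()/intel_enable_ipc() above keep a software flag and mirror it into a register bit, with the SKL-only Display WA #0477 forcing the feature off. A userspace model of that flag-plus-read-modify-write pattern is sketched below; the register is faked with a variable and the bit position is a stand-in, only the workaround behaviour follows the hunk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DISP_IPC_ENABLE (1u << 3)	/* stand-in bit position */

static uint32_t disp_arb_ctl2;		/* fake MMIO register */
static bool ipc_enabled;

static void enable_ipc(bool is_skylake)
{
	/* Display WA #0477 WaDisableIPC: the feature stays off on SKL. */
	if (is_skylake) {
		ipc_enabled = false;
		return;
	}

	if (ipc_enabled)
		disp_arb_ctl2 |= DISP_IPC_ENABLE;
	else
		disp_arb_ctl2 &= ~DISP_IPC_ENABLE;
}

static void init_ipc(bool has_ipc, bool is_skylake)
{
	ipc_enabled = has_ipc;	/* default on wherever the hardware has it */
	if (has_ipc)
		enable_ipc(is_skylake);
}

int main(void)
{
	init_ipc(true, false);
	printf("IPC %s, reg=0x%x\n", ipc_enabled ? "on" : "off", disp_arb_ctl2);
	return 0;
}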
@@ -6679,7 +6770,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) int scaling_factor = 180; struct cpufreq_policy *policy; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); policy = cpufreq_cpu_get(0); if (policy) { @@ -7072,7 +7163,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) enum intel_engine_id id; u32 gtfifodbg, val, rc6_mode = 0, pcbr; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | GT_FIFO_FREE_ENTRIES_CHV); @@ -7161,7 +7252,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) enum intel_engine_id id; u32 gtfifodbg, val, rc6_mode = 0; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); valleyview_check_pctx(dev_priv); @@ -7716,13 +7807,13 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. */ - if (!i915.enable_rc6) { + if (!i915_modparams.enable_rc6) { DRM_INFO("RC6 disabled, disabling runtime PM support\n"); intel_runtime_pm_get(dev_priv); } mutex_lock(&dev_priv->drm.struct_mutex); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) @@ -7762,7 +7853,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) /* Finally allow us to boost to max by default */ dev_priv->rps.boost_freq = dev_priv->rps.max_freq; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); mutex_unlock(&dev_priv->drm.struct_mutex); intel_autoenable_gt_powersave(dev_priv); @@ -7773,7 +7864,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); - if (!i915.enable_rc6) + if (!i915_modparams.enable_rc6) intel_runtime_pm_put(dev_priv); } @@ -7809,7 +7900,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) if (!READ_ONCE(dev_priv->rps.enabled)) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (INTEL_GEN(dev_priv) >= 9) { gen9_disable_rc6(dev_priv); @@ -7825,7 +7916,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) } dev_priv->rps.enabled = false; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) @@ -7840,7 +7931,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv)) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (IS_CHERRYVIEW(dev_priv)) { cherryview_enable_rps(dev_priv); @@ -7869,7 +7960,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); dev_priv->rps.enabled = true; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void __intel_autoenable_gt_powersave(struct work_struct *work) @@ -7895,7 +7986,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work) if (IS_ERR(req)) goto unlock; - if (!i915.enable_execlists && i915_switch_context(req) == 0) + if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0) rcs->init_context(req); /* Mark the device busy, calling intel_enable_gt_powersave() */ @@ -7959,18 
+8050,6 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) } } -static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) -{ - I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); - - /* - * Don't touch WM1S_LP_EN here. - * Doing so could cause underruns. - */ -} - static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; @@ -8004,8 +8083,6 @@ static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); - ilk_init_lp_watermarks(dev_priv); - /* * Based on the document from hardware guys the following bits * should be set unconditionally in order to enable FBC. @@ -8118,8 +8195,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_GT_MODE, _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); @@ -8257,6 +8332,27 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, I915_WRITE(GEN7_MISCCPCTL, misccpctl); } +static void cannonlake_init_clock_gating(struct drm_i915_private *dev_priv) +{ + /* This is not an Wa. Enable for better image quality */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); + + /* WaEnableChickenDCPR:cnl */ + I915_WRITE(GEN8_CHICKEN_DCPR_1, + I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + + /* WaFbcWakeMemOn:cnl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_MEMORY_WAKE); + + /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) + I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, + I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | + SARBUNIT_CLKGATE_DIS); +} + static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv) { gen9_init_clock_gating(dev_priv); @@ -8293,8 +8389,6 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) { enum pipe pipe; - ilk_init_lp_watermarks(dev_priv); - /* WaSwitchSolVfFArbitrationPriority:bdw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -8349,8 +8443,6 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) { - ilk_init_lp_watermarks(dev_priv); - /* L3 caching of data atomics doesn't work -- disable it. 
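[Editor's note] cannonlake_init_clock_gating() above applies WaSarbUnitClockGatingDisable only for pre-production steppings via a revision-range check. The following standalone model shows that range-gated register update; the revision values and the bit written are placeholders, only the shape of the check matches the hunk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CNL_REVID_A0 0x0	/* illustrative stepping values */
#define CNL_REVID_B0 0x1

static bool revid_in_range(uint8_t revid, uint8_t since, uint8_t until)
{
	return revid >= since && revid <= until;
}

static uint32_t apply_pre_prod_wa(uint8_t revid, uint32_t reg)
{
	/* Apply the workaround only on pre-production A0..B0 parts. */
	if (revid_in_range(revid, CNL_REVID_A0, CNL_REVID_B0))
		reg |= 1u << 5;	/* stand-in for SARBUNIT_CLKGATE_DIS */
	return reg;
}

int main(void)
{
	printf("A0: 0x%x  C0: 0x%x\n",
	       apply_pre_prod_wa(0x0, 0), apply_pre_prod_wa(0x2, 0));
	return 0;
}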
*/ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); I915_WRITE(HSW_ROW_CHICKEN3, @@ -8394,10 +8486,6 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); - /* WaRsPkgCStateDisplayPMReq:hsw */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); - lpt_init_clock_gating(dev_priv); } @@ -8405,8 +8493,6 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t snpcr; - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:ivb */ @@ -8737,7 +8823,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_SKYLAKE(dev_priv)) + if (IS_CANNONLAKE(dev_priv)) + dev_priv->display.init_clock_gating = cannonlake_init_clock_gating; + else if (IS_SKYLAKE(dev_priv)) dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) dev_priv->display.init_clock_gating = kabylake_init_clock_gating; @@ -8907,7 +8995,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val { int status; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); /* GEN6_PCODE_* are outside of the forcewake domain, we can * use te fw I915_READ variants to reduce the amount of work @@ -8954,7 +9042,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, { int status; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); /* GEN6_PCODE_* are outside of the forcewake domain, we can * use te fw I915_READ variants to reduce the amount of work @@ -9031,7 +9119,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, u32 status; int ret; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ &status) @@ -9046,7 +9134,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, ret = 0; goto out; } - ret = _wait_for(COND, timeout_base_ms * 1000, 10); + ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10); if (!ret) goto out; @@ -9165,7 +9253,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) void intel_pm_setup(struct drm_i915_private *dev_priv) { - mutex_init(&dev_priv->rps.hw_lock); + mutex_init(&dev_priv->pcu_lock); INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, __intel_autoenable_gt_powersave); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 1b31ab002dae..0c57a33a0347 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -397,7 +397,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!i915.enable_psr) { + if (!i915_modparams.enable_psr) { DRM_DEBUG_KMS("PSR disable by flag\n"); return false; } @@ -943,8 +943,8 @@ void intel_psr_init(struct drm_i915_private *dev_priv) HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; /* Per platform default: all disabled. 
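[Editor's note] skl_pcode_request() above keeps retrying the pcode mailbox until the reply matches, falling back to _wait_for() with a timeout (note the extra polling argument in the changed call). Below is a self-contained poll-until-condition-with-timeout model; the callback stands in for the COND macro, and the timing helpers are ordinary POSIX calls rather than the kernel's.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll cond(arg) every poll_us microseconds until it returns true or
 * timeout_us elapses. Returns 0 on success, -1 on timeout. */
static int wait_for(bool (*cond)(void *arg), void *arg,
		    long long timeout_us, long long poll_us)
{
	const long long deadline = now_us() + timeout_us;

	for (;;) {
		if (cond(arg))
			return 0;
		if (now_us() > deadline)
			return cond(arg) ? 0 : -1; /* one final check, as the kernel helper does */
		struct timespec ts = { 0, poll_us * 1000 };
		nanosleep(&ts, NULL);
	}
}

static bool counter_done(void *arg)
{
	return --*(int *)arg <= 0;	/* pretend the hardware becomes ready */
}

int main(void)
{
	int countdown = 5;

	printf("wait_for: %d\n", wait_for(counter_done, &countdown, 100000, 100));
	return 0;
}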
*/ - if (i915.enable_psr == -1) - i915.enable_psr = 0; + if (i915_modparams.enable_psr == -1) + i915_modparams.enable_psr = 0; /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) @@ -958,11 +958,11 @@ void intel_psr_init(struct drm_i915_private *dev_priv) dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; /* Override link_standby x link_off defaults */ - if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) { + if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) { DRM_DEBUG_KMS("PSR: Forcing link standby\n"); dev_priv->psr.link_standby = true; } - if (i915.enable_psr == 3 && dev_priv->psr.link_standby) { + if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) { DRM_DEBUG_KMS("PSR: Forcing main link off\n"); dev_priv->psr.link_standby = false; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cdf084ef5aae..d6944d0f3c0c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -778,6 +778,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs) return cs; } +static void cancel_requests(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + + /* Mark all submitted requests as skipped. */ + list_for_each_entry(request, &engine->timeline->requests, link) { + GEM_BUG_ON(!request->global_seqno); + if (!i915_gem_request_completed(request)) + dma_fence_set_error(&request->fence, -EIO); + } + /* Remaining _unready_ requests will be nop'ed when submitted */ + + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} + static void i9xx_submit_request(struct drm_i915_gem_request *request) { struct drm_i915_private *dev_priv = request->i915; @@ -1174,113 +1192,7 @@ i915_emit_bb_start(struct drm_i915_gem_request *req, return 0; } -static void cleanup_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - if (!dev_priv->status_page_dmah) - return; - - drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); - engine->status_page.page_addr = NULL; -} - -static void cleanup_status_page(struct intel_engine_cs *engine) -{ - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - - vma = fetch_and_zero(&engine->status_page.vma); - if (!vma) - return; - - obj = vma->obj; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_unpin_map(obj); - __i915_gem_object_release_unless_active(obj); -} - -static int init_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - unsigned int flags; - void *vaddr; - int ret; - - obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate status page\n"); - return PTR_ERR(obj); - } - - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err; - - vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err; - } - - flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. 
To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actualy map it). - */ - flags |= PIN_MAPPABLE; - ret = i915_vma_pin(vma, 0, 4096, flags); - if (ret) - goto err; - - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_unpin; - } - - engine->status_page.vma = vma; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma); - engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); - - DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", - engine->name, i915_ggtt_offset(vma)); - return 0; - -err_unpin: - i915_vma_unpin(vma); -err: - i915_gem_object_put(obj); - return ret; -} - -static int init_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - GEM_BUG_ON(engine->id != RCS); - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - - engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(engine->status_page.page_addr, 0, PAGE_SIZE); - - return 0; -} int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915, @@ -1567,17 +1479,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) if (err) goto err; - if (HWS_NEEDS_PHYSICAL(engine->i915)) - err = init_phys_status_page(engine); - else - err = init_status_page(engine); - if (err) - goto err; - ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); if (IS_ERR(ring)) { err = PTR_ERR(ring); - goto err_hws; + goto err; } /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ @@ -1592,11 +1497,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) err_ring: intel_ring_free(ring); -err_hws: - if (HWS_NEEDS_PHYSICAL(engine->i915)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); err: intel_engine_cleanup_common(engine); return err; @@ -1615,11 +1515,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (HWS_NEEDS_PHYSICAL(dev_priv)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); - intel_engine_cleanup_common(engine); dev_priv->engine[engine->id] = NULL; @@ -1983,7 +1878,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *obj; int ret, i; - if (!i915.semaphores) + if (!i915_modparams.semaphores) return; if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { @@ -2083,7 +1978,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, i915_gem_object_put(obj); err: DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); - i915.semaphores = 0; + i915_modparams.semaphores = 0; } static void intel_ring_init_irq(struct drm_i915_private *dev_priv, @@ -2115,11 +2010,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv, static void i9xx_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = i9xx_submit_request; + engine->cancel_requests = cancel_requests; } static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = gen6_bsd_submit_request; + engine->cancel_requests = cancel_requests; } static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, @@ -2138,7 +2035,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->emit_breadcrumb = i9xx_emit_breadcrumb; 
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; - if (i915.semaphores) { + if (i915_modparams.semaphores) { int num_rings; engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; @@ -2182,7 +2079,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) engine->emit_breadcrumb = gen8_render_emit_breadcrumb; engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz; engine->emit_flush = gen8_render_ring_flush; - if (i915.semaphores) { + if (i915_modparams.semaphores) { int num_rings; engine->semaphore.signal = gen8_rcs_signal; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6b2067f10824..fe28309e3bf2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -185,6 +185,104 @@ struct i915_priolist { int priority; }; +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. + */ +struct intel_engine_execlists { + /** + * @irq_tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct irq_tasklet; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @port: execlist port states + * + * For each hardware ELSP (ExecList Submission Port) we keep + * track of the last request and the number of times we submitted + * that port to hw. We then count the number of times the hw reports + * a context completion or preemption. As only one context can + * be active on hw, we limit resubmission of context to port[0]. This + * is called Lite Restore, of the context. + */ + struct execlist_port { + /** + * @request_count: combined request and submission count + */ + struct drm_i915_gem_request *request_count; +#define EXECLIST_COUNT_BITS 2 +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) +#define port_set(p, packed) ((p)->request_count = (packed)) +#define port_isset(p) ((p)->request_count) +#define port_index(p, execlists) ((p) - (execlists)->port) + + /** + * @context_id: context ID for port + */ + GEM_DEBUG_DECL(u32 context_id); + +#define EXECLIST_MAX_PORTS 2 + } port[EXECLIST_MAX_PORTS]; + + /** + * @active: is the HW active? We consider the HW as active after + * submitting any context for execution and until we have seen the + * last context completion event. After that, we do not expect any + * more events until we submit, and so can park the HW. + * + * As we have a small number of different sources from which we feed + * the HW, we track the state of each inside a single bitfield. 
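[Editor's note] The execlist_port documented above packs a small submission count into the low bits of the request pointer (EXECLIST_COUNT_BITS). The sketch below shows that pack/unpack trick in isolation, relying on the object's alignment to free the low bits; the helper names are local stand-ins, not the driver's ptr_*_bits() macros.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define COUNT_BITS 2
#define COUNT_MASK ((1ull << COUNT_BITS) - 1)

struct request { int seqno; };

/* The object must be aligned to at least 1 << COUNT_BITS bytes so the
 * low bits of its address are guaranteed zero and free to reuse. */
static void *port_pack(struct request *rq, unsigned int count)
{
	assert(((uintptr_t)rq & COUNT_MASK) == 0 && count <= COUNT_MASK);
	return (void *)((uintptr_t)rq | count);
}

static struct request *port_request(void *packed)
{
	return (struct request *)((uintptr_t)packed & ~COUNT_MASK);
}

static unsigned int port_count(void *packed)
{
	return (uintptr_t)packed & COUNT_MASK;
}

int main(void)
{
	struct request *rq = aligned_alloc(1 << COUNT_BITS, sizeof(*rq));
	void *packed;

	rq->seqno = 42;
	packed = port_pack(rq, 1);		/* submitted once */
	packed = port_pack(port_request(packed), port_count(packed) + 1);

	printf("seqno=%d count=%u\n", port_request(packed)->seqno,
	       port_count(packed));
	free(rq);
	return 0;
}

Because only one context may be resubmitted (lite restore) on port 0, two count bits are enough to distinguish fresh submissions from resubmissions of the same request.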
+ */ + unsigned int active; +#define EXECLISTS_ACTIVE_USER 0 +#define EXECLISTS_ACTIVE_PREEMPT 1 + + /** + * @port_mask: number of execlist ports - 1 + */ + unsigned int port_mask; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root queue; + + /** + * @first: leftmost level in priority @queue + */ + struct rb_node *first; + + /** + * @fw_domains: forcewake domains for irq tasklet + */ + unsigned int fw_domains; + + /** + * @csb_head: context status buffer head + */ + unsigned int csb_head; + + /** + * @csb_use_mmio: access csb through mmio, instead of hwsp + */ + bool csb_use_mmio; +}; + #define INTEL_ENGINE_CS_MAX_NAME 8 struct intel_engine_cs { @@ -307,6 +405,14 @@ struct intel_engine_cs { void (*schedule)(struct drm_i915_gem_request *request, int priority); + /* + * Cancel all requests on the hardware, or queued for execution. + * This should only cancel the ready requests that have been + * submitted to the engine (via the engine->submit_request callback). + * This is called when marking the device as wedged. + */ + void (*cancel_requests)(struct intel_engine_cs *engine); + /* Some chipsets are not quite as coherent as advertised and need * an expensive kick to force a true read of the up-to-date seqno. * However, the up-to-date seqno is not always required and the last @@ -373,25 +479,8 @@ struct intel_engine_cs { u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); } semaphore; - /* Execlists */ - struct tasklet_struct irq_tasklet; - struct i915_priolist default_priolist; - bool no_priolist; - struct execlist_port { - struct drm_i915_gem_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, e) ((p) - (e)->execlist_port) - GEM_DEBUG_DECL(u32 context_id); - } execlist_port[2]; - struct rb_root execlist_queue; - struct rb_node *execlist_first; - unsigned int fw_domains; + struct work_struct reset_work; + struct intel_engine_execlists execlists; /* Contexts are pinned whilst they are active on the GPU. 
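[Editor's note] The new engine->cancel_requests hook documented above is invoked when wedging the device: every request already submitted but not yet completed has its fence marked with -EIO. A pthreads-based model of that sweep follows; the request/fence layout and the lock are stand-ins, only the "mark incomplete work as failed under the timeline lock" behaviour mirrors the hunks.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	unsigned int seqno;
	bool completed;
	int fence_error;
	struct request *next;
};

static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mark every submitted-but-incomplete request as failed with -EIO,
 * mirroring what cancel_requests() does when the device is wedged. */
static void cancel_requests(struct request *head)
{
	pthread_mutex_lock(&timeline_lock);
	for (struct request *rq = head; rq; rq = rq->next)
		if (!rq->completed)
			rq->fence_error = -EIO;
	pthread_mutex_unlock(&timeline_lock);
}

int main(void)
{
	struct request r2 = { 2, false, 0, NULL };
	struct request r1 = { 1, true, 0, &r2 };

	cancel_requests(&r1);
	printf("r1=%d r2=%d\n", r1.fence_error, r2.fence_error);
	return 0;
}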
The last * context executed remains active whilst the GPU is idle - the @@ -444,6 +533,46 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; +static inline void +execlists_set_active(struct intel_engine_execlists *execlists, + unsigned int bit) +{ + __set_bit(bit, (unsigned long *)&execlists->active); +} + +static inline void +execlists_clear_active(struct intel_engine_execlists *execlists, + unsigned int bit) +{ + __clear_bit(bit, (unsigned long *)&execlists->active); +} + +static inline bool +execlists_is_active(const struct intel_engine_execlists *execlists, + unsigned int bit) +{ + return test_bit(bit, (unsigned long *)&execlists->active); +} + +static inline unsigned int +execlists_num_ports(const struct intel_engine_execlists * const execlists) +{ + return execlists->port_mask + 1; +} + +static inline void +execlists_port_complete(struct intel_engine_execlists * const execlists, + struct execlist_port * const port) +{ + const unsigned int m = execlists->port_mask; + + GEM_BUG_ON(port_index(port, execlists) != 0); + GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); + + memmove(port, port + 1, m * sizeof(struct execlist_port)); + memset(port + m, 0, sizeof(struct execlist_port)); +} + static inline unsigned int intel_engine_flag(const struct intel_engine_cs *engine) { @@ -494,9 +623,17 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) */ #define I915_GEM_HWS_INDEX 0x30 #define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT) -#define I915_GEM_HWS_SCRATCH_INDEX 0x40 +#define I915_GEM_HWS_PID_INDEX 0x40 +#define I915_GEM_HWS_PID_ADDR (I915_GEM_HWS_PID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_CID_INDEX 0x48 +#define I915_GEM_HWS_CID_ADDR (I915_GEM_HWS_CID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_SCRATCH_INDEX 0x50 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_HWS_CSB_BUF0_INDEX 0x10 +#define I915_HWS_CSB_WRITE_INDEX 0x1f +#define CNL_HWS_CSB_WRITE_INDEX 0x2f + struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, int size); int intel_ring_pin(struct intel_ring *ring, @@ -736,16 +873,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv); void intel_engines_mark_idle(struct drm_i915_private *i915); void intel_engines_reset_default_submission(struct drm_i915_private *i915); -static inline bool -__intel_engine_can_store_dword(unsigned int gen, unsigned int class) -{ - if (gen <= 2) - return false; /* uses physical not virtual addresses */ - - if (gen == 6 && class == VIDEO_DECODE_CLASS) - return false; /* b0rked */ - - return true; -} +bool intel_engine_can_store_dword(struct intel_engine_cs *engine); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 49577eba8e7e..deeb10952095 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC5\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } @@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) { DRM_DEBUG_KMS("Disabling DC6\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + 
I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } @@ -785,7 +795,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : PUNIT_PWRGT_PWR_GATE(power_well_id); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); #define COND \ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) @@ -806,7 +816,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, #undef COND out: - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void vlv_power_well_enable(struct drm_i915_private *dev_priv, @@ -833,7 +843,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, mask = PUNIT_PWRGT_MASK(power_well_id); ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; /* @@ -852,7 +862,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; WARN_ON(ctrl != state); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return enabled; } @@ -1364,7 +1374,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, bool enabled; u32 state, ctrl; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); /* @@ -1381,7 +1391,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); WARN_ON(ctrl << 16 != state); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return enabled; } @@ -1396,7 +1406,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, state = enable ? 
DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); #define COND \ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) @@ -1417,7 +1427,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, #undef COND out: - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, @@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ @@ -1833,6 +1844,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ BIT_ULL(POWER_DOMAIN_INIT)) static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { @@ -2413,7 +2425,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, mask = 0; } - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) max_dc = 0; if (enable_dc >= 0 && enable_dc <= max_dc) { @@ -2471,10 +2483,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, - i915.disable_power_well); - dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, - i915.enable_dc); + i915_modparams.disable_power_well = + sanitize_disable_power_well_option(dev_priv, + i915_modparams.disable_power_well); + dev_priv->csr.allowed_dc_mask = + get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); @@ -2535,7 +2548,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv) intel_display_set_init_power(dev_priv, true); /* Remove the refcount we took to keep power well support disabled. */ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); /* @@ -2975,7 +2988,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) /* For now, we need the power well to be always enabled. */ intel_display_set_init_power(dev_priv, true); /* Disable power support if the user asked so. */ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(dev_priv); power_domains->initializing = false; @@ -2994,7 +3007,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) * Even if power well support was disabled we still want to disable * power wells while we are system suspended. 
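[Editor's note] intel_power_domains_init() above sanitizes the user-supplied module parameters against what the platform actually supports (power-well control, maximum allowed DC state). Below is a small model of that "negative means auto, otherwise clamp to the platform maximum" sanitization; the function and parameter names are local assumptions, not the driver's helpers.

#include <stdio.h>

/* enable_dc: -1 = auto, otherwise the user's requested maximum DC state. */
static int sanitize_enable_dc(int requested, int platform_max)
{
	if (requested >= 0 && requested <= platform_max)
		return requested;

	if (requested >= 0)
		fprintf(stderr, "dc=%d not supported, capping to %d\n",
			requested, platform_max);
	return platform_max;	/* auto, or clamp an over-large request */
}

int main(void)
{
	printf("%d %d %d\n",
	       sanitize_enable_dc(-1, 2),	/* auto -> platform max */
	       sanitize_enable_dc(1, 2),	/* honoured */
	       sanitize_enable_dc(9, 2));	/* clamped */
	return 0;
}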
*/ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); if (IS_CANNONLAKE(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 7d971cb56116..75c872bb8cc9 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -81,7 +81,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr) { u32 val = 0; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); mutex_lock(&dev_priv->sb_lock); vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, @@ -95,7 +95,7 @@ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val) { int err; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); mutex_lock(&dev_priv->sb_lock); err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, @@ -125,7 +125,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) { u32 val = 0; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); mutex_lock(&dev_priv->sb_lock); vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 524933b01483..5276027e7d58 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,6 +41,10 @@ #include #include "i915_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + static bool format_is_yuv(uint32_t format) { @@ -249,6 +253,11 @@ skl_update_plane(struct intel_plane *plane, uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; +#endif /* Sizes are 0 based */ src_w--; @@ -258,6 +267,32 @@ skl_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + dom0_regs->plane_keyval = key->min_value; + dom0_regs->plane_keymax = key->max_value; + dom0_regs->plane_keymsk = key->channel_mask; + dom0_regs->plane_offset = (y << 16) | x; + dom0_regs->plane_stride = stride; + dom0_regs->plane_size = (src_h << 16) | src_w; + dom0_regs->plane_aux_dist = + (plane_state->aux.offset - surf_addr) | aux_stride; + dom0_regs->plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + dom0_regs->plane_pos = (crtc_y << 16) | crtc_x; + dom0_regs->plane_ctl = plane_ctl; + dom0_regs->plane_surf = + intel_plane_ggtt_offset(plane_state) + + surf_addr; + + if (plane_state->scaler_id >= 0) + DRM_ERROR("GVT not support plane scaling yet\n"); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + return; + } +#endif + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), PLANE_COLOR_PIPE_GAMMA_ENABLE | @@ -313,6 +348,17 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + struct intel_dom0_plane_regs *dom0_regs = + 
&gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_ctl = 0; + dom0_regs->plane_surf = 0; + return; + } +#endif spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -324,6 +370,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +bool +skl_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static void chv_update_csc(struct intel_plane *plane, uint32_t format) { @@ -501,6 +567,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +vlv_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -641,6 +727,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +ivb_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -772,6 +877,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +g4x_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static int intel_check_sprite_plane(struct intel_plane *plane, struct intel_crtc_state *crtc_state, @@ -1227,6 +1351,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); @@ -1237,6 +1362,7 @@ intel_sprite_plane_create(struct 
drm_i915_private *dev_priv, intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); @@ -1247,6 +1373,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = vlv_update_plane; intel_plane->disable_plane = vlv_disable_plane; + intel_plane->get_hw_state = vlv_plane_get_hw_state; plane_formats = vlv_plane_formats; num_plane_formats = ARRAY_SIZE(vlv_plane_formats); @@ -1262,6 +1389,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; + intel_plane->get_hw_state = ivb_plane_get_hw_state; plane_formats = snb_plane_formats; num_plane_formats = ARRAY_SIZE(snb_plane_formats); @@ -1272,6 +1400,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = g4x_update_plane; intel_plane->disable_plane = g4x_disable_plane; + intel_plane->get_hw_state = g4x_plane_get_hw_state; modifiers = i9xx_plane_format_modifiers; if (IS_GEN6(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 0178ba42a0e5..901854007664 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -63,35 +63,35 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) { if (!HAS_GUC(dev_priv)) { - if (i915.enable_guc_loading > 0 || - i915.enable_guc_submission > 0) + if (i915_modparams.enable_guc_loading > 0 || + i915_modparams.enable_guc_submission > 0) DRM_INFO("Ignoring GuC options, no hardware\n"); - i915.enable_guc_loading = 0; - i915.enable_guc_submission = 0; + i915_modparams.enable_guc_loading = 0; + i915_modparams.enable_guc_submission = 0; return; } /* A negative value means "use platform default" */ - if (i915.enable_guc_loading < 0) - i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv); + if (i915_modparams.enable_guc_loading < 0) + i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv); /* Verify firmware version */ - if (i915.enable_guc_loading) { + if (i915_modparams.enable_guc_loading) { if (HAS_HUC_UCODE(dev_priv)) intel_huc_select_fw(&dev_priv->huc); if (intel_guc_select_fw(&dev_priv->guc)) - i915.enable_guc_loading = 0; + i915_modparams.enable_guc_loading = 0; } /* Can't enable guc submission without guc loaded */ - if (!i915.enable_guc_loading) - i915.enable_guc_submission = 0; + if (!i915_modparams.enable_guc_loading) + i915_modparams.enable_guc_submission = 0; /* A negative value means "use platform default" */ - if (i915.enable_guc_submission < 0) - i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv); + if (i915_modparams.enable_guc_submission < 0) + i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv); } static void gen8_guc_raise_irq(struct intel_guc *guc) @@ -290,7 +290,7 @@ static void guc_init_send_regs(struct intel_guc *guc) static void guc_capture_load_err_log(struct intel_guc *guc) { - if (!guc->log.vma || i915.guc_log_level < 0) + if (!guc->log.vma || i915_modparams.guc_log_level < 0) return; if (!guc->load_err_log) @@ -333,7 +333,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) struct intel_guc *guc = &dev_priv->guc; int ret, attempts; - if (!i915.enable_guc_loading) + if (!i915_modparams.enable_guc_loading) return 0; guc_disable_communication(guc); @@ -342,7 +342,7 @@ int 
intel_uc_init_hw(struct drm_i915_private *dev_priv) /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(dev_priv); - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -391,8 +391,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) goto err_log_capture; intel_guc_auth_huc(dev_priv); - if (i915.enable_guc_submission) { - if (i915.guc_log_level >= 0) + if (i915_modparams.enable_guc_submission) { + if (i915_modparams.guc_log_level >= 0) gen9_enable_guc_interrupts(dev_priv); ret = i915_guc_submission_enable(dev_priv); @@ -417,23 +417,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) err_log_capture: guc_capture_load_err_log(guc); err_submission: - if (i915.enable_guc_submission) + if (i915_modparams.enable_guc_submission) i915_guc_submission_fini(dev_priv); err_guc: i915_ggtt_disable_guc(dev_priv); DRM_ERROR("GuC init failed\n"); - if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1) + if (i915_modparams.enable_guc_loading > 1 || + i915_modparams.enable_guc_submission > 1) ret = -EIO; else ret = 0; - if (i915.enable_guc_submission) { - i915.enable_guc_submission = 0; + if (i915_modparams.enable_guc_submission) { + i915_modparams.enable_guc_submission = 0; DRM_NOTE("Falling back from GuC submission to execlist mode\n"); } - i915.enable_guc_loading = 0; + i915_modparams.enable_guc_loading = 0; DRM_NOTE("GuC firmware loading disabled\n"); return ret; @@ -443,15 +444,15 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv) { guc_free_load_err_log(&dev_priv->guc); - if (!i915.enable_guc_loading) + if (!i915_modparams.enable_guc_loading) return; - if (i915.enable_guc_submission) + if (i915_modparams.enable_guc_submission) i915_guc_submission_disable(dev_priv); guc_disable_communication(&dev_priv->guc); - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { gen9_disable_guc_interrupts(dev_priv); i915_guc_submission_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 22ae52b17b0f..7703c9ad6511 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -52,17 +52,6 @@ struct drm_i915_gem_request; * GuC). The subsequent pages of the client object constitute the work * queue (a circular array of work items), again described in the process * descriptor. Work queue pages are mapped momentarily as required. - * - * We also keep a few statistics on failures. Ideally, these should all - * be zero! - * no_wq_space: times that the submission pre-check found no space was - * available in the work queue (note, the queue is shared, - * not per-engine). It is OK for this to be nonzero, but - * it should not be huge! - * b_fail: failed to ring the doorbell. This should never happen, unless - * somehow the hardware misbehaves, or maybe if the GuC firmware - * crashes? We probably need to reset the GPU to recover. 
- * retcode: errno from last guc_submit() */ struct i915_guc_client { struct i915_vma *vma; @@ -77,15 +66,8 @@ struct i915_guc_client { u16 doorbell_id; unsigned long doorbell_offset; - u32 doorbell_cookie; spinlock_t wq_lock; - uint32_t wq_offset; - uint32_t wq_size; - uint32_t wq_tail; - uint32_t wq_rsvd; - uint32_t no_wq_space; - /* Per-engine counts of GuC submissions */ uint64_t submissions[I915_NUM_ENGINES]; }; @@ -250,8 +232,6 @@ u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); /* i915_guc_submission.c */ int i915_guc_submission_init(struct drm_i915_private *dev_priv); int i915_guc_submission_enable(struct drm_i915_private *dev_priv); -int i915_guc_wq_reserve(struct drm_i915_gem_request *rq); -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request); void i915_guc_submission_disable(struct drm_i915_private *dev_priv); void i915_guc_submission_fini(struct drm_i915_private *dev_priv); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1d7b879cc68c..4653b0e1aa79 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -434,9 +434,16 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv) i915_check_and_clear_faults(dev_priv); } +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv) +{ + iosf_mbi_register_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); +} + void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); + i915_modparams.enable_rc6 = + sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ intel_sanitize_gt_powersave(dev_priv); @@ -489,6 +496,57 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +/** + * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace + * @dev_priv: i915 device instance + * + * This function is a wrapper around intel_uncore_forcewake_get() to acquire + * the GT powerwell and in the process disable our debugging for the + * duration of userspace's bypass. + */ +void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->uncore.lock); + if (!dev_priv->uncore.user_forcewake.count++) { + intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); + + /* Save and disable mmio debugging for the user bypass */ + dev_priv->uncore.user_forcewake.saved_mmio_check = + dev_priv->uncore.unclaimed_mmio_check; + dev_priv->uncore.user_forcewake.saved_mmio_debug = + i915_modparams.mmio_debug; + + dev_priv->uncore.unclaimed_mmio_check = 0; + i915_modparams.mmio_debug = 0; + } + spin_unlock_irq(&dev_priv->uncore.lock); +} + +/** + * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace + * @dev_priv: i915 device instance + * + * This function complements intel_uncore_forcewake_user_get() and releases + * the GT powerwell taken on behalf of the userspace bypass. 
+ */ +void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->uncore.lock); + if (!--dev_priv->uncore.user_forcewake.count) { + if (intel_uncore_unclaimed_mmio(dev_priv)) + dev_info(dev_priv->drm.dev, + "Invalid mmio detected during user access\n"); + + dev_priv->uncore.unclaimed_mmio_check = + dev_priv->uncore.user_forcewake.saved_mmio_check; + i915_modparams.mmio_debug = + dev_priv->uncore.user_forcewake.saved_mmio_debug; + + intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); + } + spin_unlock_irq(&dev_priv->uncore.lock); +} + /** * intel_uncore_forcewake_get__locked - grab forcewake domain references * @dev_priv: i915 device instance @@ -790,7 +848,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, "Unclaimed %s register 0x%x\n", read ? "read from" : "write to", i915_mmio_reg_offset(reg))) - i915.mmio_debug--; /* Only report the first N failures */ + /* Only report the first N failures */ + i915_modparams.mmio_debug--; } static inline void @@ -799,7 +858,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, const bool read, const bool before) { - if (likely(!i915.mmio_debug)) + if (likely(!i915_modparams.mmio_debug)) return; __unclaimed_reg_debug(dev_priv, reg, read, before); @@ -1171,8 +1230,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, * bus, which will be busy after this notification, leading to: * "render: timed out waiting for forcewake ack request." * errors. + * + * The notifier is unregistered during intel_runtime_suspend(), + * so it's ok to access the HW here without holding a RPM + * wake reference -> disable wakeref asserts for the time of + * the access. */ + disable_rpm_wakeref_asserts(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + enable_rpm_wakeref_asserts(dev_priv); break; case MBI_PMIC_BUS_ACCESS_END: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -1241,72 +1307,65 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv) intel_uncore_forcewake_reset(dev_priv, false); } -#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) - -static const struct register_whitelist { - i915_reg_t offset_ldw, offset_udw; - uint32_t size; - /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
*/ - uint32_t gen_bitmask; -} whitelist[] = { - { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), - .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), - .size = 8, .gen_bitmask = GEN_RANGE(4, 9) }, -}; +static const struct reg_whitelist { + i915_reg_t offset_ldw; + i915_reg_t offset_udw; + u16 gen_mask; + u8 size; +} reg_read_whitelist[] = { { + .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), + .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), + .gen_mask = INTEL_GEN_MASK(4, 10), + .size = 8 +} }; int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reg_read *reg = data; - struct register_whitelist const *entry = whitelist; - unsigned size; - i915_reg_t offset_ldw, offset_udw; - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask)) + struct reg_whitelist const *entry; + unsigned int flags; + int remain; + int ret = 0; + + entry = reg_read_whitelist; + remain = ARRAY_SIZE(reg_read_whitelist); + while (remain) { + u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); + + GEM_BUG_ON(!is_power_of_2(entry->size)); + GEM_BUG_ON(entry->size > 8); + GEM_BUG_ON(entry_offset & (entry->size - 1)); + + if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && + entry_offset == (reg->offset & -entry->size)) break; + entry++; + remain--; } - if (i == ARRAY_SIZE(whitelist)) + if (!remain) return -EINVAL; - /* We use the low bits to encode extra flags as the register should - * be naturally aligned (and those that are not so aligned merely - * limit the available flags for that register). - */ - offset_ldw = entry->offset_ldw; - offset_udw = entry->offset_udw; - size = entry->size; - size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw); + flags = reg->offset & (entry->size - 1); intel_runtime_pm_get(dev_priv); - - switch (size) { - case 8 | 1: - reg->val = I915_READ64_2x32(offset_ldw, offset_udw); - break; - case 8: - reg->val = I915_READ64(offset_ldw); - break; - case 4: - reg->val = I915_READ(offset_ldw); - break; - case 2: - reg->val = I915_READ16(offset_ldw); - break; - case 1: - reg->val = I915_READ8(offset_ldw); - break; - default: + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = I915_READ64_2x32(entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = I915_READ64(entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = I915_READ(entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = I915_READ16(entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = I915_READ8(entry->offset_ldw); + else ret = -EINVAL; - goto out; - } - -out: intel_runtime_pm_put(dev_priv); + return ret; } @@ -1567,12 +1626,14 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, } /** - * intel_wait_for_register - wait until register matches expected state + * __intel_wait_for_register - wait until register matches expected state * @dev_priv: the i915 device * @reg: the register to read * @mask: mask to apply to register value * @value: expected value - * @timeout_ms: timeout in millisecond + * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait + * @slow_timeout_ms: slow timeout in millisecond + * @out_value: optional placeholder to hold registry value * * This routine waits until the target register @reg contains the 
expected * @value after applying the @mask, i.e. it waits until :: @@ -1583,14 +1644,17 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, * * Returns 0 if the register matches the desired condition, or -ETIMEOUT. */ -int intel_wait_for_register(struct drm_i915_private *dev_priv, +int __intel_wait_for_register(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, u32 value, - unsigned int timeout_ms) + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, + u32 *out_value) { unsigned fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + u32 reg_value; int ret; might_sleep(); @@ -1600,14 +1664,18 @@ int intel_wait_for_register(struct drm_i915_private *dev_priv, ret = __intel_wait_for_register_fw(dev_priv, reg, mask, value, - 2, 0, NULL); + fast_timeout_us, 0, ®_value); intel_uncore_forcewake_put__locked(dev_priv, fw); spin_unlock_irq(&dev_priv->uncore.lock); if (ret) - ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value, - timeout_ms); + ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), + (reg_value & mask) == value, + slow_timeout_ms * 1000, 10, 1000); + + if (out_value) + *out_value = reg_value; return ret; } @@ -1662,7 +1730,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { - if (!i915.reset) + if (!i915_modparams.reset) return NULL; if (INTEL_INFO(dev_priv)->gen >= 8) @@ -1722,7 +1790,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv) { return (dev_priv->info.has_reset_engine && !dev_priv->guc.execbuf_client && - i915.reset >= 2); + i915_modparams.reset >= 2); } int intel_guc_reset(struct drm_i915_private *dev_priv) @@ -1747,15 +1815,15 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) { - if (unlikely(i915.mmio_debug || + if (unlikely(i915_modparams.mmio_debug || dev_priv->uncore.unclaimed_mmio_check <= 0)) return false; if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { DRM_DEBUG("Unclaimed register detected, " "enabling oneshot unclaimed register reporting. 
" - "Please use i915.mmio_debug=N for more information.\n"); - i915.mmio_debug++; + "Please use i915_modparams.mmio_debug=N for more information.\n"); + i915_modparams.mmio_debug++; dev_priv->uncore.unclaimed_mmio_check--; return true; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 5f90278da461..8ed41edc8d5b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -102,6 +102,13 @@ struct intel_uncore { i915_reg_t reg_ack; } fw_domain[FW_DOMAIN_ID_COUNT]; + struct { + unsigned int count; + + int saved_mmio_check; + int saved_mmio_debug; + } user_forcewake; + int unclaimed_mmio_check; }; @@ -121,6 +128,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv void intel_uncore_fini(struct drm_i915_private *dev_priv); void intel_uncore_suspend(struct drm_i915_private *dev_priv); void intel_uncore_resume_early(struct drm_i915_private *dev_priv); +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv); u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); @@ -144,11 +152,28 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, enum forcewake_domains domains); +void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv); +void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv); + + +int __intel_wait_for_register(struct drm_i915_private *dev_priv, + i915_reg_t reg, + u32 mask, + u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, + u32 *out_value); + +static inline int intel_wait_for_register(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, u32 value, - unsigned int timeout_ms); + unsigned int timeout_ms) +{ + return __intel_wait_for_register(dev_priv, reg, mask, value, 2, + timeout_ms, NULL); +} int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c index fe15aa64086f..71fe60e5f01f 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c @@ -698,7 +698,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, val &= div_mask(width); return divider_recalc_rate(hw, parent_rate, val, NULL, - postdiv->flags); + postdiv->flags, width); } static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index ea5bb0e1632c..6e0fb50d0de4 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -93,14 +93,17 @@ static struct page **get_pages(struct drm_gem_object *obj) return p; } + msm_obj->pages = p; + msm_obj->sgt = drm_prime_pages_to_sg(p, npages); if (IS_ERR(msm_obj->sgt)) { + void *ptr = ERR_CAST(msm_obj->sgt); + dev_err(dev->dev, "failed to allocate sgt\n"); - return ERR_CAST(msm_obj->sgt); + msm_obj->sgt = NULL; + return ptr; } - msm_obj->pages = p; - /* For non-cached buffers, ensure the new pages are clean * because display controller, GPU, etc. 
are not coherent: */ @@ -135,7 +138,10 @@ static void put_pages(struct drm_gem_object *obj) if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - sg_free_table(msm_obj->sgt); + + if (msm_obj->sgt) + sg_free_table(msm_obj->sgt); + kfree(msm_obj->sgt); if (use_pages(obj)) diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 380f340204e8..f56f60f695e1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev) struct nvif_device *device = &drm->client.device; struct drm_connector *connector; + INIT_LIST_HEAD(&drm->bl_connectors); + if (apple_gmux_present()) { NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); return 0; } - INIT_LIST_HEAD(&drm->bl_connectors); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && connector->connector_type != DRM_MODE_CONNECTOR_eDP) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 70d8e0d69ad5..c902a851eb51 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } - ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) - return conn_status; + /* Outputs are only polled while runtime active, so acquiring a + * runtime PM ref here is unnecessary (and would deadlock upon + * runtime suspend because it waits for polling to finish). 
+ */ + if (!drm_kms_helper_is_poll_worker()) { + ret = pm_runtime_get_sync(connector->dev->dev); + if (ret < 0 && ret != -EACCES) + return conn_status; + } nv_encoder = nouveau_connector_ddc_detect(connector); if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { @@ -647,8 +653,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return conn_status; } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index fb47d46050ec..6e196bc01118 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -4426,6 +4426,7 @@ nv50_display_create(struct drm_device *dev) nouveau_display(dev)->fini = nv50_display_fini; disp->disp = &nouveau_display(dev)->disp; dev->mode_config.funcs = &nv50_disp_func; + dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; if (nouveau_atomic) dev->driver->driver_features |= DRIVER_ATOMIC; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index a2978a37b4f3..700fc754f28a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c @@ -174,6 +174,7 @@ gf119_sor = { .links = gf119_sor_dp_links, .power = g94_sor_dp_power, .pattern = gf119_sor_dp_pattern, + .drive = gf119_sor_dp_drive, .vcpi = gf119_sor_dp_vcpi, .audio = gf119_sor_dp_audio, .audio_sym = gf119_sor_dp_audio_sym, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index a4cb82495cee..245c946ea661 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev) return ret; pci->irq = pdev->irq; + + /* Ensure MSI interrupts are armed, for the case where there are + * already interrupts pending (for whatever reason) at load time. + */ + if (pci->msi) + pci->func->msi_rearm(pci); + return ret; } diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index c226da145fb3..a349cb61961e 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV config DRM_OMAP_PANEL_DPI tristate "Generic DPI panel" + depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DPI panels. diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 0a38a0e8c925..a0dfa14f4fab 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -452,6 +452,8 @@ static int td028ttec1_panel_remove(struct spi_device *spi) } static const struct of_device_id td028ttec1_of_match[] = { + { .compatible = "omapdss,tpo,td028ttec1", }, + /* keep to not break older DTB */ { .compatible = "omapdss,toppoly,td028ttec1", }, {}, }; @@ -471,6 +473,7 @@ static struct spi_driver td028ttec1_spi_driver = { module_spi_driver(td028ttec1_spi_driver); +MODULE_ALIAS("spi:tpo,td028ttec1"); MODULE_ALIAS("spi:toppoly,td028ttec1"); MODULE_AUTHOR("H. 
Nikolaus Schaller ");
 MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index daf286fc8a40..ca1e3b489540 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
 }
 
 static const struct soc_device_attribute dpi_soc_devices[] = {
-	{ .family = "OMAP3[456]*" },
-	{ .family = "[AD]M37*" },
+	{ .machine = "OMAP3[456]*" },
+	{ .machine = "[AD]M37*" },
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 365cf07daa01..c3453f3bd603 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -889,25 +889,36 @@ struct hdmi4_features {
 	bool audio_use_mclk;
 };
 
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
 	.cts_swmode = false,
 	.audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
 	.cts_swmode = true,
 	.audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
 	.cts_swmode = true,
 	.audio_use_mclk = true,
 };
 
 static const struct soc_device_attribute hdmi4_soc_devices[] = {
-	{ .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
-	{ .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
-	{ .family = "OMAP4", .data = &hdmi4_es3_features },
+	{
+		.machine = "OMAP4430",
+		.revision = "ES1.?",
+		.data = &hdmi4430_es1_features,
+	},
+	{
+		.machine = "OMAP4430",
+		.revision = "ES2.?",
+		.data = &hdmi4430_es2_features,
+	},
+	{
+		.family = "OMAP4",
+		.data = &hdmi4_features,
+	},
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 1dd3dafc59af..fd05f7e9f43f 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -298,7 +298,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 					msecs_to_jiffies(100))) {
 			dev_err(dmm->dev, "timed out waiting for done\n");
 			ret = -ETIMEDOUT;
+			goto cleanup;
 		}
+
+		/* Check the engine status before continuing */
+		ret = wait_status(engine, DMM_PATSTATUS_READY |
+				  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
 	}
 
 cleanup:
@@ -638,7 +643,8 @@ static int omap_dmm_probe(struct platform_device *dev)
 	match = of_match_node(dmm_of_match, dev->dev.of_node);
 	if (!match) {
 		dev_err(&dev->dev, "failed to find matching device node\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto fail;
 	}
 
 	omap_dmm->plat_data = match->data;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 474fa759e06e..234af81fb3d0 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev)
 	drm_panel_remove(&panel->base);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 
 	if (panel->ddc)
 		put_device(&panel->ddc->dev);
@@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev)
 	struct panel_simple *panel = dev_get_drvdata(dev);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 }
 
 static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 
74fc9362ecf9..3eb920851141 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -388,7 +388,11 @@ void qxl_io_create_primary(struct qxl_device *qdev, create->width = bo->surf.width; create->height = bo->surf.height; create->stride = bo->surf.stride; - create->mem = qxl_bo_physical_address(qdev, bo, offset); + if (bo->shadow) { + create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset); + } else { + create->mem = qxl_bo_physical_address(qdev, bo, offset); + } QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem, bo->kptr); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index afbf50d0c08f..9a9214ae0fb5 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) { struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); + qxl_bo_unref(&qxl_crtc->cursor_bo); drm_crtc_cleanup(crtc); kfree(qxl_crtc); } @@ -305,7 +306,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); + struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj); + WARN_ON(bo->shadow); drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); @@ -493,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, return 0; } +static int qxl_primary_apply_cursor(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct qxl_device *qdev = dev->dev_private; + struct drm_framebuffer *fb = plane->state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); + struct qxl_cursor_cmd *cmd; + struct qxl_release *release; + int ret = 0; + + if (!qcrtc->cursor_bo) + return 0; + + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return ret; + + ret = qxl_release_list_add(release, qcrtc->cursor_bo); + if (ret) + goto out_free_release; + + ret = qxl_release_reserve_list(release, false); + if (ret) + goto out_free_release; + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_SET; + cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x; + cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y; + + cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); + + cmd->u.set.visible = 1; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + qxl_release_fence_buffer_objects(release); + + return ret; + +out_free_release: + qxl_release_free(qdev, release); + return ret; +} + static void qxl_primary_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -508,6 +558,8 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .x2 = qfb->base.width, .y2 = qfb->base.height }; + int ret; + bool same_shadow = false; if (old_state->fb) { qfb_old = to_qxl_framebuffer(old_state->fb); @@ -519,15 +571,28 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, if (bo == bo_old) return; + if (bo_old && bo_old->shadow && bo->shadow && + bo_old->shadow == bo->shadow) { + same_shadow = true; + } + if (bo_old && bo_old->is_primary) { - qxl_io_destroy_primary(qdev); + if (!same_shadow) + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; + + ret = qxl_primary_apply_cursor(plane); + if (ret) + DRM_ERROR( + "could not set cursor after creating primary"); } if (!bo->is_primary) { 
- qxl_io_create_primary(qdev, 0, bo); + if (!same_shadow) + qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; } + qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -560,11 +625,12 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct qxl_device *qdev = dev->dev_private; struct drm_framebuffer *fb = plane->state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); struct qxl_release *release; struct qxl_cursor_cmd *cmd; struct qxl_cursor *cursor; struct drm_gem_object *obj; - struct qxl_bo *cursor_bo, *user_bo = NULL; + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; int ret; void *user_ptr; int size = 64*64*4; @@ -617,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); cmd->type = QXL_CURSOR_SET; + + qxl_bo_unref(&qcrtc->cursor_bo); + qcrtc->cursor_bo = cursor_bo; + cursor_bo = NULL; } else { ret = qxl_release_reserve_list(release, true); @@ -634,6 +704,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_bo_unref(&cursor_bo); + return; out_backoff: @@ -679,8 +751,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, static int qxl_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { + struct qxl_device *qdev = plane->dev->dev_private; struct drm_gem_object *obj; - struct qxl_bo *user_bo; + struct qxl_bo *user_bo, *old_bo = NULL; int ret; if (!new_state->fb) @@ -689,6 +762,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(new_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + user_bo->is_dumb && !user_bo->shadow) { + if (plane->state->fb) { + obj = to_qxl_framebuffer(plane->state->fb)->obj; + old_bo = gem_to_qxl_bo(obj); + } + if (old_bo && old_bo->shadow && + user_bo->gem_base.size == old_bo->gem_base.size && + plane->state->crtc == new_state->crtc && + plane->state->crtc_w == new_state->crtc_w && + plane->state->crtc_h == new_state->crtc_h && + plane->state->src_x == new_state->src_x && + plane->state->src_y == new_state->src_y && + plane->state->src_w == new_state->src_w && + plane->state->src_h == new_state->src_h && + plane->state->rotation == new_state->rotation && + plane->state->zpos == new_state->zpos) { + drm_gem_object_get(&old_bo->shadow->gem_base); + user_bo->shadow = old_bo->shadow; + } else { + qxl_bo_create(qdev, user_bo->gem_base.size, + true, true, QXL_GEM_DOMAIN_VRAM, NULL, + &user_bo->shadow); + } + } + ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); if (ret) return ret; @@ -713,6 +812,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); + + if (user_bo->shadow && !user_bo->is_primary) { + drm_gem_object_put_unlocked(&user_bo->shadow->gem_base); + user_bo->shadow = NULL; + } } static const uint32_t qxl_cursor_plane_formats[] = { diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3397a1907336..c0a927efa653 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -113,6 +113,8 @@ struct qxl_bo { /* Constant after initialization */ struct drm_gem_object gem_base; bool is_primary; /* is this now a primary surface */ + bool is_dumb; + struct qxl_bo *shadow; bool hw_surf_alloc; struct 
qxl_surface surf;
 	uint32_t surface_id;
@@ -133,6 +135,8 @@ struct qxl_bo_list {
 struct qxl_crtc {
 	struct drm_crtc base;
 	int index;
+
+	struct qxl_bo *cursor_bo;
 };
 
 struct qxl_output {
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 5e65d5d2d937..11085ab01374 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 					      &handle);
 	if (r)
 		return r;
+	qobj->is_dumb = true;
 	args->pitch = pitch;
 	args->handle = handle;
 	return 0;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 432cb46f6a34..fd7682bf335d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
 
 /***** radeon AUX functions *****/
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
  */
 void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 3cb6c55b268d..ce8b353b5753 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3229,35 +3229,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	case CHIP_KAVERI:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 4;
-		if ((rdev->pdev->device == 0x1304) ||
-		    (rdev->pdev->device == 0x1305) ||
-		    (rdev->pdev->device == 0x130C) ||
-		    (rdev->pdev->device == 0x130F) ||
-		    (rdev->pdev->device == 0x1310) ||
-		    (rdev->pdev->device == 0x1311) ||
-		    (rdev->pdev->device == 0x131C)) {
-			rdev->config.cik.max_cu_per_sh = 8;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1309) ||
-			   (rdev->pdev->device == 0x130A) ||
-			   (rdev->pdev->device == 0x130D) ||
-			   (rdev->pdev->device == 0x1313) ||
-			   (rdev->pdev->device == 0x131D)) {
-			rdev->config.cik.max_cu_per_sh = 6;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1306) ||
-			   (rdev->pdev->device == 0x1307) ||
-			   (rdev->pdev->device == 0x130B) ||
-
(rdev->pdev->device == 0x130E) || - (rdev->pdev->device == 0x1315) || - (rdev->pdev->device == 0x1318) || - (rdev->pdev->device == 0x131B)) { - rdev->config.cik.max_cu_per_sh = 4; - rdev->config.cik.max_backends_per_se = 1; - } else { - rdev->config.cik.max_cu_per_sh = 3; - rdev->config.cik.max_backends_per_se = 1; - } + rdev->config.cik.max_cu_per_sh = 8; + rdev->config.cik.max_backends_per_se = 2; rdev->config.cik.max_sh_per_se = 1; rdev->config.cik.max_texture_channel_caches = 4; rdev->config.cik.max_gprs = 256; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2f642cbefd8e..424cd1b66575 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (radeon_dp_needs_link_train(radeon_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (!radeon_dp_getdpcd(radeon_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && + radeon_dp_needs_link_train(radeon_connector)) { + /* Don't start link training before we have the DPCD */ + if (!radeon_dp_getdpcd(radeon_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -900,9 +893,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -925,8 +920,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? 
*/ radeon_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1040,9 +1039,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1109,8 +1110,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1174,9 +1177,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!radeon_connector->dac_load_detect) return ret; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1188,8 +1193,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); radeon_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1252,9 +1261,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (radeon_connector->detected_hpd_without_ddc) { force = true; @@ -1437,8 +1448,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) } exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1689,9 +1702,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dig_connector->is_mst) return connector_status_disconnected; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1778,8 +1793,10 @@ 
radeon_dp_detect(struct drm_connector *connector, bool force) } out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ffc10cadcf34..32b577c776b9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1397,6 +1397,10 @@ int radeon_device_init(struct radeon_device *rdev, if ((rdev->flags & RADEON_IS_PCI) && (rdev->family <= CHIP_RS740)) rdev->need_dma32 = true; +#ifdef CONFIG_PPC64 + if (rdev->family == CHIP_CEDAR) + rdev->need_dma32 = true; +#endif dma_bits = rdev->need_dma32 ? 32 : 40; r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fd25361ac681..4ef967d1a9de 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3386452bd2f0..ac467b80edc7 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) struct radeon_bo *robj = gem_to_radeon_bo(gobj); if (robj) { - if (robj->gem_base.import_attach) - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); radeon_mn_unregister(robj); radeon_bo_unref(&robj); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 093594976126..b19a54dd18de 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_clear_surface_reg(bo); WARN_ON_ONCE(!list_empty(&bo->va)); + if (bo->gem_base.import_attach) + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); drm_gem_object_release(&bo->gem_base); kfree(bo); } @@ -238,9 +240,10 @@ int radeon_bo_create(struct radeon_device *rdev, * may be slow * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 */ - +#ifndef CONFIG_COMPILE_TEST #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ thanks to write-combining +#endif if (bo->flags & RADEON_GEM_GTT_WC) DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 326ad068c15a..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev); static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); static void radeon_pm_update_profile(struct radeon_device *rdev); static void radeon_pm_set_clocks(struct radeon_device *rdev); -static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev); int radeon_pm_get_type_index(struct radeon_device *rdev, enum radeon_pm_state_type ps_type, @@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); } 
mutex_unlock(&rdev->pm.mutex); - /* allow new DPM state to be picked */ - radeon_pm_compute_clocks_dpm(rdev); } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { mutex_lock(&rdev->pm.mutex); @@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; /* balanced states don't exist at the moment */ if (dpm_state == POWER_STATE_TYPE_BALANCED) - dpm_state = rdev->pm.dpm.ac_power ? - POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; + dpm_state = POWER_STATE_TYPE_PERFORMANCE; restart_search: /* Pick the best power state based on current conditions */ diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index d34d1cf33895..95f4db70dd22 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, /* calc dclk divider with current vco freq */ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, pd_min, pd_even); - if (vclk_div > pd_max) + if (dclk_div > pd_max) break; /* vco is too big, it has to stop */ /* calc score with current vco freq */ diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index ee3e74266a13..97a0a639dad9 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6667)) { max_sclk = 75000; } + if ((rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6665)) { + max_sclk = 60000; + max_mclk = 80000; + } } else if (rdev->family == CHIP_OLAND) { if ((rdev->pdev->revision == 0xC7) || (rdev->pdev->revision == 0x80) || diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index 9a20b9dc27c8..f7fc652b0027 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -1275,8 +1275,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, goto err_pllref; } - pm_runtime_enable(dev); - dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; dsi->dsi_host.dev = dev; ret = mipi_dsi_host_register(&dsi->dsi_host); @@ -1291,6 +1289,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, } dev_set_drvdata(dev, dsi); + pm_runtime_enable(dev); return 0; err_mipi_dsi_host: diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index d9791292553e..7b909d814d38 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -567,12 +567,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, if (IS_ERR(tcon->crtc)) { dev_err(dev, "Couldn't create our CRTC\n"); ret = PTR_ERR(tcon->crtc); - goto err_free_clocks; + goto err_free_dotclock; } ret = sun4i_rgb_init(drm, tcon); if (ret < 0) - goto err_free_clocks; + goto err_free_dotclock; list_add_tail(&tcon->list, &drv->tcon_list); diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 4785ac090b8c..c142fbb8661e 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -80,7 +80,7 @@ #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN BIT(0) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1) -#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(11, 8) +#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8) #define 
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_DEF (1 << 1) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_ARGB8888 (0 << 8) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 406fe4544b83..06d6e785c920 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "tilcdc_drv.h" #include "tilcdc_regs.h" @@ -48,6 +49,7 @@ struct tilcdc_crtc { unsigned int lcd_fck_rate; ktime_t last_vblank; + unsigned int hvtotal_us; struct drm_framebuffer *curr_fb; struct drm_framebuffer *next_fb; @@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc) LCDC_V2_CORE_CLK_EN); } +uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode) +{ + return (uint) div_u64(1000llu * mode->htotal * mode->vtotal, + mode->clock); +} + static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); @@ -459,6 +467,9 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) drm_framebuffer_reference(fb); crtc->hwmode = crtc->state->adjusted_mode; + + tilcdc_crtc->hvtotal_us = + tilcdc_mode_hvtotal(&crtc->hwmode); } static void tilcdc_crtc_enable(struct drm_crtc *crtc) @@ -648,7 +659,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, - 1000000 / crtc->hwmode.vrefresh); + tilcdc_crtc->hvtotal_us); tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h index 9d528c0a67a4..5048ebb86835 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h @@ -133,7 +133,7 @@ static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data) struct tilcdc_drm_private *priv = dev->dev_private; volatile void __iomem *addr = priv->mmio + reg; -#ifdef iowrite64 +#if defined(iowrite64) && !defined(iowrite64_is_nonatomic) iowrite64(data, addr); #else __iowmb(); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 180ce6296416..68eed684dff5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); dma_fence_put(bo->moving); - if (bo->resv == &bo->ttm_resv) - reservation_object_fini(&bo->ttm_resv); + reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); if (bo->destroy) bo->destroy(bo); @@ -176,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) list_add_tail(&bo->lru, &man->lru[bo->priority]); kref_get(&bo->list_kref); - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { + if (bo->ttm && !(bo->ttm->page_flags & + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { list_add_tail(&bo->swap, &bo->glob->swap_lru[bo->priority]); kref_get(&bo->list_kref); @@ -402,14 +402,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) if (bo->resv == &bo->ttm_resv) return 0; - reservation_object_init(&bo->ttm_resv); BUG_ON(!reservation_object_trylock(&bo->ttm_resv)); r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv); - if (r) { + if (r) reservation_object_unlock(&bo->ttm_resv); - reservation_object_fini(&bo->ttm_resv); - } return r; } @@ -440,28 +437,30 @@ static void 
ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) struct ttm_bo_global *glob = bo->glob; int ret; + ret = ttm_bo_individualize_resv(bo); + if (ret) { + /* Last resort, if we fail to allocate memory for the + * fences block for the BO to become idle + */ + reservation_object_wait_timeout_rcu(bo->resv, true, false, + 30 * HZ); + spin_lock(&glob->lru_lock); + goto error; + } + spin_lock(&glob->lru_lock); ret = __ttm_bo_reserve(bo, false, true, NULL); - if (!ret) { - if (!ttm_bo_wait(bo, false, true)) { + if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) { ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - ttm_bo_cleanup_memtype_use(bo); + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); - return; - } - - ret = ttm_bo_individualize_resv(bo); - if (ret) { - /* Last resort, if we fail to allocate memory for the - * fences block for the BO to become idle and free it. - */ - spin_unlock(&glob->lru_lock); - ttm_bo_wait(bo, true, true); ttm_bo_cleanup_memtype_use(bo); return; } + ttm_bo_flush_all_fences(bo); /* @@ -474,11 +473,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); } - if (bo->resv != &bo->ttm_resv) - reservation_object_unlock(&bo->ttm_resv); __ttm_bo_unreserve(bo); } + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); +error: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); @@ -1203,8 +1203,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, lockdep_assert_held(&bo->resv->lock.base); } else { bo->resv = &bo->ttm_resv; - reservation_object_init(&bo->ttm_resv); } + reservation_object_init(&bo->ttm_resv); atomic_inc(&bo->glob->bo_count); drm_vma_node_reset(&bo->vma_node); bo->priority = 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c934ad5b3903..7c2fbdbbd048 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); INIT_LIST_HEAD(&fbo->io_reserve_lru); + mutex_init(&fbo->wu_mutex); fbo->moving = NULL; drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index c8ebb757e36b..b17d0d38f290 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -299,7 +299,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma) static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, unsigned long offset, - void *buf, int len, int write) + uint8_t *buf, int len, int write) { unsigned long page = offset >> PAGE_SHIFT; unsigned long bytes_left = len; @@ -328,6 +328,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, ttm_bo_kunmap(&map); page++; + buf += bytes; bytes_left -= bytes; offset = 0; } while (bytes_left); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 871599826773..91f9263f3c3b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -821,6 +821,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) pr_info("Initializing pool allocator\n"); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); + if (!_manager) + return -ENOMEM; ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); diff --git a/drivers/gpu/drm/udl/udl_fb.c 
b/drivers/gpu/drm/udl/udl_fb.c index b5b335c9b2bb..2ebdc6d5a76e 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long offset; unsigned long page, pos; - if (offset + size > info->fix.smem_len) + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + + offset = vma->vm_pgoff << PAGE_SHIFT; + + if (offset > info->fix.smem_len || size > info->fix.smem_len - offset) return -EINVAL; pos = (unsigned long)info->fix.smem_start + offset; diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index d1e0dc908048..04796d7d0fdb 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -866,7 +866,8 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder, adjusted_mode->clock = pixel_clock_hz / 1000 + 1; /* Given the new pixel clock, adjust HFP to keep vrefresh the same. */ - adjusted_mode->htotal = pixel_clock_hz / (mode->vrefresh * mode->vtotal); + adjusted_mode->htotal = adjusted_mode->clock * mode->htotal / + mode->clock; adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal; adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal; diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index d0c6bfb68c4e..d31b3d0c9955 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev) struct vc4_exec_info *exec[2]; struct vc4_bo *bo; unsigned long irqflags; - unsigned int i, j, unref_list_count, prev_idx; + unsigned int i, j, k, unref_list_count; kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL); if (!kernel_state) @@ -182,24 +182,24 @@ vc4_save_hang_state(struct drm_device *dev) return; } - prev_idx = 0; + k = 0; for (i = 0; i < 2; i++) { if (!exec[i]) continue; for (j = 0; j < exec[i]->bo_count; j++) { drm_gem_object_get(&exec[i]->bo[j]->base); - kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; + kernel_state->bo[k++] = &exec[i]->bo[j]->base; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { drm_gem_object_get(&bo->base.base); - kernel_state->bo[j + prev_idx] = &bo->base.base; - j++; + kernel_state->bo[k++] = &bo->base.base; } - prev_idx = j + 1; } + WARN_ON_ONCE(k != state->bo_count); + if (exec[0]) state->start_bin = exec[0]->ct0ca; if (exec[1]) @@ -829,8 +829,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) /* If we got force-completed because of GPU reset rather than * through our IRQ handler, signal the fence now. */ - if (exec->fence) + if (exec->fence) { dma_fence_signal(exec->fence); + dma_fence_put(exec->fence); + } if (exec->bo) { for (i = 0; i < exec->bo_count; i++) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 937da8dd65b8..8f71157a2b06 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -433,7 +433,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) vc4_encoder->limited_rgb_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL, - vc4_encoder->rgb_range_selectable); + vc4_encoder->rgb_range_selectable, + false); vc4_hdmi_write_infoframe(encoder, &frame); } diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 7d7af3a93d94..3dd62d75f531 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev) list_move_tail(&exec->head, &vc4->job_done_list); if (exec->fence) { dma_fence_signal_locked(exec->fence); + dma_fence_put(exec->fence); exec->fence = NULL; } vc4_submit_next_render_job(dev); @@ -225,6 +226,9 @@ vc4_irq_uninstall(struct drm_device *dev) /* Clear any pending interrupts we might have left. */ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); + /* Finish any interrupt handler still in flight. */ + disable_irq(dev->irq); + cancel_work_sync(&vc4->overflow_mem_work); } diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 622cd43840b8..493f392b3a0a 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev) return ret; vc4_v3d_init_hw(vc4->dev); + + /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ + enable_irq(vc4->dev->irq); vc4_irq_postinstall(vc4->dev); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 184340d486c3..86d25f18aa99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) */ void vmw_svga_disable(struct vmw_private *dev_priv) { + /* + * Disabling SVGA will turn off device modesetting capabilities, so + * notify KMS about that so that it doesn't cache atomic state that + * isn't valid anymore, for example crtcs turned on. + * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), + * but vmw_kms_lost_device() takes the reservation sem and thus we'll + * end up with lock order reversal. Thus, a master may actually perform + * a new modeset just after we call vmw_kms_lost_device() and race with + * vmw_svga_disable(), but that should at worst cause atomic KMS state + * to be inconsistent with the device, causing modesetting problems. 
+ * + */ + vmw_kms_lost_device(dev_priv->dev); ttm_write_lock(&dev_priv->reservation_sem, false); spin_lock(&dev_priv->svga_lock); if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 7e5f30e234b1..8c65cc3b0dda 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); +void vmw_kms_lost_device(struct drm_device *dev); int vmw_dumb_create(struct drm_file *file_priv, struct drm_device *dev, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 21c62a34e558..87e8af5776a3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, } view_type = vmw_view_cmd_to_type(header->id); + if (view_type == vmw_view_max) + return -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b850562fbdd6..9e010f8c36a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -31,7 +31,6 @@ #include #include - /* Might need a hrtimer here? */ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) @@ -697,7 +696,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; /* Mapping is managed by prepare_fb/cleanup_fb */ - memset(&vps->guest_map, 0, sizeof(vps->guest_map)); memset(&vps->host_map, 0, sizeof(vps->host_map)); vps->cpp = 0; @@ -760,11 +758,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, /* Should have been freed by cleanup_fb */ - if (vps->guest_map.virtual) { - DRM_ERROR("Guest mapping not freed\n"); - ttm_bo_kunmap(&vps->guest_map); - } - if (vps->host_map.virtual) { DRM_ERROR("Host mapping not freed\n"); ttm_bo_kunmap(&vps->host_map); @@ -2537,9 +2530,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, * Helper to be used if an error forces the caller to undo the actions of * vmw_kms_helper_resource_prepare. */ -void vmw_kms_helper_resource_revert(struct vmw_resource *res) +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) { - vmw_kms_helper_buffer_revert(res->backup); + struct vmw_resource *res = ctx->res; + + vmw_kms_helper_buffer_revert(ctx->buf); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2556,10 +2552,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) * interrupted by a signal. 
*/ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible) + bool interruptible, + struct vmw_validation_ctx *ctx) { int ret = 0; + ctx->buf = NULL; + ctx->res = res; + if (interruptible) ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); else @@ -2578,6 +2578,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, res->dev_priv->has_mob); if (ret) goto out_unreserve; + + ctx->buf = vmw_dmabuf_reference(res->backup); } ret = vmw_resource_validate(res); if (ret) @@ -2585,7 +2587,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, return 0; out_revert: - vmw_kms_helper_buffer_revert(res->backup); + vmw_kms_helper_buffer_revert(ctx->buf); out_unreserve: vmw_resource_unreserve(res, false, NULL, 0); out_unlock: @@ -2601,11 +2603,13 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, * @out_fence: Optional pointer to a fence pointer. If non-NULL, a * ref-counted fence pointer is returned here. */ -void vmw_kms_helper_resource_finish(struct vmw_resource *res, - struct vmw_fence_obj **out_fence) +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, + struct vmw_fence_obj **out_fence) { - if (res->backup || out_fence) - vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, + struct vmw_resource *res = ctx->res; + + if (ctx->buf || out_fence) + vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); vmw_resource_unreserve(res, false, NULL, 0); @@ -2871,3 +2875,14 @@ int vmw_kms_set_config(struct drm_mode_set *set, return drm_atomic_helper_set_config(set, ctx); } + + +/** + * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost + * + * @dev: Pointer to the drm device + */ +void vmw_kms_lost_device(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index ff9c8389ff21..3d2ca280eaa7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -175,7 +175,7 @@ struct vmw_plane_state { int pinned; /* For CPU Blit */ - struct ttm_bo_kmap_obj host_map, guest_map; + struct ttm_bo_kmap_obj host_map; unsigned int cpp; }; @@ -240,6 +240,11 @@ struct vmw_display_unit { int set_gui_y; }; +struct vmw_validation_ctx { + struct vmw_resource *res; + struct vmw_dma_buffer *buf; +}; + #define vmw_crtc_to_du(x) \ container_of(x, struct vmw_display_unit, crtc) #define vmw_connector_to_du(x) \ @@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_vmw_fence_rep __user * user_fence_rep); int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible); -void vmw_kms_helper_resource_revert(struct vmw_resource *res); -void vmw_kms_helper_resource_finish(struct vmw_resource *res, + bool interruptible, + struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, struct vmw_fence_obj **out_fence); int vmw_kms_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, @@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, int vmw_kms_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx); - #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b8a09807c5de..3824595fece1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -266,8 +266,8 @@ static const 
struct drm_connector_funcs vmw_legacy_connector_funcs = { .set_property = vmw_du_connector_set_property, .destroy = vmw_ldu_connector_destroy, .reset = vmw_du_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = vmw_du_connector_duplicate_state, + .atomic_destroy_state = vmw_du_connector_destroy_state, .atomic_set_property = vmw_du_connector_atomic_set_property, .atomic_get_property = vmw_du_connector_atomic_get_property, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index d1552d3e0652..aacce4753a62 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = { .set_property = vmw_du_connector_set_property, .destroy = vmw_sou_connector_destroy, .reset = vmw_du_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = vmw_du_connector_duplicate_state, + .atomic_destroy_state = vmw_du_connector_destroy_state, .atomic_set_property = vmw_du_connector_atomic_set_property, .atomic_get_property = vmw_du_connector_atomic_get_property, }; @@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_kms_sou_surface_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, dest_x, dest_y, num_clips, inc, &sdirty.base); - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index ca3afae2db1f..6c576f8df4b2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit { bool defined; /* For CPU Blit */ - struct ttm_bo_kmap_obj host_map, guest_map; + struct ttm_bo_kmap_obj host_map; unsigned int cpp; }; @@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) s32 src_pitch, dst_pitch; u8 *src, *dst; bool not_used; - + struct ttm_bo_kmap_obj guest_map; + int ret; if (!dirty->num_hits) return; @@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) if (width == 0 || height == 0) return; + ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages, + &guest_map); + if (ret) { + DRM_ERROR("Failed mapping framebuffer for blit: %d\n", + ret); + goto out_cleanup; + } /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ src_pitch = stdu->display_srf->base_size.width * stdu->cpp; @@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; dst_pitch = ddirty->pitch; - dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used); + dst = ttm_kmap_obj_virtual(&guest_map, ¬_used); dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; @@ -772,6 +780,7 @@ 
static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) vmw_fifo_commit(dev_priv, sizeof(*cmd)); } + ttm_bo_kunmap(&guest_map); out_cleanup: ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; ddirty->right = ddirty->bottom = S32_MIN; @@ -971,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_stdu_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -999,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, dest_x, dest_y, num_clips, inc, &sdirty.base); out_finish: - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } @@ -1109,9 +1119,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - if (vps->guest_map.virtual) - ttm_bo_kunmap(&vps->guest_map); - if (vps->host_map.virtual) ttm_bo_kunmap(&vps->host_map); @@ -1277,33 +1284,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, */ if (vps->content_fb_type == SEPARATE_DMA && !(dev_priv->capabilities & SVGA_CAP_3D)) { - - struct vmw_framebuffer_dmabuf *new_vfbd; - - new_vfbd = vmw_framebuffer_to_vfbd(new_fb); - - ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false, - NULL); - if (ret) - goto out_srf_unpin; - - ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0, - new_vfbd->buffer->base.num_pages, - &vps->guest_map); - - ttm_bo_unreserve(&new_vfbd->buffer->base); - - if (ret) { - DRM_ERROR("Failed to map content buffer to CPU\n"); - goto out_srf_unpin; - } - ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, vps->surf->res.backup->base.num_pages, &vps->host_map); if (ret) { DRM_ERROR("Failed to map display buffer to CPU\n"); - ttm_bo_kunmap(&vps->guest_map); goto out_srf_unpin; } @@ -1350,7 +1335,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, stdu->display_srf = vps->surf; stdu->content_fb_type = vps->content_fb_type; stdu->cpp = vps->cpp; - memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map)); memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); if (!stdu->defined) diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 374301fcbc86..8c7a0ce147a1 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -230,7 +230,7 @@ config HID_CMEDIA config HID_CP2112 tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support" - depends on USB_HID && I2C && GPIOLIB + depends on USB_HID && HIDRAW && I2C && GPIOLIB select GPIOLIB_IRQCHIP ---help--- Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge. 
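The vmwgfx STDU hunks above drop the long-lived guest_map kmap from the plane and display-unit state and instead map the dma-buf only for the duration of the CPU blit in vmw_stdu_dmabuf_cpu_commit(). A minimal sketch of that map-blit-unmap pattern, with illustrative names rather than the driver's real helpers, assuming the caller already holds whatever reservation the object needs:

static int example_cpu_blit(struct ttm_buffer_object *bo,
			    void (*blit)(void *dst, void *ctx), void *ctx)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;

	/* Map the whole object only while the copy runs. */
	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	blit(ttm_kmap_obj_virtual(&map, &is_iomem), ctx);

	/* Drop the mapping immediately; nothing is cached across commits. */
	ttm_bo_kunmap(&map);
	return 0;
}

Mapping on demand avoids carrying a kernel mapping of the guest buffer in the atomic plane state, which is the leak-prone bookkeeping the patch removes.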
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 330ca983828b..ca2fbe56635a 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2638,7 +2638,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -2717,6 +2716,9 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, @@ -2908,6 +2910,17 @@ bool hid_ignore(struct hid_device *hdev) strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) return true; break; + case USB_VENDOR_ID_ELAN: + /* + * Many Elan devices have a product id of 0x0401 and are handled + * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev + * is not (and cannot be) handled by that driver -> + * Ignore all 0x0401 devs except for the ELAN0800 dev. + */ + if (hdev->product == 0x0401 && + strncmp(hdev->name, "ELAN0800", 8) != 0) + return true; + break; } if (hdev->type == HID_TYPE_USBMOUSE && diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 078026f63b6f..4e940a096b2a 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -196,6 +196,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); + if (ret >= 0) + ret = -EIO; goto exit; } @@ -205,8 +207,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) { + if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error setting GPIO config: %d\n", ret); + if (ret >= 0) + ret = -EIO; goto exit; } @@ -214,7 +218,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) exit: mutex_unlock(&dev->lock); - return ret < 0 ? ret : -EIO; + return ret; } static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 0cd4f7216239..5eea6fe0d7bd 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev, { struct input_dev *input = hidinput->input; + /* + * ELO devices have one Button usage in GenDesk field, which makes + * hid-input map it to BTN_LEFT; that confuses userspace, which then + * considers the device to be a mouse/touchpad instead of touchscreen. 
+ */ + clear_bit(BTN_LEFT, input->keybit); set_bit(BTN_TOUCH, input->keybit); set_bit(ABS_PRESSURE, input->absbit); input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index be2e005c3c51..ff539c0b4637 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -634,6 +634,9 @@ #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 +#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 +#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 +#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 #define USB_DEVICE_ID_LD_JWM 0x1080 #define USB_DEVICE_ID_LD_DMMP 0x1081 #define USB_DEVICE_ID_LD_UMIP 0x1090 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 9e8c4d2ba11d..6598501c1ad0 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -738,9 +738,11 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field, } static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, - struct hid_usage *usage, __s32 value) + struct hid_usage *usage, __s32 value, + bool first_packet) { struct mt_device *td = hid_get_drvdata(hid); + __s32 cls = td->mtclass.name; __s32 quirks = td->mtclass.quirks; struct input_dev *input = field->hidinput->input; @@ -794,6 +796,15 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, break; default: + /* + * For Win8 PTP touchpads we should only look at + * non finger/touch events in the first_packet of + * a (possible) multi-packet frame. + */ + if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) && + !first_packet) + return; + if (usage->type) input_event(input, usage->type, usage->code, value); @@ -813,6 +824,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report) { struct mt_device *td = hid_get_drvdata(hid); struct hid_field *field; + bool first_packet; unsigned count; int r, n; @@ -831,6 +843,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report) td->num_expected = value; } + first_packet = td->num_received == 0; for (r = 0; r < report->maxfield; r++) { field = report->field[r]; count = field->report_count; @@ -840,7 +853,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report) for (n = 0; n < count; n++) mt_process_mt_event(hid, field, &field->usage[n], - field->value[n]); + field->value[n], first_packet); } if (td->num_received >= td->num_expected) diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 2aac097c3f70..97869b7410eb 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -28,6 +28,7 @@ #define SPT_Ax_DEVICE_ID 0x9D35 #define CNL_Ax_DEVICE_ID 0x9DFC #define GLK_Ax_DEVICE_ID 0x31A2 +#define CNL_H_DEVICE_ID 0xA37C #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 20d824f74f99..582e449be9fe 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -37,6 +37,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, 
ish_pci_tbl); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 906e654fb0ba..65f1cfbbe7fe 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2340,23 +2340,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) int i; unsigned long flags; - spin_lock_irqsave(&remote->remote_lock, flags); - remote->remotes[index].registered = false; - spin_unlock_irqrestore(&remote->remote_lock, flags); + for (i = 0; i < WACOM_MAX_REMOTES; i++) { + if (remote->remotes[i].serial == serial) { - if (remote->remotes[index].battery.battery) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[index].battery.bat_desc); + spin_lock_irqsave(&remote->remote_lock, flags); + remote->remotes[i].registered = false; + spin_unlock_irqrestore(&remote->remote_lock, flags); - if (remote->remotes[index].group.name) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[index]); + if (remote->remotes[i].battery.battery) + devres_release_group(&wacom->hdev->dev, + &remote->remotes[i].battery.bat_desc); + + if (remote->remotes[i].group.name) + devres_release_group(&wacom->hdev->dev, + &remote->remotes[i]); - for (i = 0; i < WACOM_MAX_REMOTES; i++) { - if (remote->remotes[i].serial == serial) { remote->remotes[i].serial = 0; remote->remotes[i].group.name = NULL; - remote->remotes[i].registered = false; remote->remotes[i].battery.battery = NULL; wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index aa692e28b2cd..70cbe1e5a3d2 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field struct wacom_features *features = &wacom_wac->features; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); int i; - bool is_touch_on = value; bool do_report = false; /* @@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field break; case WACOM_HID_WD_MUTE_DEVICE: - if (wacom_wac->shared->touch_input && value) { - wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on; - is_touch_on = wacom_wac->shared->is_touch_on; - } - - /* fall through*/ case WACOM_HID_WD_TOUCHONOFF: if (wacom_wac->shared->touch_input) { + bool *is_touch_on = &wacom_wac->shared->is_touch_on; + + if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) + *is_touch_on = !(*is_touch_on); + else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) + *is_touch_on = value; + input_report_switch(wacom_wac->shared->touch_input, - SW_MUTE_DEVICE, !is_touch_on); + SW_MUTE_DEVICE, !(*is_touch_on)); input_sync(wacom_wac->shared->touch_input); } break; diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 8a03654048bf..feb62fd4dfc3 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -166,6 +166,7 @@ ((f)->physical == HID_DG_PEN) || \ ((f)->application == HID_DG_PEN) || \ ((f)->application == HID_DG_DIGITIZER) || \ + ((f)->application == WACOM_HID_WD_PEN) || \ ((f)->application == WACOM_HID_WD_DIGITIZER) || \ ((f)->application == WACOM_HID_G9_PEN) || \ ((f)->application == WACOM_HID_G11_PEN)) diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 894b67ac2cae..05964347008d 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -640,22 +640,28 @@ void vmbus_close(struct vmbus_channel *channel) */ return; } - mutex_lock(&vmbus_connection.channel_mutex); /* * Close all the sub-channels first and then close the * primary 
channel. */ list_for_each_safe(cur, tmp, &channel->sc_list) { cur_channel = list_entry(cur, struct vmbus_channel, sc_list); - vmbus_close_internal(cur_channel); if (cur_channel->rescind) { + wait_for_completion(&cur_channel->rescind_event); + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_close_internal(cur_channel); hv_process_channel_removal( cur_channel->offermsg.child_relid); + } else { + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_close_internal(cur_channel); } + mutex_unlock(&vmbus_connection.channel_mutex); } /* * Now close the primary. */ + mutex_lock(&vmbus_connection.channel_mutex); vmbus_close_internal(channel); mutex_unlock(&vmbus_connection.channel_mutex); } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 379b0df123be..1939c0ca3741 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -71,7 +71,7 @@ static const struct vmbus_device vmbus_devs[] = { /* PCIE */ { .dev_type = HV_PCIE, HV_PCIE_GUID, - .perf_device = true, + .perf_device = false, }, /* Synthetic Frame Buffer */ @@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void) return NULL; spin_lock_init(&channel->lock); + init_completion(&channel->rescind_event); INIT_LIST_HEAD(&channel->sc_list); INIT_LIST_HEAD(&channel->percpu_list); @@ -883,6 +884,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) /* * Now wait for offer handling to complete. */ + vmbus_rescind_cleanup(channel); while (READ_ONCE(channel->probe_done) == false) { /* * We wait here until any channel offer is currently @@ -898,7 +900,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) if (channel->device_obj) { if (channel->chn_rescind_callback) { channel->chn_rescind_callback(channel); - vmbus_rescind_cleanup(channel); return; } /* @@ -907,7 +908,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) */ dev = get_device(&channel->device_obj->device); if (dev) { - vmbus_rescind_cleanup(channel); vmbus_device_unregister(channel->device_obj); put_device(dev); } @@ -921,13 +921,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) * 2. Then close the primary channel. */ mutex_lock(&vmbus_connection.channel_mutex); - vmbus_rescind_cleanup(channel); if (channel->state == CHANNEL_OPEN_STATE) { /* * The channel is currently not open; * it is safe for us to cleanup the channel. */ hv_process_channel_removal(rescind->child_relid); + } else { + complete(&channel->rescind_event); } mutex_unlock(&vmbus_connection.channel_mutex); } diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 12eb8caa4263..3f8dde8d59ba 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -394,13 +394,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel, } EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); +/* How many bytes were read in this iterator cycle */ +static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi, + u32 start_read_index) +{ + if (rbi->priv_read_index >= start_read_index) + return rbi->priv_read_index - start_read_index; + else + return rbi->ring_datasize - start_read_index + + rbi->priv_read_index; +} + /* * Update host ring buffer after iterating over packets. 
*/ void hv_pkt_iter_close(struct vmbus_channel *channel) { struct hv_ring_buffer_info *rbi = &channel->inbound; - u32 orig_write_sz = hv_get_bytes_to_write(rbi); + u32 curr_write_sz, pending_sz, bytes_read, start_read_index; /* * Make sure all reads are done before we update the read index since @@ -408,8 +419,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) * is updated. */ virt_rmb(); + start_read_index = rbi->ring_buffer->read_index; rbi->ring_buffer->read_index = rbi->priv_read_index; + if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz) + return; + /* * Issue a full memory barrier before making the signaling decision. * Here is the reason for having this barrier: @@ -423,26 +438,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) */ virt_mb(); - /* If host has disabled notifications then skip */ - if (rbi->ring_buffer->interrupt_mask) + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + if (!pending_sz) return; - if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { - u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + /* + * Ensure the read of write_index in hv_get_bytes_to_write() + * happens after the read of pending_send_sz. + */ + virt_rmb(); + curr_write_sz = hv_get_bytes_to_write(rbi); + bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index); - /* - * If there was space before we began iteration, - * then host was not blocked. Also handles case where - * pending_sz is zero then host has nothing pending - * and does not need to be signaled. - */ - if (orig_write_sz > pending_sz) - return; + /* + * If there was space before we began iteration, + * then host was not blocked. + */ - /* If pending write will not fit, don't give false hope. */ - if (hv_get_bytes_to_write(rbi) < pending_sz) - return; - } + if (curr_write_sz - bytes_read > pending_sz) + return; + + /* If pending write will not fit, don't give false hope. */ + if (curr_write_sz <= pending_sz) + return; vmbus_setevent(channel); } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 937801ac2fe0..2cd134dd94d2 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1534,7 +1534,7 @@ static int __init hv_acpi_init(void) { int ret, t; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) return -ENODEV; init_completion(&probe_event); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index c13a4fd86b3c..a42744c7665b 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { const struct tjmax_model *tm = &tjmax_model_table[i]; if (c->x86_model == tm->model && - (tm->mask == ANY || c->x86_mask == tm->mask)) + (tm->mask == ANY || c->x86_stepping == tm->mask)) return tm->tjmax; } /* Early chips have no MSR for TjMax */ - if (c->x86_model == 0xf && c->x86_mask < 4) + if (c->x86_model == 0xf && c->x86_stepping < 4) usemsr_ee = 0; if (c->x86_model > 0xe && usemsr_ee) { @@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu) * Readings might stop update when processor visited too deep sleep, * fixed for stepping D0 (6EC). 
*/ - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); return -ENODEV; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index ef91b8a67549..84e91286fc4f 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -293,7 +293,7 @@ u8 vid_which_vrm(void) if (c->x86 < 6) /* Any CPU with family lower than 6 */ return 0; /* doesn't have VID */ - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); if (vrm_ret == 134) vrm_ret = get_via_model_d_vrm(); if (vrm_ret == 0) diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index 62e38fa8cda2..e9e6aeabbf84 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { u16 config_default; - int calibration_factor; + int calibration_value; int registers; int shunt_div; int bus_voltage_shift; int bus_voltage_lsb; /* uV */ - int power_lsb; /* uW */ + int power_lsb_factor; }; struct ina2xx_data { const struct ina2xx_config *config; long rshunt; + long current_lsb_uA; + long power_lsb_uW; struct mutex config_lock; struct regmap *regmap; @@ -116,21 +118,21 @@ struct ina2xx_data { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { .config_default = INA219_CONFIG_DEFAULT, - .calibration_factor = 40960000, + .calibration_value = 4096, .registers = INA219_REGISTERS, .shunt_div = 100, .bus_voltage_shift = 3, .bus_voltage_lsb = 4000, - .power_lsb = 20000, + .power_lsb_factor = 20, }, [ina226] = { .config_default = INA226_CONFIG_DEFAULT, - .calibration_factor = 5120000, + .calibration_value = 2048, .registers = INA226_REGISTERS, .shunt_div = 400, .bus_voltage_shift = 0, .bus_voltage_lsb = 1250, - .power_lsb = 25000, + .power_lsb_factor = 25, }, }; @@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval) return INA226_SHIFT_AVG(avg_bits); } +/* + * Calibration register is set to the best value, which eliminates + * truncation errors on calculating current register in hardware. + * According to datasheet (eq. 3) the best values are 2048 for + * ina226 and 4096 for ina219. They are hardcoded as calibration_value. + */ static int ina2xx_calibrate(struct ina2xx_data *data) { - u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - data->rshunt); - - return regmap_write(data->regmap, INA2XX_CALIBRATION, val); + return regmap_write(data->regmap, INA2XX_CALIBRATION, + data->config->calibration_value); } /* @@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data) if (ret < 0) return ret; - /* - * Set current LSB to 1mA, shunt is in uOhms - * (equation 13 in datasheet). 
- */ return ina2xx_calibrate(data); } @@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_POWER: - val = regval * data->config->power_lsb; + val = regval * data->power_lsb_uW; break; case INA2XX_CURRENT: - /* signed register, LSB=1mA (selected), in mA */ - val = (s16)regval; + /* signed register, result in mA */ + val = regval * data->current_lsb_uA; + val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_CALIBRATION: - val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - regval); + val = regval; break; default: /* programmer goofed */ @@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev, ina2xx_get_value(data, attr->index, regval)); } -static ssize_t ina2xx_set_shunt(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) +/* + * In order to keep calibration register value fixed, the product + * of current_lsb and shunt_resistor should also be fixed and equal + * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order + * to keep the scale. + */ +static int ina2xx_set_shunt(struct ina2xx_data *data, long val) +{ + unsigned int dividend = DIV_ROUND_CLOSEST(1000000000, + data->config->shunt_div); + if (val <= 0 || val > dividend) + return -EINVAL; + + mutex_lock(&data->config_lock); + data->rshunt = val; + data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val); + data->power_lsb_uW = data->config->power_lsb_factor * + data->current_lsb_uA; + mutex_unlock(&data->config_lock); + + return 0; +} + +static ssize_t ina2xx_store_shunt(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) { unsigned long val; int status; @@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev, if (status < 0) return status; - if (val == 0 || - /* Values greater than the calibration factor make no sense. 
*/ - val > data->config->calibration_factor) - return -EINVAL; - - mutex_lock(&data->config_lock); - data->rshunt = val; - status = ina2xx_calibrate(data); - mutex_unlock(&data->config_lock); + status = ina2xx_set_shunt(data, val); if (status < 0) return status; - return count; } @@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_set_shunt, + ina2xx_show_value, ina2xx_store_shunt, INA2XX_CALIBRATION); /* update interval (ina226 only) */ @@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client, /* set the device type */ data->config = &ina2xx_config[chip]; + mutex_init(&data->config_lock); if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) { struct ina2xx_platform_data *pdata = dev_get_platdata(dev); @@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client, val = INA2XX_RSHUNT_DEFAULT; } - if (val <= 0 || val > data->config->calibration_factor) - return -ENODEV; - - data->rshunt = val; + ina2xx_set_shunt(data, val); ina2xx_regmap_config.max_register = data->config->registers; @@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client, return -ENODEV; } - mutex_init(&data->config_lock); - data->groups[group++] = &ina2xx_group; if (id->driver_data == ina226) data->groups[group++] = &ina226_group; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 5f11dc014ed6..e5234f953a6d 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -22,6 +22,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include #include #include #include @@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = { #define JC42_REG_TEMP 0x05 #define JC42_REG_MANID 0x06 #define JC42_REG_DEVICEID 0x07 +#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */ /* Status bits in temperature register */ #define JC42_ALARM_CRIT_BIT 15 @@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = { #define GT_MANID 0x1c68 /* Giantec */ #define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */ +/* SMBUS register */ +#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */ + /* Supported chips */ /* Analog Devices */ @@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id) data->extended = !!(cap & JC42_CAP_RANGE); + if (device_property_read_bool(dev, "smbus-timeout-disable")) { + int smbus; + + /* + * Not all chips support this register, but from a + * quick read of various datasheets no chip appears + * incompatible with the below attempt to disable + * the timeout. And the whole thing is opt-in... + */ + smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS); + if (smbus < 0) + return smbus; + i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS, + smbus | SMBUS_STMOUT); + } + config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG); if (config < 0) return config; diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index ce3b91f22e30..5c740996aa62 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev) * and AM3 formats, but that's the best we can do. 
*/ return boot_cpu_data.x86_model < 4 || - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } static int k10temp_probe(struct pci_dev *pdev, diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5a632bcf869b..e59f9113fb93 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev, return -ENOMEM; model = boot_cpu_data.x86_model; - stepping = boot_cpu_data.x86_mask; + stepping = boot_cpu_data.x86_stepping; /* feature available since SH-C0, exclude older revisions */ if ((model == 4 && stepping == 0) || diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 4efa2bd4f6d8..fa613bd209e3 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops; /* Function declarations */ void pmbus_clear_cache(struct i2c_client *client); -int pmbus_set_page(struct i2c_client *client, u8 page); -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); +int pmbus_set_page(struct i2c_client *client, int page); +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg); +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word); int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); int pmbus_write_byte(struct i2c_client *client, int page, u8 value); int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 302f0aef59de..a139940cd991 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -136,13 +137,13 @@ void pmbus_clear_cache(struct i2c_client *client) } EXPORT_SYMBOL_GPL(pmbus_clear_cache); -int pmbus_set_page(struct i2c_client *client, u8 page) +int pmbus_set_page(struct i2c_client *client, int page) { struct pmbus_data *data = i2c_get_clientdata(client); int rv = 0; int newpage; - if (page != data->currpage) { + if (page >= 0 && page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); if (newpage != page) @@ -158,11 +159,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_write_byte(client, value); } @@ -186,7 +185,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) return pmbus_write_byte(client, page, value); } -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, + u16 word) { int rv; @@ -219,7 +219,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg, return pmbus_write_word_data(client, page, reg, word); } -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg) { int rv; @@ -255,11 +255,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = 
pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_read_byte_data(client, reg); } @@ -502,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data, static long pmbus_reg2data_direct(struct pmbus_data *data, struct pmbus_sensor *sensor) { - long val = (s16) sensor->data; - long m, b, R; + s64 b, val = (s16)sensor->data; + s32 m, R; m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; @@ -531,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data, R--; } while (R < 0) { - val = DIV_ROUND_CLOSEST(val, 10); + val = div_s64(val + 5LL, 10L); /* round closest */ R++; } - return (val - b) / m; + val = div_s64(val - b, m); + return clamp_val(val, LONG_MIN, LONG_MAX); } /* @@ -659,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, static u16 pmbus_data2reg_direct(struct pmbus_data *data, struct pmbus_sensor *sensor, long val) { - long m, b, R; + s64 b, val64 = val; + s32 m, R; m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; @@ -676,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data, R -= 3; /* Adjust R and b for data in milli-units */ b *= 1000; } - val = val * m + b; + val64 = val64 * m + b; while (R > 0) { - val *= 10; + val64 *= 10; R--; } while (R < 0) { - val = DIV_ROUND_CLOSEST(val, 10); + val64 = div_s64(val64 + 5LL, 10L); /* round closest */ R++; } - return val; + return (u16)clamp_val(val64, S16_MIN, S16_MAX); } static u16 pmbus_data2reg_vid(struct pmbus_data *data, diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index d7a3e453016d..735dca089389 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -46,8 +46,11 @@ #define TPIU_ITATBCTR0 0xef8 /** register definition **/ +/* FFSR - 0x300 */ +#define FFSR_FT_STOPPED BIT(1) /* FFCR - 0x304 */ #define FFCR_FON_MAN BIT(6) +#define FFCR_STOP_FI BIT(12) /** * @base: memory mapped base address for this component. @@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata) { CS_UNLOCK(drvdata->base); - /* Clear formatter controle reg. */ - writel_relaxed(0x0, drvdata->base + TPIU_FFCR); + /* Clear formatter and stop on flush */ + writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR); /* Generate manual flush */ - writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + /* Wait for flush to complete */ + coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0); + /* Wait for formatter to stop */ + coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1); CS_LOCK(drvdata->base); } diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index 1b412f8a56b5..f40fa18c3881 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig @@ -55,6 +55,28 @@ config INTEL_TH_MSU Say Y here to enable MSU output device for Intel TH. +config INTEL_TH_MSU_DVC + tristate "Intel Trace Hub Memory Storage Unit to USB-dvc" + depends on DVC_TRACE_BUS + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. + This provides the means to route this data over USB, + using DvC-Trace. + + Say Y here to enable DvC-Trace output device for Intel TH. 
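The pmbus_core.c hunks above widen the PMBus "direct" format conversion to 64-bit intermediates and clamp the result, so large m/b coefficients can no longer overflow a long. The decode follows X = (Y * 10^-R - b) / m from the PMBus specification; a standalone sketch of that arithmetic (a hypothetical helper, not the driver function, and with the milli/micro-unit scaling left out):

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Decode a PMBus "direct" format reading: X = (Y * 10^-R - b) / m. */
static long direct_reg2data(s16 reg, s32 m, s64 b, int R)
{
	s64 val = reg;

	if (m == 0)
		return 0;

	/* Apply 10^-R in 64-bit math. */
	for (R = -R; R > 0; R--)
		val *= 10;
	for (; R < 0; R++)
		val = div_s64(val + 5, 10);	/* round to closest, as in the patch */

	val = div_s64(val - b, m);
	return clamp_val(val, LONG_MIN, LONG_MAX);
}

The encode path in the patch mirrors this with val64 = val * m + b and a final clamp to the s16 register range.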
+ +config INTEL_TH_MSU_DVC_DEBUG + tristate "Intel Trace Hub Memory Storage Unit to USB-dvc debug" + depends on INTEL_TH_MSU_DVC + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. + This enables extensive logging and collection of + statistical data on MSU/DvC-Trace device performance. + + Say Y to enable extended debug features on MSU-DvC. + config INTEL_TH_PTI tristate "Intel(R) Trace Hub PTI output" help @@ -70,4 +92,15 @@ config INTEL_TH_DEBUG help Say Y here to enable debugging. +config INTEL_TH_EARLY_PRINTK + bool "Intel TH early printk console" + depends on INTEL_TH=y + default n + ---help--- + Enables early printk console. + When the early printk console is enabled in the kernel + command line, kernel log messages are sent to Intel TH + (hence they are aggregated with the other trace messages + from the platform). + endif diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile index 880c9b5e8566..f33d016f3d80 100644 --- a/drivers/hwtracing/intel_th/Makefile +++ b/drivers/hwtracing/intel_th/Makefile @@ -15,5 +15,12 @@ intel_th_sth-y := sth.o obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu.o intel_th_msu-y := msu.o +obj-$(CONFIG_INTEL_TH_MSU_DVC) += intel_th_msu_dvc.o +intel_th_msu_dvc-y := msu-dvc.o +subdir-ccflags-$(CONFIG_INTEL_TH_MSU_DVC_DEBUG) += -DMDD_DEBUG + obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o intel_th_pti-y := pti.o + +obj-$(CONFIG_INTEL_TH_EARLY_PRINTK) += intel_th_early_printk.o +intel_th_early_printk-y := early_printk.o diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 1a023e30488c..d6a62b4b765e 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -222,48 +222,75 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(port); -static int intel_th_output_activate(struct intel_th_device *thdev) +/** + * intel_th_output_activate() - call output initialization procedure + * @output: output to activate + */ +int intel_th_output_activate(struct intel_th_output *output) { - struct intel_th_driver *thdrv = - to_intel_th_driver_or_null(thdev->dev.driver); - struct intel_th *th = to_intel_th(thdev); - int ret = 0; - - if (!thdrv) - return -ENODEV; + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + struct intel_th_driver *outdrv = + to_intel_th_driver(outdev->dev.driver); - if (!try_module_get(thdrv->driver.owner)) - return -ENODEV; + if (WARN_ON_ONCE(outdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; - pm_runtime_get_sync(&thdev->dev); + if (outdrv->activate) + return outdrv->activate(outdev); - if (th->activate) - ret = th->activate(th); - if (ret) - goto fail_put; + return 0; +} +EXPORT_SYMBOL_GPL(intel_th_output_activate); - if (thdrv->activate) - ret = thdrv->activate(thdev); - else - intel_th_trace_enable(thdev); +/** + * intel_th_first_trace() - notification callback for first trace + * + * Notify each child device that the first capture is about to begin. + * This gives a chance to save the current data as the Trace Hub may have + * already been configured by the BIOS to trace to a given output. 
+ * + * @dev: output device to notify + * @data: private data - unused + */ +static int intel_th_first_trace(struct device *dev, void *data) +{ + struct intel_th_device *thdev = + container_of(dev, struct intel_th_device, dev); + struct intel_th_driver *thdrv = + to_intel_th_driver(thdev->dev.driver); - if (ret) - goto fail_deactivate; + if (thdrv && thdrv->first_trace) + thdrv->first_trace(thdev); return 0; +} -fail_deactivate: - if (th->deactivate) - th->deactivate(th); +/** + * intel_th_start_trace() - start tracing to an output device + * @thdev: output device that requests tracing + */ +static int intel_th_start_trace(struct intel_th_device *thdev) +{ + struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + static atomic_t first = { .counter = 1, }; -fail_put: - pm_runtime_put(&thdev->dev); - module_put(thdrv->driver.owner); + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return -EINVAL; - return ret; + if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; + + if (atomic_dec_if_positive(&first) == 0) + device_for_each_child(&hub->dev, NULL, intel_th_first_trace); + + /* The hub has control over Intel Trace Hub. + * Let the hub start a trace if possible and activate the output. */ + return hubdrv->enable(hub, &thdev->output); } -static void intel_th_output_deactivate(struct intel_th_device *thdev) +static void intel_th_stop_trace(struct intel_th_device *thdev) { struct intel_th_driver *thdrv = to_intel_th_driver_or_null(thdev->dev.driver); @@ -305,9 +332,9 @@ static ssize_t active_store(struct device *dev, struct device_attribute *attr, if (!!val != thdev->output.active) { if (val) - ret = intel_th_output_activate(thdev); + ret = intel_th_start_trace(thdev); else - intel_th_output_deactivate(thdev); + intel_th_stop_trace(thdev); } return ret ? 
ret : size; @@ -376,6 +403,7 @@ intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name, thdev->id = id; thdev->type = type; + thdev->th = th; strcpy(thdev->name, name); device_initialize(&thdev->dev); @@ -807,10 +835,11 @@ static const struct file_operations intel_th_output_fops = { * @devres: parent's resources * @ndevres: number of resources * @irq: irq number + * @reset: parent's reset function */ struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq) + struct resource *devres, unsigned int ndevres, int irq, void (*reset)(struct intel_th *th)) { struct intel_th *th; int err; @@ -837,6 +866,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, th->resource = devres; th->num_resources = ndevres; th->irq = irq; + th->reset = reset; dev_set_drvdata(dev, th); @@ -891,10 +921,26 @@ void intel_th_free(struct intel_th *th) EXPORT_SYMBOL_GPL(intel_th_free); /** - * intel_th_trace_enable() - enable tracing for an output device - * @thdev: output device that requests tracing be enabled + * intel_th_reset() - reset hardware registers + * @hub: hub requesting the reset + */ +void intel_th_reset(struct intel_th_device *hub) +{ + struct intel_th *th = hub->th; + + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return; + + if (th->reset) + th->reset(th); +} +EXPORT_SYMBOL_GPL(intel_th_reset); + +/** + * intel_th_trace_switch() - execute a switch sequence + * @thdev: output device that requests tracing switch */ -int intel_th_trace_enable(struct intel_th_device *thdev) +int intel_th_trace_switch(struct intel_th_device *thdev) { struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); @@ -905,12 +951,11 @@ int intel_th_trace_enable(struct intel_th_device *thdev) if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) return -EINVAL; - pm_runtime_get_sync(&thdev->dev); - hubdrv->enable(hub, &thdev->output); + hubdrv->trig_switch(hub, &thdev->output); return 0; } -EXPORT_SYMBOL_GPL(intel_th_trace_enable); +EXPORT_SYMBOL_GPL(intel_th_trace_switch); /** * intel_th_trace_disable() - disable tracing for an output device diff --git a/drivers/hwtracing/intel_th/early_printk.c b/drivers/hwtracing/intel_th/early_printk.c new file mode 100644 index 000000000000..bbcb9f89161d --- /dev/null +++ b/drivers/hwtracing/intel_th/early_printk.c @@ -0,0 +1,98 @@ +#include +#include +#include +#include "sth.h" + +static unsigned long sth_phys_addr; + +void early_intel_th_init(const char *s) +{ + size_t n; + unsigned long addr, chan; + char buf[32] = {0, }; + char *match, *next; + + /* Expect ,0x:[,keep] */ + if (*s == ',') + ++s; + if (strncmp(s, "0x", 2)) + goto fail; + + n = strcspn(s, ","); + if (n > sizeof(buf) - 1) + goto fail; + strncpy(buf, s, n); + next = buf; + + /* Get sw_bar */ + match = strsep(&next, ":"); + if (!match) + goto fail; + + if (kstrtoul(match, 16, &addr)) + goto fail; + + /* Get channel */ + if (kstrtoul(next, 0, &chan)) + goto fail; + + sth_phys_addr = addr + chan * sizeof(struct intel_th_channel); + return; + +fail: + pr_err("%s invalid parameter %s", __func__, s); +} + +static void intel_th_early_write(struct console *con, const char *buf, + unsigned len) +{ + struct intel_th_channel *channel; + const u8 *p = buf; + const u32 sven_header = 0x01000242; + + if (WARN_ON_ONCE(!sth_phys_addr)) + return; + + /* Software can send messages to Intel TH by writing to an MMIO space + * that is 
divided in several Master/Channel regions. + * Write directly to the address provided through the cmdline. + */ + set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, sth_phys_addr); + channel = (struct intel_th_channel *) + (__fix_to_virt(FIX_EARLYCON_MEM_BASE) + + (sth_phys_addr & (PAGE_SIZE - 1))); + + /* Add hardcoded SVEN header + * type: DEBUG_STRING + * severity: SVEN_SEVERITY_NORMAL + * length: payload size + * subtype: SVEN_DEBUGSTR_Generic + */ + iowrite32(sven_header, &channel->DnTS); + iowrite16(len, &channel->Dn); + + while (len) { + if (len >= 4) { + iowrite32(*(u32 *)p, &channel->Dn); + p += 4; + len -= 4; + } else if (len >= 2) { + iowrite16(*(u16 *)p, &channel->Dn); + p += 2; + len -= 2; + } else { + iowrite8(*(u8 *)p, &channel->Dn); + p += 1; + len -= 1; + } + } + + iowrite32(0, &channel->FLAG); +} + +struct console intel_th_early_console = { + .name = "earlyintelth", + .write = intel_th_early_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index 018678ec3c13..571f1b6956ef 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -35,14 +35,16 @@ struct gth_device; * @output: link to output device's output descriptor * @index: output port number * @port_type: one of GTH_* port type values - * @master: bitmap of masters configured for this output + * @config: output configuration backup + * @smcfreq: maintenance packet frequency backup */ struct gth_output { struct gth_device *gth; struct intel_th_output *output; unsigned int index; unsigned int port_type; - DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1); + u32 config; + u32 smcfreq; }; /** @@ -73,6 +75,8 @@ static void gth_output_set(struct gth_device *gth, int port, u32 val; int shift = (port & 3) * 8; + gth->output[port].config = config; + val = ioread32(gth->base + reg); val &= ~(0xff << shift); val |= config << shift; @@ -99,6 +103,8 @@ static void gth_smcfreq_set(struct gth_device *gth, int port, int shift = (port & 1) * 16; u32 val; + gth->output[port].smcfreq = freq; + val = ioread32(gth->base + reg); val &= ~(0xffff << shift); val |= freq << shift; @@ -147,6 +153,24 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port) iowrite32(val, gth->base + reg); } +static int gth_master_get(struct gth_device *gth, unsigned int master) +{ + unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u); + unsigned int shift = (master & 0x7) * 4; + u32 val; + + if (master >= 256) { + reg = REG_GTH_GSWTDEST; + shift = 0; + } + + val = ioread32(gth->base + reg); + val &= (0xf << shift); + val >>= shift; + + return val ? 
val & 0x7 : -1; +} + static ssize_t master_attr_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -190,14 +214,7 @@ static ssize_t master_attr_store(struct device *dev, old_port = gth->master[ma->master]; if (old_port >= 0) { gth->master[ma->master] = -1; - clear_bit(ma->master, gth->output[old_port].master); - - /* - * if the port is active, program this setting, - * implies that runtime PM is on - */ - if (gth->output[old_port].output->active) - gth_master_set(gth, ma->master, -1); + gth_master_set(gth, ma->master, -1); } /* connect to the new output port, if any */ @@ -208,11 +225,8 @@ static ssize_t master_attr_store(struct device *dev, goto unlock; } - set_bit(ma->master, gth->output[port].master); - - /* if the port is active, program this setting, see above */ - if (gth->output[port].output->active) - gth_master_set(gth, ma->master, port); + gth_master_set(gth, ma->master, port); + gth->master[ma->master] = port; } gth->master[ma->master] = port; @@ -280,45 +294,6 @@ gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm) return config; } -/* - * Reset outputs and sources - */ -static int intel_th_gth_reset(struct gth_device *gth) -{ - u32 reg; - int port, i; - - reg = ioread32(gth->base + REG_GTH_SCRPD0); - if (reg & SCRPD_DEBUGGER_IN_USE) - return -EBUSY; - - /* Always save/restore STH and TU registers in S0ix entry/exit */ - reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; - iowrite32(reg, gth->base + REG_GTH_SCRPD0); - - /* output ports */ - for (port = 0; port < 8; port++) { - if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == - GTH_NONE) - continue; - - gth_output_set(gth, port, 0); - gth_smcfreq_set(gth, port, 16); - } - /* disable overrides */ - iowrite32(0, gth->base + REG_GTH_DESTOVR); - - /* masters swdest_0~31 and gswdest */ - for (i = 0; i < 33; i++) - iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4); - - /* sources */ - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfc, gth->base + REG_GTH_SCR2); - - return 0; -} - /* * "outputs" attribute group */ @@ -464,6 +439,66 @@ static int intel_th_output_attributes(struct gth_device *gth) return sysfs_create_group(>h->dev->kobj, >h->output_group); } +/** + * intel_th_gth_stop() - stop tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * @capture_done: set when no more traces will be captured + * + * This will stop tracing using force storeEn off signal and wait for the + * pipelines to be empty for the corresponding output port. + */ +static void intel_th_gth_stop(struct gth_device *gth, + struct intel_th_output *output, + bool capture_done) +{ + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + unsigned long count; + u32 reg; + u32 scr2 = 0xfc | (capture_done ? 
1 : 0); + + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(scr2, gth->base + REG_GTH_SCR2); + + /* wait on pipeline empty for the given port */ + for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; + count && !(reg & BIT(output->port)); count--) { + reg = ioread32(gth->base + REG_GTH_STAT); + cpu_relax(); + } + + if (!count) + dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n", + output->port); + + /* wait on output pipeline empty */ + if (output->wait_empty) + output->wait_empty(outdev); + + /* clear force capture done for next captures */ + iowrite32(0xfc, gth->base + REG_GTH_SCR2); +} + +/** + * intel_th_gth_start() - start tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * + * This will start tracing using force storeEn signal. + */ +static void intel_th_gth_start(struct gth_device *gth, + struct intel_th_output *output) +{ + u32 scr = 0xfc0000; + + if (output->multiblock) + scr |= 0xff; + + iowrite32(scr, gth->base + REG_GTH_SCR); + iowrite32(0, gth->base + REG_GTH_SCR2); +} + /** * intel_th_gth_disable() - disable tracing to an output device * @thdev: GTH device @@ -477,48 +512,49 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - unsigned long count; - int master; + int i; u32 reg; spin_lock(&gth->gth_lock); output->active = false; - for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS) { - gth_master_set(gth, master, -1); - } - spin_unlock(&gth->gth_lock); - - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfd, gth->base + REG_GTH_SCR2); - - /* wait on pipeline empty for the given port */ - for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; - count && !(reg & BIT(output->port)); count--) { - reg = ioread32(gth->base + REG_GTH_STAT); - cpu_relax(); - } + for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) + if (gth->master[i] == output->port) + gth_master_set(gth, i, -1); - /* clear force capture done for next captures */ - iowrite32(0xfc, gth->base + REG_GTH_SCR2); + spin_unlock(&gth->gth_lock); - if (!count) - dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", - output->port); + intel_th_gth_stop(gth, output, true); reg = ioread32(gth->base + REG_GTH_SCRPD0); reg &= ~output->scratchpad; iowrite32(reg, gth->base + REG_GTH_SCRPD0); + + /* Workaround for PTI pipeline empty not set by hardware */ + if (output->type == GTH_PTI && + !(BIT(output->port) & ioread32(gth->base + REG_GTH_STAT))) + intel_th_reset(thdev); } -static void gth_tscu_resync(struct gth_device *gth) +/* + * Set default configuration. + */ +static void intel_th_gth_reset(struct gth_device *gth) { u32 reg; - reg = ioread32(gth->base + REG_TSCU_TSUCTRL); - reg &= ~TSUCTRL_CTCRESYNC; - iowrite32(reg, gth->base + REG_TSCU_TSUCTRL); + /* Always save/restore STH and TU registers in S0ix entry/exit */ + reg = ioread32(gth->base + REG_GTH_SCRPD0); + reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; + iowrite32(reg, gth->base + REG_GTH_SCRPD0); + + /* Force sources off */ + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(0xfc, gth->base + REG_GTH_SCR2); + + /* Setup CTS for single trigger */ + iowrite32(0x80000000, gth->base + REG_CTS_C0S0_EN); + iowrite32(0x40000010, gth->base + REG_CTS_C0S0_ACT); } /** @@ -529,35 +565,91 @@ static void gth_tscu_resync(struct gth_device *gth) * This will configure all masters set to output to this device and * enable tracing using force storeEn signal.
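+ *
+ * Return: 0 on success or a negative error code; -EBUSY if a debugger
+ * owns the Trace Hub or another output port is already active.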
*/ -static void intel_th_gth_enable(struct intel_th_device *thdev, - struct intel_th_output *output) +static int intel_th_gth_enable(struct intel_th_device *thdev, + struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - struct intel_th *th = to_intel_th(thdev); - u32 scr = 0xfc0000, scrpd; - int master; + u32 scrpd; + int i; + int ret = -EBUSY; + + /* No operation allowed while a debugger is connected */ + scrpd = ioread32(gth->base + REG_GTH_SCRPD0); + if (scrpd & SCRPD_DEBUGGER_IN_USE) + return ret; spin_lock(&gth->gth_lock); - for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS + 1) { - gth_master_set(gth, master, output->port); + + /* Only allow one output active at a time */ + for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) { + if (gth->output[i].output && + gth->output[i].output->active) { + spin_unlock(&gth->gth_lock); + return ret; + } } - if (output->multiblock) - scr |= 0xff; + intel_th_reset(thdev); + intel_th_gth_reset(gth); + + /* Re-configure output */ + gth_output_set(gth, output->port, gth->output[output->port].config); + gth_smcfreq_set(gth, output->port, gth->output[output->port].smcfreq); + + /* Enable masters for the output, disable others */ + for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) + gth_master_set(gth, i, gth->master[i] == output->port ? + output->port : -1); output->active = true; spin_unlock(&gth->gth_lock); - if (INTEL_TH_CAP(th, tscu_enable)) - gth_tscu_resync(gth); + /* Setup the output */ + ret = intel_th_output_activate(output); + if (ret) + return ret; scrpd = ioread32(gth->base + REG_GTH_SCRPD0); scrpd |= output->scratchpad; iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); - iowrite32(scr, gth->base + REG_GTH_SCR); - iowrite32(0, gth->base + REG_GTH_SCR2); + /* Enable sources */ + intel_th_gth_start(gth, output); + + return 0; +} + +/** + * intel_th_gth_switch() - execute a switch sequence + * @thdev: GTH device + * @output: output device's descriptor + * + * This will execute a switch sequence that will trigger a switch window + * when tracing to MSC in multi-block mode.
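+ *
+ * The switch is driven through the Common Capture Sequencer (CTS): the
+ * trigger is pulsed, its status is polled until it asserts, and the trace
+ * sources are briefly stopped and restarted with interrupts disabled so
+ * that a clean window boundary is produced.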
+ */ +static void intel_th_gth_switch(struct intel_th_device *thdev, + struct intel_th_output *output) +{ + struct gth_device *gth = dev_get_drvdata(&thdev->dev); + unsigned long count, flags; + u32 reg; + + /* trigger */ + iowrite32(0, gth->base + REG_CTS_CTL); + iowrite32(1, gth->base + REG_CTS_CTL); + /* wait on trigger status */ + for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH; + count && !(reg & BIT(4)); count--) { + reg = ioread32(gth->base + REG_CTS_STAT); + cpu_relax(); + } + if (!count) + dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n"); + + local_irq_save(flags); + intel_th_gth_stop(gth, output, false); + intel_th_gth_start(gth, output); + local_irq_restore(flags); } /** @@ -574,9 +666,16 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, static int intel_th_gth_assign(struct intel_th_device *thdev, struct intel_th_device *othdev) { - struct gth_device *gth = dev_get_drvdata(&thdev->dev); + struct gth_device *gth; int i, id; + if (!thdev || !othdev) + return -EINVAL; + + gth = dev_get_drvdata(&thdev->dev); + if (!gth) + return -EINVAL; + if (thdev->host_mode) return -EBUSY; @@ -640,10 +739,9 @@ intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master) master = TH_CONFIGURABLE_MASTERS; spin_lock(&gth->gth_lock); - if (gth->master[master] == -1) { - set_bit(master, gth->output[port].master); + if (gth->master[master] == -1) gth->master[master] = port; - } + + spin_unlock(&gth->gth_lock); return 0; @@ -674,29 +772,19 @@ static int intel_th_gth_probe(struct intel_th_device *thdev) gth->base = base; spin_lock_init(&gth->gth_lock); - dev_set_drvdata(dev, gth); - - /* - * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE - * bit. Either way, don't reset HW in this case, and don't export any - * capture configuration attributes. Also, refuse to assign output - * drivers to ports, see intel_th_gth_assign(). - */ - if (thdev->host_mode) - return 0; + dev_set_drvdata(dev, gth); - ret = intel_th_gth_reset(gth); - if (ret) { - if (ret != -EBUSY) - return ret; - - thdev->host_mode = true; - - return 0; - } + /* + * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE + * bit. Either way, don't reset HW in this case, and don't export any + * capture configuration attributes. Also, refuse to assign output + * drivers to ports, see intel_th_gth_assign().
+ */ + if (thdev->host_mode) + return 0; for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) - gth->master[i] = -1; + gth->master[i] = gth_master_get(gth, i); for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) { gth->output[i].gth = gth; @@ -739,6 +827,7 @@ static struct intel_th_driver intel_th_gth_driver = { .unassign = intel_th_gth_unassign, .set_output = intel_th_gth_set_output, .enable = intel_th_gth_enable, + .trig_switch = intel_th_gth_switch, .disable = intel_th_gth_disable, .driver = { .name = "gth", diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h index f3d234251a12..cdfd5588a62a 100644 --- a/drivers/hwtracing/intel_th/gth.h +++ b/drivers/hwtracing/intel_th/gth.h @@ -57,6 +57,11 @@ enum { REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */ REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */ + /* Common Capture Sequencer (CTS) registers */ + REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */ + REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */ + REG_CTS_STAT = 0x32a0, /* cts_status */ + REG_CTS_CTL = 0x32a4, /* cts_control */ }; /* waiting for Pipeline Empty bit(s) to assert for GTH */ @@ -64,5 +69,7 @@ enum { #define TSUCTRL_CTCRESYNC BIT(0) #define TSCUSTAT_CTCSYNCING BIT(1) +/* waiting for Trigger status to assert for CTS */ +#define CTS_TRIG_WAITLOOP_DEPTH 10000 #endif /* __INTEL_TH_GTH_H__ */ diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 99ad563fc40d..d503fb8cc06d 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -26,6 +26,8 @@ enum { INTEL_TH_SWITCH, }; +struct intel_th_device; + /** * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices * @port: output port number, assigned by the switch @@ -33,6 +35,7 @@ enum { * @scratchpad: scratchpad bits to flag when this output is enabled * @multiblock: true for multiblock output configuration * @active: true when this output is enabled + * @wait_empty: wait for device pipeline to be empty * * Output port descriptor, used by switch driver to tell which output * port this output device corresponds to. 
Filled in at output device's @@ -45,6 +48,7 @@ struct intel_th_output { unsigned int scratchpad; bool multiblock; bool active; + void (*wait_empty)(struct intel_th_device *); }; /** @@ -61,6 +65,7 @@ struct intel_th_drvdata { * struct intel_th_device - device on the intel_th bus * @dev: device * @drvdata: hardware capabilities/quirks + * @th: core device * @resource: array of resources available to this device * @num_resources: number of resources in @resource array * @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH} @@ -70,12 +75,13 @@ struct intel_th_drvdata { * @name: device name to match the driver */ struct intel_th_device { - struct device dev; + struct device dev; struct intel_th_drvdata *drvdata; - struct resource *resource; - unsigned int num_resources; - unsigned int type; - int id; + struct intel_th *th; + struct resource *resource; + unsigned int num_resources; + unsigned int type; + int id; /* INTEL_TH_SWITCH specific */ bool host_mode; @@ -154,6 +160,7 @@ intel_th_output_assigned(struct intel_th_device *thdev) */ struct intel_th_driver { struct device_driver driver; + void (*first_trace)(struct intel_th_device *thdev); int (*probe)(struct intel_th_device *thdev); void (*remove)(struct intel_th_device *thdev); /* switch (GTH) ops */ @@ -161,8 +168,10 @@ struct intel_th_driver { struct intel_th_device *othdev); void (*unassign)(struct intel_th_device *thdev, struct intel_th_device *othdev); - void (*enable)(struct intel_th_device *thdev, + int (*enable)(struct intel_th_device *thdev, struct intel_th_output *output); + void (*trig_switch)(struct intel_th_device *thdev, + struct intel_th_output *output); void (*disable)(struct intel_th_device *thdev, struct intel_th_output *output); /* output ops */ @@ -219,13 +228,15 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev) struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq); + struct resource *devres, unsigned int ndevres, int irq, void (*reset)(struct intel_th *th)); void intel_th_free(struct intel_th *th); int intel_th_driver_register(struct intel_th_driver *thdrv); void intel_th_driver_unregister(struct intel_th_driver *thdrv); -int intel_th_trace_enable(struct intel_th_device *thdev); +int intel_th_output_activate(struct intel_th_output *output); +void intel_th_reset(struct intel_th_device *hub); +int intel_th_trace_switch(struct intel_th_device *thdev); int intel_th_trace_disable(struct intel_th_device *thdev); int intel_th_set_output(struct intel_th_device *thdev, unsigned int master); @@ -252,6 +263,7 @@ enum { * @num_thdevs: number of devices in the @thdev array * @num_resources: number or resources in the @resource array * @irq: irq number + * @reset: reset function of the core device * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -269,6 +281,8 @@ struct intel_th { unsigned int num_resources; int irq; + void (*reset)(struct intel_th *th); + int id; int major; #ifdef CONFIG_MODULES @@ -296,11 +310,11 @@ to_intel_th_hub(struct intel_th_device *thdev) enum { /* Global Trace Hub (GTH) */ REG_GTH_OFFSET = 0x0000, - REG_GTH_LENGTH = 0x2000, + REG_GTH_LENGTH = 0x4000, /* Timestamp counter unit (TSCU) */ REG_TSCU_OFFSET = 0x2000, - REG_TSCU_LENGTH = 0x1000, + REG_TSCU_LENGTH = 0x2000, /* Software Trace Hub (STH) [0x4000..0x4fff] */ REG_STH_OFFSET = 0x4000, diff --git a/drivers/hwtracing/intel_th/msu-dvc.c b/drivers/hwtracing/intel_th/msu-dvc.c new file mode 100755 
index 000000000000..91693c0f8175 --- /dev/null +++ b/drivers/hwtracing/intel_th/msu-dvc.c @@ -0,0 +1,1184 @@ +/* + * Intel Trace Hub to USB dvc-trace driver + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msu.h" + +#ifdef MDD_DEBUG +#define MDD_F_DEBUG() pr_debug("\n") +#else +#define MDD_F_DEBUG() do {} while (0) +#endif + +#define DTC_DRV_NAME "dvcith" + +#define MDD_MIN_TRANSFER_DEF 2048 +#define MDD_RETRY_TIMEOUT_DEF 2 +#define MDD_MAX_RETRY_CNT_DEF 150 + +/* The DWC3 gadget is able to handle a maximum of 32 TRBs per-ep (an sg based + * request counts as the number of sg-s). + * This should be updated in case some other UDC has a lower threshold. */ +#define MDD_MAX_TRB_CNT 32 + +#define mdd_err(mdd, ...) dev_err(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_warn(mdd, ...) dev_warn(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_info(mdd, ...) dev_info(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_debug(mdd, ...) dev_debug(&(mdd)->ddev.device, ## __VA_ARGS__) + +#ifdef MDD_DEBUG +struct msu_dvc_stats { + unsigned long work_start; + unsigned long work_end; + + unsigned long loop_count; + unsigned long hits; + + u64 full_block_size; + u64 valid_block_size; + u64 valid_data_size; + + u32 transfer_type:2; + u32 process_type:2; + + enum usb_device_speed speed; +}; +#endif + +enum { + MDD_TRANSFER_NO_CHANGE, + MDD_TRANSFER_AUTO, + MDD_TRANSFER_MIN = MDD_TRANSFER_AUTO, + MDD_TRANSFER_SINGLE, + MDD_TRANSFER_MULTI, + MDD_TRANSFER_MAX = MDD_TRANSFER_MULTI, +}; + +static const char *const transfer_type_name[] = { + [MDD_TRANSFER_AUTO] = "Auto", + [MDD_TRANSFER_SINGLE] = "Single", + [MDD_TRANSFER_MULTI] = "Multi", +}; + +enum { + MDD_PROC_NO_CHANGE, + MDD_PROC_NONE, + MDD_PROC_MIN = MDD_PROC_NONE, + MDD_PROC_REM_TAIL, + MDD_PROC_REM_ALL, + MDD_PROC_MAX = MDD_PROC_REM_ALL, +}; + +static const char *const process_type_name[] = { + [MDD_PROC_NONE] = "Full-Blocks", + [MDD_PROC_REM_TAIL] = "Trimmed-Blocks", + [MDD_PROC_REM_ALL] = "STP", +}; + +struct mdd_transfer_data { + u8 *buffer; + u8 *buffer_sg; + size_t buffer_sg_len; + dma_addr_t buffer_dma; + size_t buffer_len; + struct scatterlist *sg_raw; + struct scatterlist *sg_proc; + struct scatterlist *sg_trans; /* not be allocated */ + size_t block_count; + size_t block_size; + spinlock_t lock; +}; + +#define mdd_lock_transfer(mdd) spin_lock(&mdd->tdata.lock) +#define mdd_unlock_transfer(mdd) spin_unlock(&mdd->tdata.lock) + +struct msu_dvc_dev { + struct dvct_source_device ddev; + atomic_t *dtc_status; + struct usb_ep *ep; + struct usb_function *func; + enum usb_device_speed speed; + struct intel_th_device *th_dev; + + struct workqueue_struct *wrq; + struct work_struct work; + struct usb_request *req; + wait_queue_head_t wq; + atomic_t req_ongoing; + + /*attributes */ + u32 retry_timeout; + u32 max_retry_count; + u32 transfer_type:2; + u32 process_type:2; + u32 min_transfer; + +#ifdef MDD_DEBUG + struct msu_dvc_stats 
stats; +#endif + struct mdd_transfer_data tdata; + + struct list_head mdd_list; +}; + +static LIST_HEAD(mdd_devs); +static DEFINE_SPINLOCK(mdd_devs_lock); + +static inline struct usb_gadget *mdd_gadget(struct msu_dvc_dev *mdd) +{ + BUG_ON(!mdd->func); + BUG_ON(!mdd->func->config); + BUG_ON(!mdd->func->config->cdev); + BUG_ON(!mdd->func->config->cdev->gadget); + return mdd->func->config->cdev->gadget; +} + +/* Back-cast to msu_dvc_dev */ +static inline struct msu_dvc_dev *dtc_to_mdd(struct dvct_source_device *p_dtc) +{ + return container_of(p_dtc, struct msu_dvc_dev, ddev); +} + +static ssize_t mdd_min_transfer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->min_transfer); +} + +static ssize_t mdd_min_transfer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + /* 48 represents the size of sync frames that are generated by + * a window switch, from this point on we have "real data" + * Going under this value could result in unneeded switching */ + if (!kstrtou32(buf, 10, &mdd->min_transfer)) { + if (mdd->min_transfer < 48) + mdd->min_transfer = 48; + return count; + } + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_min_transfer); + + +static ssize_t mdd_retry_timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->retry_timeout); +} + +static ssize_t mdd_retry_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + if (!kstrtou32(buf, 10, &mdd->retry_timeout)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_retry_timeout); + +static ssize_t mdd_max_retry_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->max_retry_count); +} + +static ssize_t mdd_max_retry_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + if (!kstrtou32(buf, 10, &mdd->max_retry_count)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_max_retry); + +static ssize_t mdd_transfer_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%d %s\n", mdd->transfer_type, + transfer_type_name[mdd->transfer_type]); +} + +static ssize_t mdd_transfer_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + u8 tmp; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + if (!kstrtou8(buf, 10, &tmp) && tmp <= MDD_TRANSFER_MAX + 
&& tmp >= MDD_TRANSFER_MIN) { + mdd->transfer_type = tmp; + return count; + } + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_transfer_type); + +static ssize_t mdd_proc_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%d %s\n", mdd->process_type, + process_type_name[mdd->process_type]); +} + +static ssize_t mdd_proc_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + u8 tmp; + + MDD_F_DEBUG(); + + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + if (!kstrtou8(buf, 10, &tmp) && tmp <= MDD_PROC_MAX + && tmp >= MDD_PROC_MIN) { + mdd->process_type = tmp; + return count; + } + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_proc_type); + +#ifdef MDD_DEBUG + +static ssize_t mdd_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + int len = 0; + + static const char *const u_speed_names[] = { + [USB_SPEED_UNKNOWN] = "?", + [USB_SPEED_LOW] = "LS", + [USB_SPEED_FULL] = "FS", + [USB_SPEED_HIGH] = "HS", + [USB_SPEED_WIRELESS] = "WR", + [USB_SPEED_SUPER] = "SS", + }; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + len += snprintf(buf + len, PAGE_SIZE - len, "R.count\tR.hits\t"); + + len += snprintf(buf + len, PAGE_SIZE - len, "T.tot_j\tT.tot_ms\t"); + len += + snprintf(buf + len, PAGE_SIZE - len, "D.total\tD.block\tT.stp\t"); + len += snprintf(buf + len, PAGE_SIZE - len, "Tr.type\tProc.type\t"); + + len += snprintf(buf + len, PAGE_SIZE - len, "USB.speed\n"); + + /* Actual values starts here */ + len += + snprintf(buf + len, PAGE_SIZE - len, "%lu\t%lu\t", + mdd->stats.loop_count, mdd->stats.hits); + + len += + snprintf(buf + len, PAGE_SIZE - len, "%lu\t%u\t", + (mdd->stats.work_end - mdd->stats.work_start), + jiffies_to_msecs(mdd->stats.work_end - + mdd->stats.work_start)); + len += + snprintf(buf + len, PAGE_SIZE - len, "%llu\t%llu\t%llu\t", + mdd->stats.full_block_size, mdd->stats.valid_block_size, + mdd->stats.valid_data_size); + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\t", + transfer_type_name[mdd->transfer_type]); + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\t", + process_type_name[mdd->process_type]); + + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\n", + u_speed_names[mdd->stats.speed]); + + return len; +} + +static DEVICE_ATTR_RO(mdd_stats); + +static void init_stats_start(struct msu_dvc_dev *mdd) +{ + mdd->stats.loop_count = 0; + mdd->stats.hits = 0; + + mdd->stats.work_start = jiffies; + + mdd->stats.full_block_size = 0; + mdd->stats.valid_block_size = 0; + mdd->stats.valid_data_size = 0; + + mdd->stats.process_type = mdd->process_type; + mdd->stats.transfer_type = mdd->transfer_type; + mdd->stats.speed = mdd->speed; +} + +#define stats_loop(mdd) ((mdd)->stats.loop_count++) +#define stats_hit(mdd) ((mdd)->stats.hits++) +#else +#define init_stats_start(n) do {} while (0) +#define stats_loop(mdd) do {} while (0) +#define stats_hit(mdd) do {} while (0) +#endif + +static void mdd_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msu_dvc_dev *mdd = (struct msu_dvc_dev *)req->context; + + mdd_lock_transfer(mdd); + + if (req->status != 0) { + mdd_err(mdd, "Usb request error %d\n", req->status); + dvct_clr_status(mdd->dtc_status, 
DVCT_MASK_TRANS); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + } + atomic_set(&mdd->req_ongoing, 0); + wake_up(&mdd->wq); + mdd_unlock_transfer(mdd); +} + +static int mdd_setup_transfer_data(struct msu_dvc_dev *mdd) +{ + int ret = -EINVAL; + + MDD_F_DEBUG(); + + if (!mdd->ep || !mdd->req) { + mdd_err(mdd, "Invalid endpoint data\n"); + goto err; + } + + mdd->tdata.block_count = msc_max_blocks(mdd->th_dev); + if (mdd->tdata.block_count == 0) { + mdd_err(mdd, "Invalid block count %zu\n", + mdd->tdata.block_count); + goto err; + } + + mdd->tdata.block_size = msc_block_max_size(mdd->th_dev); + if (mdd->tdata.block_size == 0) { + mdd_err(mdd, "Invalid block size %zu\n", mdd->tdata.block_size); + goto err; + } + + mdd->tdata.sg_raw = kmalloc_array(mdd->tdata.block_count, + sizeof(*mdd->tdata.sg_raw), + GFP_KERNEL); + if (!mdd->tdata.sg_raw) { + mdd_err(mdd, "Cannot allocate sg memory %zu\n", + mdd->tdata.block_size); + goto err_sg_raw; + } + + if (mdd->process_type != MDD_PROC_NONE) { + mdd->tdata.sg_proc = kmalloc_array(mdd->tdata.block_count, + sizeof(*mdd->tdata.sg_proc), + GFP_KERNEL); + if (!mdd->tdata.sg_proc) { + mdd_err(mdd, "Cannot allocate sg memory %zu\n", + mdd->tdata.block_size); + goto err_sg_proc; + } + mdd->tdata.sg_trans = mdd->tdata.sg_proc; + } else { + mdd->tdata.sg_trans = mdd->tdata.sg_raw; + } + + if (mdd->transfer_type == MDD_TRANSFER_SINGLE) { + mdd->tdata.buffer_len = + mdd->tdata.block_count * mdd->tdata.block_size; + mdd->tdata.buffer = + dma_alloc_coherent(&(mdd_gadget(mdd)->dev), + mdd->tdata.buffer_len, + &mdd->tdata.buffer_dma, GFP_KERNEL); + if (!mdd->tdata.buffer) { + mdd_err(mdd, "Cannot allocate DMA memory\n"); + goto err_l_buf; + } + } else { + mdd->tdata.buffer_sg_len = + mdd->tdata.block_count * mdd->tdata.block_size; + mdd->tdata.buffer_sg = kmalloc(mdd->tdata.buffer_sg_len, GFP_KERNEL); + if(mdd->tdata.buffer_sg == NULL) + mdd->tdata.buffer_sg_len = 0; + + mdd->tdata.buffer = NULL; + mdd->tdata.buffer_dma = 0; + mdd->tdata.buffer_len = 0; + } + return 0; +err_l_buf: + kfree(mdd->tdata.sg_proc); + mdd->tdata.sg_proc = NULL; +err_sg_proc: + kfree(mdd->tdata.sg_raw); + mdd->tdata.sg_raw = NULL; +err_sg_raw: + ret = -ENOMEM; +err: + return ret; +} + +static void mdd_reset_transfer_data(struct msu_dvc_dev *mdd) +{ + MDD_F_DEBUG(); + kfree(mdd->tdata.sg_proc); + mdd->tdata.sg_proc = NULL; + kfree(mdd->tdata.sg_raw); + mdd->tdata.sg_raw = NULL; + if (mdd->tdata.buffer) { + dma_free_coherent(&(mdd_gadget(mdd)->dev), + mdd->tdata.buffer_len, mdd->tdata.buffer, + mdd->tdata.buffer_dma); + mdd->tdata.buffer = NULL; + mdd->tdata.buffer_dma = 0; + mdd->tdata.buffer_len = 0; + } + + if(mdd->tdata.buffer_sg != NULL) + { + mdd->tdata.buffer_sg_len = 0; + kfree(mdd->tdata.buffer_sg); + } +} + +static unsigned mdd_sg_len(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + unsigned ret = 0; + + /*MDD_F_DEBUG(); */ + for_each_sg(sgl, sg, nents, i) { + ret += sg->length; + } + return ret; +} + +static int mdd_send_sg_buffer(struct msu_dvc_dev *mdd, int nents) +{ + size_t transfer_len; + + /*MDD_F_DEBUG(); */ + mdd_lock_transfer(mdd); + transfer_len = + sg_copy_to_buffer(mdd->tdata.sg_trans, nents, mdd->tdata.buffer_sg, + mdd->tdata.buffer_sg_len); + + if (!transfer_len) { + mdd_err(mdd, "Cannot copy into nonsg memory\n"); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + mdd->req->buf = mdd->tdata.buffer_sg; + mdd->req->length = transfer_len; + mdd->req->dma = 0; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + + mdd->req->context = mdd; + 
mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + mdd_unlock_transfer(mdd); + /*wait for done stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + return 0; +} + +static int mdd_send_sg(struct msu_dvc_dev *mdd, int nents) +{ + struct scatterlist *sgl = mdd->tdata.sg_trans; + + if(mdd->tdata.buffer_sg != NULL) + { + return mdd_send_sg_buffer(mdd, nents); + } + + /*MDD_F_DEBUG(); */ + while (nents) { + int trans_ents; + + mdd_lock_transfer(mdd); + + if (nents > MDD_MAX_TRB_CNT) { + trans_ents = MDD_MAX_TRB_CNT; + sg_mark_end(&sgl[trans_ents - 1]); + } else { + trans_ents = nents; + } + + if (trans_ents == 1) { + mdd->req->buf = sg_virt(sgl); + mdd->req->length = sgl->length; + mdd->req->dma = 0; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + } else { + mdd->req->buf = NULL; + mdd->req->length = mdd_sg_len(sgl, trans_ents); + mdd->req->dma = 0; + mdd->req->sg = sgl; + mdd->req->num_sgs = trans_ents; + } + + mdd->req->context = mdd; + mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + nents -= trans_ents; + sgl += trans_ents; + + mdd_unlock_transfer(mdd); + /*wait for done stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + } + return 0; +} + +static int mdd_send_buffer(struct msu_dvc_dev *mdd, int nents) +{ + size_t transfer_len; + + /*MDD_F_DEBUG(); */ + mdd_lock_transfer(mdd); + transfer_len = + sg_copy_to_buffer(mdd->tdata.sg_trans, nents, mdd->tdata.buffer, + mdd->tdata.buffer_len); + if (!transfer_len) { + mdd_err(mdd, "Cannot copy into nonsg memory\n"); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + mdd->req->buf = mdd->tdata.buffer; + mdd->req->length = transfer_len; + mdd->req->dma = mdd->tdata.buffer_dma; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + + mdd->req->context = mdd; + mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + mdd_unlock_transfer(mdd); + /*wait for done stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + return 0; +} + +static int mdd_send_auto(struct msu_dvc_dev *mdd, int nents) +{ + /*MDD_F_DEBUG(); */ + if (!mdd_gadget(mdd)->sg_supported) + return mdd_send_buffer(mdd, nents); + else + return mdd_send_sg(mdd, nents); +} + +static int (*send_funcs[])(struct msu_dvc_dev *, int) = { + [MDD_TRANSFER_AUTO] = mdd_send_auto, + [MDD_TRANSFER_SINGLE] = mdd_send_buffer, + [MDD_TRANSFER_MULTI] = mdd_send_sg, +}; + +#ifdef MDD_DEBUG +static int mdd_proc_add_stats(struct msu_dvc_dev *mdd, int nents) +{ + int i, count; + struct scatterlist *sg; + + /*MDD_F_DEBUG(); */ + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + count = msc_data_sz((struct msc_block_desc *)sg_virt(sg)); + 
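/* full = whole trace block, valid = block descriptor + payload, data = payload only */
+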
mdd->stats.full_block_size += sg->length; + mdd->stats.valid_block_size += (count + MSC_BDESC); + mdd->stats.valid_data_size += count; + } + + return i; +} +#else +#define mdd_proc_add_stats(m, n) do {} while (0) +#endif + +static int mdd_proc_trimmed_blocks(struct msu_dvc_dev *mdd, int nents) +{ + u8 *ptr; + size_t len; + int i, out_cnt = 0; + struct scatterlist *sg, *sg_dest = NULL; + + /*MDD_F_DEBUG(); */ + mdd_proc_add_stats(mdd, nents); + + sg_init_table(mdd->tdata.sg_proc, nents); + + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + ptr = sg_virt(sg); + len = msc_data_sz((struct msc_block_desc *)ptr); + if (!len) { + mdd_err(mdd, "Zero length block"); + continue; + } + + len += MSC_BDESC; + + if (!sg_dest) + sg_dest = mdd->tdata.sg_proc; + else + sg_dest = sg_next(sg_dest); + sg_set_buf(sg_dest, ptr, len); + out_cnt++; + } + if (sg_dest) + sg_mark_end(sg_dest); + + return out_cnt; +} + +static int mdd_proc_stp_only(struct msu_dvc_dev *mdd, int nents) +{ + u8 *ptr; + size_t len; + int i, out_cnt = 0; + struct scatterlist *sg, *sg_dest = NULL; + + /*MDD_F_DEBUG(); */ + mdd_proc_add_stats(mdd, nents); + + sg_init_table(mdd->tdata.sg_proc, nents); + + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + ptr = sg_virt(sg); + len = msc_data_sz((struct msc_block_desc *)ptr); + ptr += MSC_BDESC; + if (!len) { + mdd_err(mdd, "Zero data length block"); + } else { + if (!sg_dest) + sg_dest = mdd->tdata.sg_proc; + else + sg_dest = sg_next(sg_dest); + sg_set_buf(sg_dest, ptr, len); + out_cnt++; + } + } + if (sg_dest) + sg_mark_end(sg_dest); + + return out_cnt; +} + +static int (*proc_funcs[]) (struct msu_dvc_dev *, int) = { +#ifdef MDD_DEBUG + [MDD_PROC_NONE] = mdd_proc_add_stats, +#endif + [MDD_PROC_REM_TAIL] = mdd_proc_trimmed_blocks, + [MDD_PROC_REM_ALL] = mdd_proc_stp_only, +}; + +static void mdd_work(struct work_struct *work) +{ + int nents, current_bytes, retry_cnt = 0; + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(work, struct msu_dvc_dev, work); + init_stats_start(mdd); + + if (mdd_setup_transfer_data(mdd)) { + mdd_err(mdd, "Cannot setup transfer data\n"); + return; + } + mdd_info(mdd, "Start transfer loop\n"); + while (atomic_read(mdd->dtc_status) == DVCT_MASK_ONLINE_TRANS) { + sg_init_table(mdd->tdata.sg_raw, mdd->tdata.block_count); + /* Maybe will be better if msc_sg_oldest_win changes the window + * if the "oldest" one contains data and is the current one..*/ + + preempt_disable(); + current_bytes = msc_current_win_bytes(mdd->th_dev); + if (current_bytes > mdd->min_transfer || + (current_bytes && retry_cnt >= mdd->max_retry_count)) { + msc_switch_window(mdd->th_dev); + nents = msc_sg_oldest_win(mdd->th_dev, + mdd->tdata.sg_raw); + retry_cnt = 0; + } else { + if (unlikely(current_bytes < 0)) { + mdd_warn(mdd, "Unexpected state (%d), switch", + current_bytes); + msc_switch_window(mdd->th_dev); + } else { + if (retry_cnt < mdd->max_retry_count) + retry_cnt++; + } + nents = 0; + } + preempt_enable(); + stats_loop(mdd); + + if (nents < 0) { + mdd_err(mdd, "Cannot get ith data\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + break; + } + + if (nents && proc_funcs[mdd->process_type]) { + nents = proc_funcs[mdd->process_type] (mdd, nents); + if (nents < 0) { + mdd_err(mdd, "Cannot process data\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + break; + } + } + + if (nents) { + stats_hit(mdd); + if (send_funcs[mdd->transfer_type] (mdd, nents)) + break; + } else { + /*wait for stop or timeout */ + wait_event_timeout(mdd->wq, + (atomic_read(mdd->dtc_status) != + 
DVCT_MASK_ONLINE_TRANS), + msecs_to_jiffies(mdd-> + retry_timeout)); + } + } + mdd_info(mdd, "End transfer loop\n"); + if (atomic_read(&mdd->req_ongoing)) { + usb_ep_dequeue(mdd->ep, mdd->req); + atomic_set(&mdd->req_ongoing, 0); + } + +#ifdef MDD_DEBUG + mdd->stats.work_end = jiffies; +#endif + mdd_reset_transfer_data(mdd); +} + +static int mdd_activate(struct dvct_source_device *client, atomic_t *status) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + + mdd->dtc_status = status; + + return 0; +} + +static int mdd_binded(struct dvct_source_device *client, struct usb_ep *ep, + struct usb_function *func) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->ep = ep; + mdd->func = func; + + mdd->req = usb_ep_alloc_request(mdd->ep, GFP_KERNEL); + if (!mdd->req) { + mdd_err(mdd, "Cannot allocate usb request\n"); + return -ENOMEM; + } + return 0; +} + +static void mdd_connected(struct dvct_source_device *client, + enum usb_device_speed speed) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->speed = speed; +} + +union mdd_config { + u8 config; + struct { + u8 enable:1; /* one */ + u8 tr_type:2; + u8 proc_type:2; + } params; +}; + +static int mdd_start_transfer(struct dvct_source_device *client, u8 config) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + union mdd_config cfg; + + MDD_F_DEBUG(); + /* If we share the resources with node base reading this is + * the place where we should lock.*/ + + cfg.config = config; + + if (cfg.params.proc_type <= MDD_PROC_MAX + && cfg.params.proc_type >= MDD_PROC_MIN) { + mdd_info(mdd, "Set process type %d", cfg.params.proc_type); + mdd->process_type = cfg.params.proc_type; + } + + if (cfg.params.tr_type <= MDD_TRANSFER_MAX + && cfg.params.tr_type >= MDD_TRANSFER_MIN) { + mdd_info(mdd, "Set transfer type %d", cfg.params.tr_type); + mdd->transfer_type = cfg.params.tr_type; + } + + /*Force linear buffer transfer if the gadget is not supporting SGs */ + if (mdd->transfer_type != MDD_TRANSFER_SINGLE + && !mdd_gadget(mdd)->sg_supported) { + mdd_info(mdd, "Force linear buffer transfer"); + mdd->transfer_type = MDD_TRANSFER_SINGLE; + } + + dvct_clr_status(mdd->dtc_status, DVCT_MASK_ERR); + dvct_set_status(mdd->dtc_status, DVCT_MASK_TRANS); + queue_work(mdd->wrq, &mdd->work); + return 0; +} + +static int mdd_stop_transfer(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + dvct_clr_status(mdd->dtc_status, DVCT_MASK_TRANS); + wake_up(&mdd->wq); + + return 0; +} + +static void mdd_disconnected(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->speed = USB_SPEED_UNKNOWN; +} + +static void mdd_unbinded(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + + if (mdd->req) { + usb_ep_free_request(mdd->ep, mdd->req); + mdd->req = NULL; + } + mdd->ep = NULL; +} + +static void mdd_deactivate(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->dtc_status = NULL; +} + +/*the driver*/ +static struct dvct_source_driver mdd_drv = { + .activate = mdd_activate, + .binded = mdd_binded, + .connected = mdd_connected, + .start_transfer = mdd_start_transfer, + .stop_transfer = mdd_stop_transfer, + .disconnected = mdd_disconnected, + .unbinded = mdd_unbinded, + .deactivate = mdd_deactivate, + .driver.name = DTC_DRV_NAME, +}; + +static struct msu_dvc_dev *mdd_alloc_device(const char *name) 
+{ + struct msu_dvc_dev *mdd; + + mdd = kzalloc(sizeof(*mdd), GFP_KERNEL); + + if (!mdd) + return ERR_PTR(-ENOMEM); + + mdd->ddev.name_add = kstrdup(name, GFP_KERNEL); + if (!mdd->ddev.name_add) { + kfree(mdd); + return ERR_PTR(-ENOMEM); + } + + /* mdd->ddev.protocol = 0; */ + /* mdd->ddev.desc = NULL; */ + /* mdd->dtc_status = NULL; */ + /* mdd->ep = NULL; */ + mdd->speed = USB_SPEED_UNKNOWN; + /* mdd->msu_dev = NULL; */ + /* mdd->wrq = NULL; */ + mdd->retry_timeout = MDD_RETRY_TIMEOUT_DEF; + mdd->max_retry_count = MDD_MAX_RETRY_CNT_DEF; + /* mdd->req = NULL; */ + atomic_set(&mdd->req_ongoing, 0); + /*mdd->tdata is all NULL */ + mdd->transfer_type = MDD_TRANSFER_AUTO; + mdd->process_type = MDD_PROC_REM_ALL; + mdd->min_transfer = MDD_MIN_TRANSFER_DEF; + + spin_lock_init(&mdd->tdata.lock); + + return mdd; +}; + +static void mdd_free_device(struct msu_dvc_dev *mdd) +{ + kfree(mdd->ddev.name_add); + kfree(mdd); +}; + +static struct attribute *mdd_attrs[] = { + &dev_attr_mdd_min_transfer.attr, + &dev_attr_mdd_retry_timeout.attr, + &dev_attr_mdd_max_retry.attr, + &dev_attr_mdd_transfer_type.attr, + &dev_attr_mdd_proc_type.attr, +#ifdef MDD_DEBUG + &dev_attr_mdd_stats.attr, +#endif + NULL, +}; + +static struct attribute_group mdd_attrs_group = { + .attrs = mdd_attrs, +}; + +void mdd_msc_probe(struct intel_th_device *thdev) +{ + int ret; + struct msu_dvc_dev *mdd; + struct device *dev; + + pr_info("New th-msc device %s", dev_name(&thdev->dev)); + mdd = mdd_alloc_device(dev_name(&thdev->dev)); + + if (IS_ERR_OR_NULL(mdd)) { + pr_err("Cannot allocate device %s (%ld)", dev_name(&thdev->dev), + PTR_ERR(mdd)); + return; + } + + ret = dvct_source_device_add(&mdd->ddev, &mdd_drv); + if (ret) { + pr_err("Cannot register dvc device %d", ret); + mdd_free_device(mdd); + return; + } + + mdd->th_dev = thdev; + dev = &mdd->ddev.device; + + mdd->wrq = alloc_workqueue("%s_workqueue", WQ_MEM_RECLAIM | WQ_HIGHPRI, + 1, dev_name(&mdd->ddev.device)); + if (!mdd->wrq) { + mdd_err(mdd, "Cannot allocate work queue\n"); + mdd_free_device(mdd); + return; + } + + INIT_WORK(&mdd->work, mdd_work); + + init_waitqueue_head(&mdd->wq); + + /*Attributes */ + ret = sysfs_create_group(&dev->kobj, &mdd_attrs_group); + if (ret) + mdd_warn(mdd, "Cannot add attribute group %d\n", ret); + + ret = sysfs_create_link(&dev->kobj, &thdev->dev.kobj, "msc"); + if (ret) + mdd_warn(mdd, "Cannot add msc link %d\n", ret); + + spin_lock(&mdd_devs_lock); + list_add(&mdd->mdd_list, &mdd_devs); + spin_unlock(&mdd_devs_lock); +} + +void mdd_msc_remove(struct intel_th_device *thdev) +{ + struct msu_dvc_dev *mdd = NULL; + struct msu_dvc_dev *mdd_iter = NULL; + + spin_lock(&mdd_devs_lock); + list_for_each_entry(mdd_iter, &mdd_devs, mdd_list) { + if (mdd_iter->th_dev == thdev) + mdd = mdd_iter; + } + + if (!mdd) { + pr_err("No such mdd device, %s", dev_name(&thdev->dev)); + spin_unlock(&mdd_devs_lock); + return; + } + list_del(&mdd->mdd_list); + + spin_unlock(&mdd_devs_lock); + + flush_workqueue(mdd->wrq); + destroy_workqueue(mdd->wrq); + + sysfs_remove_group(&mdd->ddev.device.kobj, &mdd_attrs_group); + sysfs_remove_link(&mdd->ddev.device.kobj, "msc"); + + mdd->wrq = NULL; + + dvct_source_device_del(&mdd->ddev); + mdd_free_device(mdd); +} + +struct msc_probe_rem_cb mdd_msc_cbs = { + .probe = mdd_msc_probe, + .remove = mdd_msc_remove, +}; + +static int __init msu_dvc_init(void) +{ + int ret; + + MDD_F_DEBUG(); + ret = dvct_source_driver_register(&mdd_drv); + if (ret) { + pr_err("Cannot register dvc driver %d", ret); + return ret; + } + + 
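/* announce MSC devices that already exist and subscribe to future probe/remove events */
+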
msc_register_callbacks(mdd_msc_cbs); + return 0; +} + +static void __exit msu_dvc_exit(void) +{ + MDD_F_DEBUG(); + msc_unregister_callbacks(); + dvct_source_driver_unregister(&mdd_drv); +} + +module_init(msu_dvc_init); +module_exit(msu_dvc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Traian Schiau "); diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index dfb57eaa9f22..32ded8110a90 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -31,6 +31,9 @@ #include #endif +#include +#include + #include "intel_th.h" #include "msu.h" @@ -97,6 +100,7 @@ struct msc_iter { * @single_wrap: single mode wrap occurred * @base: buffer's base pointer * @base_addr: buffer's base address + * @nwsa: next window start address backup * @user_count: number of users of the buffer * @mmap_count: number of mappings * @buf_mutex: mutex to serialize access to buffer-related bits @@ -106,6 +110,8 @@ struct msc_iter { * @mode: MSC operating mode * @burst_len: write burst length * @index: number of this MSC in the MSU + * + * @max_blocks: Maximum number of blocks in a window */ struct msc { void __iomem *reg_base; @@ -117,6 +123,7 @@ struct msc { unsigned int single_wrap : 1; void *base; dma_addr_t base_addr; + unsigned long nwsa; /* <0: no buffer, 0: no users, >0: active users */ atomic_t user_count; @@ -132,8 +139,99 @@ struct msc { unsigned int mode; unsigned int burst_len; unsigned int index; + unsigned int max_blocks; +}; + +static struct msc_probe_rem_cb msc_probe_rem_cb; + +struct msc_device_instance { + struct list_head list; + struct intel_th_device *thdev; }; +static LIST_HEAD(msc_dev_instances); +static DEFINE_MUTEX(msc_dev_reg_lock); +/** + * msc_register_callbacks() + * @cbs + */ +int msc_register_callbacks(struct msc_probe_rem_cb cbs) +{ + struct msc_device_instance *it; + + mutex_lock(&msc_dev_reg_lock); + + msc_probe_rem_cb.probe = cbs.probe; + msc_probe_rem_cb.remove = cbs.remove; + /* Call the probe callback for the already existing ones*/ + list_for_each_entry(it, &msc_dev_instances, list) { + cbs.probe(it->thdev); + } + + mutex_unlock(&msc_dev_reg_lock); + return 0; +} +EXPORT_SYMBOL_GPL(msc_register_callbacks); + +/** + * msc_unregister_callbacks() + */ +void msc_unregister_callbacks(void) +{ + mutex_lock(&msc_dev_reg_lock); + + msc_probe_rem_cb.probe = NULL; + msc_probe_rem_cb.remove = NULL; + + mutex_unlock(&msc_dev_reg_lock); +} +EXPORT_SYMBOL_GPL(msc_unregister_callbacks); + +static void msc_add_instance(struct intel_th_device *thdev) +{ + struct msc_device_instance *instance; + + instance = kmalloc(sizeof(*instance), GFP_KERNEL); + if (!instance) + return; + + mutex_lock(&msc_dev_reg_lock); + + instance->thdev = thdev; + list_add(&instance->list, &msc_dev_instances); + + if (msc_probe_rem_cb.probe) + msc_probe_rem_cb.probe(thdev); + + mutex_unlock(&msc_dev_reg_lock); +} + +static void msc_rm_instance(struct intel_th_device *thdev) +{ + struct msc_device_instance *instance = NULL, *it; + + mutex_lock(&msc_dev_reg_lock); + + if (msc_probe_rem_cb.remove) + msc_probe_rem_cb.remove(thdev); + + list_for_each_entry(it, &msc_dev_instances, list) { + if (it->thdev == thdev) { + instance = it; + break; + } + } + + if (instance) { + list_del(&instance->list); + kfree(instance); + } else { + pr_warn("msu: cannot remove %p (not found)", thdev); + } + + mutex_unlock(&msc_dev_reg_lock); +} + static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) { /* header hasn't been written */ @@ -147,6 +245,37 @@ static inline bool 
msc_block_is_empty(struct msc_block_desc *bdesc) return false; } +/** + * msc_current_window() - locate the window in use + * @msc: MSC device + * + * This should only be used in multiblock mode. Caller should hold the + * msc::user_count reference. + * + * Return: the current output window + */ +static struct msc_window *msc_current_window(struct msc *msc) +{ + struct msc_window *win, *prev = NULL; + /* The BAR never changes, so the current window is the one before the next */ + u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT; + + if (list_empty(&msc->win_list)) + return NULL; + + list_for_each_entry(win, &msc->win_list, entry) { + if (win->block[0].addr == win_addr) + break; + prev = win; + } + if (!prev) + prev = list_entry(msc->win_list.prev, struct msc_window, entry); + + return prev; +} + + /** * msc_oldest_window() - locate the window with oldest data * @msc: MSC device @@ -159,20 +288,26 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) static struct msc_window *msc_oldest_window(struct msc *msc) { struct msc_window *win; - u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); - unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT; unsigned int found = 0; + unsigned long nwsa; if (list_empty(&msc->win_list)) return NULL; + if (msc->enabled) { + u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + + nwsa = (unsigned long)reg << PAGE_SHIFT; + } else { + nwsa = msc->nwsa; + } /* * we might need a radix tree for this, depending on how * many windows a typical user would allocate; ideally it's * something like 2, in which case we're good */ list_for_each_entry(win, &msc->win_list, entry) { - if (win->block[0].addr == win_addr) + if (win->block[0].addr == nwsa) found++; /* skip the empty ones */ @@ -215,6 +350,160 @@ static unsigned int msc_win_oldest_block(struct msc_window *win) return 0; } +/** + * msc_max_blocks() - get the maximum number of blocks + * @thdev: the sub-device + * + * Return: the maximum number of blocks per window + */ +unsigned int msc_max_blocks(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + + return msc->max_blocks; +} +EXPORT_SYMBOL_GPL(msc_max_blocks); + +/** + * msc_block_max_size() - get the size of the biggest block + * @thdev: the sub-device + * + * Return: the size of the biggest block + */ +unsigned int msc_block_max_size(struct intel_th_device *thdev) +{ + return PAGE_SIZE; +} +EXPORT_SYMBOL_GPL(msc_block_max_size); + +/** + * msc_switch_window() - perform a window switch + * @thdev: the sub-device + */ +int msc_switch_window(struct intel_th_device *thdev) +{ + intel_th_trace_switch(thdev); + return 0; +} +EXPORT_SYMBOL_GPL(msc_switch_window); + +/** + * msc_current_win_bytes() - get the current window data size + * @thdev: the sub-device + * + * Get the number of valid data bytes in the current window. + * Based on this the dvc-source part can decide to request a window switch.
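+ *
+ * Return: the number of valid payload bytes in the current window, or a
+ * negative error code if the MSC is not capturing in multi-window mode.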
+ */ +int msc_current_win_bytes(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + struct msc_window *win; + u32 reg_mwp, blk, offset, i; + int size = 0; + + /* proceed only if actively storing in muli-window mode */ + if (!msc->enabled || + (msc->mode != MSC_MODE_MULTI) || + !atomic_inc_unless_negative(&msc->user_count)) + return -EINVAL; + + win = msc_current_window(msc); + reg_mwp = ioread32(msc->reg_base + REG_MSU_MSC0MWP); + + if (!win) { + atomic_dec(&msc->user_count); + return -EINVAL; + } + + blk = 0; + while (blk < win->nr_blocks) { + if (win->block[blk].addr == (reg_mwp & PAGE_MASK)) + break; + blk++; + } + + if (blk >= win->nr_blocks) { + atomic_dec(&msc->user_count); + return -EINVAL; + } + + offset = (reg_mwp & (PAGE_SIZE - 1)); + + + /*if wrap*/ + if (msc_block_wrapped(win->block[blk].bdesc)) { + for (i = blk+1; i < win->nr_blocks; i++) + size += msc_data_sz(win->block[i].bdesc); + } + + for (i = 0; i < blk; i++) + size += msc_data_sz(win->block[i].bdesc); + + /*finaly the current one*/ + size += (offset - MSC_BDESC); + + atomic_dec(&msc->user_count); + return size; +} +EXPORT_SYMBOL_GPL(msc_current_win_bytes); + +/** + * msc_sg_oldest_win() - get the data from the oldest window + * @thdev: the sub-device + * @sg_array: destination sg array + * + * Return: sg count + */ +int msc_sg_oldest_win(struct intel_th_device *thdev, + struct scatterlist *sg_array) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + struct msc_window *win, *c_win; + struct msc_block_desc *bdesc; + unsigned int blk, sg = 0; + + /* proceed only if actively storing in muli-window mode */ + if (!msc->enabled || + (msc->mode != MSC_MODE_MULTI) || + !atomic_inc_unless_negative(&msc->user_count)) + return -EINVAL; + + win = msc_oldest_window(msc); + if (!win) + return 0; + + c_win = msc_current_window(msc); + + if (win == c_win) + return 0; + + blk = msc_win_oldest_block(win); + + /* start with the first block containing only oldest data */ + if (msc_block_wrapped(win->block[blk].bdesc)) + if (++blk == win->nr_blocks) + blk = 0; + + do { + bdesc = win->block[blk].bdesc; + sg_set_buf(&sg_array[sg++], bdesc, PAGE_SIZE); + + if (bdesc->hw_tag & MSC_HW_TAG_ENDBIT) + break; + + if (++blk == win->nr_blocks) + blk = 0; + + } while (sg <= win->nr_blocks); + + sg_mark_end(&sg_array[sg - 1]); + + atomic_dec(&msc->user_count); + + return sg; +} +EXPORT_SYMBOL_GPL(msc_sg_oldest_win); + /** * msc_is_last_win() - check if a window is the last one for a given MSC * @win: window @@ -486,9 +775,9 @@ static void msc_buffer_clear_hw_header(struct msc *msc) * msc_configure() - set up MSC hardware * @msc: the MSC device to configure * - * Program storage mode, wrapping, burst length and trace buffer address - * into a given MSC. Then, enable tracing and set msc::enabled. - * The latter is serialized on msc::buf_mutex, so make sure to hold it. + * Program all relevant registers for a given MSC. + * Programming registers must be delayed until this stage since the hardware + * will be reset before a capture is started. 
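Stepping back to the msc_register_callbacks() interface introduced earlier in this file: a minimal sketch (an assumed consumer, not part of this patch) of how a client such as the msu-dvc glue might register for MSC hotplug notifications. Devices that already exist at registration time get a probe() call from inside msc_register_callbacks(), so the client does not need to scan for them separately.

    #include <linux/module.h>
    #include <linux/printk.h>
    #include "intel_th.h"
    #include "msu.h"

    static void example_msc_probe(struct intel_th_device *thdev)
    {
            pr_info("MSC available, up to %u blocks per window\n",
                    msc_max_blocks(thdev));
    }

    static void example_msc_remove(struct intel_th_device *thdev)
    {
            pr_info("MSC going away\n");
    }

    static const struct msc_probe_rem_cb example_cbs = {
            .probe  = example_msc_probe,
            .remove = example_msc_remove,
    };

    static int __init example_init(void)
    {
            /* the callback struct is passed by value, as in the msu-dvc module */
            return msc_register_callbacks(example_cbs);
    }

    static void __exit example_exit(void)
    {
            msc_unregister_callbacks();
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL v2");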
*/ static int msc_configure(struct msc *msc) { @@ -523,10 +812,8 @@ static int msc_configure(struct msc *msc) iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; - intel_th_trace_enable(msc->thdev); msc->enabled = 1; - return 0; } @@ -539,23 +826,14 @@ static int msc_configure(struct msc *msc) */ static void msc_disable(struct msc *msc) { - unsigned long count; u32 reg; lockdep_assert_held(&msc->buf_mutex); intel_th_trace_disable(msc->thdev); - for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; - count && !(reg & MSCSTS_PLE); count--) { - reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); - cpu_relax(); - } - - if (!count) - dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); - if (msc->mode == MSC_MODE_SINGLE) { + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); @@ -564,6 +842,10 @@ static void msc_disable(struct msc *msc) reg, msc->single_sz, msc->single_wrap); } + /* Save next window start address before disabling */ + reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + msc->nwsa = (unsigned long)reg << PAGE_SHIFT; + reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); reg &= ~MSC_EN; iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); @@ -572,8 +854,7 @@ static void msc_disable(struct msc *msc) iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE); - dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n", - ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); + dev_dbg(msc_dev(msc), "MSCnNWSA: %08lx\n", msc->nwsa); reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); @@ -741,7 +1022,7 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, + dma_free_coherent(msc_dev(msc)->parent->parent, size, win->block[i].bdesc, win->block[i].addr); } kfree(win); @@ -777,7 +1058,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, win->block[i].bdesc, win->block[i].addr); } @@ -1069,16 +1350,19 @@ static int intel_th_msc_release(struct inode *inode, struct file *file) } static ssize_t -msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) +msc_single_to_user(void *in_buf, unsigned long in_pages, + unsigned long in_sz, bool wrapped, + char __user *buf, loff_t off, size_t len) { - unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; + unsigned long size = in_pages << PAGE_SHIFT, rem = len; unsigned long start = off, tocopy = 0; - if (msc->single_wrap) { - start += msc->single_sz; + /* With wrapping, copy the end of the buffer first */ + if (wrapped) { + start += in_sz; if (start < size) { tocopy = min(rem, size - start); - if (copy_to_user(buf, msc->base + start, tocopy)) + if (copy_to_user(buf, in_buf + start, tocopy)) return -EFAULT; buf += tocopy; @@ -1087,21 +1371,17 @@ msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) } start &= size - 1; - if (rem) { - tocopy = min(rem, msc->single_sz - start); - if (copy_to_user(buf, msc->base + start, tocopy)) - return -EFAULT; - - rem -= tocopy; - } - - return 
len - rem; } + /* Copy the beginning of the buffer */ + if (rem) { + tocopy = min(rem, in_sz - start); + if (copy_to_user(buf, in_buf + start, tocopy)) + return -EFAULT; - if (copy_to_user(buf, msc->base + start, rem)) - return -EFAULT; + rem -= tocopy; + } - return len; + return len - rem; } static ssize_t intel_th_msc_read(struct file *file, char __user *buf, @@ -1131,8 +1411,10 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, len = size - off; if (msc->mode == MSC_MODE_SINGLE) { - ret = msc_single_to_user(msc, buf, off, len); - if (ret >= 0) + ret = msc_single_to_user(msc->base, msc->nr_pages, + msc->single_sz, msc->single_wrap, + buf, off, len); + if (ret > 0) *ppos += ret; } else if (msc->mode == MSC_MODE_MULTI) { struct msc_win_to_user_struct u = { @@ -1258,6 +1540,283 @@ static const struct file_operations intel_th_msc_fops = { .owner = THIS_MODULE, }; +static void msc_wait_ple(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + unsigned long count; + u32 reg; + + for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; + count && !(reg & MSCSTS_PLE); count--) { + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); + cpu_relax(); + } + + if (!count) + dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); +} + +#ifdef CONFIG_ACPI +#define ACPI_SIG_NPKT "NPKT" + +/* Buffers that may be handed through NPKT ACPI table */ +enum NPKT_BUF_TYPE { + NPKT_MTB = 0, + NPKT_MTB_REC, + NPKT_CSR, + NPKT_CSR_REC, + NPKT_NBUF +}; +static const char * const npkt_buf_name[NPKT_NBUF] = { + [NPKT_MTB] = "mtb", + [NPKT_MTB_REC] = "mtb_rec", + [NPKT_CSR] = "csr", + [NPKT_CSR_REC] = "csr_rec" +}; + +/* CSR capture still active */ +#define NPKT_CSR_USED BIT(4) + +struct acpi_npkt_buf { + u64 addr; + u32 size; + u32 offset; +}; + +/* NPKT ACPI table */ +struct acpi_table_npkt { + struct acpi_table_header header; + struct acpi_npkt_buf buffers[NPKT_NBUF]; + u8 flags; +} __packed; + +/* Trace buffer obtained from NPKT table */ +struct npkt_buf { + dma_addr_t phy; + void *buf; + u32 size; + u32 offset; + bool wrapped; + atomic_t active; + struct msc *msc; +}; + +static struct npkt_buf *npkt_bufs; +static struct dentry *npkt_dump_dir; +static DEFINE_MUTEX(npkt_lock); + +/** + * Stop current trace if a buffer was marked with a capture in pogress. + * + * Update buffer write offset and wrap status after stopping the trace. + */ +static void stop_buffer_trace(struct npkt_buf *buf) +{ + u32 reg, mode; + struct msc *msc = buf->msc; + + mutex_lock(&npkt_lock); + if (!atomic_read(&buf->active)) + goto unlock; + + reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); + mode = (reg & MSC_MODE) >> __ffs(MSC_MODE); + if (!(reg & MSC_EN) || mode != MSC_MODE_SINGLE) { + /* Assume full buffer */ + pr_warn("NPKT reported CSR in use but not tracing to CSR\n"); + buf->offset = 0; + buf->wrapped = true; + atomic_set(&buf->active, 0); + goto unlock; + } + + /* The hub must be able to stop a capture not started by the driver */ + intel_th_trace_disable(msc->thdev); + + /* Update offset and wrap status */ + reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); + buf->offset = reg - (u32)buf->phy; + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); + buf->wrapped = !!(reg & MSCSTS_WRAPSTAT); + atomic_set(&buf->active, 0); + +unlock: + mutex_unlock(&npkt_lock); +} + +/** + * Copy re-ordered data from an NPKT buffer to a user buffer. 
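A self-contained sketch of the copy order that the reworked msc_single_to_user() (and the NPKT dump path that now reuses it) implements for a wrapped buffer: the oldest data sits between the write offset and the end of the buffer, the newest between the start of the buffer and the write offset. The names and the plain-memory setting are illustrative; the real function copies to user space and honours the caller's offset and length.

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Linearize a capture buffer, oldest bytes first; returns bytes written. */
    static size_t example_linearize(const char *buf, size_t size,
                                    size_t write_off, bool wrapped, char *out)
    {
            size_t n = 0;

            if (wrapped) {
                    /* tail: from the write offset to the end of the buffer */
                    memcpy(out, buf + write_off, size - write_off);
                    n = size - write_off;
            }

            /* head: from the start of the buffer up to the write offset */
            memcpy(out + n, buf, write_off);
            return n + write_off;
    }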
+ */ +static ssize_t read_npkt_dump_buf(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct npkt_buf *buf = file->private_data; + size_t size = buf->size; + loff_t off = *ppos; + ssize_t ret; + + if (atomic_read(&buf->active)) + stop_buffer_trace(buf); + + if (off >= size) + return 0; + + ret = msc_single_to_user(buf->buf, size >> PAGE_SHIFT, + buf->offset, buf->wrapped, + user_buf, off, count); + if (ret > 0) + *ppos += ret; + + return ret; +} + +static const struct file_operations npkt_dump_buf_fops = { + .read = read_npkt_dump_buf, + .open = simple_open, + .llseek = noop_llseek, +}; + +/** + * Prepare a buffer with remapped address for a given NPKT buffer and add + * an entry for it in debugfs. + */ +static void npkt_bind_buffer(enum NPKT_BUF_TYPE type, + struct acpi_npkt_buf *abuf, u8 flags, + struct npkt_buf *buf, struct msc *msc) +{ + const char *name = npkt_buf_name[type]; + + /* No buffer handed through ACPI */ + if (!abuf->addr || !abuf->size) + return; + + /* Only expect multiples of page size */ + if (abuf->size & (PAGE_SIZE - 1)) { + pr_warn("invalid size 0x%x for buffer %s\n", + abuf->size, name); + return; + } + + buf->size = abuf->size; + buf->offset = abuf->offset; + buf->wrapped = !!(flags & BIT(type)); + /* CSR may still be active */ + if (type == NPKT_CSR && (flags & NPKT_CSR_USED)) { + atomic_set(&buf->active, 1); + buf->msc = msc; + } + + buf->phy = abuf->addr; + buf->buf = (__force void *)ioremap(buf->phy, buf->size); + if (!buf->buf) { + pr_err("ioremap failed for buffer %s 0x%llx size:0x%x\n", + name, buf->phy, buf->size); + return; + } + + debugfs_create_file(name, S_IRUGO, npkt_dump_dir, buf, + &npkt_dump_buf_fops); +} + +static void npkt_bind_buffers(struct acpi_table_npkt *npkt, + struct npkt_buf *bufs, struct msc *msc) +{ + int i; + + for (i = 0; i < NPKT_NBUF; i++) + npkt_bind_buffer(i, &npkt->buffers[i], npkt->flags, + &bufs[i], msc); +} + +static void npkt_unbind_buffers(struct npkt_buf *bufs) +{ + int i; + + for (i = 0; i < NPKT_NBUF; i++) + if (bufs[i].buf) + iounmap((__force void __iomem *)bufs[i].buf); +} + +/** + * Prepare debugfs access to NPKT buffers. + */ +static void intel_th_npkt_init(struct msc *msc) +{ + acpi_status status; + struct acpi_table_npkt *npkt; + + /* Associate NPKT to msc0 */ + if (npkt_bufs || msc->index != 0) + return; + + status = acpi_get_table(ACPI_SIG_NPKT, 0, + (struct acpi_table_header **)&npkt); + if (ACPI_FAILURE(status)) { + pr_warn("Failed to get NPKT table, %s\n", + acpi_format_exception(status)); + return; + } + + npkt_bufs = kzalloc(sizeof(struct npkt_buf) * NPKT_NBUF, GFP_KERNEL); + if (!npkt_bufs) + return; + + npkt_dump_dir = debugfs_create_dir("npkt_dump", NULL); + if (!npkt_dump_dir) { + pr_err("npkt_dump debugfs create dir failed\n"); + goto free_npkt_bufs; + } + + npkt_bind_buffers(npkt, npkt_bufs, msc); + + return; + +free_npkt_bufs: + kfree(npkt_bufs); + npkt_bufs = NULL; +} + +/** + * Remove debugfs access to NPKT buffers and release resources. + */ +static void intel_th_npkt_remove(struct msc *msc) +{ + /* Only clean for msc 0 if necessary */ + if (!npkt_bufs || msc->index != 0) + return; + + npkt_unbind_buffers(npkt_bufs); + debugfs_remove_recursive(npkt_dump_dir); + kfree(npkt_bufs); + npkt_bufs = NULL; +} + +/** + * First trace callback. + * + * If NPKT notified a CSR capture is in progress, stop it and update buffer + * write offset and wrap status. 
+ */ +static void intel_th_msc_first_trace(struct intel_th_device *thdev) +{ + struct device *dev = &thdev->dev; + struct msc *msc = dev_get_drvdata(dev); + struct npkt_buf *buf; + + if (!npkt_bufs || msc->index != 0) + return; + + buf = &npkt_bufs[NPKT_CSR]; + if (atomic_read(&buf->active)) + stop_buffer_trace(buf); +} + +#else /* !CONFIG_ACPI */ +static inline void intel_th_npkt_init(struct msc *msc) {} +static inline void intel_th_npkt_remove(struct msc *msc) {} +#define intel_th_msc_first_trace NULL +#endif /* !CONFIG_ACPI */ + static int intel_th_msc_init(struct msc *msc) { atomic_set(&msc->user_count, -1); @@ -1271,6 +1830,8 @@ static int intel_th_msc_init(struct msc *msc) (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >> __ffs(MSC_LEN); + msc->thdev->output.wait_empty = msc_wait_ple; + return 0; } @@ -1394,6 +1955,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; + msc->max_blocks = 0; + /* scan the comma-separated list of allocation sizes */ end = memchr(buf, '\n', len); if (end) @@ -1428,6 +1991,9 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, win = rewin; win[nr_wins - 1] = val; + msc->max_blocks = + (val > msc->max_blocks) ? val : msc->max_blocks; + if (!end) break; @@ -1447,10 +2013,32 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(nr_pages); +static ssize_t +win_switch_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct msc *msc = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val != 1) + return -EINVAL; + + intel_th_trace_switch(msc->thdev); + return size; +} + +static DEVICE_ATTR_WO(win_switch); + static struct attribute *msc_output_attrs[] = { &dev_attr_wrap.attr, &dev_attr_mode.attr, &dev_attr_nr_pages.attr, + &dev_attr_win_switch.attr, NULL, }; @@ -1487,28 +2075,25 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (err) return err; + msc->max_blocks = 0; dev_set_drvdata(dev, msc); + intel_th_npkt_init(msc); + msc_add_instance(thdev); + return 0; } static void intel_th_msc_remove(struct intel_th_device *thdev) { struct msc *msc = dev_get_drvdata(&thdev->dev); - int ret; - - intel_th_msc_deactivate(thdev); - - /* - * Buffers should not be used at this point except if the - * output character device is still open and the parent - * device gets detached from its bus, which is a FIXME. 
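The new win_switch attribute gives user space a way to force the same window switch that msc_switch_window() performs in kernel space. A hedged usage sketch follows; the sysfs path depends on how the output device is named on a given system, so the one in the comment is only an example.

    #include <fcntl.h>
    #include <unistd.h>

    /* Write "1" to the MSC's win_switch attribute to force a window switch. */
    static int example_force_window_switch(const char *attr_path)
    {
            /* e.g. attr_path = "/sys/bus/intel_th/devices/0-msc0/win_switch" */
            int fd = open(attr_path, O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;

            n = write(fd, "1", 1);  /* only "1" is accepted, anything else is -EINVAL */
            close(fd);
            return n == 1 ? 0 : -1;
    }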
- */ - ret = msc_buffer_free_unless_used(msc); - WARN_ON_ONCE(ret); + intel_th_npkt_remove(msc); + msc_rm_instance(thdev); + sysfs_remove_group(&thdev->dev.kobj, &msc_output_group); } static struct intel_th_driver intel_th_msc_driver = { + .first_trace = intel_th_msc_first_trace, .probe = intel_th_msc_probe, .remove = intel_th_msc_remove, .activate = intel_th_msc_activate, diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h index 9b710e4aa98a..bd6933b80e68 100644 --- a/drivers/hwtracing/intel_th/msu.h +++ b/drivers/hwtracing/intel_th/msu.h @@ -16,6 +16,8 @@ #ifndef __INTEL_TH_MSU_H__ #define __INTEL_TH_MSU_H__ +#include "intel_th.h" + enum { REG_MSU_MSUPARAMS = 0x0000, REG_MSU_MSUSTS = 0x0008, @@ -113,4 +115,19 @@ static inline bool msc_block_last_written(struct msc_block_desc *bdesc) /* waiting for Pipeline Empty bit(s) to assert for MSC */ #define MSC_PLE_WAITLOOP_DEPTH 10000 +/* API */ +struct msc_probe_rem_cb { + void (*probe)(struct intel_th_device *thdev); + void (*remove)(struct intel_th_device *thdev); +}; + +int msc_register_callbacks(struct msc_probe_rem_cb cbs); +void msc_unregister_callbacks(void); +unsigned int msc_max_blocks(struct intel_th_device *thdev); +unsigned int msc_block_max_size(struct intel_th_device *thdev); +int msc_switch_window(struct intel_th_device *thdev); +int msc_sg_oldest_win(struct intel_th_device *thdev, + struct scatterlist *sg_array); +int msc_current_win_bytes(struct intel_th_device *thdev); + #endif /* __INTEL_TH_MSU_H__ */ diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c2a2ce8ee541..b48d54a75f51 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -69,6 +69,35 @@ static void intel_th_pci_deactivate(struct intel_th *th) if (err) dev_err(&pdev->dev, "failed to read NPKDSC register\n"); } +/* + * PCI Configuration Registers + */ +enum { + REG_PCI_NPKDSC = 0x80, /* NPK Device Specific Control */ + REG_PCI_NPKDSD = 0x90, /* NPK Device Specific Defeature */ +}; + +/* Trace Hub software reset */ +#define NPKDSC_RESET BIT(1) + +/* Force On */ +#define NPKDSD_FON BIT(0) + +static void intel_th_pci_reset(struct intel_th *th) +{ + struct pci_dev *pdev = container_of(th->dev, struct pci_dev, dev); + u32 val; + + /* Software reset */ + pci_read_config_dword(pdev, REG_PCI_NPKDSC, &val); + val |= NPKDSC_RESET; + pci_write_config_dword(pdev, REG_PCI_NPKDSC, val); + + /* Always set FON for S0ix flow */ + pci_read_config_dword(pdev, REG_PCI_NPKDSD, &val); + val |= NPKDSD_FON; + pci_write_config_dword(pdev, REG_PCI_NPKDSD, val); +} static int intel_th_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -86,7 +115,7 @@ static int intel_th_pci_probe(struct pci_dev *pdev, return err; th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource, - DEVICE_COUNT_RESOURCE, pdev->irq); + DEVICE_COUNT_RESOURCE, pdev->irq, intel_th_pci_reset); if (IS_ERR(th)) return PTR_ERR(th); @@ -173,11 +202,34 @@ static const struct pci_device_id intel_th_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, intel_th_pci_id_table); +static int intel_th_suspend(struct device *dev) +{ + /* + * Stub the call to avoid disabling the device. + * Suspend is fully handled by firmwares. + */ + return 0; +} + +static int intel_th_resume(struct device *dev) +{ + /* Firmwares have already restored the device state. 
*/ + return 0; +} + +static const struct dev_pm_ops intel_th_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(intel_th_suspend, + intel_th_resume) +}; + static struct pci_driver intel_th_pci_driver = { .name = DRIVER_NAME, .id_table = intel_th_pci_id_table, .probe = intel_th_pci_probe, .remove = intel_th_pci_remove, + .driver = { + .pm = &intel_th_pm_ops, + }, }; module_pci_driver(intel_th_pci_driver); diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c index e96a1fcb57b2..10f08f7b67d1 100644 --- a/drivers/hwtracing/intel_th/pti.c +++ b/drivers/hwtracing/intel_th/pti.c @@ -169,8 +169,6 @@ static int intel_th_pti_activate(struct intel_th_device *thdev) iowrite32(ctl, pti->base + REG_PTI_CTL); - intel_th_trace_enable(thdev); - return 0; } diff --git a/drivers/hwtracing/stm/console.c b/drivers/hwtracing/stm/console.c index c9d9a8d2ff52..00f7bbbeab79 100644 --- a/drivers/hwtracing/stm/console.c +++ b/drivers/hwtracing/stm/console.c @@ -39,8 +39,22 @@ static void stm_console_write(struct console *con, const char *buf, unsigned len) { struct stm_console *sc = container_of(con, struct stm_console, console); + static char svenbuf[1024]; + char *p = svenbuf; + unsigned int towrite; + u16 textlen; + const u32 sven_header = 0x01000242; - stm_source_write(&sc->data, 0, buf, len); + textlen = min_t(u16, len, 1024 - sizeof(sven_header) - sizeof(textlen)); + towrite = textlen + sizeof(sven_header) + sizeof(textlen); + + memcpy(p, &sven_header, sizeof(sven_header)); + p += sizeof(sven_header); + memcpy(p, &textlen, sizeof(textlen)); + p += sizeof(textlen); + memcpy(p, buf, textlen); + + stm_source_write(&sc->data, 0, svenbuf, towrite); } static int stm_console_link(struct stm_source_data *data) diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index cd07a69e2e93..44deae78913e 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -50,6 +50,9 @@ #define BCM2835_I2C_S_CLKT BIT(9) #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ +#define BCM2835_I2C_FEDL_SHIFT 16 +#define BCM2835_I2C_REDL_SHIFT 0 + #define BCM2835_I2C_CDIV_MIN 0x0002 #define BCM2835_I2C_CDIV_MAX 0xFFFE @@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg) static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) { - u32 divider; + u32 divider, redl, fedl; divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), i2c_dev->bus_clk_rate); @@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); + /* + * Number of core clocks to wait after falling edge before + * outputting the next data bit. Note that both FEDL and REDL + * can't be greater than CDIV/2. + */ + fedl = max(divider / 16, 1u); + + /* + * Number of core clocks to wait after rising edge before + * sampling the next incoming data bit. 
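A worked example of the FEDL/REDL delay programming in this hunk (the numbers are illustrative, not taken from the patch): with a 150 MHz core clock and a 100 kHz bus, divider = 1500, so FEDL = max(1500 / 16, 1) = 93 and REDL = max(1500 / 4, 1) = 375, both comfortably under CDIV/2 = 750, and the DEL register is written as (93 << 16) | 375.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t divider = 1500;        /* illustrative: 150 MHz / 100 kHz */
            uint32_t fedl = divider / 16 > 1 ? divider / 16 : 1;
            uint32_t redl = divider / 4 > 1 ? divider / 4 : 1;

            /* FEDL occupies bits 31:16, REDL bits 15:0 of the DEL register */
            printf("FEDL=%u REDL=%u DEL=0x%08x\n",
                   fedl, redl, (fedl << 16) | redl);
            return 0;
    }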
+ */ + redl = max(divider / 4, 1u); + + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL, + (fedl << BCM2835_I2C_FEDL_SHIFT) | + (redl << BCM2835_I2C_REDL_SHIFT)); return 0; } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 418c233075d3..13e849bf9aa0 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -207,7 +207,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) i2c_dw_disable_int(dev); /* Enable the adapter */ - __i2c_dw_enable(dev, true); + __i2c_dw_enable_and_wait(dev, true); /* Clear and enable interrupts */ dw_readl(dev, DW_IC_CLR_INTR); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 9e12a53ef7b8..8eac00efadc1 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) /* Default timeout in interrupt mode: 200 ms */ priv->adapter.timeout = HZ / 5; + if (dev->irq == IRQ_NOTCONNECTED) + priv->features &= ~FEATURE_IRQ; + if (priv->features & FEATURE_IRQ) { u16 pcictl, pcists; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index d4a6e9c2e9aa..124f9b1cf1b0 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -887,6 +887,11 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) } setup = of_device_get_match_data(&pdev->dev); + if (!setup) { + dev_err(&pdev->dev, "Can't get device data\n"); + ret = -ENODEV; + goto clk_free; + } i2c_dev->setup = *setup; ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c index 31186ead5a40..509a6007cdf6 100644 --- a/drivers/i2c/i2c-boardinfo.c +++ b/drivers/i2c/i2c-boardinfo.c @@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig property_entries_dup(info->properties); if (IS_ERR(devinfo->board_info.properties)) { status = PTR_ERR(devinfo->board_info.properties); + kfree(devinfo); break; } } @@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig GFP_KERNEL); if (!devinfo->board_info.resources) { status = -ENOMEM; + kfree(devinfo); break; } } diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 10f00a82ec9d..e54a9b835b62 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -396,16 +396,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, the underlying bus driver */ break; case I2C_SMBUS_I2C_BLOCK_DATA: + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, "Invalid block %s size %d\n", + read_write == I2C_SMBUS_READ ? 
"read" : "write", + data->block[0]); + return -EINVAL; + } + if (read_write == I2C_SMBUS_READ) { msg[1].len = data->block[0]; } else { msg[0].len = data->block[0] + 1; - if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) { - dev_err(&adapter->dev, - "Invalid block write size %d\n", - data->block[0]); - return -EINVAL; - } for (i = 1; i <= data->block[0]; i++) msgbuf0[i] = data->block[i]; } diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 14d1e7d9a1d6..0e6bc631a1ca 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive) struct request *rq = drive->hwif->rq; unsigned long wait = 0; - debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); + debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]); /* * Some commands are *slow* and normally take a long time to complete. @@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) return ide_do_reset(drive); } - debug_log("[cmd %x]: check condition\n", rq->cmd[0]); + debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]); /* Retry operation */ ide_retry_pc(drive); @@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) ide_pad_transfer(drive, write, bcount); debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", - rq->cmd[0], done, bcount, scsi_req(rq)->resid_len); + scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len); /* And set the interrupt handler again */ ide_set_handler(drive, ide_pc_intr, timeout); diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c index 98fbb628d5bd..38411e1c155b 100644 --- a/drivers/iio/accel/kxsd9-i2c.c +++ b/drivers/iio/accel/kxsd9-i2c.c @@ -63,3 +63,6 @@ static struct i2c_driver kxsd9_i2c_driver = { .id_table = kxsd9_i2c_id, }; module_i2c_driver(kxsd9_i2c_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("KXSD9 accelerometer I2C interface"); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 752856b3a849..bef1f96c177c 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -164,7 +164,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x25, .mask_ihl = 0x02, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -236,7 +239,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -318,7 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x23, .mask_ihl = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, .ig1 = { .en_addr = 0x23, .en_mask = 0x08, @@ -389,7 +398,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = 0x21, .mask_int1 = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -451,7 +463,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = 
ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -569,7 +584,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = 0x21, .mask_int1 = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -640,7 +658,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x25, .mask_ihl = 0x02, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -773,7 +794,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) if (!pdata) pdata = (struct st_sensors_platform_data *)&default_accel_pdata; - err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) goto st_accel_power_off; diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c index 6e419d5a7c14..f153e02686a0 100644 --- a/drivers/iio/adc/cpcap-adc.c +++ b/drivers/iio/adc/cpcap-adc.c @@ -1012,7 +1012,7 @@ static int cpcap_adc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, indio_dev); ddata->irq = platform_get_irq_byname(pdev, "adcdone"); - if (!ddata->irq) + if (ddata->irq < 0) return -ENODEV; error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL, diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index f387b972e4f4..59f99b3a180d 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -44,7 +44,6 @@ #define INA226_MASK_ENABLE 0x06 #define INA226_CVRF BIT(3) -#define INA219_CNVR BIT(1) #define INA2XX_MAX_REGISTERS 8 @@ -79,6 +78,11 @@ #define INA226_ITS_MASK GENMASK(5, 3) #define INA226_SHIFT_ITS(val) ((val) << 3) +/* INA219 Bus voltage register, low bits are flags */ +#define INA219_OVF BIT(0) +#define INA219_CNVR BIT(1) +#define INA219_BUS_VOLTAGE_SHIFT 3 + /* Cosmetic macro giving the sampling period for a full P=UxI cycle */ #define SAMPLING_PERIOD(c) ((c->int_time_vbus + c->int_time_vshunt) \ * c->avg) @@ -112,7 +116,7 @@ struct ina2xx_config { u16 config_default; int calibration_factor; int shunt_div; - int bus_voltage_shift; + int bus_voltage_shift; /* position of lsb */ int bus_voltage_lsb; /* uV */ int power_lsb; /* uW */ enum ina2xx_ids chip_id; @@ -135,7 +139,7 @@ static const struct ina2xx_config ina2xx_config[] = { .config_default = INA219_CONFIG_DEFAULT, .calibration_factor = 40960000, .shunt_div = 100, - .bus_voltage_shift = 3, + .bus_voltage_shift = INA219_BUS_VOLTAGE_SHIFT, .bus_voltage_lsb = 4000, .power_lsb = 20000, .chip_id = ina219, @@ -170,6 +174,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev, else *val = regval; + if (chan->address == INA2XX_BUS_VOLTAGE) + *val >>= chip->config->bus_voltage_shift; + return IIO_VAL_INT; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: @@ -203,9 +210,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev, return IIO_VAL_FRACTIONAL; case INA2XX_BUS_VOLTAGE: - /* processed (mV) = raw*lsb (uV) / (1000 << shift) */ + /* processed (mV) = raw * lsb (uV) / 1000 */ *val = chip->config->bus_voltage_lsb; - *val2 = 1000 << chip->config->bus_voltage_shift; + *val2 = 1000; return IIO_VAL_FRACTIONAL; case INA2XX_POWER: @@ -532,7 +539,7 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev, * Sampling Freq is a consequence of the integration times of * 
the Voltage channels. */ -#define INA219_CHAN_VOLTAGE(_index, _address) { \ +#define INA219_CHAN_VOLTAGE(_index, _address, _shift) { \ .type = IIO_VOLTAGE, \ .address = (_address), \ .indexed = 1, \ @@ -544,7 +551,8 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev, .scan_index = (_index), \ .scan_type = { \ .sign = 'u', \ - .realbits = 16, \ + .shift = _shift, \ + .realbits = 16 - _shift, \ .storagebits = 16, \ .endianness = IIO_LE, \ } \ @@ -579,8 +587,8 @@ static const struct iio_chan_spec ina226_channels[] = { }; static const struct iio_chan_spec ina219_channels[] = { - INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE), - INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE), + INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE, 0), + INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE, INA219_BUS_VOLTAGE_SHIFT), INA219_CHAN(IIO_POWER, 2, INA2XX_POWER), INA219_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT), IIO_CHAN_SOFT_TIMESTAMP(4), diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 2e8dbb89c8c9..11484cb38b84 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel { struct meson_sar_adc_data { bool has_bl30_integration; + u32 bandgap_reg; unsigned int resolution; const char *name; + const struct regmap_config *regmap_config; }; struct meson_sar_adc_priv { @@ -242,13 +244,20 @@ struct meson_sar_adc_priv { int calibscale; }; -static const struct regmap_config meson_sar_adc_regmap_config = { +static const struct regmap_config meson_sar_adc_regmap_config_gxbb = { .reg_bits = 8, .val_bits = 32, .reg_stride = 4, .max_register = MESON_SAR_ADC_REG13, }; +static const struct regmap_config meson_sar_adc_regmap_config_meson8 = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = MESON_SAR_ADC_DELTA_10, +}; + static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); @@ -453,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev) regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); - if (timeout < 0) + if (timeout < 0) { + mutex_unlock(&indio_dev->mlock); return -ETIMEDOUT; + } } return 0; @@ -600,7 +611,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, init.num_parents = 1; priv->clk_gate.reg = base + MESON_SAR_ADC_REG3; - priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN); + priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN); priv->clk_gate.hw.init = &init; priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw); @@ -685,6 +696,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev) return 0; } +static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off) +{ + struct meson_sar_adc_priv *priv = iio_priv(indio_dev); + u32 enable_mask; + + if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11) + enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN; + else + enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN; + + regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask, + on_off ? 
enable_mask : 0); +} + static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); @@ -717,9 +742,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, - MESON_SAR_ADC_REG11_BANDGAP_EN); + + meson_sar_adc_set_bandgap(indio_dev, true); + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, MESON_SAR_ADC_REG3_ADC_EN); @@ -739,8 +764,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) err_adc_clk: regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, 0); + meson_sar_adc_set_bandgap(indio_dev, false); clk_disable_unprepare(priv->sana_clk); err_sana_clk: clk_disable_unprepare(priv->core_clk); @@ -765,8 +789,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev) regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, 0); + + meson_sar_adc_set_bandgap(indio_dev, false); clk_disable_unprepare(priv->sana_clk); clk_disable_unprepare(priv->core_clk); @@ -845,30 +869,40 @@ static const struct iio_info meson_sar_adc_iio_info = { static const struct meson_sar_adc_data meson_sar_adc_meson8_data = { .has_bl30_integration = false, + .bandgap_reg = MESON_SAR_ADC_DELTA_10, + .regmap_config = &meson_sar_adc_regmap_config_meson8, .resolution = 10, .name = "meson-meson8-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = { .has_bl30_integration = false, + .bandgap_reg = MESON_SAR_ADC_DELTA_10, + .regmap_config = &meson_sar_adc_regmap_config_meson8, .resolution = 10, .name = "meson-meson8b-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 10, .name = "meson-gxbb-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxl_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 12, .name = "meson-gxl-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxm_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 12, .name = "meson-gxm-saradc", }; @@ -946,7 +980,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev) return ret; priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, - &meson_sar_adc_regmap_config); + priv->data->regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c index 47d24ae5462f..fe3d7826783c 100644 --- a/drivers/iio/adc/qcom-vadc-common.c +++ b/drivers/iio/adc/qcom-vadc-common.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "qcom-vadc-common.h" @@ -229,3 +230,6 @@ int qcom_vadc_decimation_from_dt(u32 value) return __ffs64(value / VADC_DECIMATION_MIN); } EXPORT_SYMBOL(qcom_vadc_decimation_from_dt); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm ADC common 
functionality"); diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 4df32cf1650e..04be8bd951be 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -764,8 +764,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) int ret; u32 val; - /* Clear ADRDY by writing one, then enable ADC */ - stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); /* Poll for ADRDY to be set (after adc startup time) */ @@ -773,8 +771,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) val & STM32H7_ADRDY, 100, STM32_ADC_TIMEOUT_US); if (ret) { - stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); + stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS); dev_err(&indio_dev->dev, "Failed to enable ADC\n"); + } else { + /* Clear ADRDY by writing one */ + stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); } return ret; @@ -1314,6 +1315,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) { struct stm32_adc *adc = iio_priv(indio_dev); unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2; + unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE; /* * dma cyclic transfers are used, buffer is split into two periods. @@ -1322,7 +1324,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) * - one buffer (period) driver can push with iio_trigger_poll(). */ watermark = min(watermark, val * (unsigned)(sizeof(u16))); - adc->rx_buf_sz = watermark * 2; + adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv); return 0; } diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index e0dc20488335..9ac2fb032df6 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -369,6 +369,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]); conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); + conv_time += conv_time / 10; /* 10% internal clock inaccuracy */ usleep_range(conv_time, conv_time + 1); data->conv_invalid = false; } diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 840a6cbd5f0f..9dd0e1cd93dd 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -91,7 +91,6 @@ static const struct iio_chan_spec ccs811_channels[] = { .channel2 = IIO_MOD_CO2, .modified = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .scan_index = 0, .scan_type = { @@ -129,6 +128,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client) if (ret < 0) return ret; + if ((ret & CCS811_STATUS_FW_MODE_APPLICATION)) + return 0; + if ((ret & CCS811_STATUS_APP_VALID_MASK) != CCS811_STATUS_APP_VALID_LOADED) return -EIO; @@ -245,24 +247,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev, switch (chan->channel2) { case IIO_MOD_CO2: *val = 0; - *val2 = 12834; + *val2 = 100; return IIO_VAL_INT_PLUS_MICRO; case IIO_MOD_VOC: *val = 0; - *val2 = 84246; - return IIO_VAL_INT_PLUS_MICRO; + *val2 = 100; + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } default: return -EINVAL; } - case IIO_CHAN_INFO_OFFSET: - if (!(chan->type == IIO_CONCENTRATION && - chan->channel2 == IIO_MOD_CO2)) - return -EINVAL; - *val = -400; - return IIO_VAL_INT; default: return -EINVAL; } diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 02e833b14db0..34115f05d5c4 100644 --- 
a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -470,7 +470,7 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) * different one. Take into account irq status register * to understand if irq trigger can be properly supported */ - if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) + if (sdata->sensor_settings->drdy_irq.stat_drdy.addr) sdata->hw_irq_trigger = enable; return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index fa73e6795359..fdcc5a891958 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -31,7 +31,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, int ret; /* How would I know if I can't check it? */ - if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy) + if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) return -EINVAL; /* No scan mask, no interrupt */ @@ -39,23 +39,15 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, return 0; ret = sdata->tf->read_byte(&sdata->tb, sdata->dev, - sdata->sensor_settings->drdy_irq.addr_stat_drdy, + sdata->sensor_settings->drdy_irq.stat_drdy.addr, &status); if (ret < 0) { dev_err(sdata->dev, "error checking samples available\n"); return ret; } - /* - * the lower bits of .active_scan_mask[0] is directly mapped - * to the channels on the sensor: either bit 0 for - * one-dimensional sensors, or e.g. x,y,z for accelerometers, - * gyroscopes or magnetometers. No sensor use more than 3 - * channels, so cut the other status bits here. - */ - status &= 0x07; - if (status & (u8)indio_dev->active_scan_mask[0]) + if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask) return 1; return 0; @@ -212,7 +204,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, * it was "our" interrupt. */ if (sdata->int_pin_open_drain && - sdata->sensor_settings->drdy_irq.addr_stat_drdy) + sdata->sensor_settings->drdy_irq.stat_drdy.addr) irq_trig |= IRQF_SHARED; err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index e366422e8512..2536a8400c98 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -118,7 +118,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -188,7 +191,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -253,7 +259,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. 
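In short, the conversion from addr_stat_drdy to a stat_drdy { addr, mask } pair lets st_sensors_new_samples_available() check the status register against a per-sensor mask instead of the previously hardcoded low three bits: 0x07 covers the X/Y/Z data-available flags of the accelerometers, gyroscopes and magnetometers, while the pressure sensors converted further down use 0x03 for their pressure and temperature flags. A trimmed sketch of the resulting check, with illustrative names:

    #include <linux/types.h>

    /* Illustrative: does this IRQ carry new samples for this sensor? */
    static bool example_samples_ready(u8 status, u8 stat_drdy_mask)
    {
            /* stat_drdy_mask is 0x07 for 3-axis sensors, 0x03 for pressure */
            return status & stat_drdy_mask;
    }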
*/ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 839b875c29b9..3ac25b21cbfc 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -329,20 +329,31 @@ static int max30102_read_temp(struct max30102_data *data, int *val) return 0; } -static int max30102_get_temp(struct max30102_data *data, int *val) +static int max30102_get_temp(struct max30102_data *data, int *val, bool en) { int ret; + if (en) { + ret = max30102_set_powermode(data, true); + if (ret) + return ret; + } + /* start acquisition */ ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG, MAX30102_REG_TEMP_CONFIG_TEMP_EN, MAX30102_REG_TEMP_CONFIG_TEMP_EN); if (ret) - return ret; + goto out; msleep(35); + ret = max30102_read_temp(data, val); + +out: + if (en) + max30102_set_powermode(data, false); - return max30102_read_temp(data, val); + return ret; } static int max30102_read_raw(struct iio_dev *indio_dev, @@ -355,23 +366,22 @@ static int max30102_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: /* - * Temperature reading can only be acquired while engine - * is running + * Temperature reading can only be acquired when not in + * shutdown; leave shutdown briefly when buffer not running */ mutex_lock(&indio_dev->mlock); - if (!iio_buffer_enabled(indio_dev)) - ret = -EBUSY; - else { - ret = max30102_get_temp(data, val); - if (!ret) - ret = IIO_VAL_INT; - } - + ret = max30102_get_temp(data, val, true); + else + ret = max30102_get_temp(data, val, false); mutex_unlock(&indio_dev->mlock); + if (ret) + return ret; + + ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = 1; /* 0.0625 */ + *val = 1000; /* 62.5 */ *val2 = 16; ret = IIO_VAL_FRACTIONAL; break; diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index f53e9a803a0e..93b99bd93738 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) if (adis->trig == NULL) return -ENOMEM; + adis->trig->dev.parent = &adis->spi->dev; + adis->trig->ops = &adis_trigger_ops; + iio_trigger_set_drvdata(adis->trig, adis); + ret = request_irq(adis->spi->irq, &iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING, @@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) if (ret) goto error_free_trig; - adis->trig->dev.parent = &adis->spi->dev; - adis->trig->ops = &adis_trigger_ops; - iio_trigger_set_drvdata(adis->trig, adis); ret = iio_trigger_register(adis->trig); indio_dev->trig = iio_trigger_get(adis->trig); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index b485540da89e..cce0c93accef 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -392,7 +392,7 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, st_lsm6dsx_sensor_disable(sensor); - *val = (s16)data; + *val = (s16)le16_to_cpu(data); return IIO_VAL_INT; } diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index d2b465140a6b..78482d456c3b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -175,7 +175,7 @@ unsigned int iio_buffer_poll(struct file *filp, struct iio_dev *indio_dev = filp->private_data; struct 
iio_buffer *rb = indio_dev->buffer; - if (!indio_dev->info) + if (!indio_dev->info || rb == NULL) return 0; poll_wait(filp, &rb->pollq, wait); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index a47428b4d31b..e565fd4fc414 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, * iio_format_value() - Formats a IIO value into its string representation * @buf: The buffer to which the formatted value gets written * which is assumed to be big enough (i.e. PAGE_SIZE). - * @type: One of the IIO_VAL_... constants. This decides how the val + * @type: One of the IIO_VAL_* constants. This decides how the val * and val2 parameters are formatted. * @size: Number of IIO value entries contained in vals * @vals: Pointer to the values, exact meaning depends on the @@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, * * Return: 0 by default, a negative number on failure or the * total number of characters written for a type that belongs - * to the IIO_VAL_... constant. + * to the IIO_VAL_* constant. */ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) { diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 08aafba4481c..19031a7bce23 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -317,7 +317,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { }, .drdy_irq = { /* drdy line is routed drdy pin */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -361,7 +364,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { .drdy_irq = { .addr = 0x62, .mask_int1 = 0x01, - .addr_stat_drdy = 0x67, + .stat_drdy = { + .addr = 0x67, + .mask = 0x07, + }, }, .multi_read_bit = false, .bootime = 2, diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c index 37ba007f8dca..74831fcd0313 100644 --- a/drivers/iio/multiplexer/iio-mux.c +++ b/drivers/iio/multiplexer/iio-mux.c @@ -285,6 +285,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, child->ext_info_cache = devm_kzalloc(dev, sizeof(*child->ext_info_cache) * num_ext_info, GFP_KERNEL); + if (!child->ext_info_cache) + return -ENOMEM; + for (i = 0; i < num_ext_info; ++i) { child->ext_info_cache[i].size = -1; @@ -309,6 +312,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1, GFP_KERNEL); + if (!child->ext_info_cache[i].data) + return -ENOMEM; + child->ext_info_cache[i].data[ret] = 0; child->ext_info_cache[i].size = ret; } diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 34611a8ea2ce..ec5ca03529b5 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -287,7 +287,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = true, .bootime = 2, @@ -395,7 +398,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, 
.addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = true, .bootime = 2, @@ -454,7 +460,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x12, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = false, .bootime = 2, @@ -608,7 +617,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) if (!pdata && press_data->sensor_settings->drdy_irq.addr) pdata = (struct st_sensors_platform_data *)&default_press_pdata; - err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) goto st_press_power_off; diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index ae070950f920..c03de24d3c51 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig @@ -58,6 +58,8 @@ config SX9500 config SRF08 tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER depends on I2C help Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index f42b3a1c75ff..dba796c06ba6 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -871,6 +871,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev) static void sx9500_gpio_probe(struct i2c_client *client, struct sx9500_data *data) { + struct gpio_desc *gpiod_int; struct device *dev; if (!client) @@ -878,6 +879,14 @@ static void sx9500_gpio_probe(struct i2c_client *client, dev = &client->dev; + if (client->irq <= 0) { + gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN); + if (IS_ERR(gpiod_int)) + dev_err(dev, "gpio get irq failed\n"); + else + client->irq = gpiod_to_irq(gpiod_int); + } + data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH); if (IS_ERR(data->gpiod_rst)) { dev_warn(dev, "gpio get reset pin failed\n"); diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 12523f630b61..40475ebf3a61 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -207,6 +207,22 @@ int rdma_addr_size(struct sockaddr *addr) } EXPORT_SYMBOL(rdma_addr_size); +int rdma_addr_size_in6(struct sockaddr_in6 *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_in6); + +int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_kss); + static struct rdma_addr_client self; void rdma_addr_register_client(struct rdma_addr_client *client) @@ -597,6 +613,15 @@ static void process_one_req(struct work_struct *_work) list_del(&req->list); mutex_unlock(&lock); + /* + * Although the work will normally have been canceled by the + * workqueue, it can still be requeued as long as it is on the + * req_list, so it could have been requeued before we grabbed &lock. + * We need to cancel it after it is removed from req_list to really be + * sure it is safe to free. 
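The new rdma_addr_size_in6() and rdma_addr_size_kss() helpers return the address size only when it fits the destination structure, and 0 otherwise, so callers can bound-check a user-supplied sockaddr in one step. A hedged sketch of the intended calling pattern (the caller is illustrative, and the header carrying the new declarations is assumed; the actual users are elsewhere in the tree):

    #include <linux/string.h>
    #include <rdma/ib_addr.h>   /* assumed to declare rdma_addr_size_in6() */

    static int example_copy_user_addr(struct sockaddr_in6 *dst,
                                      struct sockaddr_in6 *src)
    {
            int len = rdma_addr_size_in6(src);

            /* 0 means an unknown address family or a size larger than *dst */
            if (!len)
                    return -EINVAL;

            memcpy(dst, src, len);
            return 0;
    }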
+ */ + cancel_delayed_work(&req->work); + req->callback(req->status, (struct sockaddr *)&req->src_addr, req->addr, req->context); put_client(req->client); @@ -852,7 +877,7 @@ static struct notifier_block nb = { int addr_init(void) { - addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); + addr_wq = alloc_ordered_workqueue("ib_addr", 0); if (!addr_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 4c4b46586af2..2af79e4f3235 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work, param->bth_pkey = cm_get_bth_pkey(work); param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; - if (req_msg->alt_local_lid) + if (cm_req_has_alt_path(req_msg)) param->alternate_path = &work->path[1]; else param->alternate_path = NULL; @@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work) cm_process_routed_req(req_msg, work->mad_recv_wc->wc); memset(&work->path[0], 0, sizeof(work->path[0])); - memset(&work->path[1], 0, sizeof(work->path[1])); + if (cm_req_has_alt_path(req_msg)) + memset(&work->path[1], 0, sizeof(work->path[1])); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); ret = ib_get_cached_gid(work->port->cm_dev->ib_device, work->port->port_num, @@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; + bool alt_path = false; u16 attr_id; int paths = 0; int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: - paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> - alt_local_lid != 0); + alt_path = cm_req_has_alt_path((struct cm_req_msg *) + mad_recv_wc->recv_buf.mad); + paths = 1 + (alt_path != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 852c8fec8088..25de7cc9f49f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net, INIT_LIST_HEAD(&id_priv->mc_list); get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = get_net(net); + id_priv->seq_num &= 0x00ffffff; return &id_priv->id; } @@ -1540,7 +1541,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, return id_priv; } -static inline int cma_user_data_offset(struct rdma_id_private *id_priv) +static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) { return cma_family(id_priv) == AF_IB ? 
0 : sizeof(struct cma_hdr); } @@ -1942,7 +1943,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event; struct net_device *net_dev; - int offset, ret; + u8 offset; + int ret; listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); if (IS_ERR(listen_id)) @@ -3015,7 +3017,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, continue; /* different dest port -> unique */ - if (!cma_any_port(cur_daddr) && + if (!cma_any_port(daddr) && + !cma_any_port(cur_daddr) && (dport != cur_dport)) continue; @@ -3026,7 +3029,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, continue; /* different dst address -> unique */ - if (!cma_any_addr(cur_daddr) && + if (!cma_any_addr(daddr) && + !cma_any_addr(cur_daddr) && cma_addr_cmp(daddr, cur_daddr)) continue; @@ -3324,13 +3328,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) } #endif } + daddr = cma_dst_addr(id_priv); + daddr->sa_family = addr->sa_family; + ret = cma_get_port(id_priv); if (ret) goto err2; - daddr = cma_dst_addr(id_priv); - daddr->sa_family = addr->sa_family; - return 0; err2: if (id_priv->cma_dev) @@ -3440,7 +3444,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, struct ib_cm_sidr_req_param req; struct ib_cm_id *id; void *private_data; - int offset, ret; + u8 offset; + int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); @@ -3497,7 +3502,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, struct rdma_route *route; void *private_data; struct ib_cm_id *id; - int offset, ret; + u8 offset; + int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); @@ -4114,6 +4120,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, struct cma_multicast *mc; int ret; + if (!id->device) + return -EINVAL; + id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) @@ -4432,7 +4441,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) goto out; if (ibnl_put_attr(skb, nlh, - rdma_addr_size(cma_src_addr(id_priv)), + rdma_addr_size(cma_dst_addr(id_priv)), cma_dst_addr(id_priv), RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) goto out; @@ -4444,6 +4453,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) id_stats->qp_type = id->qp_type; i_id++; + nlmsg_end(skb, nlh); } cb->args[1] = 0; @@ -4458,7 +4468,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static const struct rdma_nl_cbs cma_cb_table[] = { +static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = { [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, }; diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index a1d687a664f8..66f0268f37a6 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, } #endif -struct ib_device *__ib_device_get_by_index(u32 ifindex); +struct ib_device *ib_device_get_by_index(u32 ifindex); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 84fc32a2c8b3..4dff06ab771e 100644 --- a/drivers/infiniband/core/device.c +++ 
b/drivers/infiniband/core/device.c @@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device) return 0; } -struct ib_device *__ib_device_get_by_index(u32 index) +static struct ib_device *__ib_device_get_by_index(u32 index) { struct ib_device *device; @@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index) return NULL; } +/* + * Caller is responsible to return refrerence count by calling put_device() + */ +struct ib_device *ib_device_get_by_index(u32 index) +{ + struct ib_device *device; + + down_read(&lists_rwsem); + device = __ib_device_get_by_index(index); + if (device) + get_device(&device->dev); + + up_read(&lists_rwsem); + return device; +} + static struct ib_device *__ib_device_get_by_name(const char *name) { struct ib_device *device; @@ -446,7 +462,6 @@ int ib_register_device(struct ib_device *device, struct ib_udata uhw = {.outlen = 0, .inlen = 0}; struct device *parent = device->dev.parent; - WARN_ON_ONCE(!parent); WARN_ON_ONCE(device->dma_device); if (device->dev.dma_ops) { /* @@ -455,16 +470,25 @@ int ib_register_device(struct ib_device *device, * into device->dev. */ device->dma_device = &device->dev; - if (!device->dev.dma_mask) - device->dev.dma_mask = parent->dma_mask; - if (!device->dev.coherent_dma_mask) - device->dev.coherent_dma_mask = - parent->coherent_dma_mask; + if (!device->dev.dma_mask) { + if (parent) + device->dev.dma_mask = parent->dma_mask; + else + WARN_ON_ONCE(true); + } + if (!device->dev.coherent_dma_mask) { + if (parent) + device->dev.coherent_dma_mask = + parent->coherent_dma_mask; + else + WARN_ON_ONCE(true); + } } else { /* * The caller did not provide custom DMA operations. Use the * DMA mapping operations of the parent device. */ + WARN_ON_ONCE(!parent); device->dma_device = parent; } @@ -510,14 +534,14 @@ int ib_register_device(struct ib_device *device, ret = device->query_device(device, &device->attrs, &uhw); if (ret) { pr_warn("Couldn't query the device attributes\n"); - goto cache_cleanup; + goto cg_cleanup; } ret = ib_device_register_sysfs(device, port_callback); if (ret) { pr_warn("Couldn't register device %s with driver model\n", device->name); - goto cache_cleanup; + goto cg_cleanup; } device->reg_state = IB_DEV_REGISTERED; @@ -533,6 +557,8 @@ int ib_register_device(struct ib_device *device, mutex_unlock(&device_mutex); return 0; +cg_cleanup: + ib_device_unregister_rdmacg(device); cache_cleanup: ib_cache_cleanup_one(device); ib_cache_release_one(device); @@ -1146,7 +1172,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, } EXPORT_SYMBOL(ib_get_net_dev_by_params); -static const struct rdma_nl_cbs ibnl_ls_cb_table[] = { +static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { [RDMA_NL_LS_OP_RESOLVE] = { .doit = ib_nl_handle_resolve_resp, .flags = RDMA_NL_ADMIN_PERM, @@ -1253,5 +1279,5 @@ static void __exit ib_core_cleanup(void) MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); -module_init(ib_core_init); +subsys_initcall(ib_core_init); module_exit(ib_core_cleanup); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index fcf42f6bb82a..30d7277249b8 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason) } EXPORT_SYMBOL(iwcm_reject_msg); -static struct rdma_nl_cbs iwcm_nl_cb_table[] = { +static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = { [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = 
iwpm_add_mapping_cb}, [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 3c4faadb8cdd..81528f64061a 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -654,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) } skb_num++; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); + ret = -EINVAL; for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], hlist_node) { diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index f8f53bb90837..cb91245e9163 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, unsigned long flags; int ret; + INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); ret = ib_mad_enforce_security(mad_agent_priv, mad_recv_wc->wc->pkey_index); if (ret) { ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); + return; } - INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 2fae850a3eff..0dcd1aa6f683 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(index); + device = ib_device_get_by_index(index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!msg) { + err = -ENOMEM; + goto err; + } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); err = fill_dev_info(msg, device); - if (err) { - nlmsg_free(msg); - return err; - } + if (err) + goto err_free; nlmsg_end(msg, nlh); + put_device(&device->dev); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + +err_free: + nlmsg_free(msg); +err: + put_device(&device->dev); + return err; } static int _nldev_get_dumpit(struct ib_device *device, @@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(index); + device = ib_device_get_by_index(index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); - if (!rdma_is_port_valid(device, port)) - return -EINVAL; + if (!rdma_is_port_valid(device, port)) { + err = -EINVAL; + goto err; + } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!msg) { + err = -ENOMEM; + goto err; + } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); err = fill_port_info(msg, device, port); - if (err) { - nlmsg_free(msg); - return err; - } + if (err) + goto err_free; nlmsg_end(msg, nlh); + put_device(&device->dev); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + +err_free: + nlmsg_free(msg); +err: + put_device(&device->dev); + return err; } static int nldev_port_get_dumpit(struct sk_buff *skb, @@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, return -EINVAL; ifindex = 
nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(ifindex); + device = ib_device_get_by_index(ifindex); if (!device) return -EINVAL; @@ -299,11 +315,13 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, nlmsg_end(skb, nlh); } -out: cb->args[0] = idx; +out: + put_device(&device->dev); + cb->args[0] = idx; return skb->len; } -static const struct rdma_nl_cbs nldev_cb_table[] = { +static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, .dump = nldev_get_dumpit, diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 85b5ee4defa4..9cb801d1fe54 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -196,7 +196,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t goto free; } - uverbs_uobject_get(uobj); + /* + * The idr_find is guaranteed to return a pointer to something that + * isn't freed yet, or NULL, as the free after idr_remove goes through + * kfree_rcu(). However the object may still have been released and + * kfree() could be called at any time. + */ + if (!kref_get_unless_zero(&uobj->ref)) + uobj = ERR_PTR(-ENOENT); + free: rcu_read_unlock(); return uobj; @@ -399,13 +407,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj, return ret; } -static void lockdep_check(struct ib_uobject *uobj, bool exclusive) +static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive) { #ifdef CONFIG_LOCKDEP if (exclusive) - WARN_ON(atomic_read(&uobj->usecnt) > 0); + WARN_ON(atomic_read(&uobj->usecnt) != -1); else - WARN_ON(atomic_read(&uobj->usecnt) == -1); + WARN_ON(atomic_read(&uobj->usecnt) <= 0); #endif } @@ -444,7 +452,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj) WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); return 0; } - lockdep_check(uobj, true); + assert_uverbs_usecnt(uobj, true); ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); up_read(&ucontext->cleanup_rwsem); @@ -474,7 +482,7 @@ int rdma_explicit_destroy(struct ib_uobject *uobject) WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); return 0; } - lockdep_check(uobject, true); + assert_uverbs_usecnt(uobject, true); ret = uobject->type->type_class->remove_commit(uobject, RDMA_REMOVE_DESTROY); if (ret) @@ -561,7 +569,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive) void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) { - lockdep_check(uobj, exclusive); + assert_uverbs_usecnt(uobj, exclusive); uobj->type->type_class->lookup_put(uobj, exclusive); /* * In order to unlock an object, either decrease its usecnt for diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 88bdafb297f5..59b2f96d986a 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey, if (ret) return ret; - if (qp_sec->qp == qp_sec->qp->real_qp) { - list_for_each_entry(shared_qp_sec, - &qp_sec->shared_qp_list, - shared_qp_list) { - ret = security_ib_pkey_access(shared_qp_sec->security, - subnet_prefix, - pkey); - if (ret) - return ret; - } + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; } return 0; } @@ -388,6 +386,9 @@ int 
ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) if (ret) return ret; + if (!qp->qp_sec) + return 0; + mutex_lock(&real_qp->qp_sec->mutex); ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, qp->qp_sec); @@ -419,8 +420,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec) int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) { + u8 i = rdma_start_port(dev); + bool is_ib = false; int ret; + while (i <= rdma_end_port(dev) && !is_ib) + is_ib = rdma_protocol_ib(dev, i++); + + /* If this isn't an IB device don't create the security context */ + if (!is_ib) + return 0; + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); if (!qp->qp_sec) return -ENOMEM; @@ -443,6 +453,10 @@ EXPORT_SYMBOL(ib_create_qp_security); void ib_destroy_qp_security_begin(struct ib_qp_security *sec) { + /* Return if not IB */ + if (!sec) + return; + mutex_lock(&sec->mutex); /* Remove the QP from the lists so it won't get added to @@ -472,6 +486,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec) int ret; int i; + /* Return if not IB */ + if (!sec) + return; + /* If a concurrent cache update is in progress this * QP security could be marked for an error state * transition. Wait for this to complete. @@ -507,6 +525,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec) { int i; + /* Return if not IB */ + if (!sec) + return; + /* If a concurrent cache update is occurring we must * wait until this QP security structure is processed * in the QP to error flow before destroying it because @@ -559,19 +581,35 @@ int ib_security_modify_qp(struct ib_qp *qp, { int ret = 0; struct ib_ports_pkeys *tmp_pps; - struct ib_ports_pkeys *new_pps; - bool special_qp = (qp->qp_type == IB_QPT_SMI || - qp->qp_type == IB_QPT_GSI || - qp->qp_type >= IB_QPT_RESERVED1); + struct ib_ports_pkeys *new_pps = NULL; + struct ib_qp *real_qp = qp->real_qp; + bool special_qp = (real_qp->qp_type == IB_QPT_SMI || + real_qp->qp_type == IB_QPT_GSI || + real_qp->qp_type >= IB_QPT_RESERVED1); bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || (qp_attr_mask & IB_QP_ALT_PATH)); - if (pps_change && !special_qp) { - mutex_lock(&qp->qp_sec->mutex); - new_pps = get_new_pps(qp, + WARN_ONCE((qp_attr_mask & IB_QP_PORT && + rdma_protocol_ib(real_qp->device, qp_attr->port_num) && + !real_qp->qp_sec), + "%s: QP security is not initialized for IB QP: %d\n", + __func__, real_qp->qp_num); + + /* The port/pkey settings are maintained only for the real QP. Open + * handles on the real QP will be in the shared_qp_list. When + * enforcing security on the real QP all the shared QPs will be + * checked as well. + */ + + if (pps_change && !special_qp && real_qp->qp_sec) { + mutex_lock(&real_qp->qp_sec->mutex); + new_pps = get_new_pps(real_qp, qp_attr, qp_attr_mask); - + if (!new_pps) { + mutex_unlock(&real_qp->qp_sec->mutex); + return -ENOMEM; + } /* Add this QP to the lists for the new port * and pkey settings before checking for permission * in case there is a concurrent cache update @@ -586,24 +624,24 @@ int ib_security_modify_qp(struct ib_qp *qp, if (!ret) ret = check_qp_port_pkey_settings(new_pps, - qp->qp_sec); + real_qp->qp_sec); } if (!ret) - ret = qp->device->modify_qp(qp->real_qp, - qp_attr, - qp_attr_mask, - udata); + ret = real_qp->device->modify_qp(real_qp, + qp_attr, + qp_attr_mask, + udata); - if (pps_change && !special_qp) { + if (new_pps) { /* Clean up the lists and free the appropriate * ports_pkeys structure. 
*/ if (ret) { tmp_pps = new_pps; } else { - tmp_pps = qp->qp_sec->ports_pkeys; - qp->qp_sec->ports_pkeys = new_pps; + tmp_pps = real_qp->qp_sec->ports_pkeys; + real_qp->qp_sec->ports_pkeys = new_pps; } if (tmp_pps) { @@ -611,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp, port_pkey_list_remove(&tmp_pps->alt); } kfree(tmp_pps); - mutex_unlock(&qp->qp_sec->mutex); + mutex_unlock(&real_qp->qp_sec->mutex); } return ret; } @@ -626,6 +664,9 @@ int ib_security_pkey_access(struct ib_device *dev, u16 pkey; int ret; + if (!rdma_protocol_ib(dev, port_num)) + return 0; + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); if (ret) return ret; @@ -660,6 +701,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, { int ret; + if (!rdma_protocol_ib(agent->device, agent->port_num)) + return 0; + ret = security_ib_alloc_security(&agent->security); if (ret) return ret; @@ -685,6 +729,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) { + if (!rdma_protocol_ib(agent->device, agent->port_num)) + return; + security_ib_free_security(agent->security); if (agent->lsm_nb_reg) unregister_lsm_notifier(&agent->lsm_nb); @@ -692,20 +739,19 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) { - int ret; - - if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) - return -EACCES; - - ret = ib_security_pkey_access(map->agent.device, - map->agent.port_num, - pkey_index, - map->agent.security); + if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) + return 0; - if (ret) - return ret; + if (map->agent.qp->qp_type == IB_QPT_SMI) { + if (!map->agent.smp_allowed) + return -EACCES; + return 0; + } - return 0; + return ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); } #endif /* CONFIG_SECURITY_INFINIBAND */ diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index abc5ab581f82..0a1e96c25ca3 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1262,7 +1262,6 @@ int ib_device_register_sysfs(struct ib_device *device, int ret; int i; - WARN_ON_ONCE(!device->dev.parent); ret = dev_set_name(class_dev, "%s", device->name); if (ret) return ret; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index eb85b546e223..d6fa38f8604f 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id, ctx = idr_find(&ctx_idr, id); if (!ctx) ctx = ERR_PTR(-ENOENT); - else if (ctx->file != file) + else if (ctx->file != file || !ctx->cm_id) ctx = ERR_PTR(-EINVAL); return ctx; } @@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; + struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; @@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, return -ENOMEM; ctx->uid = cmd.uid; - ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, - ucma_event_handler, ctx, cmd.ps, qp_type); - if (IS_ERR(ctx->cm_id)) { - ret = PTR_ERR(ctx->cm_id); + cm_id = rdma_create_id(current->nsproxy->net_ns, + ucma_event_handler, ctx, cmd.ps, qp_type); + if (IS_ERR(cm_id)) { + ret = PTR_ERR(cm_id); goto err1; } @@ -489,14 +490,19 @@ 
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, ret = -EFAULT; goto err2; } + + ctx->cm_id = cm_id; return 0; err2: - rdma_destroy_id(ctx->cm_id); + rdma_destroy_id(cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); + mutex_lock(&file->mut); + list_del(&ctx->list); + mutex_unlock(&file->mut); kfree(ctx); return ret; } @@ -626,6 +632,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -639,22 +648,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; - struct sockaddr *addr; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - addr = (struct sockaddr *) &cmd.addr; - if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) + if (cmd.reserved || !cmd.addr_size || + cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_bind_addr(ctx->cm_id, addr); + ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } @@ -670,13 +678,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.src_addr) || + !rdma_addr_size_in6(&cmd.dst_addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, - (struct sockaddr *) &cmd.dst_addr, - cmd.timeout_ms); + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -686,24 +697,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; - struct sockaddr *src, *dst; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - src = (struct sockaddr *) &cmd.src_addr; - dst = (struct sockaddr *) &cmd.dst_addr; - if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || - !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst))) + if (cmd.reserved || + (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || + !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); + ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -904,13 +914,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx, resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; - if (rec->rec_type == SA_PATH_REC_TYPE_IB) { - ib_sa_pack_path(rec, &resp->path_data[i].path_rec); - } else { + if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { struct sa_path_rec ib; sa_convert_path_opa_to_ib(&ib, rec); ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); + + } else { + ib_sa_pack_path(rec, &resp->path_data[i].path_rec); } } @@ -1148,10 +1159,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (cmd.qp_state > 
IB_QPS_ERR) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (!ctx->cm_id->device) { + ret = -EINVAL; + goto out; + } + resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; @@ -1293,6 +1312,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) + return -EINVAL; + optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { @@ -1314,7 +1336,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, { struct rdma_ucm_notify cmd; struct ucma_context *ctx; - int ret; + int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; @@ -1323,7 +1345,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); + if (ctx->cm_id->device) + ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + ucma_put_ctx(ctx); return ret; } @@ -1342,7 +1366,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, return -ENOSPC; addr = (struct sockaddr *) &cmd->addr; - if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) + if (cmd->addr_size != rdma_addr_size(addr)) return -EINVAL; if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) @@ -1409,7 +1433,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file, join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; - join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); + join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); + if (!join_cmd.addr_size) + return -EINVAL; + join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); @@ -1425,6 +1452,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_kss(&cmd.addr)) + return -EINVAL; + return ucma_process_join(file, &cmd, out_len); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 21e60b1e2ff4..9a4e899d94b3 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, sg_list_start = umem->sg_head.sgl; while (npages) { - ret = get_user_pages(cur_base, + ret = get_user_pages_longterm(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof (struct page *)), gup_flags, page_list, vma_list); @@ -352,7 +352,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, return -EINVAL; } - ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, + ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length, offset + ib_umem_offset(umem)); if (ret < 0) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index c1696e6084b2..6511cb21f6e2 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent, packet->mad.hdr.status = 0; packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); - packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); + /* + * On OPA devices it is okay to lose the upper 16 bits of LID as this + * information is 
obtained elsewhere. Mask off the upper 16 bits. + */ + if (agent->device->port_immutable[agent->port_num].core_cap_flags & + RDMA_CORE_PORT_INTEL_OPA) + packet->mad.hdr.lid = ib_lid_be16(0xFFFF & + mad_recv_wc->wc->slid); + else + packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); packet->mad.hdr.sl = mad_recv_wc->wc->sl; packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; @@ -491,7 +500,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, } memset(&ah_attr, 0, sizeof ah_attr); - ah_attr.type = rdma_ah_find_type(file->port->ib_dev, + ah_attr.type = rdma_ah_find_type(agent->device, file->port->port_num); rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 52a2cf2d83aa..186dce6bba8f 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -565,9 +565,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, if (f.file) fdput(f); + mutex_unlock(&file->device->xrcd_tree_mutex); + uobj_alloc_commit(&obj->uobject); - mutex_unlock(&file->device->xrcd_tree_mutex); return in_len; err_copy: @@ -606,10 +607,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, file->ucontext); - if (IS_ERR(uobj)) { - mutex_unlock(&file->device->xrcd_tree_mutex); + if (IS_ERR(uobj)) return PTR_ERR(uobj); - } ret = uobj_remove_commit(uobj); return ret ?: in_len; @@ -1982,6 +1981,19 @@ static int modify_qp(struct ib_uverbs_file *file, goto release_qp; } + if ((cmd->base.attr_mask & IB_QP_AV) && + !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { + ret = -EINVAL; + goto release_qp; + } + + if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && + (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || + !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd->base.qp_state; attr->cur_qp_state = cmd->base.cur_qp_state; attr->path_mtu = cmd->base.path_mtu; @@ -2079,8 +2091,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, return -EOPNOTSUPP; if (ucore->inlen > sizeof(cmd)) { - if (ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) + if (!ib_is_udata_cleared(ucore, sizeof(cmd), + ucore->inlen - sizeof(cmd))) return -EOPNOTSUPP; } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index dc2aed6fb21b..0f70ff91276e 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -647,12 +647,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command) return -1; } +static bool verify_command_idx(u32 command, bool extended) +{ + if (extended) + return command < ARRAY_SIZE(uverbs_ex_cmd_table); + + return command < ARRAY_SIZE(uverbs_cmd_table); +} + static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; struct ib_device *ib_dev; struct ib_uverbs_cmd_hdr hdr; + bool extended_command; __u32 command; __u32 flags; int srcu_key; @@ -685,6 +694,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, } command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; + flags = (hdr.command & + IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; + + extended_command 
= flags & IB_USER_VERBS_CMD_FLAG_EXTENDED; + if (!verify_command_idx(command, extended_command)) { + ret = -EINVAL; + goto out; + } + if (verify_command_mask(ib_dev, command)) { ret = -EOPNOTSUPP; goto out; @@ -696,12 +714,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, goto out; } - flags = (hdr.command & - IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; - if (!flags) { - if (command >= ARRAY_SIZE(uverbs_cmd_table) || - !uverbs_cmd_table[command]) { + if (!uverbs_cmd_table[command]) { ret = -EINVAL; goto out; } @@ -722,8 +736,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, struct ib_udata uhw; size_t written_count = count; - if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || - !uverbs_ex_cmd_table[command]) { + if (!uverbs_ex_cmd_table[command]) { ret = -ENOSYS; goto out; } diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 0a98579700ec..5f9321eda1b7 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -315,7 +315,7 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev, cq->uobject = &obj->uobject; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; - cq->cq_context = &ev_file->ev_queue; + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; obj->uobject.object = cq; obj->uobject.user_handle = user_handle; atomic_set(&cq->usecnt, 0); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index de57d6c11a25..9032f77cc38d 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1400,7 +1400,8 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); - ib_close_shared_qp_security(qp->qp_sec); + if (qp->qp_sec) + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 0d89621d9fe8..b210495ff33c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, ctx->idx = tbl_idx; ctx->refcnt = 1; ctx_tbl[tbl_idx] = ctx; + *context = ctx; return rc; } diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index be07da1997e6..73feeeeb4283 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -410,6 +410,11 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) { + if (DRAIN_CQE(cqe)) { + WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); + return 0; + } + if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) return 0; @@ -504,7 +509,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, /* * Special cqe for drain WR completions... 
*/ - if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { + if (DRAIN_CQE(hw_cqe)) { *cookie = CQE_DRAIN_COOKIE(hw_cqe); *cqe = *hw_cqe; goto skip_cqe; @@ -581,10 +586,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, ret = -EAGAIN; goto skip_cqe; } - if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { + if (unlikely(!CQE_STATUS(hw_cqe) && + CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { t4_set_wq_in_error(wq); - hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN)); - goto proc_cqe; + hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN)); } goto proc_cqe; } @@ -761,9 +766,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; - case C4IW_DRAIN_OPCODE: - wc->opcode = IB_WC_SEND; - break; default: pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", CQE_OPCODE(&cqe), CQE_QPID(&cqe)); diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index 8f963df0bffc..9d25298d96fa 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); - spin_lock_irqsave(&chp->comp_handler_lock, flag); - (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); - spin_unlock_irqrestore(&chp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&chp->cq)) { + spin_lock_irqsave(&chp->comp_handler_lock, flag); + (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); + spin_unlock_irqrestore(&chp->comp_handler_lock, flag); + } } void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 819a30635d53..20c481115a99 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -631,8 +631,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state) return IB_QPS_ERR; } -#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN - static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? 
FW_RI_MEM_ACCESS_REM_WRITE : 0) | diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cb7fc0d35d1d..f311ea73c806 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -794,21 +794,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) return 0; } -static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) +static int ib_to_fw_opcode(int ib_opcode) +{ + int opcode; + + switch (ib_opcode) { + case IB_WR_SEND_WITH_INV: + opcode = FW_RI_SEND_WITH_INV; + break; + case IB_WR_SEND: + opcode = FW_RI_SEND; + break; + case IB_WR_RDMA_WRITE: + opcode = FW_RI_RDMA_WRITE; + break; + case IB_WR_RDMA_READ: + case IB_WR_RDMA_READ_WITH_INV: + opcode = FW_RI_READ_REQ; + break; + case IB_WR_REG_MR: + opcode = FW_RI_FAST_REGISTER; + break; + case IB_WR_LOCAL_INV: + opcode = FW_RI_LOCAL_INV; + break; + default: + opcode = -EINVAL; + } + return opcode; +} + +static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *schp; unsigned long flag; struct t4_cq *cq; + int opcode; schp = to_c4iw_cq(qhp->ibqp.send_cq); cq = &schp->cq; + opcode = ib_to_fw_opcode(wr->opcode); + if (opcode < 0) + return opcode; + cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(opcode) | CQE_TYPE_V(1) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&schp->lock, flag); @@ -817,10 +853,29 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) t4_swcq_produce(cq); spin_unlock_irqrestore(&schp->lock, flag); - spin_lock_irqsave(&schp->comp_handler_lock, flag); - (*schp->ibcq.comp_handler)(&schp->ibcq, - schp->ibcq.cq_context); - spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&schp->cq)) { + spin_lock_irqsave(&schp->comp_handler_lock, flag); + (*schp->ibcq.comp_handler)(&schp->ibcq, + schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + } + return 0; +} + +static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + int ret = 0; + + while (wr) { + ret = complete_sq_drain_wr(qhp, wr); + if (ret) { + *bad_wr = wr; + break; + } + wr = wr->next; + } + return ret; } static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) @@ -835,9 +890,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(FW_RI_SEND) | CQE_TYPE_V(0) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&rchp->lock, flag); @@ -846,10 +902,20 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) t4_swcq_produce(cq); spin_unlock_irqrestore(&rchp->lock, flag); - spin_lock_irqsave(&rchp->comp_handler_lock, flag); - (*rchp->ibcq.comp_handler)(&rchp->ibcq, - rchp->ibcq.cq_context); - spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&rchp->cq)) { + spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + } +} + +static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr) +{ + while (wr) { + complete_rq_drain_wr(qhp, wr); + wr = wr->next; + } } int 
c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, @@ -868,9 +934,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); - if (t4_wq_in_error(&qhp->wq)) { + + /* + * If the qp has been flushed, then just insert a special + * drain cqe. + */ + if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_sq_drain_wr(qhp, wr); + err = complete_sq_drain_wrs(qhp, wr, bad_wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); @@ -1012,9 +1083,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); - if (t4_wq_in_error(&qhp->wq)) { + + /* + * If the qp has been flushed, then just insert a special + * drain cqe. + */ + if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_rq_drain_wr(qhp, wr); + complete_rq_drain_wrs(qhp, wr); return err; } num_wrs = t4_rq_avail(&qhp->wq); @@ -1257,48 +1333,51 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); - /* locking hierarchy: cq lock first, then qp lock. */ + /* locking hierarchy: cqs lock first, then qp lock. */ spin_lock_irqsave(&rchp->lock, flag); + if (schp != rchp) + spin_lock(&schp->lock); spin_lock(&qhp->lock); if (qhp->wq.flushed) { spin_unlock(&qhp->lock); + if (schp != rchp) + spin_unlock(&schp->lock); spin_unlock_irqrestore(&rchp->lock, flag); return; } qhp->wq.flushed = 1; + t4_set_wq_in_error(&qhp->wq); c4iw_flush_hw_cq(rchp); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); - spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&rchp->lock, flag); - /* locking hierarchy: cq lock first, then qp lock. 
*/ - spin_lock_irqsave(&schp->lock, flag); - spin_lock(&qhp->lock); if (schp != rchp) c4iw_flush_hw_cq(schp); sq_flushed = c4iw_flush_sq(qhp); + spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&schp->lock, flag); + if (schp != rchp) + spin_unlock(&schp->lock); + spin_unlock_irqrestore(&rchp->lock, flag); if (schp == rchp) { - if (t4_clear_cq_armed(&rchp->cq) && - (rq_flushed || sq_flushed)) { + if ((rq_flushed || sq_flushed) && + t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } } else { - if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { + if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } - if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { + if (sq_flushed && t4_clear_cq_armed(&schp->cq)) { spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); @@ -1315,8 +1394,8 @@ static void flush_qp(struct c4iw_qp *qhp) rchp = to_c4iw_cq(qhp->ibqp.recv_cq); schp = to_c4iw_cq(qhp->ibqp.send_cq); - t4_set_wq_in_error(&qhp->wq); if (qhp->ibqp.uobject) { + t4_set_wq_in_error(&qhp->wq); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index e765c00303cd..80b390e861dc 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -171,7 +171,7 @@ struct t4_cqe { __be32 msn; } rcqe; struct { - u32 stag; + __be32 stag; u16 nada2; u16 cidx; } scqe; @@ -197,6 +197,11 @@ struct t4_cqe { #define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M) #define CQE_SWCQE_V(x) ((x)<> CQE_DRAIN_S)) & CQE_DRAIN_M) +#define CQE_DRAIN_V(x) ((x)<> CQE_STATUS_S)) & CQE_STATUS_M) @@ -213,6 +218,7 @@ struct t4_cqe { #define CQE_OPCODE_V(x) ((x)<header))) +#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header))) #define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header))) #define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header))) #define SQ_TYPE(x) (CQE_TYPE((x))) diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 010c709ba3bb..58c531db4f4a 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr { __u16 wrid; __u8 r1[3]; __u8 len16; - __u32 r2; - __u32 stag; + __be32 r2; + __be32 stag; struct fw_ri_tpte tpte; __u64 pbl[2]; }; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 0be42787759f..0e17d03ef1cb 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9952,7 +9952,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which) goto unimplemented; case HFI1_IB_CFG_OP_VLS: - val = ppd->vls_operational; + val = ppd->actual_vls_operational; break; case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */ val = VL_ARB_HIGH_PRIO_TABLE_SIZE; @@ -13074,7 +13074,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) first_sdma = last_general; last_sdma = first_sdma + dd->num_sdma; first_rx = last_sdma; - last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; + last_rx = first_rx + dd->n_krcv_queues + 
dd->num_vnic_contexts; /* VNIC MSIx interrupts get mapped when VNIC contexts are created */ dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues; @@ -13294,8 +13294,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd) * slow source, SDMACleanupDone) * N interrupts - one per used SDMA engine * M interrupt - one per kernel receive context + * V interrupt - one for each VNIC context */ - total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; + total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts; /* ask for MSI-X interrupts */ request = request_msix(dd, total); @@ -13356,10 +13357,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd) * in array of contexts * freectxts - number of free user contexts * num_send_contexts - number of PIO send contexts being used + * num_vnic_contexts - number of contexts reserved for VNIC */ static int set_up_context_variables(struct hfi1_devdata *dd) { unsigned long num_kernel_contexts; + u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT; int total_contexts; int ret; unsigned ngroups; @@ -13393,6 +13396,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd) num_kernel_contexts); num_kernel_contexts = dd->chip_send_contexts - num_vls - 1; } + + /* Accommodate VNIC contexts if possible */ + if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) { + dd_dev_err(dd, "No receive contexts available for VNIC\n"); + num_vnic_contexts = 0; + } + total_contexts = num_kernel_contexts + num_vnic_contexts; + /* * User contexts: * - default to 1 user context per real (non-HT) CPU core if @@ -13402,19 +13413,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd) num_user_contexts = cpumask_weight(&node_affinity.real_cpu_mask); - total_contexts = num_kernel_contexts + num_user_contexts; - /* * Adjust the counts given a global max. 
*/ - if (total_contexts > dd->chip_rcv_contexts) { + if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) { dd_dev_err(dd, "Reducing # user receive contexts to: %d, from %d\n", - (int)(dd->chip_rcv_contexts - num_kernel_contexts), + (int)(dd->chip_rcv_contexts - total_contexts), (int)num_user_contexts); - num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts; /* recalculate */ - total_contexts = num_kernel_contexts + num_user_contexts; + num_user_contexts = dd->chip_rcv_contexts - total_contexts; } /* each user context requires an entry in the RMT */ @@ -13427,25 +13435,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd) user_rmt_reduced); /* recalculate */ num_user_contexts = user_rmt_reduced; - total_contexts = num_kernel_contexts + num_user_contexts; } - /* Accommodate VNIC contexts */ - if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts) - total_contexts += HFI1_NUM_VNIC_CTXT; + total_contexts += num_user_contexts; /* the first N are kernel contexts, the rest are user/vnic contexts */ dd->num_rcv_contexts = total_contexts; dd->n_krcv_queues = num_kernel_contexts; dd->first_dyn_alloc_ctxt = num_kernel_contexts; + dd->num_vnic_contexts = num_vnic_contexts; dd->num_user_contexts = num_user_contexts; dd->freectxts = num_user_contexts; dd_dev_info(dd, - "rcv contexts: chip %d, used %d (kernel %d, user %d)\n", + "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n", (int)dd->chip_rcv_contexts, (int)dd->num_rcv_contexts, (int)dd->n_krcv_queues, - (int)dd->num_rcv_contexts - dd->n_krcv_queues); + dd->num_vnic_contexts, + dd->num_user_contexts); /* * Receive array allocation: diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e9893136..fd28f09b4445 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -881,11 +881,11 @@ static int complete_subctxt(struct hfi1_filedata *fd) } if (ret) { - hfi1_rcd_put(fd->uctxt); - fd->uctxt = NULL; spin_lock_irqsave(&fd->dd->uctxt_lock, flags); __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); + hfi1_rcd_put(fd->uctxt); + fd->uctxt = NULL; } return ret; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 3ac9c307a285..3409eee16092 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1047,6 +1047,8 @@ struct hfi1_devdata { u64 z_send_schedule; u64 __percpu *send_schedule; + /* number of reserved contexts for VNIC usage */ + u16 num_vnic_contexts; /* number of receive contexts in use by the driver */ u32 num_rcv_contexts; /* number of pio send contexts in use by the driver */ @@ -1127,7 +1129,6 @@ struct hfi1_devdata { u16 pcie_lnkctl; u16 pcie_devctl2; u32 pci_msix0; - u32 pci_lnkctl3; u32 pci_tph2; /* diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index f4c0ffc040cc..07b80faf1675 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -4293,7 +4293,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp, const struct ib_wc *in_wc) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - u16 slid = ib_lid_cpu16(in_wc->slid); u16 pkey; if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys)) @@ -4320,7 +4319,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp, */ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) return 0; - ingress_pkey_table_fail(ppd, pkey, slid); + /* + * On OPA devices it is okay to lose the upper 
16 bits of LID as this + * information is obtained elsewhere. Mask off the upper 16 bits. + */ + ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid)); return 1; } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 09e50fd2a08f..8c7e7a60b715 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, + dd->pci_tph2); + if (ret) + goto error; + } return 0; error: @@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - &dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, + &dd->pci_tph2); + if (ret) + goto error; + } return 0; error: diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index e1cf0c08ca6f..84c6a6ff4a67 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -815,7 +815,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp, struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_16b_header *hdr = &opa_hdr->opah; struct ib_other_headers *ohdr; - u32 bth0, bth1; + u32 bth0, bth1 = 0; u16 len, pkey; u8 becn = !!is_fecn; u8 l4 = OPA_16B_L4_IB_LOCAL; diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 6d2702ef34ac..25e867393463 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device, * give a more accurate picture of total contexts available. 
*/ return scnprintf(buf, PAGE_SIZE, "%u\n", - min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt, + min(dd->num_user_contexts, (u32)dd->sc_sizes[SC_USER].count)); } diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 9938bb983ce6..9749ec9dd9f2 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c @@ -154,7 +154,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr, *opcode = ib_bth_get_opcode(ohdr); *tver = ib_bth_get_tver(ohdr); *pkey = ib_bth_get_pkey(ohdr); - *psn = ib_bth_get_psn(ohdr); + *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } @@ -169,7 +169,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr, *pad = ib_bth_get_pad(ohdr); *se = ib_bth_get_se(ohdr); *tver = ib_bth_get_tver(ohdr); - *psn = ib_bth_get_psn(ohdr); + *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index f419cbb05928..1a17708be46a 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, struct rdma_netdev *rn; int i, size, rc; + if (!dd->num_vnic_contexts) + return ERR_PTR(-ENOMEM); + if (!port_num || (port_num > dd->num_pports)) return ERR_PTR(-EINVAL); @@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo); netdev = alloc_netdev_mqs(size, name, name_assign_type, setup, - dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT); + dd->chip_sdma_engines, dd->num_vnic_contexts); if (!netdev) return ERR_PTR(-ENOMEM); @@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, vinfo = opa_vnic_dev_priv(netdev); vinfo->dd = dd; vinfo->num_tx_q = dd->chip_sdma_engines; - vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT; + vinfo->num_rx_q = dd->num_vnic_contexts; vinfo->netdev = netdev; rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->set_id = hfi1_vnic_set_vesw_id; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 747efd1ae5a6..8208c30f03c5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1001,6 +1001,11 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) } } + if (!ne) { + dev_err(dev, "Reseved loop qp is absent!\n"); + goto free_work; + } + do { ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); if (ret < 0) { diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 5230dd3c938c..b7f1ce5333cb 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -125,7 +125,8 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird) * @conn_ird: connection IRD * @conn_ord: connection ORD */ -static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord) +static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird, + u32 conn_ord) { if (conn_ird > I40IW_MAX_IRD_SIZE) conn_ird = I40IW_MAX_IRD_SIZE; @@ -1043,7 +1044,7 @@ static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, * i40iw_schedule_cm_timer * @@cm_node: connection's node * @sqbuf: buffer to send - * @type: if it es send ot close + * @type: if it is send or close * @send_retrans: if rexmits to be done * @close_when_complete: is cm_node to be removed * @@ 
-1067,7 +1068,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); if (!new_send) { - i40iw_free_sqbuf(vsi, (void *)sqbuf); + if (type != I40IW_TIMER_TYPE_CLOSE) + i40iw_free_sqbuf(vsi, (void *)sqbuf); return -ENOMEM; } new_send->retrycount = I40IW_DEFAULT_RETRYS; @@ -1082,7 +1084,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, new_send->timetosend += (HZ / 10); if (cm_node->close_entry) { kfree(new_send); - i40iw_free_sqbuf(vsi, (void *)sqbuf); i40iw_pr_err("already close entry\n"); return -EINVAL; } @@ -3841,7 +3842,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } cm_node->apbvt_set = true; - i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); + i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord); if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && !cm_node->ord_size) cm_node->ord_size = 1; diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 42ca5346777d..472ef4d6e858 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -506,7 +506,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp, ret_code = i40iw_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf, - 128, + I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size, I40IW_SD_BUF_ALIGNMENT); if (ret_code) @@ -589,14 +589,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp) } /** - * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq - * @cqp: struct for cqp hw - * @wqe_idx: we index of cqp ring + * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index + * @cqp: pointer to CQP structure + * @scratch: private data for CQP WQE + * @wqe_idx: WQE index for next WQE on CQP SQ */ -u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) +static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp, + u64 scratch, u32 *wqe_idx) { u64 *wqe = NULL; - u32 wqe_idx; enum i40iw_status_code ret_code; if (I40IW_RING_FULL_ERR(cqp->sq_ring)) { @@ -609,20 +610,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) cqp->sq_ring.size); return NULL; } - I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code); + I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++; if (ret_code) return NULL; - if (!wqe_idx) + if (!*wqe_idx) cqp->polarity = !cqp->polarity; - wqe = cqp->sq_base[wqe_idx].elem; - cqp->scratch_array[wqe_idx] = scratch; + wqe = cqp->sq_base[*wqe_idx].elem; + cqp->scratch_array[*wqe_idx] = scratch; I40IW_CQP_INIT_WQE(wqe); return wqe; } +/** + * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq + * @cqp: struct for cqp hw + * @scratch: private data for CQP WQE + */ +u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) +{ + u32 wqe_idx; + + return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); +} + /** * i40iw_sc_cqp_destroy - destroy cqp during close * @cqp: struct for cqp hw @@ -3534,8 +3547,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, u64 *wqe; int mem_entries, wqe_entries; struct i40iw_dma_mem *sdbuf = &cqp->sdbuf; + u64 offset; + u32 wqe_idx; - wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); if (!wqe) return I40IW_ERR_RING_FULL; @@ -3548,8 +3563,10 @@ static enum i40iw_status_code 
cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT); if (mem_entries) { - memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4)); - data = sdbuf->pa; + offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE; + memcpy((char *)sdbuf->va + offset, &info->entry[3], + mem_entries << 4); + data = (u64)sdbuf->pa + offset; } else { data = 0; } @@ -3858,8 +3875,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1; hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted; - hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted; - hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted; + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = + roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted); + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = + roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted); hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt = hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt = diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index 2ebaadbed379..019ad3b939f9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -93,6 +93,7 @@ #define RDMA_OPCODE_MASK 0x0f #define RDMA_READ_REQ_OPCODE 1 #define Q2_BAD_FRAME_OFFSET 72 +#define Q2_FPSN_OFFSET 64 #define CQE_MAJOR_DRV 0x8000 #define I40IW_TERM_SENT 0x01 @@ -1109,7 +1110,7 @@ #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT) #define I40IWQPC_ARPIDX_SHIFT 48 -#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT) +#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT) #define I40IWQPC_FLOWLABEL_SHIFT 0 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT) @@ -1516,7 +1517,7 @@ enum i40iw_alignment { I40IW_AEQ_ALIGNMENT = 0x100, I40IW_CEQ_ALIGNMENT = 0x100, I40IW_CQ0_ALIGNMENT = 0x100, - I40IW_SD_BUF_ALIGNMENT = 0x100 + I40IW_SD_BUF_ALIGNMENT = 0x80 }; #define I40IW_WQE_SIZE_64 64 @@ -1524,6 +1525,8 @@ enum i40iw_alignment { #define I40IW_QP_WQE_MIN_SIZE 32 #define I40IW_QP_WQE_MAX_SIZE 128 +#define I40IW_UPDATE_SD_BUF_SIZE 128 + #define I40IW_CQE_QTYPE_RQ 0 #define I40IW_CQE_QTYPE_SQ 1 diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index 59f70676f0e0..14d38d733cb4 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -1376,7 +1376,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; u32 rcv_wnd = hw_host_ctx[23]; /* first partial seq # in q2 */ - u32 fps = qp->q2_buf[16]; + u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET); struct list_head *rxlist = &pfpdu->rxlist; struct list_head *plist; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c636842c5be0..8c681a36e6c7 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2972,9 +2972,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) kfree(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, - ibdev->steer_qpn_count); + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); err_counter: for (i = 0; i < ibdev->num_ports; ++i) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); @@ 
-3079,11 +3078,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) ibdev->iboe.nb.notifier_call = NULL; } - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, - ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); - } + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); + kfree(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index e6f77f63da75..e80a7f764a74 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -406,7 +406,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, goto err_free_mr; mr->max_pages = max_num_sg; - err = mlx4_mr_enable(dev->dev, &mr->mmr); if (err) goto err_free_pl; @@ -417,6 +416,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, return &mr->ibmr; err_free_pl: + mr->ibmr.device = pd->device; mlx4_free_priv_pages(mr); err_free_mr: (void) mlx4_mr_free(dev->dev, &mr->mmr); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index b6b33d99b0b4..9354fec8efe7 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, return (-EOPNOTSUPP); } + if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | + MLX4_IB_RX_HASH_DST_IPV4 | + MLX4_IB_RX_HASH_SRC_IPV6 | + MLX4_IB_RX_HASH_DST_IPV6 | + MLX4_IB_RX_HASH_SRC_PORT_TCP | + MLX4_IB_RX_HASH_DST_PORT_TCP | + MLX4_IB_RX_HASH_SRC_PORT_UDP | + MLX4_IB_RX_HASH_DST_PORT_UDP)) { + pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", + ucmd->rx_hash_fields_mask); + return (-EOPNOTSUPP); + } + if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { rss_ctx->flags = MLX4_RSS_IPV4; @@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, return (-EOPNOTSUPP); } - if (rss_ctx->flags & MLX4_RSS_IPV4) { + if (rss_ctx->flags & MLX4_RSS_IPV4) rss_ctx->flags |= MLX4_RSS_UDP_IPV4; - } else if (rss_ctx->flags & MLX4_RSS_IPV6) { + if (rss_ctx->flags & MLX4_RSS_IPV6) rss_ctx->flags |= MLX4_RSS_UDP_IPV6; - } else { + if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); return (-EOPNOTSUPP); } @@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { - if (rss_ctx->flags & MLX4_RSS_IPV4) { + if (rss_ctx->flags & MLX4_RSS_IPV4) rss_ctx->flags |= MLX4_RSS_TCP_IPV4; - } else if (rss_ctx->flags & MLX4_RSS_IPV6) { + if (rss_ctx->flags & MLX4_RSS_IPV6) rss_ctx->flags |= MLX4_RSS_TCP_IPV6; - } else { + if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); return (-EOPNOTSUPP); } - } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); @@ -2182,11 +2194,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | 
(to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); - if (rwq_ind_tbl) { - fill_qp_rss_context(context, qp); - context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); - } - if (!(attr_mask & IB_QP_PATH_MIG_STATE)) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); else { @@ -2216,7 +2223,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->mtu_msgmax = (IB_MTU_4096 << 5) | ilog2(dev->dev->caps.max_gso_sz); else - context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; + context->mtu_msgmax = (IB_MTU_4096 << 5) | 13; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("path MTU (%u) is invalid\n", @@ -2387,6 +2394,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->pd = cpu_to_be32(pd->pdn); if (!rwq_ind_tbl) { + context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); get_cqs(qp, src_type, &send_cq, &recv_cq); } else { /* Set dummy CQs to be compatible with HV and PRM */ send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq); @@ -2394,7 +2402,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, } context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); - context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); /* Set "fast registration enabled" for all kernel QPs */ if (!ibuobject) @@ -2513,7 +2520,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, MLX4_IB_LINK_TYPE_ETH; if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { /* set QP to receive both tunneled & non-tunneled packets */ - if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET))) + if (!rwq_ind_tbl) context->srqn = cpu_to_be32(7 << 28); } } @@ -2562,6 +2569,13 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, } } + if (rwq_ind_tbl && + cur_state == IB_QPS_RESET && + new_state == IB_QPS_INIT) { + fill_qp_rss_context(context, qp); + context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); + } + err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), to_mlx4_state(new_state), context, optpar, sqd_event, &qp->mqp); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 2aa53f427685..faedc080a5e6 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -1154,7 +1154,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 552f7bd4ecc3..fb5302ee57c7 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1276,7 +1276,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn) return err; if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || - !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) return err; mutex_lock(&dev->lb_mutex); @@ -1294,7 +1295,8 @@ static void mlx5_ib_dealloc_transport_domain(struct 
mlx5_ib_dev *dev, u32 tdn) mlx5_core_dealloc_transport_domain(dev->mdev, tdn); if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || - !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) return; mutex_lock(&dev->lb_mutex); @@ -1415,6 +1417,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, } INIT_LIST_HEAD(&context->vma_private_list); + mutex_init(&context->vma_private_list_mutex); INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); @@ -1576,7 +1579,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area) * mlx5_ib_disassociate_ucontext(). */ mlx5_ib_vma_priv_data->vma = NULL; + mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex); list_del(&mlx5_ib_vma_priv_data->list); + mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex); kfree(mlx5_ib_vma_priv_data); } @@ -1596,10 +1601,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, return -ENOMEM; vma_prv->vma = vma; + vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex; vma->vm_private_data = vma_prv; vma->vm_ops = &mlx5_ib_vm_ops; + mutex_lock(&ctx->vma_private_list_mutex); list_add(&vma_prv->list, vma_head); + mutex_unlock(&ctx->vma_private_list_mutex); return 0; } @@ -1642,6 +1650,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) * mlx5_ib_vma_close. */ down_write(&owning_mm->mmap_sem); + mutex_lock(&context->vma_private_list_mutex); list_for_each_entry_safe(vma_private, n, &context->vma_private_list, list) { vma = vma_private->vma; @@ -1656,6 +1665,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) list_del(&vma_private->list); kfree(vma_private); } + mutex_unlock(&context->vma_private_list_mutex); up_write(&owning_mm->mmap_sem); mmput(owning_mm); put_task_struct(owning_process); @@ -3097,6 +3107,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev) qp->real_qp = qp; qp->uobject = NULL; qp->qp_type = MLX5_IB_QPT_REG_UMR; + qp->send_cq = init_attr->send_cq; + qp->recv_cq = init_attr->recv_cq; attr->qp_state = IB_QPS_INIT; attr->port_num = 1; @@ -4151,7 +4163,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) } if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && - MLX5_CAP_GEN(mdev, disable_local_lb)) + (MLX5_CAP_GEN(mdev, disable_local_lb_uc) || + MLX5_CAP_GEN(mdev, disable_local_lb_mc))) mutex_init(&dev->lb_mutex); dev->ib_active = true; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 189e80cd6b2f..754103372faa 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -115,6 +115,8 @@ enum { struct mlx5_ib_vma_private_data { struct list_head list; struct vm_area_struct *vma; + /* protect vma_private_list add/del */ + struct mutex *vma_private_list_mutex; }; struct mlx5_ib_ucontext { @@ -129,6 +131,8 @@ struct mlx5_ib_ucontext { /* Transport Domain number */ u32 tdn; struct list_head vma_private_list; + /* protect vma_private_list add/del */ + struct mutex vma_private_list_mutex; unsigned long upd_xlt_page; /* protect ODP/KSM */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 37bbc543847a..6d48d8a93b62 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, *umem = ib_umem_get(pd->uobject->context, start, length, access_flags, 0); 
err = PTR_ERR_OR_ZERO(*umem); - if (err < 0) { + if (err) { + *umem = NULL; mlx5_ib_err(dev, "umem get failed (%d)\n", err); return err; } @@ -1206,6 +1207,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int err; bool use_umr = true; + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) + return ERR_PTR(-EINVAL); + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", start, virt_addr, length, access_flags); @@ -1412,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); ib_umem_release(mr->umem); + mr->umem = NULL; clean_mr(dev, mr); return err; } @@ -1495,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) u32 key = mr->mmkey.key; err = destroy_mkey(dev, mr); - kfree(mr); if (err) { mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", key, err); return err; } - } else { - mlx5_mr_cache_free(dev, mr); } return 0; @@ -1545,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) atomic_sub(npages, &dev->mdev->priv.reg_pages); } + if (!mr->allocated_from_cache) + kfree(mr); + else + mlx5_mr_cache_free(dev, mr); + return 0; } @@ -1637,6 +1644,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, MLX5_SET(mkc, mkc, access_mode, mr->access_mode); MLX5_SET(mkc, mkc, umr_en, 1); + mr->ibmr.device = pd->device; err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) goto err_destroy_psv; @@ -1812,7 +1820,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ibmr.iova = sg_dma_address(sg) + sg_offset; mr->ibmr.length = 0; - mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { if (unlikely(i >= mr->max_descs)) @@ -1824,6 +1831,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, sg_offset = 0; } + mr->ndescs = i; if (sg_offset_p) *sg_offset_p = sg_offset; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index acb79d3a4f1d..c4d8cc1c2b1d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1130,7 +1130,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, ib_umem_release(sq->ubuffer.umem); } -static int get_rq_pas_size(void *qpc) +static size_t get_rq_pas_size(void *qpc) { u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); @@ -1146,7 +1146,8 @@ static int get_rq_pas_size(void *qpc) } static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, - struct mlx5_ib_rq *rq, void *qpin) + struct mlx5_ib_rq *rq, void *qpin, + size_t qpinlen) { struct mlx5_ib_qp *mqp = rq->base.container_mibqp; __be64 *pas; @@ -1155,9 +1156,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, void *rqc; void *wq; void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); - int inlen; + size_t rq_pas_size = get_rq_pas_size(qpc); + size_t inlen; int err; - u32 rq_pas_size = get_rq_pas_size(qpc); + + if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas)) + return -EINVAL; inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; in = kvzalloc(inlen, GFP_KERNEL); @@ -1236,7 +1240,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, } static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, - u32 *in, + u32 *in, size_t inlen, struct ib_pd *pd) { struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; @@ -1266,7 +1270,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->flags & 
MLX5_IB_QP_CVLAN_STRIPPING) rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; - err = create_raw_packet_qp_rq(dev, rq, in); + err = create_raw_packet_qp_rq(dev, rq, in, inlen); if (err) goto err_destroy_sq; @@ -1781,11 +1785,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, qp->flags |= MLX5_IB_QP_LSO; } + if (inlen < 0) { + err = -EINVAL; + goto err; + } + if (init_attr->qp_type == IB_QPT_RAW_PACKET || qp->flags & MLX5_IB_QP_UNDERLAY) { qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); - err = create_raw_packet_qp(dev, qp, in, pd); + err = create_raw_packet_qp(dev, qp, in, inlen, pd); } else { err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); } @@ -1825,6 +1834,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, else if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); +err: kvfree(in); return err; } @@ -4303,12 +4313,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, memset(ah_attr, 0, sizeof(*ah_attr)); - ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); - rdma_ah_set_port_num(ah_attr, path->port); - if (rdma_ah_get_port_num(ah_attr) == 0 || - rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports)) + if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports)) return; + ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); + rdma_ah_set_port_num(ah_attr, path->port); rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 6d5fadad9090..3c7522d025f2 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_srq *srq; - int desc_size; - int buf_size; + size_t desc_size; + size_t buf_size; int err; struct mlx5_srq_attr in = {0}; __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); @@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); + if (desc_size == 0 || srq->msrq.max_gs > desc_size) + return ERR_PTR(-EINVAL); desc_size = roundup_pow_of_two(desc_size); - desc_size = max_t(int, 32, desc_size); + desc_size = max_t(size_t, 32, desc_size); + if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) + return ERR_PTR(-EINVAL); srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / sizeof(struct mlx5_wqe_data_seg); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; - mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", - desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, - srq->msrq.max_avail_gather); + if (buf_size < desc_size) + return ERR_PTR(-EINVAL); in.type = init_attr->srq_type; if (pd->uobject) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 66056f9a9700..48a49f8a5014 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -834,7 +834,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) dev->reset_stats.type = OCRDMA_RESET_STATS; dev->reset_stats.dev = dev; - if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + if (!debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats, &ocrdma_dbg_ops)) goto err; 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e9a91736b12d..d80b61a71eb8 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -434,13 +434,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) qp->s_state = OP(COMPARE_SWAP); put_ib_ateth_swap(wqe->atomic_wr.swap, &ohdr->u.atomic_eth); - put_ib_ateth_swap(wqe->atomic_wr.compare_add, - &ohdr->u.atomic_eth); + put_ib_ateth_compare(wqe->atomic_wr.compare_add, + &ohdr->u.atomic_eth); } else { qp->s_state = OP(FETCH_ADD); put_ib_ateth_swap(wqe->atomic_wr.compare_add, &ohdr->u.atomic_eth); - put_ib_ateth_swap(0, &ohdr->u.atomic_eth); + put_ib_ateth_compare(0, &ohdr->u.atomic_eth); } put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, &ohdr->u.atomic_eth); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 3562c0c30492..6286b95d77ed 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, union pvrdma_cmd_resp rsp; struct pvrdma_cmd_create_cq *cmd = &req.create_cq; struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; + struct pvrdma_create_cq_resp cq_resp = {0}; struct pvrdma_create_cq ucmd; BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); @@ -198,6 +199,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, cq->ibcq.cqe = resp->cqe; cq->cq_handle = resp->cq_handle; + cq_resp.cqn = resp->cq_handle; spin_lock_irqsave(&dev->cq_tbl_lock, flags); dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); @@ -206,7 +208,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, cq->uar = &(to_vucontext(context)->uar); /* Copy udata back. 
*/ - if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { + if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); pvrdma_destroy_cq(&cq->ibcq); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index ed34d5a581fa..d7162f2b7979 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -406,6 +406,13 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp) atomic_dec(&qp->refcnt); wait_event(qp->wait, !atomic_read(&qp->refcnt)); + if (!qp->is_kernel) { + if (qp->rumem) + ib_umem_release(qp->rumem); + if (qp->sumem) + ib_umem_release(qp->sumem); + } + pvrdma_page_dir_cleanup(dev, &qp->pdir); kfree(qp); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 48776f5ffb0e..aa533f08e017 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -444,6 +444,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, union pvrdma_cmd_resp rsp; struct pvrdma_cmd_create_pd *cmd = &req.create_pd; struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; + struct pvrdma_alloc_pd_resp pd_resp = {0}; int ret; void *ptr; @@ -472,9 +473,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, pd->privileged = !context; pd->pd_handle = resp->pd_handle; pd->pdn = resp->pd_handle; + pd_resp.pdn = resp->pd_handle; if (context) { - if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { + if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { dev_warn(&dev->pdev->dev, "failed to copy back protection domain\n"); pvrdma_dealloc_pd(&pd->ibpd); diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 97d71e49c092..88fa4d44ab5f 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -198,7 +198,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, return ERR_PTR(-EINVAL); /* Allocate the completion queue structure. */ - cq = kzalloc(sizeof(*cq), GFP_KERNEL); + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node); if (!cq) return ERR_PTR(-ENOMEM); @@ -214,7 +214,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, sz += sizeof(struct ib_uverbs_wc) * (entries + 1); else sz += sizeof(struct ib_wc) * (entries + 1); - wc = vmalloc_user(sz); + wc = udata ? + vmalloc_user(sz) : + vzalloc_node(sz, rdi->dparms.node); if (!wc) { ret = ERR_PTR(-ENOMEM); goto bail_cq; @@ -369,7 +371,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); else sz += sizeof(struct ib_wc) * (cqe + 1); - wc = vmalloc_user(sz); + wc = udata ? 
+ vmalloc_user(sz) : + vzalloc_node(sz, rdi->dparms.node); if (!wc) return -ENOMEM; diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 42713511b53b..524e6134642e 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t) unsigned long timeout; struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); - if (percpu_ref_is_zero(&mr->refcount)) - return 0; - /* avoid dma mr */ - if (mr->lkey) + if (mr->lkey) { + /* avoid dma mr */ rvt_dereg_clean_qps(mr); + /* @mr was indexed on rcu protected @lkey_table */ + synchronize_rcu(); + } + timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); if (!timeout) { rvt_pr_err(rdi, diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 77b3ed0df936..7f945f65d8cd 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, void rxe_release(struct kref *kref); -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify); int rxe_completer(void *arg); int rxe_requester(void *arg); int rxe_responder(void *arg); diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index c1b5f38f31a5..3b4916680018 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -404,6 +404,8 @@ void *rxe_alloc(struct rxe_pool *pool) elem = kmem_cache_zalloc(pool_cache(pool), (pool->flags & RXE_POOL_ATOMIC) ? GFP_ATOMIC : GFP_KERNEL); + if (!elem) + return NULL; elem->pool = pool; kref_init(&elem->ref_cnt); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 00bda9380a2e..aeea994b04c4 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp) } /* called when the last reference to the qp is dropped */ -void rxe_qp_cleanup(struct rxe_pool_entry *arg) +static void rxe_qp_do_cleanup(struct work_struct *work) { - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); rxe_drop_all_mcast_groups(qp); @@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg) kernel_sock_shutdown(qp->sk, SHUT_RDWR); sock_release(qp->sk); } + +/* called when the last reference to the qp is dropped */ +void rxe_qp_cleanup(struct rxe_pool_entry *arg) +{ + struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + + execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); +} diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index d84222f9d5d2..44b838ec9420 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -594,15 +594,8 @@ int rxe_requester(void *arg) rxe_add_ref(qp); next_wqe: - if (unlikely(!qp->valid)) { - rxe_drain_req_pkts(qp, true); + if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) goto exit; - } - - if (unlikely(qp->req.state == QP_STATE_ERROR)) { - rxe_drain_req_pkts(qp, true); - goto exit; - } if (unlikely(qp->req.state == QP_STATE_RESET)) { qp->req.wqe_index = consumer_index(qp->sq.queue); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 4240866a5331..01f926fd9029 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -1210,7 +1210,7 @@ 
static enum resp_states do_class_d1e_error(struct rxe_qp *qp) } } -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) +static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) { struct sk_buff *skb; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0b362f49a10a..afbf701dc9a7 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -813,6 +813,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr, (queue_count(qp->sq.queue) > 1); rxe_run_task(&qp->req.task, must_sched); + if (unlikely(qp->req.state == QP_STATE_ERROR)) + rxe_run_task(&qp->comp.task, 1); return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 0c2dbe45c729..1019f5e7dbdd 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -35,6 +35,7 @@ #define RXE_VERBS_H #include +#include #include #include "rxe_pool.h" #include "rxe_task.h" @@ -281,6 +282,8 @@ struct rxe_qp { struct timer_list rnr_nak_timer; spinlock_t state_lock; /* guard requester and completer */ + + struct execute_work cleanup_work; }; enum rxe_mem_state { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6cd61638b441..c97384c914a4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1203,10 +1203,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { + rtnl_lock(); if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); - if (ipoib_ib_dev_open(dev) != 0) + + result = ipoib_ib_dev_open(dev); + rtnl_unlock(); + if (result) return; + if (netif_queue_stopped(dev)) netif_start_queue(dev); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index dcc77014018d..a009e943362a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -776,6 +776,22 @@ static void path_rec_completion(int status, spin_lock_irqsave(&priv->lock, flags); if (!IS_ERR_OR_NULL(ah)) { + /* + * pathrec.dgid is used as the database key from the LLADDR, + * it must remain unchanged even if the SA returns a different + * GID to use in the AH. + */ + if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid))) { + ipoib_dbg( + priv, + "%s got PathRec for gid %pI6 while asked for %pI6\n", + dev->name, pathrec->dgid.raw, + path->pathrec.dgid.raw); + memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid)); + } + path->pathrec = *pathrec; old_ah = path->ah; @@ -903,8 +919,8 @@ static int path_rec_start(struct net_device *dev, return 0; } -static void neigh_add_path(struct sk_buff *skb, u8 *daddr, - struct net_device *dev) +static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr, + struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); struct rdma_netdev *rn = netdev_priv(dev); @@ -918,7 +934,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); - return; + return NULL; + } + + /* To avoid race condition, make sure that the + * neigh will be added only once. 
+ */ + if (unlikely(!list_empty(&neigh->list))) { + spin_unlock_irqrestore(&priv->lock, flags); + return neigh; } path = __path_find(dev, daddr + 4); @@ -957,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, path->ah->last_send = rn->send(dev, skb, path->ah->ah, IPOIB_QPN(daddr)); ipoib_neigh_put(neigh); - return; + return NULL; } } else { neigh->ah = NULL; @@ -974,7 +998,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ipoib_neigh_put(neigh); - return; + return NULL; err_path: ipoib_neigh_free(neigh); @@ -984,6 +1008,8 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ipoib_neigh_put(neigh); + + return NULL; } static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, @@ -1092,8 +1118,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) case htons(ETH_P_TIPC): neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (unlikely(!neigh)) { - neigh_add_path(skb, phdr->hwaddr, dev); - return NETDEV_TX_OK; + neigh = neigh_add_path(skb, phdr->hwaddr, dev); + if (likely(!neigh)) + return NETDEV_TX_OK; } break; case htons(ETH_P_ARP): diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 93e149efc1f5..9b3f47ae2016 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) spin_lock_irqsave(&priv->lock, flags); if (!neigh) { neigh = ipoib_neigh_alloc(daddr, dev); - if (neigh) { + /* Make sure that the neigh will be added only + * once to mcast list. + */ + if (neigh && list_empty(&neigh->list)) { kref_get(&mcast->ah->ref); neigh->ah = mcast->ah; list_add_tail(&neigh->list, &mcast->neigh_list); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ceabdb85df8b..e770c17cbca9 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + ib_drain_qp(isert_conn->qp); list_del_init(&isert_conn->node); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); @@ -2123,6 +2124,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, u32 rkey, offset; int ret; + if (cmd->ctx_init_done) + goto rdma_ctx_post; + if (dir == DMA_FROM_DEVICE) { addr = cmd->write_va; rkey = cmd->write_stag; @@ -2150,11 +2154,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, se_cmd->t_data_sg, se_cmd->t_data_nents, offset, addr, rkey, dir); } + if (ret < 0) { isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); return ret; } + cmd->ctx_init_done = true; + +rdma_ctx_post: ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); if (ret < 0) isert_err("Cmd: %p failed to post RDMA res\n", cmd); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index d6fd248320ae..3b296bac4f60 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -126,6 +126,7 @@ struct isert_cmd { struct rdma_rw_ctx rw; struct work_struct comp_work; struct scatterlist sg; + bool ctx_init_done; }; static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c 
b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c index afa938bd26d6..a72278e9cd27 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter) rcu_assign_pointer(adapter->mactbl, NULL); synchronize_rcu(); opa_vnic_free_mac_tbl(mactbl); + adapter->info.vport.mac_tbl_digest = 0; mutex_unlock(&adapter->mactbl_lock); } diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c index c2733964379c..9655cc3aa3a0 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c @@ -348,7 +348,7 @@ void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter, void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, struct opa_veswport_iface_macs *macs) { - u16 start_idx, tot_macs, num_macs, idx = 0, count = 0; + u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0; struct netdev_hw_addr *ha; start_idx = be16_to_cpu(macs->start_idx); @@ -359,8 +359,10 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, /* Do not include EM specified MAC address */ if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr, - ARRAY_SIZE(adapter->info.vport.base_mac_addr))) + ARRAY_SIZE(adapter->info.vport.base_mac_addr))) { + em_macs++; continue; + } if (start_idx > idx++) continue; @@ -383,7 +385,7 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, } tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) + - netdev_uc_count(adapter->netdev); + netdev_uc_count(adapter->netdev) - em_macs; macs->tot_macs_in_lst = cpu_to_be16(tot_macs); macs->num_macs_in_msg = cpu_to_be16(count); macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index fa5ccdb3bb2a..60d7b493ed2d 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status, static int srp_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; - int ret; + int ret = -ENODEV; ch->path.numb_path = 1; init_completion(&ch->done); + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before srp_path_rec_completion() is called. 
+ */ + if (!scsi_host_get(target->scsi_host)) + goto out; + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, target->srp_host->srp_dev->dev, target->srp_host->port, @@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch) GFP_KERNEL, srp_path_rec_completion, ch, &ch->path_query); - if (ch->path_query_id < 0) - return ch->path_query_id; + ret = ch->path_query_id; + if (ret < 0) + goto put; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) - return ret; + goto put; - if (ch->status < 0) + ret = ch->status; + if (ret < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed\n"); - return ch->status; +put: + scsi_host_put(target->scsi_host); + +out: + return ret; } static int srp_send_req(struct srp_rdma_ch *ch, bool multich) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 9e8e9220f816..ee578fa713c2 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1000,8 +1000,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) return -ENOMEM; attr->qp_state = IB_QPS_INIT; - attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE; + attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; attr->port_num = ch->sport->port; attr->pkey_index = 0; @@ -1992,7 +1991,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, goto destroy_ib; } - guid = (__be16 *)&param->primary_path->sgid.global.interface_id; + guid = (__be16 *)&param->primary_path->dgid.global.interface_id; snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); @@ -2777,7 +2776,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) { const char *p; unsigned len, count, leading_zero_bytes; - int ret, rc; + int ret; p = name; if (strncasecmp(p, "0x", 2) == 0) @@ -2789,10 +2788,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) count = min(len / 2, 16U); leading_zero_bytes = 16 - count; memset(i_port_id, 0, leading_zero_bytes); - rc = hex2bin(i_port_id + leading_zero_bytes, p, count); - if (rc < 0) - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); - ret = 0; + ret = hex2bin(i_port_id + leading_zero_bytes, p, count); + if (ret < 0) + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret); out: return ret; } diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index ff8037798779..724715e4f8bc 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -184,6 +184,19 @@ config INPUT_APMPOWER To compile this driver as a module, choose M here: the module will be called apm-power. 
+config INPUT_KEYRESET + bool "Reset key" + depends on INPUT + select INPUT_KEYCOMBO + ---help--- + Say Y here if you want to reboot when some keys are pressed; + +config INPUT_KEYCOMBO + bool "Key combo" + depends on INPUT + ---help--- + Say Y here if you want to take action when some keys are pressed; + comment "Input Device Drivers" source "drivers/input/keyboard/Kconfig" diff --git a/drivers/input/Makefile b/drivers/input/Makefile index 40de6a7be641..f0351af763bd 100644 --- a/drivers/input/Makefile +++ b/drivers/input/Makefile @@ -27,5 +27,7 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/ obj-$(CONFIG_INPUT_MISC) += misc/ obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o +obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o +obj-$(CONFIG_INPUT_KEYCOMBO) += keycombo.o obj-$(CONFIG_RMI4_CORE) += rmi4/ diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d86e59515b9c..d88d3e0f59fb 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -229,6 +229,7 @@ static const struct xpad_device { { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, @@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = { 0x00, 0x00, 0x00, 0x80, 0x00 }; +/* + * This packet is required for some of the PDP pads to start + * sending input reports. One of those pads is (0x0e6f:0x02ab). + */ +static const u8 xboxone_pdp_init1[] = { + 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 +}; + +/* + * This packet is required for some of the PDP pads to start + * sending input reports. One of those pads is (0x0e6f:0x02ab). + */ +static const u8 xboxone_pdp_init2[] = { + 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 +}; + /* * A specific rumble packet is required for some PowerA pads to start * sending input reports. One of those pads is (0x24c6:0x543a). @@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c index f6e643b589b6..c877e56a9bd5 100644 --- a/drivers/input/keyboard/goldfish_events.c +++ b/drivers/input/keyboard/goldfish_events.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,8 @@ #include #include +#define GOLDFISH_MAX_FINGERS 5 + enum { REG_READ = 0x00, REG_SET_PAGE = 0x00, @@ -52,7 +55,21 @@ static irqreturn_t events_interrupt(int irq, void *dev_id) value = __raw_readl(edev->addr + REG_READ); input_event(edev->input, type, code, value); - input_sync(edev->input); + // Send an extra (EV_SYN, SYN_REPORT, 0x0) event + // if a key was pressed. Some keyboard device + // drivers may only send the EV_KEY event and + // not EV_SYN. 
+ // Note that sending an extra SYN_REPORT is not + // necessary nor correct protocol with other + // devices such as touchscreens, which will send + // their own SYN_REPORT's when sufficient event + // information has been collected (e.g., for + // touchscreens, when pressure and X/Y coordinates + // have been received). Hence, we will only send + // this extra SYN_REPORT if type == EV_KEY. + if (type == EV_KEY) { + input_sync(edev->input); + } return IRQ_HANDLED; } @@ -154,6 +171,15 @@ static int events_probe(struct platform_device *pdev) input_dev->name = edev->name; input_dev->id.bustype = BUS_HOST; + // Set the Goldfish Device to be multi-touch. + // In the Ranchu kernel, there is multi-touch-specific + // code for handling ABS_MT_SLOT events. + // See drivers/input/input.c:input_handle_abs_event. + // If we do not issue input_mt_init_slots, + // the kernel will filter out needed ABS_MT_SLOT + // events when we touch the screen in more than one place, + // preventing multi-touch with more than one finger from working. + input_mt_init_slots(input_dev, GOLDFISH_MAX_FINGERS, 0); events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX); events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 1f316d66e6f7..41614c185918 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev) { struct matrix_keypad *keypad = input_get_drvdata(dev); + spin_lock_irq(&keypad->lock); keypad->stopped = true; - mb(); + spin_unlock_irq(&keypad->lock); + flush_work(&keypad->work.work); /* * matrix_keypad_scan() will leave IRQs enabled; diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c new file mode 100644 index 000000000000..2fba451b91d5 --- /dev/null +++ b/drivers/input/keycombo.c @@ -0,0 +1,261 @@ +/* drivers/input/keycombo.c + * + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct keycombo_state { + struct input_handler input_handler; + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long upbit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; + spinlock_t lock; + struct workqueue_struct *wq; + int key_down_target; + int key_down; + int key_up; + struct delayed_work key_down_work; + int delay; + struct work_struct key_up_work; + void (*key_up_fn)(void *); + void (*key_down_fn)(void *); + void *priv; + int key_is_down; + struct wakeup_source combo_held_wake_source; + struct wakeup_source combo_up_wake_source; +}; + +static void do_key_down(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct keycombo_state *state = container_of(dwork, + struct keycombo_state, key_down_work); + if (state->key_down_fn) + state->key_down_fn(state->priv); +} + +static void do_key_up(struct work_struct *work) +{ + struct keycombo_state *state = container_of(work, struct keycombo_state, + key_up_work); + if (state->key_up_fn) + state->key_up_fn(state->priv); + __pm_relax(&state->combo_up_wake_source); +} + +static void keycombo_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + unsigned long flags; + struct keycombo_state *state = handle->private; + + if (type != EV_KEY) + return; + + if (code >= KEY_MAX) + return; + + if (!test_bit(code, state->keybit)) + return; + + spin_lock_irqsave(&state->lock, flags); + if (!test_bit(code, state->key) == !value) + goto done; + __change_bit(code, state->key); + if (test_bit(code, state->upbit)) { + if (value) + state->key_up++; + else + state->key_up--; + } else { + if (value) + state->key_down++; + else + state->key_down--; + } + if (state->key_down == state->key_down_target && state->key_up == 0) { + __pm_stay_awake(&state->combo_held_wake_source); + state->key_is_down = 1; + if (queue_delayed_work(state->wq, &state->key_down_work, + state->delay)) + pr_debug("Key down work already queued!"); + } else if (state->key_is_down) { + if (!cancel_delayed_work(&state->key_down_work)) { + __pm_stay_awake(&state->combo_up_wake_source); + queue_work(state->wq, &state->key_up_work); + } + __pm_relax(&state->combo_held_wake_source); + state->key_is_down = 0; + } +done: + spin_unlock_irqrestore(&state->lock, flags); +} + +static int keycombo_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + int i; + int ret; + struct input_handle *handle; + struct keycombo_state *state = + container_of(handler, struct keycombo_state, input_handler); + for (i = 0; i < KEY_MAX; i++) { + if (test_bit(i, state->keybit) && test_bit(i, dev->keybit)) + break; + } + if (i == KEY_MAX) + return -ENODEV; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = KEYCOMBO_NAME; + handle->private = state; + + ret = input_register_handle(handle); + if (ret) + goto err_input_register_handle; + + ret = input_open_device(handle); + if (ret) + goto err_input_open_device; + + return 0; + +err_input_open_device: + input_unregister_handle(handle); +err_input_register_handle: + kfree(handle); + return ret; +} + +static void keycombo_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id keycombo_ids[] = { + { + .flags = 
INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_KEY) }, + }, + { }, +}; +MODULE_DEVICE_TABLE(input, keycombo_ids); + +static int keycombo_probe(struct platform_device *pdev) +{ + int ret; + int key, *keyp; + struct keycombo_state *state; + struct keycombo_platform_data *pdata = pdev->dev.platform_data; + + if (!pdata) + return -EINVAL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + spin_lock_init(&state->lock); + keyp = pdata->keys_down; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + state->key_down_target++; + __set_bit(key, state->keybit); + } + if (pdata->keys_up) { + keyp = pdata->keys_up; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + __set_bit(key, state->keybit); + __set_bit(key, state->upbit); + } + } + + state->wq = alloc_ordered_workqueue("keycombo", 0); + if (!state->wq) + return -ENOMEM; + + state->priv = pdata->priv; + + if (pdata->key_down_fn) + state->key_down_fn = pdata->key_down_fn; + INIT_DELAYED_WORK(&state->key_down_work, do_key_down); + + if (pdata->key_up_fn) + state->key_up_fn = pdata->key_up_fn; + INIT_WORK(&state->key_up_work, do_key_up); + + wakeup_source_init(&state->combo_held_wake_source, "key combo"); + wakeup_source_init(&state->combo_up_wake_source, "key combo up"); + state->delay = msecs_to_jiffies(pdata->key_down_delay); + + state->input_handler.event = keycombo_event; + state->input_handler.connect = keycombo_connect; + state->input_handler.disconnect = keycombo_disconnect; + state->input_handler.name = KEYCOMBO_NAME; + state->input_handler.id_table = keycombo_ids; + ret = input_register_handler(&state->input_handler); + if (ret) { + kfree(state); + return ret; + } + platform_set_drvdata(pdev, state); + return 0; +} + +int keycombo_remove(struct platform_device *pdev) +{ + struct keycombo_state *state = platform_get_drvdata(pdev); + input_unregister_handler(&state->input_handler); + destroy_workqueue(state->wq); + kfree(state); + return 0; +} + + +struct platform_driver keycombo_driver = { + .driver.name = KEYCOMBO_NAME, + .probe = keycombo_probe, + .remove = keycombo_remove, +}; + +static int __init keycombo_init(void) +{ + return platform_driver_register(&keycombo_driver); +} + +static void __exit keycombo_exit(void) +{ + return platform_driver_unregister(&keycombo_driver); +} + +module_init(keycombo_init); +module_exit(keycombo_exit); diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c new file mode 100644 index 000000000000..7e5222aec7c1 --- /dev/null +++ b/drivers/input/keyreset.c @@ -0,0 +1,144 @@ +/* drivers/input/keyreset.c + * + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct keyreset_state { + int restart_requested; + int (*reset_fn)(void); + struct platform_device *pdev_child; + struct work_struct restart_work; +}; + +static void do_restart(struct work_struct *unused) +{ + orderly_reboot(); +} + +static void do_reset_fn(void *priv) +{ + struct keyreset_state *state = priv; + if (state->restart_requested) + panic("keyboard reset failed, %d", state->restart_requested); + if (state->reset_fn) { + state->restart_requested = state->reset_fn(); + } else { + pr_info("keyboard reset\n"); + schedule_work(&state->restart_work); + state->restart_requested = 1; + } +} + +static int keyreset_probe(struct platform_device *pdev) +{ + int ret = -ENOMEM; + struct keycombo_platform_data *pdata_child; + struct keyreset_platform_data *pdata = pdev->dev.platform_data; + int up_size = 0, down_size = 0, size; + int key, *keyp; + struct keyreset_state *state; + + if (!pdata) + return -EINVAL; + state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->pdev_child = platform_device_alloc(KEYCOMBO_NAME, + PLATFORM_DEVID_AUTO); + if (!state->pdev_child) + return -ENOMEM; + state->pdev_child->dev.parent = &pdev->dev; + INIT_WORK(&state->restart_work, do_restart); + + keyp = pdata->keys_down; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + down_size++; + } + if (pdata->keys_up) { + keyp = pdata->keys_up; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + up_size++; + } + } + size = sizeof(struct keycombo_platform_data) + + sizeof(int) * (down_size + 1); + pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!pdata_child) + goto error; + memcpy(pdata_child->keys_down, pdata->keys_down, + sizeof(int) * down_size); + if (up_size > 0) { + pdata_child->keys_up = devm_kzalloc(&pdev->dev, + sizeof(int) * (up_size + 1), GFP_KERNEL); + if (!pdata_child->keys_up) + goto error; + memcpy(pdata_child->keys_up, pdata->keys_up, + sizeof(int) * up_size); + if (!pdata_child->keys_up) + goto error; + } + state->reset_fn = pdata->reset_fn; + pdata_child->key_down_fn = do_reset_fn; + pdata_child->priv = state; + pdata_child->key_down_delay = pdata->key_down_delay; + ret = platform_device_add_data(state->pdev_child, pdata_child, size); + if (ret) + goto error; + platform_set_drvdata(pdev, state); + return platform_device_add(state->pdev_child); +error: + platform_device_put(state->pdev_child); + return ret; +} + +int keyreset_remove(struct platform_device *pdev) +{ + struct keyreset_state *state = platform_get_drvdata(pdev); + platform_device_put(state->pdev_child); + return 0; +} + + +struct platform_driver keyreset_driver = { + .driver.name = KEYRESET_NAME, + .probe = keyreset_probe, + .remove = keyreset_remove, +}; + +static int __init keyreset_init(void) +{ + return platform_driver_register(&keyreset_driver); +} + +static void __exit keyreset_exit(void) +{ + return platform_driver_unregister(&keyreset_driver); +} + +module_init(keyreset_init); +module_exit(keyreset_exit); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 9f082a388388..4b269b332636 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -367,6 +367,17 @@ config INPUT_ATI_REMOTE2 To compile this driver as a module, choose M here: the module will be called ati_remote2.
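The INPUT_KEYCHORD option added below exposes /dev/keychord: a client configures the driver by writing one or more packed input_keychord records (version, id, count, then the keycodes) and afterwards read()s or poll()s the device to learn which chord was pressed. A minimal userspace sketch of that flow follows; the flat all-u16 record layout and a KEYCHORD_VERSION of 1 are assumptions inferred from how keychord_write() and keychord_read() parse the buffers, and the keycodes and id are arbitrary examples, not values required by the driver.

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <linux/input.h>

  int main(void)
  {
  	/*
  	 * One chord as a flat array of 16-bit words, assuming the record
  	 * layout parsed by keychord_write(): version, id, count, keycodes.
  	 */
  	uint16_t cfg[] = {
  		1,		/* version (assumed KEYCHORD_VERSION) */
  		42,		/* id returned by read() when the chord fires */
  		2,		/* number of keycodes that follow */
  		KEY_POWER, KEY_VOLUMEDOWN,
  	};
  	uint16_t id;
  	int fd = open("/dev/keychord", O_RDWR);

  	if (fd < 0)
  		return 1;
  	if (write(fd, cfg, sizeof(cfg)) != (ssize_t)sizeof(cfg))
  		return 1;
  	/* blocks until one of the configured combinations is pressed */
  	if (read(fd, &id, sizeof(id)) == (ssize_t)sizeof(id))
  		printf("keychord %u pressed\n", (unsigned int)id);
  	close(fd);
  	return 0;
  }

Writing a new configuration replaces any previously written chord list, so a client normally writes its complete set of chords once and then loops on read().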
+config INPUT_KEYCHORD + tristate "Key chord input driver support" + help + Say Y here if you want to enable the key chord driver + accessible at /dev/keychord. This driver can be used + for receiving notifications when client specified key + combinations are pressed. + + To compile this driver as a module, choose M here: the + module will be called keychord. + config INPUT_KEYSPAN_REMOTE tristate "Keyspan DMR USB remote control" depends on USB_ARCH_HAS_HCD @@ -535,6 +546,11 @@ config INPUT_SGI_BTNS To compile this driver as a module, choose M here: the module will be called sgi_btns. +config INPUT_GPIO + tristate "GPIO driver support" + help + Say Y here if you want to support gpio based keys, wheels etc... + config HP_SDC_RTC tristate "HP SDC Real Time Clock" depends on (GSC || HP300) && SERIO diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 4b6118d313fe..2ecb6869e4b6 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -38,10 +38,12 @@ obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o +obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o +obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c new file mode 100644 index 000000000000..0acf4a576f53 --- /dev/null +++ b/drivers/input/misc/gpio_axis.c @@ -0,0 +1,192 @@ +/* drivers/input/misc/gpio_axis.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +struct gpio_axis_state { + struct gpio_event_input_devs *input_devs; + struct gpio_event_axis_info *info; + uint32_t pos; +}; + +uint16_t gpio_axis_4bit_gray_map_table[] = { + [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */ + [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */ + [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */ + [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */ + [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */ + [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */ + [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */ + [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */ +}; +uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_4bit_gray_map_table[in]; +} + +uint16_t gpio_axis_5bit_singletrack_map_table[] = { + [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */ + [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */ + [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */ + [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */ + [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */ + [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */ + [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */ + [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */ + [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */ + [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */ +}; +uint16_t gpio_axis_5bit_singletrack_map( + struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_5bit_singletrack_map_table[in]; +} + +static void gpio_event_update_axis(struct gpio_axis_state *as, int report) +{ + struct gpio_event_axis_info *ai = as->info; + int i; + int change; + uint16_t state = 0; + uint16_t pos; + uint16_t old_pos = as->pos; + for (i = ai->count - 1; i >= 0; i--) + state = (state << 1) | gpio_get_value(ai->gpio[i]); + pos = ai->map(ai, state); + if (ai->flags & GPIOEAF_PRINT_RAW) + pr_info("axis %d-%d raw %x, pos %d -> %d\n", + ai->type, ai->code, state, old_pos, pos); + if (report && pos != old_pos) { + if (ai->type == EV_REL) { + change = (ai->decoded_size + pos - old_pos) % + ai->decoded_size; + if (change > ai->decoded_size / 2) + change -= ai->decoded_size; + if (change == ai->decoded_size / 2) { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d unknown direction, " + "pos %d -> %d\n", ai->type, + ai->code, old_pos, pos); + change = 0; /* no closest direction */ + } + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d change %d\n", + ai->type, ai->code, change); + input_report_rel(as->input_devs->dev[ai->dev], + ai->code, change); + } else { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d now %d\n", + ai->type, ai->code, pos); + input_event(as->input_devs->dev[ai->dev], + ai->type, ai->code, pos); + } + input_sync(as->input_devs->dev[ai->dev]); + } + as->pos = pos; +} + +static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id) +{ + struct gpio_axis_state *as = dev_id; + gpio_event_update_axis(as, 1); + return IRQ_HANDLED; +} + +int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + int irq; + struct gpio_event_axis_info *ai; + struct gpio_axis_state *as; + + ai = container_of(info, struct gpio_event_axis_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND) { + for (i = 0; i < ai->count; i++) + disable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + if 
(func == GPIO_EVENT_FUNC_RESUME) { + for (i = 0; i < ai->count; i++) + enable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + *data = as = kmalloc(sizeof(*as), GFP_KERNEL); + if (as == NULL) { + ret = -ENOMEM; + goto err_alloc_axis_state_failed; + } + as->input_devs = input_devs; + as->info = ai; + if (ai->dev >= input_devs->count) { + pr_err("gpio_event_axis: bad device index %d >= %d " + "for %d:%d\n", ai->dev, input_devs->count, + ai->type, ai->code); + ret = -EINVAL; + goto err_bad_device_index; + } + + input_set_capability(input_devs->dev[ai->dev], + ai->type, ai->code); + if (ai->type == EV_ABS) { + input_set_abs_params(input_devs->dev[ai->dev], ai->code, + 0, ai->decoded_size - 1, 0, 0); + } + for (i = 0; i < ai->count; i++) { + ret = gpio_request(ai->gpio[i], "gpio_event_axis"); + if (ret < 0) + goto err_request_gpio_failed; + ret = gpio_direction_input(ai->gpio[i]); + if (ret < 0) + goto err_gpio_direction_input_failed; + ret = irq = gpio_to_irq(ai->gpio[i]); + if (ret < 0) + goto err_get_irq_num_failed; + ret = request_irq(irq, gpio_axis_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "gpio_event_axis", as); + if (ret < 0) + goto err_request_irq_failed; + } + gpio_event_update_axis(as, 0); + return 0; + } + + ret = 0; + as = *data; + for (i = ai->count - 1; i >= 0; i--) { + free_irq(gpio_to_irq(ai->gpio[i]), as); +err_request_irq_failed: +err_get_irq_num_failed: +err_gpio_direction_input_failed: + gpio_free(ai->gpio[i]); +err_request_gpio_failed: + ; + } +err_bad_device_index: + kfree(as); + *data = NULL; +err_alloc_axis_state_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c new file mode 100644 index 000000000000..90f07eba3ce9 --- /dev/null +++ b/drivers/input/misc/gpio_event.c @@ -0,0 +1,228 @@ +/* drivers/input/misc/gpio_event.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +struct gpio_event { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_platform_data *info; + void *state[0]; +}; + +static int gpio_input_event( + struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + int i; + int devnr; + int ret = 0; + int tmp_ret; + struct gpio_event_info **ii; + struct gpio_event *ip = input_get_drvdata(dev); + + for (devnr = 0; devnr < ip->input_devs->count; devnr++) + if (ip->input_devs->dev[devnr] == dev) + break; + if (devnr == ip->input_devs->count) { + pr_err("gpio_input_event: unknown device %p\n", dev); + return -EIO; + } + + for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) { + if ((*ii)->event) { + tmp_ret = (*ii)->event(ip->input_devs, *ii, + &ip->state[i], + devnr, type, code, value); + if (tmp_ret) + ret = tmp_ret; + } + } + return ret; +} + +static int gpio_event_call_all_func(struct gpio_event *ip, int func) +{ + int i; + int ret; + struct gpio_event_info **ii; + + if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) { + ii = ip->info->info; + for (i = 0; i < ip->info->info_count; i++, ii++) { + if ((*ii)->func == NULL) { + ret = -ENODEV; + pr_err("gpio_event_probe: Incomplete pdata, " + "no function\n"); + goto err_no_func; + } + if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend) + continue; + ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i], + func); + if (ret) { + pr_err("gpio_event_probe: function failed\n"); + goto err_func_failed; + } + } + return 0; + } + + ret = 0; + i = ip->info->info_count; + ii = ip->info->info + i; + while (i > 0) { + i--; + ii--; + if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend) + continue; + (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1); +err_func_failed: +err_no_func: + ; + } + return ret; +} + +static void __maybe_unused gpio_event_suspend(struct gpio_event *ip) +{ + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND); + if (ip->info->power) + ip->info->power(ip->info, 0); +} + +static void __maybe_unused gpio_event_resume(struct gpio_event *ip) +{ + if (ip->info->power) + ip->info->power(ip->info, 1); + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME); +} + +static int gpio_event_probe(struct platform_device *pdev) +{ + int err; + struct gpio_event *ip; + struct gpio_event_platform_data *event_info; + int dev_count = 1; + int i; + int registered = 0; + + event_info = pdev->dev.platform_data; + if (event_info == NULL) { + pr_err("gpio_event_probe: No pdata\n"); + return -ENODEV; + } + if ((!event_info->name && !event_info->names[0]) || + !event_info->info || !event_info->info_count) { + pr_err("gpio_event_probe: Incomplete pdata\n"); + return -ENODEV; + } + if (!event_info->name) + while (event_info->names[dev_count]) + dev_count++; + ip = kzalloc(sizeof(*ip) + + sizeof(ip->state[0]) * event_info->info_count + + sizeof(*ip->input_devs) + + sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL); + if (ip == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + ip->input_devs = (void*)&ip->state[event_info->info_count]; + platform_set_drvdata(pdev, ip); + + for (i = 0; i < dev_count; i++) { + struct input_dev *input_dev = input_allocate_device(); + if (input_dev == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: " + "Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + input_set_drvdata(input_dev, ip); + input_dev->name = 
event_info->name ? + event_info->name : event_info->names[i]; + input_dev->event = gpio_input_event; + ip->input_devs->dev[i] = input_dev; + } + ip->input_devs->count = dev_count; + ip->info = event_info; + if (event_info->power) + ip->info->power(ip->info, 1); + + err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT); + if (err) + goto err_call_all_func_failed; + + for (i = 0; i < dev_count; i++) { + err = input_register_device(ip->input_devs->dev[i]); + if (err) { + pr_err("gpio_event_probe: Unable to register %s " + "input device\n", ip->input_devs->dev[i]->name); + goto err_input_register_device_failed; + } + registered++; + } + + return 0; + +err_input_register_device_failed: + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); +err_call_all_func_failed: + if (event_info->power) + ip->info->power(ip->info, 0); + for (i = 0; i < registered; i++) + input_unregister_device(ip->input_devs->dev[i]); + for (i = dev_count - 1; i >= registered; i--) { + input_free_device(ip->input_devs->dev[i]); +err_input_dev_alloc_failed: + ; + } + kfree(ip); +err_kp_alloc_failed: + return err; +} + +static int gpio_event_remove(struct platform_device *pdev) +{ + struct gpio_event *ip = platform_get_drvdata(pdev); + int i; + + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); + if (ip->info->power) + ip->info->power(ip->info, 0); + for (i = 0; i < ip->input_devs->count; i++) + input_unregister_device(ip->input_devs->dev[i]); + kfree(ip); + return 0; +} + +static struct platform_driver gpio_event_driver = { + .probe = gpio_event_probe, + .remove = gpio_event_remove, + .driver = { + .name = GPIO_EVENT_DEV_NAME, + }, +}; + +module_platform_driver(gpio_event_driver); + +MODULE_DESCRIPTION("GPIO Event Driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c new file mode 100644 index 000000000000..5875d739c550 --- /dev/null +++ b/drivers/input/misc/gpio_input.c @@ -0,0 +1,390 @@ +/* drivers/input/misc/gpio_input.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */ + DEBOUNCE_PRESSED = BIT(1), + DEBOUNCE_NOTPRESSED = BIT(2), + DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */ + DEBOUNCE_POLL = BIT(4), /* Stable polling state */ + + DEBOUNCE_UNKNOWN = + DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED, +}; + +struct gpio_key_state { + struct gpio_input_state *ds; + uint8_t debounce; +}; + +struct gpio_input_state { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_input_info *info; + struct hrtimer timer; + int use_irq; + int debounce_count; + spinlock_t irq_lock; + struct wakeup_source *ws; + struct gpio_key_state key_state[0]; +}; + +static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer) +{ + int i; + int pressed; + struct gpio_input_state *ds = + container_of(timer, struct gpio_input_state, timer); + unsigned gpio_flags = ds->info->flags; + unsigned npolarity; + int nkeys = ds->info->keymap_size; + const struct gpio_event_direct_entry *key_entry; + struct gpio_key_state *key_state; + unsigned long irqflags; + uint8_t debounce; + bool sync_needed; + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); +#endif + key_entry = ds->info->keymap; + key_state = ds->key_state; + sync_needed = false; + spin_lock_irqsave(&ds->irq_lock, irqflags); + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + debounce = key_state->debounce; + if (debounce & DEBOUNCE_WAIT_IRQ) + continue; + if (key_state->debounce & DEBOUNCE_UNSTABLE) { + debounce = key_state->debounce = DEBOUNCE_UNKNOWN; + enable_irq(gpio_to_irq(key_entry->gpio)); + if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) continue debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH); + pressed = gpio_get_value(key_entry->gpio) ^ npolarity; + if (debounce & DEBOUNCE_POLL) { + if (pressed == !(debounce & DEBOUNCE_PRESSED)) { + ds->debounce_count++; + key_state->debounce = DEBOUNCE_UNKNOWN; + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-" + "%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + continue; + } + if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 1\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_PRESSED; + continue; + } + if (!pressed && (debounce & DEBOUNCE_PRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 0\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_NOTPRESSED; + continue; + } + /* key is stable */ + ds->debounce_count--; + if (ds->use_irq) + key_state->debounce |= DEBOUNCE_WAIT_IRQ; + else + key_state->debounce |= DEBOUNCE_POLL; + if (gpio_flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) " + "changed to %d\n", ds->info->type, + key_entry->code, i, key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + sync_needed = true; + 
} + if (sync_needed) { + for (i = 0; i < ds->input_devs->count; i++) + input_sync(ds->input_devs->dev[i]); + } + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); + } +#endif + + if (ds->debounce_count) + hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL); + else if (!ds->use_irq) + hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL); + else + __pm_relax(ds->ws); + + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id) +{ + struct gpio_key_state *ks = dev_id; + struct gpio_input_state *ds = ks->ds; + int keymap_index = ks - ds->key_state; + const struct gpio_event_direct_entry *key_entry; + unsigned long irqflags; + int pressed; + + if (!ds->use_irq) + return IRQ_HANDLED; + + key_entry = &ds->info->keymap[keymap_index]; + + if (ds->info->debounce_time) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ks->debounce & DEBOUNCE_WAIT_IRQ) { + ks->debounce = DEBOUNCE_UNKNOWN; + if (ds->debounce_count++ == 0) { + __pm_stay_awake(ds->ws); + hrtimer_start( + &ds->timer, ds->info->debounce_time, + HRTIMER_MODE_REL); + } + if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_event_input_irq_handler: " + "key %x-%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + keymap_index, key_entry->gpio); + } else { + disable_irq_nosync(irq); + ks->debounce = DEBOUNCE_UNSTABLE; + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + } else { + pressed = gpio_get_value(key_entry->gpio) ^ + !(ds->info->flags & GPIOEDF_ACTIVE_HIGH); + if (ds->info->flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_event_input_irq_handler: key %x-%x, %d " + "(%d) changed to %d\n", + ds->info->type, key_entry->code, keymap_index, + key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + input_sync(ds->input_devs->dev[key_entry->dev]); + } + return IRQ_HANDLED; +} + +static int gpio_event_input_request_irqs(struct gpio_input_state *ds) +{ + int i; + int err; + unsigned int irq; + unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + + for (i = 0; i < ds->info->keymap_size; i++) { + err = irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_event_input_irq_handler, + req_flags, "gpio_keys", &ds->key_state[i]); + if (err) { + pr_err("gpio_event_input_request_irqs: request_irq " + "failed for input %d, irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_request_irq_failed; + } + if (ds->info->info.no_suspend) { + err = enable_irq_wake(irq); + if (err) { + pr_err("gpio_event_input_request_irqs: " + "enable_irq_wake failed for input %d, " + "irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_enable_irq_wake_failed; + } + } + } + return 0; + + for (i = ds->info->keymap_size - 1; i >= 0; i--) { + irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (ds->info->info.no_suspend) + disable_irq_wake(irq); +err_enable_irq_wake_failed: + free_irq(irq, &ds->key_state[i]); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_input_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + unsigned long irqflags; + struct gpio_event_input_info 
*di; + struct gpio_input_state *ds = *data; + char *wlname; + + di = container_of(info, struct gpio_event_input_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND) { + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + disable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_cancel(&ds->timer); + return 0; + } + if (func == GPIO_EVENT_FUNC_RESUME) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + enable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (ktime_to_ns(di->poll_time) <= 0) + di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC); + + *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) * + di->keymap_size, GFP_KERNEL); + if (ds == NULL) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate private data\n"); + goto err_ds_alloc_failed; + } + ds->debounce_count = di->keymap_size; + ds->input_devs = input_devs; + ds->info = di; + wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s", + input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : ""); + + ds->ws = wakeup_source_register(wlname); + kfree(wlname); + if (!ds->ws) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate wakeup source\n"); + goto err_ws_failed; + } + + spin_lock_init(&ds->irq_lock); + + for (i = 0; i < di->keymap_size; i++) { + int dev = di->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_input_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + di->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], di->type, + di->keymap[i].code); + ds->key_state[i].ds = ds; + ds->key_state[i].debounce = DEBOUNCE_UNKNOWN; + } + + for (i = 0; i < di->keymap_size; i++) { + ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in"); + if (ret) { + pr_err("gpio_event_input_func: gpio_request " + "failed for %d\n", di->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_input(di->keymap[i].gpio); + if (ret) { + pr_err("gpio_event_input_func: " + "gpio_direction_input failed for %d\n", + di->keymap[i].gpio); + goto err_gpio_configure_failed; + } + } + + ret = gpio_event_input_request_irqs(ds); + + spin_lock_irqsave(&ds->irq_lock, irqflags); + ds->use_irq = ret == 0; + + pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s " + "mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + ret == 0 ? 
"interrupt" : "polling"); + + hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ds->timer.function = gpio_event_input_timer_func; + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + ret = 0; + spin_lock_irqsave(&ds->irq_lock, irqflags); + hrtimer_cancel(&ds->timer); + if (ds->use_irq) { + for (i = di->keymap_size - 1; i >= 0; i--) { + int irq = gpio_to_irq(di->keymap[i].gpio); + if (ds->info->info.no_suspend) + disable_irq_wake(irq); + free_irq(irq, &ds->key_state[i]); + } + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + for (i = di->keymap_size - 1; i >= 0; i--) { +err_gpio_configure_failed: + gpio_free(di->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + wakeup_source_unregister(ds->ws); +err_ws_failed: + kfree(ds); +err_ds_alloc_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c new file mode 100644 index 000000000000..08769dd88f56 --- /dev/null +++ b/drivers/input/misc/gpio_matrix.c @@ -0,0 +1,440 @@ +/* drivers/input/misc/gpio_matrix.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +struct gpio_kp { + struct gpio_event_input_devs *input_devs; + struct gpio_event_matrix_info *keypad_info; + struct hrtimer timer; + struct wakeup_source wake_src; + int current_output; + unsigned int use_irq:1; + unsigned int key_state_changed:1; + unsigned int last_key_state_changed:1; + unsigned int some_keys_pressed:2; + unsigned int disabled_irq:1; + unsigned long keys_pressed[0]; +}; + +static void clear_phantom_key(struct gpio_kp *kp, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int key_index = out * mi->ninputs + in; + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + __clear_bit(key_index, kp->keys_pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "not cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + } +} + +static int restore_keys_for_input(struct gpio_kp *kp, int out, int in) +{ + int rv = 0; + int key_index; + + key_index = out * kp->keypad_info->ninputs + in; + while (out < kp->keypad_info->noutputs) { + if (test_bit(key_index, kp->keys_pressed)) { + rv = 1; + clear_phantom_key(kp, out, in); + } + key_index += kp->keypad_info->ninputs; + out++; + } + return rv; +} + +static void remove_phantom_keys(struct gpio_kp *kp) +{ + int out, in, inp; + int key_index; + + if (kp->some_keys_pressed < 3) + return; + + for (out = 0; out < kp->keypad_info->noutputs; out++) { + inp = -1; + key_index = out * 
kp->keypad_info->ninputs; + for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) { + if (test_bit(key_index, kp->keys_pressed)) { + if (inp == -1) { + inp = in; + continue; + } + if (inp >= 0) { + if (!restore_keys_for_input(kp, out + 1, + inp)) + break; + clear_phantom_key(kp, out, inp); + inp = -2; + } + restore_keys_for_input(kp, out, in); + } + } + } +} + +static void report_key(struct gpio_kp *kp, int key_index, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int pressed = test_bit(key_index, kp->keys_pressed); + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (keycode == KEY_RESERVED) { + if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS) + pr_info("gpiomatrix: unmapped key, %d-%d " + "(%d-%d) changed to %d\n", + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS) + pr_info("gpiomatrix: key %x, %d-%d (%d-%d) " + "changed to %d\n", keycode, + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + input_report_key(kp->input_devs->dev[dev], keycode, pressed); + } + } +} + +static void report_sync(struct gpio_kp *kp) +{ + int i; + + for (i = 0; i < kp->input_devs->count; i++) + input_sync(kp->input_devs->dev[i]); +} + +static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer) +{ + int out, in; + int key_index; + int gpio; + struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer); + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH); + + out = kp->current_output; + if (out == mi->noutputs) { + out = 0; + kp->last_key_state_changed = kp->key_state_changed; + kp->key_state_changed = 0; + kp->some_keys_pressed = 0; + } else { + key_index = out * mi->ninputs; + for (in = 0; in < mi->ninputs; in++, key_index++) { + gpio = mi->input_gpios[in]; + if (gpio_get_value(gpio) ^ !polarity) { + if (kp->some_keys_pressed < 3) + kp->some_keys_pressed++; + kp->key_state_changed |= !__test_and_set_bit( + key_index, kp->keys_pressed); + } else + kp->key_state_changed |= __test_and_clear_bit( + key_index, kp->keys_pressed); + } + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, !polarity); + else + gpio_direction_input(gpio); + out++; + } + kp->current_output = out; + if (out < mi->noutputs) { + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, polarity); + else + gpio_direction_output(gpio, polarity); + hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) { + if (kp->key_state_changed) { + hrtimer_start(&kp->timer, mi->debounce_delay, + HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + kp->key_state_changed = kp->last_key_state_changed; + } + if (kp->key_state_changed) { + if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS) + remove_phantom_keys(kp); + key_index = 0; + for (out = 0; out < mi->noutputs; out++) + for (in = 0; in < mi->ninputs; in++, key_index++) + report_key(kp, key_index, out, in); + report_sync(kp); + } + if (!kp->use_irq || kp->some_keys_pressed) { + hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + + /* No keys are pressed, reenable 
interrupt */ + for (out = 0; out < mi->noutputs; out++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[out], polarity); + else + gpio_direction_output(mi->output_gpios[out], polarity); + } + for (in = 0; in < mi->ninputs; in++) + enable_irq(gpio_to_irq(mi->input_gpios[in])); + __pm_relax(&kp->wake_src); + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id) +{ + int i; + struct gpio_kp *kp = dev_id; + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + + if (!kp->use_irq) { + /* ignore interrupt while registering the handler */ + kp->disabled_irq = 1; + disable_irq_nosync(irq_in); + return IRQ_HANDLED; + } + + for (i = 0; i < mi->ninputs; i++) + disable_irq_nosync(gpio_to_irq(mi->input_gpios[i])); + for (i = 0; i < mi->noutputs; i++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[i], + !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH)); + else + gpio_direction_input(mi->output_gpios[i]); + } + __pm_stay_awake(&kp->wake_src); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + return IRQ_HANDLED; +} + +static int gpio_keypad_request_irqs(struct gpio_kp *kp) +{ + int i; + int err; + unsigned int irq; + unsigned long request_flags; + struct gpio_event_matrix_info *mi = kp->keypad_info; + + switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) { + default: + request_flags = IRQF_TRIGGER_FALLING; + break; + case GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_RISING; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ: + request_flags = IRQF_TRIGGER_LOW; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_HIGH; + break; + } + + for (i = 0; i < mi->ninputs; i++) { + err = irq = gpio_to_irq(mi->input_gpios[i]); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_keypad_irq_handler, request_flags, + "gpio_kp", kp); + if (err) { + pr_err("gpiomatrix: request_irq failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + goto err_request_irq_failed; + } + err = enable_irq_wake(irq); + if (err) { + pr_err("gpiomatrix: set_irq_wake failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + } + disable_irq(irq); + if (kp->disabled_irq) { + kp->disabled_irq = 0; + enable_irq(irq); + } + } + return 0; + + for (i = mi->noutputs - 1; i >= 0; i--) { + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int i; + int err; + int key_count; + struct gpio_kp *kp; + struct gpio_event_matrix_info *mi; + + mi = container_of(info, struct gpio_event_matrix_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) { + /* TODO: disable scanning */ + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (mi->keymap == NULL || + mi->input_gpios == NULL || + mi->output_gpios == NULL) { + err = -ENODEV; + pr_err("gpiomatrix: Incomplete pdata\n"); + goto err_invalid_platform_data; + } + key_count = mi->ninputs * mi->noutputs; + + *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) * + BITS_TO_LONGS(key_count), GFP_KERNEL); + if (kp == NULL) { + err = -ENOMEM; + pr_err("gpiomatrix: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + kp->input_devs = input_devs; + kp->keypad_info = mi; + 
for (i = 0; i < key_count; i++) { + unsigned short keyentry = mi->keymap[i]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + if (dev >= input_devs->count) { + pr_err("gpiomatrix: bad device index %d >= " + "%d for key code %d\n", + dev, input_devs->count, keycode); + err = -EINVAL; + goto err_bad_keymap; + } + if (keycode && keycode <= KEY_MAX) + input_set_capability(input_devs->dev[dev], + EV_KEY, keycode); + } + + for (i = 0; i < mi->noutputs; i++) { + err = gpio_request(mi->output_gpios[i], "gpio_kp_out"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "output %d\n", mi->output_gpios[i]); + goto err_request_output_gpio_failed; + } + if (gpio_cansleep(mi->output_gpios[i])) { + pr_err("gpiomatrix: unsupported output gpio %d," + " can sleep\n", mi->output_gpios[i]); + err = -EINVAL; + goto err_output_gpio_configure_failed; + } + if (mi->flags & GPIOKPF_DRIVE_INACTIVE) + err = gpio_direction_output(mi->output_gpios[i], + !(mi->flags & GPIOKPF_ACTIVE_HIGH)); + else + err = gpio_direction_input(mi->output_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_configure failed for " + "output %d\n", mi->output_gpios[i]); + goto err_output_gpio_configure_failed; + } + } + for (i = 0; i < mi->ninputs; i++) { + err = gpio_request(mi->input_gpios[i], "gpio_kp_in"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "input %d\n", mi->input_gpios[i]); + goto err_request_input_gpio_failed; + } + err = gpio_direction_input(mi->input_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_direction_input failed" + " for input %d\n", mi->input_gpios[i]); + goto err_gpio_direction_input_failed; + } + } + kp->current_output = mi->noutputs; + kp->key_state_changed = 1; + + hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + kp->timer.function = gpio_keypad_timer_func; + wakeup_source_init(&kp->wake_src, "gpio_kp"); + err = gpio_keypad_request_irqs(kp); + kp->use_irq = err == 0; + + pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for " + "%s%s in %s mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + kp->use_irq ? "interrupt" : "polling"); + + if (kp->use_irq) + __pm_stay_awake(&kp->wake_src); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + + return 0; + } + + err = 0; + kp = *data; + + if (kp->use_irq) + for (i = mi->noutputs - 1; i >= 0; i--) + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); + + hrtimer_cancel(&kp->timer); + wakeup_source_trash(&kp->wake_src); + for (i = mi->noutputs - 1; i >= 0; i--) { +err_gpio_direction_input_failed: + gpio_free(mi->input_gpios[i]); +err_request_input_gpio_failed: + ; + } + for (i = mi->noutputs - 1; i >= 0; i--) { +err_output_gpio_configure_failed: + gpio_free(mi->output_gpios[i]); +err_request_output_gpio_failed: + ; + } +err_bad_keymap: + kfree(kp); +err_kp_alloc_failed: +err_invalid_platform_data: + return err; +} diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c new file mode 100644 index 000000000000..2aac2fad0a17 --- /dev/null +++ b/drivers/input/misc/gpio_output.c @@ -0,0 +1,97 @@ +/* drivers/input/misc/gpio_output.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +int gpio_event_output_event( + struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, + void **data, unsigned int dev, unsigned int type, + unsigned int code, int value) +{ + int i; + struct gpio_event_output_info *oi; + oi = container_of(info, struct gpio_event_output_info, info); + if (type != oi->type) + return 0; + if (!(oi->flags & GPIOEDF_ACTIVE_HIGH)) + value = !value; + for (i = 0; i < oi->keymap_size; i++) + if (dev == oi->keymap[i].dev && code == oi->keymap[i].code) + gpio_set_value(oi->keymap[i].gpio, value); + return 0; +} + +int gpio_event_output_func( + struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, + void **data, int func) +{ + int ret; + int i; + struct gpio_event_output_info *oi; + oi = container_of(info, struct gpio_event_output_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) + return 0; + + if (func == GPIO_EVENT_FUNC_INIT) { + int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH); + + for (i = 0; i < oi->keymap_size; i++) { + int dev = oi->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_output_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + oi->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], oi->type, + oi->keymap[i].code); + } + + for (i = 0; i < oi->keymap_size; i++) { + ret = gpio_request(oi->keymap[i].gpio, + "gpio_event_output"); + if (ret) { + pr_err("gpio_event_output_func: gpio_request " + "failed for %d\n", oi->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_output(oi->keymap[i].gpio, + output_level); + if (ret) { + pr_err("gpio_event_output_func: " + "gpio_direction_output failed for %d\n", + oi->keymap[i].gpio); + goto err_gpio_direction_output_failed; + } + } + return 0; + } + + ret = 0; + for (i = oi->keymap_size - 1; i >= 0; i--) { +err_gpio_direction_output_failed: + gpio_free(oi->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + return ret; +} + diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c new file mode 100644 index 000000000000..b09ecf743635 --- /dev/null +++ b/drivers/input/misc/keychord.c @@ -0,0 +1,467 @@ +/* + * drivers/input/misc/keychord.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KEYCHORD_NAME "keychord" +#define BUFFER_SIZE 16 + +MODULE_AUTHOR("Mike Lockwood "); +MODULE_DESCRIPTION("Key chord input driver"); +MODULE_SUPPORTED_DEVICE("keychord"); +MODULE_LICENSE("GPL"); + +#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \ + ((char *)kc + sizeof(struct input_keychord) + \ + kc->count * sizeof(kc->keycodes[0]))) + +struct keychord_device { + struct input_handler input_handler; + int registered; + + /* list of keychords to monitor */ + struct input_keychord *keychords; + int keychord_count; + + /* bitmask of keys contained in our keychords */ + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + /* current state of the keys */ + unsigned long keystate[BITS_TO_LONGS(KEY_CNT)]; + /* number of keys that are currently pressed */ + int key_down; + + /* second input_device_id is needed for null termination */ + struct input_device_id device_ids[2]; + + spinlock_t lock; + wait_queue_head_t waitq; + unsigned char head; + unsigned char tail; + __u16 buff[BUFFER_SIZE]; + /* Bit to serialize writes to this device */ +#define KEYCHORD_BUSY 0x01 + unsigned long flags; + wait_queue_head_t write_waitq; +}; + +static int check_keychord(struct keychord_device *kdev, + struct input_keychord *keychord) +{ + int i; + + if (keychord->count != kdev->key_down) + return 0; + + for (i = 0; i < keychord->count; i++) { + if (!test_bit(keychord->keycodes[i], kdev->keystate)) + return 0; + } + + /* we have a match */ + return 1; +} + +static void keychord_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + struct keychord_device *kdev = handle->private; + struct input_keychord *keychord; + unsigned long flags; + int i, got_chord = 0; + + if (type != EV_KEY || code >= KEY_MAX) + return; + + spin_lock_irqsave(&kdev->lock, flags); + /* do nothing if key state did not change */ + if (!test_bit(code, kdev->keystate) == !value) + goto done; + __change_bit(code, kdev->keystate); + if (value) + kdev->key_down++; + else + kdev->key_down--; + + /* don't notify on key up */ + if (!value) + goto done; + /* ignore this event if it is not one of the keys we are monitoring */ + if (!test_bit(code, kdev->keybit)) + goto done; + + keychord = kdev->keychords; + if (!keychord) + goto done; + + /* check to see if the keyboard state matches any keychords */ + for (i = 0; i < kdev->keychord_count; i++) { + if (check_keychord(kdev, keychord)) { + kdev->buff[kdev->head] = keychord->id; + kdev->head = (kdev->head + 1) % BUFFER_SIZE; + got_chord = 1; + break; + } + /* skip to next keychord */ + keychord = NEXT_KEYCHORD(keychord); + } + +done: + spin_unlock_irqrestore(&kdev->lock, flags); + + if (got_chord) { + pr_info("keychord: got keychord id %d. 
Any tasks: %d\n", + keychord->id, + !list_empty_careful(&kdev->waitq.head)); + wake_up_interruptible(&kdev->waitq); + } +} + +static int keychord_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + int i, ret; + struct input_handle *handle; + struct keychord_device *kdev = + container_of(handler, struct keychord_device, input_handler); + + /* + * ignore this input device if it does not contain any keycodes + * that we are monitoring + */ + for (i = 0; i < KEY_MAX; i++) { + if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit)) + break; + } + if (i == KEY_MAX) + return -ENODEV; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = KEYCHORD_NAME; + handle->private = kdev; + + ret = input_register_handle(handle); + if (ret) + goto err_input_register_handle; + + ret = input_open_device(handle); + if (ret) + goto err_input_open_device; + + pr_info("keychord: using input dev %s for fevent\n", dev->name); + return 0; + +err_input_open_device: + input_unregister_handle(handle); +err_input_register_handle: + kfree(handle); + return ret; +} + +static void keychord_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +/* + * keychord_read is used to read keychord events from the driver + */ +static ssize_t keychord_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct keychord_device *kdev = file->private_data; + __u16 id; + int retval; + unsigned long flags; + + if (count < sizeof(id)) + return -EINVAL; + count = sizeof(id); + + if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + retval = wait_event_interruptible(kdev->waitq, + kdev->head != kdev->tail); + if (retval) + return retval; + + spin_lock_irqsave(&kdev->lock, flags); + /* pop a keychord ID off the queue */ + id = kdev->buff[kdev->tail]; + kdev->tail = (kdev->tail + 1) % BUFFER_SIZE; + spin_unlock_irqrestore(&kdev->lock, flags); + + if (copy_to_user(buffer, &id, count)) + return -EFAULT; + + return count; +} + +/* + * serializes writes on a device. can use mutex_lock_interruptible() + * for this particular use case as well - a matter of preference. 
+ */ +static int +keychord_write_lock(struct keychord_device *kdev) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&kdev->lock, flags); + while (kdev->flags & KEYCHORD_BUSY) { + spin_unlock_irqrestore(&kdev->lock, flags); + ret = wait_event_interruptible(kdev->write_waitq, + ((kdev->flags & KEYCHORD_BUSY) == 0)); + if (ret) + return ret; + spin_lock_irqsave(&kdev->lock, flags); + } + kdev->flags |= KEYCHORD_BUSY; + spin_unlock_irqrestore(&kdev->lock, flags); + return 0; +} + +static void +keychord_write_unlock(struct keychord_device *kdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kdev->lock, flags); + kdev->flags &= ~KEYCHORD_BUSY; + spin_unlock_irqrestore(&kdev->lock, flags); + wake_up_interruptible(&kdev->write_waitq); +} + +/* + * keychord_write is used to configure the driver + */ +static ssize_t keychord_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct keychord_device *kdev = file->private_data; + struct input_keychord *keychords = 0; + struct input_keychord *keychord; + int ret, i, key; + unsigned long flags; + size_t resid = count; + size_t key_bytes; + + if (count < sizeof(struct input_keychord)) + return -EINVAL; + keychords = kzalloc(count, GFP_KERNEL); + if (!keychords) + return -ENOMEM; + + /* read list of keychords from userspace */ + if (copy_from_user(keychords, buffer, count)) { + kfree(keychords); + return -EFAULT; + } + + /* + * Serialize writes to this device to prevent various races. + * 1) writers racing here could do duplicate input_unregister_handler() + * calls, resulting in attempting to unlink a node from a list that + * does not exist. + * 2) writers racing here could do duplicate input_register_handler() calls + * below, resulting in a duplicate insertion of a node into the list. + * 3) a double kfree of keychords can occur (in the event that + * input_register_handler() fails below. + */ + ret = keychord_write_lock(kdev); + if (ret) { + kfree(keychords); + return ret; + } + + /* unregister handler before changing configuration */ + if (kdev->registered) { + input_unregister_handler(&kdev->input_handler); + kdev->registered = 0; + } + + spin_lock_irqsave(&kdev->lock, flags); + /* clear any existing configuration */ + kfree(kdev->keychords); + kdev->keychords = 0; + kdev->keychord_count = 0; + kdev->key_down = 0; + memset(kdev->keybit, 0, sizeof(kdev->keybit)); + memset(kdev->keystate, 0, sizeof(kdev->keystate)); + kdev->head = kdev->tail = 0; + + keychord = keychords; + + while (resid > 0) { + /* Is the entire keychord entry header present ? */ + if (resid < sizeof(struct input_keychord)) { + pr_err("keychord: Insufficient bytes present for header %zu\n", + resid); + goto err_unlock_return; + } + resid -= sizeof(struct input_keychord); + if (keychord->count <= 0) { + pr_err("keychord: invalid keycode count %d\n", + keychord->count); + goto err_unlock_return; + } + key_bytes = keychord->count * sizeof(keychord->keycodes[0]); + /* Do we have all the expected keycodes ? 
*/ + if (resid < key_bytes) { + pr_err("keychord: Insufficient bytes present for keycount %zu\n", + resid); + goto err_unlock_return; + } + resid -= key_bytes; + + if (keychord->version != KEYCHORD_VERSION) { + pr_err("keychord: unsupported version %d\n", + keychord->version); + goto err_unlock_return; + } + + /* keep track of the keys we are monitoring in keybit */ + for (i = 0; i < keychord->count; i++) { + key = keychord->keycodes[i]; + if (key < 0 || key >= KEY_CNT) { + pr_err("keychord: keycode %d out of range\n", + key); + goto err_unlock_return; + } + __set_bit(key, kdev->keybit); + } + + kdev->keychord_count++; + keychord = NEXT_KEYCHORD(keychord); + } + + kdev->keychords = keychords; + spin_unlock_irqrestore(&kdev->lock, flags); + + ret = input_register_handler(&kdev->input_handler); + if (ret) { + kfree(keychords); + kdev->keychords = 0; + keychord_write_unlock(kdev); + return ret; + } + kdev->registered = 1; + + keychord_write_unlock(kdev); + + return count; + +err_unlock_return: + spin_unlock_irqrestore(&kdev->lock, flags); + kfree(keychords); + keychord_write_unlock(kdev); + return -EINVAL; +} + +static unsigned int keychord_poll(struct file *file, poll_table *wait) +{ + struct keychord_device *kdev = file->private_data; + + poll_wait(file, &kdev->waitq, wait); + + if (kdev->head != kdev->tail) + return POLLIN | POLLRDNORM; + + return 0; +} + +static int keychord_open(struct inode *inode, struct file *file) +{ + struct keychord_device *kdev; + + kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL); + if (!kdev) + return -ENOMEM; + + spin_lock_init(&kdev->lock); + init_waitqueue_head(&kdev->waitq); + init_waitqueue_head(&kdev->write_waitq); + + kdev->input_handler.event = keychord_event; + kdev->input_handler.connect = keychord_connect; + kdev->input_handler.disconnect = keychord_disconnect; + kdev->input_handler.name = KEYCHORD_NAME; + kdev->input_handler.id_table = kdev->device_ids; + + kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT; + __set_bit(EV_KEY, kdev->device_ids[0].evbit); + + file->private_data = kdev; + + return 0; +} + +static int keychord_release(struct inode *inode, struct file *file) +{ + struct keychord_device *kdev = file->private_data; + + if (kdev->registered) + input_unregister_handler(&kdev->input_handler); + kfree(kdev->keychords); + kfree(kdev); + + return 0; +} + +static const struct file_operations keychord_fops = { + .owner = THIS_MODULE, + .open = keychord_open, + .release = keychord_release, + .read = keychord_read, + .write = keychord_write, + .poll = keychord_poll, +}; + +static struct miscdevice keychord_misc = { + .fops = &keychord_fops, + .name = KEYCHORD_NAME, + .minor = MISC_DYNAMIC_MINOR, +}; + +static int __init keychord_init(void) +{ + return misc_register(&keychord_misc); +} + +static void __exit keychord_exit(void) +{ + misc_deregister(&keychord_misc); +} + +module_init(keychord_init); +module_exit(keychord_exit); diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index 6c51d404874b..c37aea9ac272 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops, twl4030_vibra_suspend, twl4030_vibra_resume); static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, - struct device_node *node) + struct device_node *parent) { + struct device_node *node; + if (pdata && pdata->coexist) return true; - node = of_find_node_by_name(node, "codec"); + node = of_get_child_by_name(parent, 
"codec"); if (node) { of_node_put(node); return true; diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 5690eb7ff954..15e0d352c4cc 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c @@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev) int vddvibr_uV = 0; int error; - of_node_get(twl6040_core_dev->of_node); - twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node, + twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node, "vibra"); if (!twl6040_core_node) { dev_err(&pdev->dev, "parent of node is missing?\n"); diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 6bf56bb5f8d9..d91f3b1c5375 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev, 0, width, 0, 0); input_set_abs_params(mtouch, ABS_MT_POSITION_Y, 0, height, 0, 0); - input_set_abs_params(mtouch, ABS_MT_PRESSURE, - 0, 255, 0, 0); ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); if (ret) { diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 850b00e3ad8e..9a234da8cac2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_MULTI: if (priv->flags & ALPS_BUTTONPAD) { if (IS_SS4PLUS_DEV(priv->dev_id)) { - f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); - f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL; } else { f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX_BL; } + no_data_y = SS4_MFPACKET_NO_AY_BL; f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); - no_data_x = SS4_MFPACKET_NO_AX_BL; - no_data_y = SS4_MFPACKET_NO_AY_BL; } else { if (IS_SS4PLUS_DEV(priv->dev_id)) { - f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); - f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1); + no_data_x = SS4_PLUS_MFPACKET_NO_AX; } else { - f->mt[0].x = SS4_STD_MF_X_V2(p, 0); - f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + f->mt[2].x = SS4_STD_MF_X_V2(p, 0); + f->mt[3].x = SS4_STD_MF_X_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX; } + no_data_y = SS4_MFPACKET_NO_AY; + f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); - no_data_x = SS4_MFPACKET_NO_AX; - no_data_y = SS4_MFPACKET_NO_AY; } f->first_mp = 0; @@ -2541,13 +2544,31 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], } static int alps_update_dual_info_ss4_v2(unsigned char otp[][4], - struct alps_data *priv) + struct alps_data *priv, + struct psmouse *psmouse) { bool is_dual = false; + int reg_val = 0; + struct ps2dev *ps2dev = &psmouse->ps2dev; - if (IS_SS4PLUS_DEV(priv->dev_id)) + if (IS_SS4PLUS_DEV(priv->dev_id)) { is_dual = (otp[0][0] >> 4) & 0x01; + if (!is_dual) { + /* For support TrackStick of Thinkpad L/E series */ + if (alps_exit_command_mode(psmouse) == 0 && + alps_enter_command_mode(psmouse) == 0) { + reg_val = alps_command_mode_read_reg(psmouse, + 0xD7); + } + alps_exit_command_mode(psmouse); + ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); + + if (reg_val == 0x0C || reg_val == 0x1D) + is_dual = true; + } + } + if (is_dual) priv->flags |= ALPS_DUALPOINT | ALPS_DUALPOINT_WITH_PRESSURE; @@ -2570,7 +2591,7 
@@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, alps_update_btn_info_ss4_v2(otp, priv); - alps_update_dual_info_ss4_v2(otp, priv); + alps_update_dual_info_ss4_v2(otp, priv, psmouse); return 0; } diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index c80a7c76cb76..79b6d69d1486 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -141,10 +141,12 @@ enum SS4_PACKET_ID { #define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F) -#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ -#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ -#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */ -#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */ +#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ +#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ +#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */ +#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */ +#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */ +#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */ /* * enum V7_PACKET_ID - defines the packet type for V7 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index b84cd978fce2..a4aaa748e987 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 5: etd->hw_version = 3; break; - case 6 ... 14: + case 6 ... 15: etd->hw_version = 4; break; default: diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 0871010f18d5..bbd29220dbe9 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -19,6 +19,13 @@ #include "psmouse.h" #include "trackpoint.h" +static const char * const trackpoint_variants[] = { + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", +}; + /* * Power-on Reset: Resets all trackpoint parameters, including RAM values, * to defaults. 
@@ -26,7 +33,7 @@ */ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) { - unsigned char results[2]; + u8 results[2]; int tries = 0; /* Issue POR command, and repeat up to once if 0xFC00 received */ @@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) /* Check for success response -- 0xAA00 */ if (results[0] != 0xAA || results[1] != 0x00) - return -1; + return -ENODEV; return 0; } @@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) /* * Device IO: read, write and toggle bit */ -static int trackpoint_read(struct ps2dev *ps2dev, - unsigned char loc, unsigned char *results) +static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results) { if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) { @@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev, return 0; } -static int trackpoint_write(struct ps2dev *ps2dev, - unsigned char loc, unsigned char val) +static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val) { if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) || @@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev, return 0; } -static int trackpoint_toggle_bit(struct ps2dev *ps2dev, - unsigned char loc, unsigned char mask) +static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask) { /* Bad things will happen if the loc param isn't in this range */ if (loc < 0x20 || loc >= 0x2F) @@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev, return 0; } -static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, - unsigned char mask, unsigned char value) +static int trackpoint_update_bit(struct ps2dev *ps2dev, + u8 loc, u8 mask, u8 value) { int retval = 0; - unsigned char data; + u8 data; trackpoint_read(ps2dev, loc, &data); if (((data & mask) == mask) != !!value) @@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, */ struct trackpoint_attr_data { size_t field_offset; - unsigned char command; - unsigned char mask; - unsigned char inverted; - unsigned char power_on_default; + u8 command; + u8 mask; + bool inverted; + u8 power_on_default; }; -static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) +static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, + void *data, char *buf) { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset); + u8 value = *(u8 *)((void *)tp + attr->field_offset); if (attr->inverted) value = !value; @@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data, { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); - unsigned char value; + u8 *field = (void *)tp + attr->field_offset; + u8 value; int err; err = kstrtou8(buf, 10, &value); @@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data, { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); - unsigned int value; + bool *field = (void *)tp + attr->field_offset; + bool value; int err; - err = kstrtouint(buf, 10, &value); + err = kstrtobool(buf, 
&value); if (err) return err; - if (value > 1) - return -EINVAL; - if (attr->inverted) value = !value; @@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \ &trackpoint_attr_##_name, \ trackpoint_show_int_attr, trackpoint_set_bit_attr) -#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \ -do { \ - struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \ - \ - trackpoint_update_bit(&_psmouse->ps2dev, \ - _attr->command, _attr->mask, _tp->_name); \ -} while (0) - -#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \ -do { \ - if (!_power_on || \ - _tp->_name != trackpoint_attr_##_name.power_on_default) { \ - if (!trackpoint_attr_##_name.mask) \ - trackpoint_write(&_psmouse->ps2dev, \ - trackpoint_attr_##_name.command, \ - _tp->_name); \ - else \ - TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \ - } \ -} while (0) - -#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \ - (_tp->_name = trackpoint_attr_##_name.power_on_default) - TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS); TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED); TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA); @@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME); TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME); -TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, +TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false, TP_DEF_PTSON); -TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0, +TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false, TP_DEF_SKIPBACK); -TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1, +TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true, TP_DEF_EXT_DEV); +static bool trackpoint_is_attr_available(struct psmouse *psmouse, + struct attribute *attr) +{ + struct trackpoint_data *tp = psmouse->private; + + return tp->variant_id == TP_VARIANT_IBM || + attr == &psmouse_attr_sensitivity.dattr.attr || + attr == &psmouse_attr_press_to_select.dattr.attr; +} + +static umode_t trackpoint_is_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct serio *serio = to_serio_port(dev); + struct psmouse *psmouse = serio_get_drvdata(serio); + + return trackpoint_is_attr_available(psmouse, attr) ? 
attr->mode : 0; +} + static struct attribute *trackpoint_attrs[] = { &psmouse_attr_sensitivity.dattr.attr, &psmouse_attr_speed.dattr.attr, @@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = { }; static struct attribute_group trackpoint_attr_group = { - .attrs = trackpoint_attrs, + .is_visible = trackpoint_is_attr_visible, + .attrs = trackpoint_attrs, }; -static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id) -{ - unsigned char param[2] = { 0 }; +#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \ +do { \ + struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \ + \ + if ((!_power_on || _tp->_name != _attr->power_on_default) && \ + trackpoint_is_attr_available(_psmouse, \ + &psmouse_attr_##_name.dattr.attr)) { \ + if (!_attr->mask) \ + trackpoint_write(&_psmouse->ps2dev, \ + _attr->command, _tp->_name); \ + else \ + trackpoint_update_bit(&_psmouse->ps2dev, \ + _attr->command, _attr->mask, \ + _tp->_name); \ + } \ +} while (0) - if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) - return -1; +#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \ +do { \ + _tp->_name = trackpoint_attr_##_name.power_on_default; \ +} while (0) - /* add new TP ID. */ - if (!(param[0] & TP_MAGIC_IDENT)) - return -1; +static int trackpoint_start_protocol(struct psmouse *psmouse, + u8 *variant_id, u8 *firmware_id) +{ + u8 param[2] = { 0 }; + int error; - if (firmware_id) - *firmware_id = param[1]; + error = ps2_command(&psmouse->ps2dev, + param, MAKE_PS2_CMD(0, 2, TP_READ_ID)); + if (error) + return error; + + switch (param[0]) { + case TP_VARIANT_IBM: + case TP_VARIANT_ALPS: + case TP_VARIANT_ELAN: + case TP_VARIANT_NXP: + if (variant_id) + *variant_id = param[0]; + if (firmware_id) + *firmware_id = param[1]; + return 0; + } - return 0; + return -ENODEV; } /* @@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state) { struct trackpoint_data *tp = psmouse->private; - if (!in_power_on_state) { + if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) { /* * Disable features that may make device unusable * with this driver. 
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp) static void trackpoint_disconnect(struct psmouse *psmouse) { - sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); + device_remove_group(&psmouse->ps2dev.serio->dev, + &trackpoint_attr_group); kfree(psmouse->private); psmouse->private = NULL; @@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse) static int trackpoint_reconnect(struct psmouse *psmouse) { - int reset_fail; + struct trackpoint_data *tp = psmouse->private; + int error; + bool was_reset; - if (trackpoint_start_protocol(psmouse, NULL)) - return -1; + error = trackpoint_start_protocol(psmouse, NULL, NULL); + if (error) + return error; - reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev); - if (trackpoint_sync(psmouse, !reset_fail)) - return -1; + was_reset = tp->variant_id == TP_VARIANT_IBM && + trackpoint_power_on_reset(&psmouse->ps2dev) == 0; + + error = trackpoint_sync(psmouse, was_reset); + if (error) + return error; return 0; } @@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse) int trackpoint_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; - unsigned char firmware_id; - unsigned char button_info; + struct trackpoint_data *tp; + u8 variant_id; + u8 firmware_id; + u8 button_info; int error; - if (trackpoint_start_protocol(psmouse, &firmware_id)) - return -1; + error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id); + if (error) + return error; if (!set_properties) return 0; - if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { - psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); - button_info = 0x33; - } - - psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); - if (!psmouse->private) + tp = kzalloc(sizeof(*tp), GFP_KERNEL); + if (!tp) return -ENOMEM; - psmouse->vendor = "IBM"; + trackpoint_defaults(tp); + tp->variant_id = variant_id; + tp->firmware_id = firmware_id; + + psmouse->private = tp; + + psmouse->vendor = trackpoint_variants[variant_id]; psmouse->name = "TrackPoint"; psmouse->reconnect = trackpoint_reconnect; psmouse->disconnect = trackpoint_disconnect; + if (variant_id != TP_VARIANT_IBM) { + /* Newer variants do not support extended button query. */ + button_info = 0x33; + } else { + error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info); + if (error) { + psmouse_warn(psmouse, + "failed to get extended button data, assuming 3 buttons\n"); + button_info = 0x33; + } else if (!button_info) { + psmouse_warn(psmouse, + "got 0 in extended button data, assuming 3 buttons\n"); + button_info = 0x33; + } + } + if ((button_info & 0x0f) >= 3) - __set_bit(BTN_MIDDLE, psmouse->dev->keybit); + input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE); __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); - trackpoint_defaults(psmouse->private); - - error = trackpoint_power_on_reset(ps2dev); - - /* Write defaults to TP only if reset fails. */ - if (error) + if (variant_id != TP_VARIANT_IBM || + trackpoint_power_on_reset(ps2dev) != 0) { + /* + * Write defaults to TP if we did not reset the trackpoint. 
+ */ trackpoint_sync(psmouse, false); + } - error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group); + error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group); if (error) { psmouse_err(psmouse, "failed to create sysfs attributes, error: %d\n", @@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) } psmouse_info(psmouse, - "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n", - firmware_id, + "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n", + psmouse->vendor, firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f); return 0; diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 88055755f82e..10a039148234 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -21,10 +21,16 @@ #define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_READ_ID 0xE1 /* Sent for device identification */ -#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ - /* by the firmware ID */ - /* Firmware ID includes 0x1, 0x2, 0x3 */ +/* + * Valid first byte responses to the "Read Secondary ID" (0xE1) command. + * 0x01 was the original IBM trackpoint, others implement very limited + * subset of trackpoint features. + */ +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 /* * Commands @@ -136,18 +142,20 @@ #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) -struct trackpoint_data -{ - unsigned char sensitivity, speed, inertia, reach; - unsigned char draghys, mindrag; - unsigned char thresh, upthresh; - unsigned char ztime, jenks; - unsigned char drift_time; +struct trackpoint_data { + u8 variant_id; + u8 firmware_id; + + u8 sensitivity, speed, inertia, reach; + u8 draghys, mindrag; + u8 thresh, upthresh; + u8 ztime, jenks; + u8 drift_time; /* toggles */ - unsigned char press_to_select; - unsigned char skipback; - unsigned char ext_dev; + bool press_to_select; + bool skipback; + bool ext_dev; }; #ifdef CONFIG_MOUSE_PS2_TRACKPOINT diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index 0f586780ceb4..1ae5c1ef3f5b 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse) /* * Array of supported hypervisors. 
*/ -static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = { - &x86_hyper_vmware, -#ifdef CONFIG_KVM_GUEST - &x86_hyper_kvm, -#endif +static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = { + X86_HYPER_VMWARE, + X86_HYPER_KVM, }; /** @@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void) int i; for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++) - if (vmmouse_supported_hypervisors[i] == x86_hyper) + if (vmmouse_supported_hypervisors[i] == x86_hyper_type) return true; return false; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 4f2bb5947a4e..f5954981e9ee 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n"); + /* Doing it in the reverse order so F01 will be removed last */ + list_for_each_entry_safe_reverse(fn, tmp, + &data->function_list, node) { + list_del(&fn->node); + rmi_unregister_function(fn); + } + devm_kfree(&rmi_dev->dev, data->irq_memory); data->irq_memory = NULL; data->irq_status = NULL; @@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) data->f01_container = NULL; data->f34_container = NULL; - - /* Doing it in the reverse order so F01 will be removed last */ - list_for_each_entry_safe_reverse(fn, tmp, - &data->function_list, node) { - list_del(&fn->node); - rmi_unregister_function(fn); - } } static int reset_one_function(struct rmi_function *fn) @@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Failed to process interrupt request: %d\n", ret); - if (count) + if (count) { kfree(attn_data.data); + attn_data.data = NULL; + } if (!kfifo_is_empty(&drvdata->attn_fifo)) return rmi_irq_fn(irq, dev_id); diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c index ad71a5e768dc..7ccbb370a9a8 100644 --- a/drivers/input/rmi4/rmi_f03.c +++ b/drivers/input/rmi4/rmi_f03.c @@ -32,6 +32,7 @@ struct f03_data { struct rmi_function *fn; struct serio *serio; + bool serio_registered; unsigned int overwrite_buttons; @@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03) return 0; } +static int rmi_f03_pt_open(struct serio *serio) +{ + struct f03_data *f03 = serio->port_data; + struct rmi_function *fn = f03->fn; + const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; + u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; + int error; + + /* + * Consume any pending data. Some devices like to spam with + * 0xaa 0x00 announcements which may confuse us as we try to + * probe the device. 
+ */ + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); + if (!error) + rmi_dbg(RMI_DEBUG_FN, &fn->dev, + "%s: Consumed %*ph (%d) from PS2 guest\n", + __func__, ob_len, obs, ob_len); + + return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); +} + +static void rmi_f03_pt_close(struct serio *serio) +{ + struct f03_data *f03 = serio->port_data; + struct rmi_function *fn = f03->fn; + + fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask); +} + static int rmi_f03_register_pt(struct f03_data *f03) { struct serio *serio; @@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03) serio->id.type = SERIO_PS_PSTHRU; serio->write = rmi_f03_pt_write; + serio->open = rmi_f03_pt_open; + serio->close = rmi_f03_pt_close; serio->port_data = f03; strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through", @@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn) f03->device_count); dev_set_drvdata(dev, f03); - - error = rmi_f03_register_pt(f03); - if (error) - return error; - return 0; } static int rmi_f03_config(struct rmi_function *fn) { - fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); + struct f03_data *f03 = dev_get_drvdata(&fn->dev); + int error; + + if (!f03->serio_registered) { + error = rmi_f03_register_pt(f03); + if (error) + return error; + + f03->serio_registered = true; + } else { + /* + * We must be re-configuring the sensor, just enable + * interrupts for this function. + */ + fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); + } return 0; } @@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f03_data *f03 = dev_get_drvdata(&fn->dev); - u16 data_addr = fn->fd.data_base_addr; + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; u8 ob_status; @@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) drvdata->attn_data.size -= ob_len; } else { /* Grab all of the data registers, and check them for data */ - error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET, - &obs, ob_len); + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); if (error) { dev_err(&fn->dev, "%s: Failed to read F03 output buffers: %d\n", @@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn) { struct f03_data *f03 = dev_get_drvdata(&fn->dev); - serio_unregister_port(f03->serio); + if (f03->serio_registered) + serio_unregister_port(f03->serio); } struct rmi_function_handler rmi_f03_handler = { diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 6cbbdc6e9687..b353d494ad40 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -530,6 +530,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { } }; +static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { + { + /* + * Sony Vaio VGN-CS series require MUX or the touch sensor + * buttons will disturb touchpad operation + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + }, + }, + { } +}; + /* * On some Asus laptops, just running self tests cause problems. 
*/ @@ -620,6 +634,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "20046"), }, }, + { + /* Lenovo ThinkPad L460 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), + }, + }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { @@ -1163,6 +1184,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = true; + if (dmi_check_system(i8042_dmi_forcemux_table)) + i8042_nomux = false; + if (dmi_check_system(i8042_dmi_notimeout_table)) i8042_notimeout = true; diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c index 7ed828a51f4c..3486d9403805 100644 --- a/drivers/input/touchscreen/88pm860x-ts.c +++ b/drivers/input/touchscreen/88pm860x-ts.c @@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, int data, n, ret; if (!np) return -ENODEV; - np = of_find_node_by_name(np, "touch"); + np = of_get_child_by_name(np, "touch"); if (!np) { dev_err(&pdev->dev, "Can't find touch node\n"); return -EINVAL; @@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } /* set tsi prebias time */ if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } /* set prebias & prechg time of pen detect */ data = 0; @@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); + + of_node_put(np); + return 0; + +err_put_node: + of_node_put(np); + + return -EINVAL; } #else #define pm860x_touch_dt_init(x, y, z) (-1) diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index b3bbad7d2282..5dafafad6351 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -808,8 +808,10 @@ static int __maybe_unused goodix_suspend(struct device *dev) int error; /* We need gpio pins to suspend/resume */ - if (!ts->gpiod_int || !ts->gpiod_rst) + if (!ts->gpiod_int || !ts->gpiod_rst) { + disable_irq(client->irq); return 0; + } wait_for_completion(&ts->firmware_loading_complete); @@ -849,8 +851,10 @@ static int __maybe_unused goodix_resume(struct device *dev) struct goodix_ts_data *ts = i2c_get_clientdata(client); int error; - if (!ts->gpiod_int || !ts->gpiod_rst) + if (!ts->gpiod_int || !ts->gpiod_rst) { + enable_irq(client->irq); return 0; + } /* * Exit sleep mode by outputting HIGH level to INT pin diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 8e8874d23717..99a2a57b6cfd 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3155,7 +3155,7 @@ static void amd_iommu_apply_resv_region(struct device *dev, unsigned long start, end; start = IOVA_PFN(region->start); - end = IOVA_PFN(region->start + region->length); + end = IOVA_PFN(region->start + region->length - 1); WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); } diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e67ba6c40faf..8f7a3c00b6cf 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1611,13 
+1611,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1UL << ias) - 1; domain->geometry.force_aperture = true; - smmu_domain->pgtbl_ops = pgtbl_ops; ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); - if (ret < 0) + if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); + return ret; + } - return ret; + smmu_domain->pgtbl_ops = pgtbl_ops; + return 0; } static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) @@ -1644,7 +1646,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) { - int i; + int i, j; struct arm_smmu_master_data *master = fwspec->iommu_priv; struct arm_smmu_device *smmu = master->smmu; @@ -1652,6 +1654,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) u32 sid = fwspec->ids[i]; __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); + /* Bridged PCI devices may end up with duplicated IDs */ + for (j = 0; j < i; j++) + if (fwspec->ids[j] == sid) + break; + if (j < i) + continue; + arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); } } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 6784a05dd6b2..83f3d4831f94 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2254,10 +2254,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, uint64_t tmp; if (!sg_res) { + unsigned int pgoff = sg->offset & ~PAGE_MASK; + sg_res = aligned_nrpages(sg->offset, sg->length); - sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; + sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff; sg->dma_length = sg->length; - pteval = page_to_phys(sg_page(sg)) | prot; + pteval = (sg_phys(sg) - pgoff) | prot; phys_pfn = pteval >> VTD_PAGE_SHIFT; } @@ -3790,7 +3792,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, for_each_sg(sglist, sg, nelems, i) { BUG_ON(!sg_page(sg)); - sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; + sg->dma_address = sg_phys(sg); sg->dma_length = sg->length; } return nelems; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f6697e55c2d4..003b4a4d4b78 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -129,6 +129,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu) pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", iommu->name); dmar_free_hwirq(irq); + iommu->pr_irq = 0; goto err; } dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); @@ -144,9 +145,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); - free_irq(iommu->pr_irq, iommu); - dmar_free_hwirq(iommu->pr_irq); - iommu->pr_irq = 0; + if (iommu->pr_irq) { + free_irq(iommu->pr_irq, iommu); + dmar_free_hwirq(iommu->pr_irq); + iommu->pr_irq = 0; + } free_pages((unsigned long)iommu->prq, PRQ_ORDER); iommu->prq = NULL; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index bc1efbfb9ddf..542930cd183d 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = { .probe = mtk_iommu_probe, .remove = mtk_iommu_remove, .driver = { - .name = "mtk-iommu", + .name = "mtk-iommu-v1", .of_match_table = mtk_iommu_of_ids, .pm = &mtk_iommu_pm_ops, } diff --git a/drivers/irqchip/irq-gic-v3-its.c 
b/drivers/irqchip/irq-gic-v3-its.c index e88395605e32..af57f8473a88 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1310,7 +1310,7 @@ static struct irq_chip its_irq_chip = { * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. */ #define IRQS_PER_CHUNK_SHIFT 5 -#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) +#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ static unsigned long *lpi_bitmap; @@ -2026,11 +2026,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* - * At least one bit of EventID is being used, hence a minimum - * of two entries. No, the architecture doesn't let you - * express an ITT with a single entry. + * We allocate at least one chunk worth of LPIs per device, + * and thus that many ITEs. The device may require less though. */ - nr_ites = max(2UL, roundup_pow_of_two(nvecs)); + nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680..848fcdf6a112 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -660,7 +660,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) * Ensure that stores to Normal memory are visible to the * other CPUs before issuing the IPI. */ - smp_wmb(); + wmb(); for_each_cpu(cpu, mask) { unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; @@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) int nr_parts; struct partition_affinity *parts; - parts_node = of_find_node_by_name(gic_node, "ppi-partitions"); + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); if (!parts_node) return; nr_parts = of_get_child_count(parts_node); if (!nr_parts) - return; + goto out_put_node; parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL); if (WARN_ON(!parts)) - return; + goto out_put_node; for_each_child_of_node(parts_node, child_part) { struct partition_affinity *part; @@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs[i] = desc; } + +out_put_node: + of_node_put(parts_node); } static void __init gic_of_setup_kvm_info(struct device_node *node) @@ -1294,6 +1297,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; void __iomem *redist_base; + /* A GICC entry which has !ACPI_MADT_ENABLED is not usable, so skip it */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + redist_base = ioremap(gicc->gicr_base_address, size); if (!redist_base) return -ENOMEM; @@ -1343,6 +1350,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) return 0; + /* + * It's perfectly valid for firmware to pass a disabled GICC entry; the driver + * should not treat it as an error and should skip the entry instead of failing the probe. 
+ */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + return -ENODEV; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c90976d7e53c..a9f300efce54 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -427,8 +427,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, spin_lock_irqsave(&gic_lock, flags); write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); - gic_clear_pcpu_masks(intr); - set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); irq_data_update_effective_affinity(data, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index 6aa3ea479214..f31265937439 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev) { struct combiner *combiner; size_t alloc_sz; - u32 nregs; + int nregs; int err; nregs = count_registers(pdev); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index ef1360445413..9ce6b32f52a1 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -189,6 +189,7 @@ void led_blink_set(struct led_classdev *led_cdev, { del_timer_sync(&led_cdev->blink_timer); + clear_bit(LED_BLINK_SW, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 905729191d3e..78183f90820e 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -61,6 +61,10 @@ #define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */ #define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */ +#define PCA955X_GPIO_INPUT LED_OFF +#define PCA955X_GPIO_HIGH LED_OFF +#define PCA955X_GPIO_LOW LED_FULL + enum pca955x_type { pca9550, pca9551, @@ -329,9 +333,9 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset, struct pca955x_led *led = &pca955x->leds[offset]; if (val) - return pca955x_led_set(&led->led_cdev, LED_FULL); - else - return pca955x_led_set(&led->led_cdev, LED_OFF); + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_HIGH); + + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW); } static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset, @@ -355,8 +359,11 @@ static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset) static int pca955x_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { - /* To use as input ensure pin is not driven */ - return pca955x_set_value(gc, offset, 0); + struct pca955x *pca955x = gpiochip_get_data(gc); + struct pca955x_led *led = &pca955x->leds[offset]; + + /* To use as input ensure pin is not driven. 
*/ + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_INPUT); } static int pca955x_gpio_direction_output(struct gpio_chip *gc, diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c index a52674327857..8988ba3b2d65 100644 --- a/drivers/leds/leds-pm8058.c +++ b/drivers/leds/leds-pm8058.c @@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev) if (!led) return -ENOMEM; - led->ledtype = (u32)of_device_get_match_data(&pdev->dev); + led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev); map = dev_get_regmap(pdev->dev.parent, NULL); if (!map) { diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 81501644fb15..3f0ddc0d7393 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -193,7 +193,7 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off, bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE); for (i = off; i < nr_pages + off; i++) { bv = bio->bi_io_vec[i]; - mempool_free(bv.bv_page, pblk->page_pool); + mempool_free(bv.bv_page, pblk->page_bio_pool); } } @@ -205,14 +205,14 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags, int i, ret; for (i = 0; i < nr_pages; i++) { - page = mempool_alloc(pblk->page_pool, flags); + page = mempool_alloc(pblk->page_bio_pool, flags); if (!page) goto err; ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0); if (ret != PBLK_EXPOSED_PAGE_SIZE) { pr_err("pblk: could not add page to bio\n"); - mempool_free(page, pblk->page_pool); + mempool_free(page, pblk->page_bio_pool); goto err; } } @@ -486,12 +486,14 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) u64 addr; int i; + spin_lock(&line->lock); addr = find_next_zero_bit(line->map_bitmap, pblk->lm.sec_per_line, line->cur_sec); line->cur_sec = addr - nr_secs; for (i = 0; i < nr_secs; i++, line->cur_sec--) WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap)); + spin_unlock(&line->lock); } u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 6090d28f7995..d6bae085e1d2 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -486,10 +486,10 @@ void pblk_gc_should_start(struct pblk *pblk) { struct pblk_gc *gc = &pblk->gc; - if (gc->gc_enabled && !gc->gc_active) + if (gc->gc_enabled && !gc->gc_active) { pblk_gc_start(pblk); - - pblk_gc_kick(pblk); + pblk_gc_kick(pblk); + } } /* @@ -628,7 +628,8 @@ void pblk_gc_exit(struct pblk *pblk) flush_workqueue(gc->gc_reader_wq); flush_workqueue(gc->gc_line_reader_wq); - del_timer(&gc->gc_timer); + gc->gc_enabled = 0; + del_timer_sync(&gc->gc_timer); pblk_gc_stop(pblk, 1); if (gc->gc_ts) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 1b0f61233c21..1b75675ee67b 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -132,7 +132,6 @@ static int pblk_rwb_init(struct pblk *pblk) } /* Minimum pages needed within a lun */ -#define PAGE_POOL_SIZE 16 #define ADDR_POOL_SIZE 64 static int pblk_set_ppaf(struct pblk *pblk) @@ -247,14 +246,16 @@ static int pblk_core_init(struct pblk *pblk) if (pblk_init_global_caches(pblk)) return -ENOMEM; - pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); - if (!pblk->page_pool) + /* internal bios can be at most the sectors signaled by the device. 
*/ + pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev), + 0); + if (!pblk->page_bio_pool) return -ENOMEM; pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE, pblk_blk_ws_cache); if (!pblk->line_ws_pool) - goto free_page_pool; + goto free_page_bio_pool; pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache); if (!pblk->rec_pool) @@ -309,8 +310,8 @@ static int pblk_core_init(struct pblk *pblk) mempool_destroy(pblk->rec_pool); free_blk_ws_pool: mempool_destroy(pblk->line_ws_pool); -free_page_pool: - mempool_destroy(pblk->page_pool); +free_page_bio_pool: + mempool_destroy(pblk->page_bio_pool); return -ENOMEM; } @@ -322,7 +323,7 @@ static void pblk_core_free(struct pblk *pblk) if (pblk->bb_wq) destroy_workqueue(pblk->bb_wq); - mempool_destroy(pblk->page_pool); + mempool_destroy(pblk->page_bio_pool); mempool_destroy(pblk->line_ws_pool); mempool_destroy(pblk->rec_pool); mempool_destroy(pblk->g_rq_pool); @@ -681,8 +682,8 @@ static int pblk_lines_init(struct pblk *pblk) lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long); lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); - lm->high_thrs = lm->sec_per_line / 2; - lm->mid_thrs = lm->sec_per_line / 4; + lm->mid_thrs = lm->sec_per_line / 2; + lm->high_thrs = lm->sec_per_line / 4; lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs; /* Calculate necessary pages for smeta. See comment over struct @@ -923,6 +924,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, pblk->dev = dev; pblk->disk = tdisk; pblk->state = PBLK_STATE_RUNNING; + pblk->gc.gc_enabled = 0; spin_lock_init(&pblk->trans_lock); spin_lock_init(&pblk->lock); @@ -944,6 +946,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, atomic_long_set(&pblk->recov_writes, 0); atomic_long_set(&pblk->recov_writes, 0); atomic_long_set(&pblk->recov_gc_writes, 0); + atomic_long_set(&pblk->recov_gc_reads, 0); #endif atomic_long_set(&pblk->read_failed, 0); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index d682e89e6493..402c732f0970 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -238,7 +238,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd, kunmap_atomic(src_p); kunmap_atomic(dst_p); - mempool_free(src_bv.bv_page, pblk->page_pool); + mempool_free(src_bv.bv_page, pblk->page_bio_pool); hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1); } while (hole < nr_secs); @@ -499,7 +499,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, data_len = (*secs_to_gc) * geo->sec_size; bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len, - PBLK_KMALLOC_META, GFP_KERNEL); + PBLK_VMALLOC_META, GFP_KERNEL); if (IS_ERR(bio)) { pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio)); goto err_free_dma; @@ -519,7 +519,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, if (ret) { bio_endio(bio); pr_err("pblk: GC read request failed\n"); - goto err_free_dma; + goto err_free_bio; } if (!wait_for_completion_io_timeout(&wait, @@ -541,10 +541,13 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, atomic_long_sub(*secs_to_gc, &pblk->inflight_reads); #endif + bio_put(bio); out: nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); return NVM_IO_OK; +err_free_bio: + bio_put(bio); err_free_dma: nvm_dev_dma_free(dev->parent, rqd.meta_list, 
rqd.dma_meta_list); return NVM_IO_ERR; diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 67e623bd5c2d..053164deb072 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -618,7 +618,7 @@ struct pblk { struct list_head compl_list; - mempool_t *page_pool; + mempool_t *page_bio_pool; mempool_t *line_ws_pool; mempool_t *rec_pool; mempool_t *g_rq_pool; diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index ae6146311934..f052a3eb2098 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Disable/inactivate ring */ writel_relaxed(0x0, ring->regs + RING_CONTROL); - /* Flush ring with timeout of 1s */ - timeout = 1000; + /* Set ring flush state */ + timeout = 1000; /* timeout of 1s */ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL); do { @@ -1374,7 +1374,23 @@ FLUSH_DONE_MASK) break; mdelay(1); - } while (timeout--); + } while (--timeout); + if (!timeout) + dev_err(ring->mbox->dev, + "setting ring%d flush state timed out\n", ring->num); + + /* Clear ring flush state */ + timeout = 1000; /* timeout of 1s */ + writel_relaxed(0x0, ring->regs + RING_CONTROL); + do { + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & + FLUSH_DONE_MASK)) + break; + mdelay(1); + } while (--timeout); + if (!timeout) + dev_err(ring->mbox->dev, + "clearing ring%d flush state timed out\n", ring->num); /* Abort all in-flight requests */ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 97fb956bb6e0..93f3d4d61fa7 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -30,6 +30,7 @@ #define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) +static bool mbox_data_ready; static struct dentry *root_debugfs_dir; struct mbox_test_device { @@ -152,16 +153,14 @@ static ssize_t mbox_test_message_write(struct file *filp, static bool mbox_test_message_data_ready(struct mbox_test_device *tdev) { - unsigned char data; + bool data_ready; unsigned long flags; spin_lock_irqsave(&tdev->lock, flags); - data = tdev->rx_buffer[0]; + data_ready = mbox_data_ready; spin_unlock_irqrestore(&tdev->lock, flags); - if (data != '\0') - return true; - return false; + return data_ready; } static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, @@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, *(touser + l) = '\0'; memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN); + mbox_data_ready = false; spin_unlock_irqrestore(&tdev->lock, flags); @@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message) message, MBOX_MAX_MSG_LEN); memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN); } + mbox_data_ready = true; spin_unlock_irqrestore(&tdev->lock, flags); wake_up_interruptible(&tdev->waitq); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 4a249ee86364..6ac0297db28c 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -460,6 +460,21 @@ config DM_VERITY If unsure, say N. 
+config DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + bool "Prefetch size 128" + +config DM_VERITY_HASH_PREFETCH_MIN_SIZE + int "Verity hash prefetch minimum size" + depends on DM_VERITY + range 1 4096 + default 128 if DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + default 1 + ---help--- + This sets minimum number of hash blocks to prefetch for dm-verity. + For devices like eMMC, having larger prefetch size like 128 can improve + performance with increased memory consumption for keeping more hashes + in RAM. + config DM_VERITY_FEC bool "Verity forward error correction support" depends on DM_VERITY @@ -540,4 +555,21 @@ config DM_ZONED If unsure, say N. +config DM_ANDROID_VERITY + bool "Android verity target support" + depends on DM_VERITY + depends on X509_CERTIFICATE_PARSER + depends on SYSTEM_TRUSTED_KEYRING + depends on PUBLIC_KEY_ALGO_RSA + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + ---help--- + This device-mapper target is virtually a VERITY target. This + target is setup by reading the metadata contents piggybacked + to the actual data blocks in the block device. The signature + of the metadata contents are verified against the key included + in the system keyring. Upon success, the underlying verity + target is setup. endif # MD diff --git a/drivers/md/Makefile b/drivers/md/Makefile index e94b6f9be941..83109ad30a48 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -71,3 +71,7 @@ endif ifeq ($(CONFIG_DM_VERITY_FEC),y) dm-verity-objs += dm-verity-fec.o endif + +ifeq ($(CONFIG_DM_ANDROID_VERITY),y) +dm-verity-objs += dm-android-verity.o +endif diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 08035634795c..f0dc8e2aee65 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) finish_wait(&ca->set->bucket_wait, &w); out: - wake_up_process(ca->alloc_thread); + if (ca->alloc_thread) + wake_up_process(ca->alloc_thread); trace_bcache_alloc(ca, reserve); @@ -479,7 +480,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, if (b == -1) goto err; - k->ptr[i] = PTR(ca->buckets[b].gen, + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, bucket_to_sector(c, b), ca->sb.nr_this_dev); @@ -514,15 +515,21 @@ struct open_bucket { /* * We keep multiple buckets open for writes, and try to segregate different - * write streams for better cache utilization: first we look for a bucket where - * the last write to it was sequential with the current write, and failing that - * we look for a bucket that was last used by the same task. + * write streams for better cache utilization: first we try to segregate flash + * only volume write streams from cached devices, secondly we look for a bucket + * where the last write to it was sequential with the current write, and + * failing that we look for a bucket that was last used by the same task. * * The ideas is if you've got multiple tasks pulling data into the cache at the * same time, you'll get better cache utilization if you try to segregate their * data and preserve locality. 
* - * For example, say you've starting Firefox at the same time you're copying a + * For example, dirty sectors of a flash-only volume are not reclaimable; if + * they are mixed with dirty sectors of a cached device, such buckets will + * stay marked as dirty and won't be reclaimed, even though the cached + * device's dirty data has already been written back to the backing device. + * + * And say you're starting Firefox at the same time you're copying a * bunch of files. Firefox will likely end up being fairly hot and stay in the * cache awhile, but the data you copied might not be; if you wrote all that * data to the same buckets it'd get invalidated at the same time. @@ -539,7 +546,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c, struct open_bucket *ret, *ret_task = NULL; list_for_each_entry_reverse(ret, &c->data_buckets, list) - if (!bkey_cmp(&ret->key, search)) + if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != + UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) + continue; + else if (!bkey_cmp(&ret->key, search)) goto found; else if (ret->last_write_point == write_point) ret_task = ret; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 658c54b3b07a..1598d1e04989 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c) c->shrink.scan_objects = bch_mca_scan; c->shrink.seeks = 4; c->shrink.batch = c->btree_pages * 2; - register_shrinker(&c->shrink); + + if (register_shrinker(&c->shrink)) + pr_warn("bcache: %s: could not register shrinker", + __func__); return 0; } diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 41c238fc3733..f9d391711595 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey return false; for (i = 0; i < KEY_PTRS(l); i++) - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) return false; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 02a98ddb592d..03cc0722ae48 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -507,7 +507,7 @@ static void journal_reclaim(struct cache_set *c) continue; ja->cur_idx = next; - k->ptr[n++] = PTR(0, + k->ptr[n++] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 3475d6628e21..f34ad8720756 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -463,6 +463,7 @@ struct search { unsigned recoverable:1; unsigned write:1; unsigned read_dirty_data:1; + unsigned cache_missed:1; unsigned long start_time; @@ -567,6 +568,7 @@ static void cache_lookup(struct closure *cl) { struct search *s = container_of(cl, struct search, iop.cl); struct bio *bio = &s->bio.bio; + struct cached_dev *dc; int ret; bch_btree_op_init(&s->op, -1); @@ -579,6 +581,27 @@ static void cache_lookup(struct closure *cl) return; } + /* + * We might hit an error when searching the btree; if that happens we will + * get a negative ret. In this scenario we should not recover data from the + * backing device (when the cache device is dirty) because we don't know + * whether the bkeys the read request covered are all clean. 
+ * + * And after that happens, s->iop.status is still its initial value + * before we submit s->bio.bio + */ + if (ret < 0) { + BUG_ON(ret == -EINTR); + if (s->d && s->d->c && + !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) { + dc = container_of(s->d, struct cached_dev, disk); + if (dc && atomic_read(&dc->has_dirty)) + s->recoverable = false; + } + if (!s->iop.status) + s->iop.status = BLK_STS_IOERR; + } + closure_return(cl); } @@ -649,6 +672,7 @@ static inline struct search *search_alloc(struct bio *bio, s->orig_bio = bio; s->cache_miss = NULL; + s->cache_missed = 0; s->d = d; s->recoverable = 1; s->write = op_is_write(bio_op(bio)); @@ -699,7 +723,14 @@ static void cached_dev_read_error(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; - if (s->recoverable) { + /* + * If the read request hit dirty data (s->read_dirty_data is true), + * then recovering a failed read request from the cached device may + * return stale data. So read failure recovery is only permitted + * when the read request hit clean data in the cache device, or + * when a cache read race happened. + */ + if (s->recoverable && !s->read_dirty_data) { /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); @@ -760,7 +791,7 @@ static void cached_dev_read_done_bh(struct closure *cl) struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); bch_mark_cache_accounting(s->iop.c, s->d, - !s->cache_miss, s->iop.bypass); + !s->cache_missed, s->iop.bypass); trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); if (s->iop.status) @@ -779,6 +810,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; + s->cache_missed = 1; + if (s->cache_miss || s->iop.bypass) { miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); ret = miss == bio ? 
MAP_DONE : MAP_CONTINUE; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fc0a31b13ac4..5d0430777dda 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -893,6 +893,12 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_lock(&bch_register_lock); + cancel_delayed_work_sync(&dc->writeback_rate_update); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { + kthread_stop(dc->writeback_thread); + dc->writeback_thread = NULL; + } + memset(&dc->sb.set_uuid, 0, 16); SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); @@ -938,6 +944,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; char buf[BDEVNAME_SIZE]; + struct cached_dev *exist_dc, *t; bdevname(dc->bdev, buf); @@ -961,6 +968,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) return -EINVAL; } + /* Check whether already attached */ + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { + pr_err("Tried to attach %s but duplicate UUID already attached", + buf); + + return -EINVAL; + } + } + u = uuid_find(c, dc->sb.uuid); if (u && @@ -1181,7 +1198,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, return; err: - pr_notice("error opening %s: %s", bdevname(bdev, name), err); + pr_notice("error %s: %s", bdevname(bdev, name), err); bcache_device_stop(&dc->disk); } @@ -1849,6 +1866,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, const char *err = NULL; /* must be set for any error case */ int ret = 0; + bdevname(bdev, name); + memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; @@ -1857,11 +1876,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, ca->sb_bio.bi_io_vec[0].bv_page = sb_page; get_page(sb_page); - if (blk_queue_discard(bdev_get_queue(ca->bdev))) + if (blk_queue_discard(bdev_get_queue(bdev))) ca->discard = CACHE_DISCARD(&ca->sb); ret = cache_alloc(ca); if (ret != 0) { + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); if (ret == -ENOMEM) err = "cache_alloc(): -ENOMEM"; else @@ -1884,14 +1904,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, goto out; } - pr_info("registered cache device %s", bdevname(bdev, name)); + pr_info("registered cache device %s", name); out: kobject_put(&ca->kobj); err: if (err) - pr_notice("error opening %s: %s", bdevname(bdev, name), err); + pr_notice("error %s: %s", name, err); return ret; } @@ -1980,6 +2000,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (err) goto err_close; + err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); if (!dc) @@ -1994,7 +2015,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, goto err_close; if (register_cache(sb, sb_page, bdev, ca) != 0) - goto err_close; + goto err; } out: if (sb_page) @@ -2007,7 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, err_close: blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); err: - pr_info("error opening %s: %s", path, err); + pr_info("error %s: %s", path, err); ret = -EINVAL; goto out; } @@ -2085,6 +2106,7 @@ static void bcache_exit(void) if (bcache_major) unregister_blkdev(bcache_major, "bcache"); unregister_reboot_notifier(&reboot); + mutex_destroy(&bch_register_lock); } static int __init bcache_init(void) @@ -2103,14 
+2125,15 @@ static int __init bcache_init(void) bcache_major = register_blkdev(0, "bcache"); if (bcache_major < 0) { unregister_reboot_notifier(&reboot); + mutex_destroy(&bch_register_lock); return bcache_major; } if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || - sysfs_create_files(bcache_kobj, files) || bch_request_init() || - bch_debug_init(bcache_kobj)) + bch_debug_init(bcache_kobj) || + sysfs_create_files(bcache_kobj, files)) goto err; return 0; diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index d2121637b4ab..0cabf31fb163 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) err = read_sb_page(bitmap->mddev, offset, sb_page, - 0, PAGE_SIZE); + 0, sizeof(bitmap_super_t)); } if (err) return err; @@ -1816,6 +1816,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot) BUG_ON(file && mddev->bitmap_info.offset); + if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { + pr_notice("md/raid:%s: array with journal cannot have bitmap\n", + mdname(mddev)); + return ERR_PTR(-EBUSY); + } + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return ERR_PTR(-ENOMEM); @@ -2123,7 +2129,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (store.sb_page && bitmap->storage.sb_page) memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), - PAGE_SIZE); + sizeof(bitmap_super_t)); bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; @@ -2152,6 +2158,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, for (k = 0; k < page; k++) { kfree(new_bp[k].map); } + kfree(new_bp); /* restore some fields from old_counts */ bitmap->counts.bp = old_counts.bp; @@ -2202,6 +2209,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, block += old_blocks; } + if (bitmap->counts.bp != old_counts.bp) { + unsigned long k; + for (k = 0; k < old_counts.pages; k++) + if (!old_counts.bp[k].hijacked) + kfree(old_counts.bp[k].map); + kfree(old_counts.bp); + } + if (!init) { int i; while (block < (chunks << chunkshift)) { diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c new file mode 100644 index 000000000000..0dd69244f77c --- /dev/null +++ b/drivers/md/dm-android-verity.c @@ -0,0 +1,949 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "dm-verity.h" +#include "dm-android-verity.h" + +static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH]; +static char buildvariant[BUILD_VARIANT]; + +static bool target_added; +static bool verity_enabled = true; +struct dentry *debug_dir; +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv); + +static struct target_type android_verity_target = { + .name = "android-verity", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = android_verity_ctr, + .dtr = verity_dtr, + .map = verity_map, + .status = verity_status, + .prepare_ioctl = verity_prepare_ioctl, + .iterate_devices = verity_iterate_devices, + .io_hints = verity_io_hints, +}; + +static int __init verified_boot_state_param(char *line) +{ + strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate)); + return 1; +} + +__setup("androidboot.verifiedbootstate=", verified_boot_state_param); + +static int __init verity_mode_param(char *line) +{ + strlcpy(veritymode, line, sizeof(veritymode)); + return 1; +} + +__setup("androidboot.veritymode=", verity_mode_param); + +static int __init verity_keyid_param(char *line) +{ + strlcpy(veritykeyid, line, sizeof(veritykeyid)); + return 1; +} + +__setup("veritykeyid=", verity_keyid_param); + +static int __init verity_buildvariant(char *line) +{ + strlcpy(buildvariant, line, sizeof(buildvariant)); + return 1; +} + +__setup("buildvariant=", verity_buildvariant); + +static inline bool default_verity_key_id(void) +{ + return veritykeyid[0] != '\0'; +} + +static inline bool is_eng(void) +{ + static const char typeeng[] = "eng"; + + return !strncmp(buildvariant, typeeng, sizeof(typeeng)); +} + +static inline bool is_userdebug(void) +{ + static const char typeuserdebug[] = "userdebug"; + + return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug)); +} + +static inline bool is_unlocked(void) +{ + static const char unlocked[] = "orange"; + + return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked)); +} + +static int table_extract_mpi_array(struct public_key_signature *pks, + const void *data, size_t len) +{ + MPI mpi = mpi_read_raw_data(data, len); + + if (!mpi) { + DMERR("Error while allocating mpi array"); + return -ENOMEM; + } + + pks->mpi[0] = mpi; + pks->nr_mpi = 1; + return 0; +} + +static struct public_key_signature *table_make_digest( + enum hash_algo hash, + const void *table, + unsigned long table_len) +{ + struct public_key_signature *pks = NULL; + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t digest_size, desc_size; + int ret; + + /* Allocate the hashing algorithm we're going to need and find out how + * big the hash operational data will be. + */ + tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + digest_size = crypto_shash_digestsize(tfm); + + /* We allocate the hash operational data storage on the end of out + * context data and the digest output buffer on the end of that. 
+ */ + ret = -ENOMEM; + pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL); + if (!pks) + goto error; + + pks->pkey_hash_algo = hash; + pks->digest = (u8 *)pks + sizeof(*pks) + desc_size; + pks->digest_size = digest_size; + + desc = (struct shash_desc *)(pks + 1); + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + ret = crypto_shash_finup(desc, table, table_len, pks->digest); + if (ret < 0) + goto error; + + crypto_free_shash(tfm); + return pks; + +error: + kfree(pks); + crypto_free_shash(tfm); + return ERR_PTR(ret); +} + +static int read_block_dev(struct bio_read *payload, struct block_device *bdev, + sector_t offset, int length) +{ + struct bio *bio; + int err = 0, i; + + payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE); + + bio = bio_alloc(GFP_KERNEL, payload->number_of_pages); + if (!bio) { + DMERR("Error while allocating bio"); + return -ENOMEM; + } + + bio->bi_bdev = bdev; + bio->bi_iter.bi_sector = offset; + + payload->page_io = kzalloc(sizeof(struct page *) * + payload->number_of_pages, GFP_KERNEL); + if (!payload->page_io) { + DMERR("page_io array alloc failed"); + err = -ENOMEM; + goto free_bio; + } + + for (i = 0; i < payload->number_of_pages; i++) { + payload->page_io[i] = alloc_page(GFP_KERNEL); + if (!payload->page_io[i]) { + DMERR("alloc_page failed"); + err = -ENOMEM; + goto free_pages; + } + if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) { + DMERR("bio_add_page error"); + err = -EIO; + goto free_pages; + } + } + + if (!submit_bio_wait(READ, bio)) + /* success */ + goto free_bio; + DMERR("bio read failed"); + err = -EIO; + +free_pages: + for (i = 0; i < payload->number_of_pages; i++) + if (payload->page_io[i]) + __free_page(payload->page_io[i]); + kfree(payload->page_io); +free_bio: + bio_put(bio); + return err; +} + +static inline u64 fec_div_round_up(u64 x, u64 y) +{ + u64 remainder; + + return div64_u64_rem(x, y, &remainder) + + (remainder > 0 ? 1 : 0); +} + +static inline void populate_fec_metadata(struct fec_header *header, + struct fec_ecc_metadata *ecc) +{ + ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size), + FEC_BLOCK_SIZE); + ecc->roots = le32_to_cpu(header->roots); + ecc->start = le64_to_cpu(header->inp_size); +} + +static inline int validate_fec_header(struct fec_header *header, u64 offset) +{ + /* move offset to make the sanity check work for backup header + * as well. */ + offset -= offset % FEC_BLOCK_SIZE; + if (le32_to_cpu(header->magic) != FEC_MAGIC || + le32_to_cpu(header->version) != FEC_VERSION || + le32_to_cpu(header->size) != sizeof(struct fec_header) || + le32_to_cpu(header->roots) == 0 || + le32_to_cpu(header->roots) >= FEC_RSM) + return -EINVAL; + + return 0; +} + +static int extract_fec_header(dev_t dev, struct fec_header *fec, + struct fec_ecc_metadata *ecc) +{ + u64 device_size; + struct bio_read payload; + int i, err = 0; + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("bdev get error"); + return PTR_ERR(bdev); + } + + device_size = i_size_read(bdev->bd_inode); + + /* fec metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. 
+ */ + BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE); + /* 512 byte sector alignment */ + BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0); + + err = read_block_dev(&payload, bdev, (device_size - + FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto error; + } + + BUG_ON(sizeof(struct fec_header) > PAGE_SIZE); + memcpy(fec, page_address(payload.page_io[0]), + sizeof(*fec)); + + ecc->valid = true; + if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) { + /* Try the backup header */ + memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE + - sizeof(*fec) , + sizeof(*fec)); + if (validate_fec_header(fec, device_size - + sizeof(struct fec_header))) + ecc->valid = false; + } + + if (ecc->valid) + populate_fec_metadata(fec, ecc); + + for (i = 0; i < payload.number_of_pages; i++) + __free_page(payload.page_io[i]); + kfree(payload.page_io); + +error: + blkdev_put(bdev, FMODE_READ); + return err; +} +static void find_metadata_offset(struct fec_header *fec, + struct block_device *bdev, u64 *metadata_offset) +{ + u64 device_size; + + device_size = i_size_read(bdev->bd_inode); + + if (le32_to_cpu(fec->magic) == FEC_MAGIC) + *metadata_offset = le64_to_cpu(fec->inp_size) - + VERITY_METADATA_SIZE; + else + *metadata_offset = device_size - VERITY_METADATA_SIZE; +} + +static int find_size(dev_t dev, u64 *device_size) +{ + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return PTR_ERR(bdev); + } + + *device_size = i_size_read(bdev->bd_inode); + *device_size >>= SECTOR_SHIFT; + + DMINFO("blkdev size in sectors: %llu", *device_size); + blkdev_put(bdev, FMODE_READ); + return 0; +} + +static int verify_header(struct android_metadata_header *header) +{ + int retval = -EINVAL; + + if (is_userdebug() && le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE) + return VERITY_STATE_DISABLE; + + if (!(le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_NUMBER) || + (le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE)) { + DMERR("Incorrect magic number"); + return retval; + } + + if (le32_to_cpu(header->protocol_version) != + VERITY_METADATA_VERSION) { + DMERR("Unsupported version %u", + le32_to_cpu(header->protocol_version)); + return retval; + } + + return 0; +} + +static int extract_metadata(dev_t dev, struct fec_header *fec, + struct android_metadata **metadata, + bool *verity_enabled) +{ + struct block_device *bdev; + struct android_metadata_header *header; + int i; + u32 table_length, copy_length, offset; + u64 metadata_offset; + struct bio_read payload; + int err = 0; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return -ENODEV; + } + + find_metadata_offset(fec, bdev, &metadata_offset); + + /* Verity metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. + * PAGE_SIZE is also a multiple of 512 bytes. 
+ */ + if (VERITY_METADATA_SIZE > PAGE_SIZE) + BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0); + /* 512 byte sector alignment */ + BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0); + + err = read_block_dev(&payload, bdev, metadata_offset / + (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto blkdev_release; + } + + header = kzalloc(sizeof(*header), GFP_KERNEL); + if (!header) { + DMERR("kzalloc failed for header"); + err = -ENOMEM; + goto free_payload; + } + + memcpy(header, page_address(payload.page_io[0]), + sizeof(*header)); + + DMINFO("bio magic_number:%u protocol_version:%d table_length:%u", + le32_to_cpu(header->magic_number), + le32_to_cpu(header->protocol_version), + le32_to_cpu(header->table_length)); + + err = verify_header(header); + + if (err == VERITY_STATE_DISABLE) { + DMERR("Mounting root with verity disabled"); + *verity_enabled = false; + /* we would still have to read the metadata to figure out + * the data blocks size. Or may be could map the entire + * partition similar to mounting the device. + * + * Reset error as well as the verity_enabled flag is changed. + */ + err = 0; + } else if (err) + goto free_header; + + *metadata = kzalloc(sizeof(**metadata), GFP_KERNEL); + if (!*metadata) { + DMERR("kzalloc for metadata failed"); + err = -ENOMEM; + goto free_header; + } + + (*metadata)->header = header; + table_length = le32_to_cpu(header->table_length); + + if (table_length == 0 || + table_length > (VERITY_METADATA_SIZE - + sizeof(struct android_metadata_header))) { + DMERR("table_length too long"); + err = -EINVAL; + goto free_metadata; + } + + (*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL); + + if (!(*metadata)->verity_table) { + DMERR("kzalloc verity_table failed"); + err = -ENOMEM; + goto free_metadata; + } + + if (sizeof(struct android_metadata_header) + + table_length <= PAGE_SIZE) { + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + table_length); + } else { + copy_length = PAGE_SIZE - + sizeof(struct android_metadata_header); + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + copy_length); + table_length -= copy_length; + offset = copy_length; + i = 1; + while (table_length != 0) { + if (table_length > PAGE_SIZE) { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + PAGE_SIZE); + offset += PAGE_SIZE; + table_length -= PAGE_SIZE; + } else { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + table_length); + table_length = 0; + } + i++; + } + } + (*metadata)->verity_table[table_length] = '\0'; + + DMINFO("verity_table: %s", (*metadata)->verity_table); + goto free_payload; + +free_metadata: + kfree(*metadata); +free_header: + kfree(header); +free_payload: + for (i = 0; i < payload.number_of_pages; i++) + if (payload.page_io[i]) + __free_page(payload.page_io[i]); + kfree(payload.page_io); +blkdev_release: + blkdev_put(bdev, FMODE_READ); + return err; +} + +/* helper functions to extract properties from dts */ +const char *find_dt_value(const char *name) +{ + struct device_node *firmware; + const char *value; + + firmware = of_find_node_by_path("/firmware/android"); + if (!firmware) + return NULL; + value = of_get_property(firmware, name, NULL); + of_node_put(firmware); + + return value; +} + +static int verity_mode(void) +{ + static const char enforcing[] = "enforcing"; + static const char 
verified_mode_prop[] = "veritymode"; + const char *value; + + value = find_dt_value(verified_mode_prop); + if (!value) + value = veritymode; + if (!strncmp(value, enforcing, sizeof(enforcing) - 1)) + return DM_VERITY_MODE_RESTART; + + return DM_VERITY_MODE_EIO; +} + +static int verify_verity_signature(char *key_id, + struct android_metadata *metadata) +{ + key_ref_t key_ref; + struct key *key; + struct public_key_signature *pks = NULL; + int retval = -EINVAL; + + key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1), + &key_type_asymmetric, key_id); + + if (IS_ERR(key_ref)) { + DMERR("keyring: key not found"); + return -ENOKEY; + } + + key = key_ref_to_ptr(key_ref); + + pks = table_make_digest(HASH_ALGO_SHA256, + (const void *)metadata->verity_table, + le32_to_cpu(metadata->header->table_length)); + + if (IS_ERR(pks)) { + DMERR("hashing failed"); + retval = PTR_ERR(pks); + pks = NULL; + goto error; + } + + retval = table_extract_mpi_array(pks, &metadata->header->signature[0], + RSANUMBYTES); + if (retval < 0) { + DMERR("Error extracting mpi %d", retval); + goto error; + } + + retval = verify_signature(key, pks); + mpi_free(pks->rsa.s); +error: + kfree(pks); + key_put(key); + + return retval; +} + +static void handle_error(void) +{ + int mode = verity_mode(); + if (mode == DM_VERITY_MODE_RESTART) { + DMERR("triggering restart"); + kernel_restart("dm-verity device corrupted"); + } else { + DMERR("Mounting verity root failed"); + } +} + +static inline bool test_mult_overflow(sector_t a, u32 b) +{ + sector_t r = (sector_t)~0ULL; + + sector_div(r, b); + return a > r; +} + +static int add_as_linear_device(struct dm_target *ti, char *dev) +{ + /*Move to linear mapping defines*/ + char *linear_table_args[DM_LINEAR_ARGS] = {dev, + DM_LINEAR_TARGET_OFFSET}; + int err = 0; + + android_verity_target.dtr = dm_linear_dtr, + android_verity_target.map = dm_linear_map, + android_verity_target.status = dm_linear_status, + android_verity_target.end_io = dm_linear_end_io, + android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl, + android_verity_target.iterate_devices = dm_linear_iterate_devices, + android_verity_target.direct_access = dm_linear_dax_direct_access, + android_verity_target.dax_copy_from_iter = dm_linear_dax_copy_from_iter, + android_verity_target.io_hints = NULL; + + set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0); + + err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args); + + if (!err) { + DMINFO("Added android-verity as a linear target"); + target_added = true; + } else + DMERR("Failed to add android-verity as linear target"); + + return err; +} + +static int create_linear_device(struct dm_target *ti, dev_t dev, + char *target_device) +{ + u64 device_size = 0; + int err = find_size(dev, &device_size); + + if (err) { + DMERR("error finding bdev size"); + handle_error(); + return err; + } + + ti->len = device_size; + err = add_as_linear_device(ti, target_device); + if (err) { + handle_error(); + return err; + } + verity_enabled = false; + return 0; +} + +/* + * Target parameters: + * Key id of the public key in the system keyring. + * Verity metadata's signature would be verified against + * this. If the key id contains spaces, replace them + * with '#'. + * The block device for which dm-verity is being setup. 
+ */ +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + dev_t uninitialized_var(dev); + struct android_metadata *metadata = NULL; + int err = 0, i, mode; + char *key_id, *table_ptr, dummy, *target_device, + *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; + /* One for specifying number of opt args and one for mode */ + sector_t data_sectors; + u32 data_block_size; + unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS; + struct fec_header uninitialized_var(fec); + struct fec_ecc_metadata uninitialized_var(ecc); + char buf[FEC_ARG_LENGTH], *buf_ptr; + unsigned long long tmpll; + + if (argc == 1) { + /* Use the default keyid */ + if (default_verity_key_id()) + key_id = veritykeyid; + else if (!is_eng()) { + DMERR("veritykeyid= is not set"); + handle_error(); + return -EINVAL; + } + } else if (argc == 2) + key_id = argv[1]; + else { + DMERR("Incorrect number of arguments"); + handle_error(); + return -EINVAL; + } + + target_device = argv[0]; + + dev = name_to_dev_t(target_device); + if (!dev) { + DMERR("no dev found for %s", target_device); + handle_error(); + return -EINVAL; + } + + if (is_eng()) + return create_linear_device(ti, dev, target_device); + + strreplace(key_id, '#', ' '); + + DMINFO("key:%s dev:%s", key_id, target_device); + + if (extract_fec_header(dev, &fec, &ecc)) { + DMERR("Error while extracting fec header"); + handle_error(); + return -EINVAL; + } + + err = extract_metadata(dev, &fec, &metadata, &verity_enabled); + + if (err) { + /* Allow invalid metadata when the device is unlocked */ + if (is_unlocked()) { + DMWARN("Allow invalid metadata when unlocked"); + return create_linear_device(ti, dev, target_device); + } + DMERR("Error while extracting metadata"); + handle_error(); + goto free_metadata; + } + + if (verity_enabled) { + err = verify_verity_signature(key_id, metadata); + + if (err) { + DMERR("Signature verification failed"); + handle_error(); + goto free_metadata; + } else + DMINFO("Signature verification success"); + } + + table_ptr = metadata->verity_table; + + for (i = 0; i < VERITY_TABLE_ARGS; i++) { + verity_table_args[i] = strsep(&table_ptr, " "); + if (verity_table_args[i] == NULL) + break; + } + + if (i != VERITY_TABLE_ARGS) { + DMERR("Verity table not in the expected format"); + err = -EINVAL; + handle_error(); + goto free_metadata; + } + + if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (tmpll > ULONG_MAX) { + DMERR(" too large. 
Forgot to turn on CONFIG_LBDAF?"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + data_sectors = tmpll; + + if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (test_mult_overflow(data_sectors, data_block_size >> + SECTOR_SHIFT)) { + DMERR("data_sectors too large"); + handle_error(); + err = -EOVERFLOW; + goto free_metadata; + } + + data_sectors *= data_block_size >> SECTOR_SHIFT; + DMINFO("Data sectors %llu", (unsigned long long)data_sectors); + + /* update target length */ + ti->len = data_sectors; + + /* Setup linear target and free */ + if (!verity_enabled) { + err = add_as_linear_device(ti, target_device); + goto free_metadata; + } + + /*substitute data_dev and hash_dev*/ + verity_table_args[1] = target_device; + verity_table_args[2] = target_device; + + mode = verity_mode(); + + if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) { + if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u %s " VERITY_TABLE_OPT_FEC_FORMAT, + 1 + VERITY_TABLE_OPT_FEC_ARGS, + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : + VERITY_TABLE_OPT_LOGGING, + target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u " VERITY_TABLE_OPT_FEC_FORMAT, + VERITY_TABLE_OPT_FEC_ARGS, target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } + } else if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "2 " VERITY_TABLE_OPT_IGNZERO " %s", + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, "1 %s", + "ignore_zero_blocks"); + } + + if (err < 0 || err >= FEC_ARG_LENGTH) + goto free_metadata; + + buf_ptr = buf; + + for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS + + VERITY_TABLE_OPT_FEC_ARGS + 2); i++) { + verity_table_args[i] = strsep(&buf_ptr, " "); + if (verity_table_args[i] == NULL) { + no_of_args = i; + break; + } + } + + err = verity_ctr(ti, no_of_args, verity_table_args); + + if (err) + DMERR("android-verity failed to mount as verity target"); + else { + target_added = true; + DMINFO("android-verity mounted as verity target"); + } + +free_metadata: + if (metadata) { + kfree(metadata->header); + kfree(metadata->verity_table); + } + kfree(metadata); + return err; +} + +static int __init dm_android_verity_init(void) +{ + int r; + struct dentry *file; + + r = dm_register_target(&android_verity_target); + if (r < 0) + DMERR("register failed %d", r); + + /* Tracks the status of the last added target */ + debug_dir = debugfs_create_dir("android_verity", NULL); + + if (IS_ERR_OR_NULL(debug_dir)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + goto end; + } + + file = debugfs_create_bool("target_added", S_IRUGO, debug_dir, + &target_added); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + goto end; + } + + file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir, + &verity_enabled); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + } + +end: + return r; +} + +static void __exit dm_android_verity_exit(void) +{ + if (!IS_ERR_OR_NULL(debug_dir)) + debugfs_remove_recursive(debug_dir); + + 
dm_unregister_target(&android_verity_target); +} + +module_init(dm_android_verity_init); +module_exit(dm_android_verity_exit); diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h new file mode 100644 index 000000000000..ed67d567f3ee --- /dev/null +++ b/drivers/md/dm-android-verity.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef DM_ANDROID_VERITY_H +#define DM_ANDROID_VERITY_H + +#include + +#define RSANUMBYTES 256 +#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001 +#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56 +#define VERITY_METADATA_VERSION 0 +#define VERITY_STATE_DISABLE 1 +#define DATA_BLOCK_SIZE (4 * 1024) +#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE) +#define VERITY_TABLE_ARGS 10 +#define VERITY_COMMANDLINE_PARAM_LENGTH 20 +#define BUILD_VARIANT 20 + +/* + * : is the format for the identifier. + * subject can either be the Common Name(CN) + Organization Name(O) or + * just the CN if the it is prefixed with O + * From https://tools.ietf.org/html/rfc5280#appendix-A + * ub-organization-name-length INTEGER ::= 64 + * ub-common-name-length INTEGER ::= 64 + * + * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278 + * ctx->o_size + 2 + ctx->cn_size + 1 + * + 41 characters for ":" and sha1 id + * 64 + 2 + 64 + 1 + 1 + 40 (172) + * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters. + */ +#define VERITY_DEFAULT_KEY_ID_LENGTH 200 + +#define FEC_MAGIC 0xFECFECFE +#define FEC_BLOCK_SIZE (4 * 1024) +#define FEC_VERSION 0 +#define FEC_RSM 255 +#define FEC_ARG_LENGTH 300 + +#define VERITY_TABLE_OPT_RESTART "restart_on_corruption" +#define VERITY_TABLE_OPT_LOGGING "ignore_corruption" +#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks" + +#define VERITY_TABLE_OPT_FEC_FORMAT \ + "use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks" +#define VERITY_TABLE_OPT_FEC_ARGS 9 + +#define VERITY_DEBUG 0 + +#define DM_MSG_PREFIX "android-verity" + +#define DM_LINEAR_ARGS 2 +#define DM_LINEAR_TARGET_OFFSET "0" + +/* + * There can be two formats. 
+ * if fec is present + * + * if fec is not present + * + */ +struct fec_header { + __le32 magic; + __le32 version; + __le32 size; + __le32 roots; + __le32 fec_size; + __le64 inp_size; + u8 hash[SHA256_DIGEST_SIZE]; +} __attribute__((packed)); + +struct android_metadata_header { + __le32 magic_number; + __le32 protocol_version; + char signature[RSANUMBYTES]; + __le32 table_length; +}; + +struct android_metadata { + struct android_metadata_header *header; + char *verity_table; +}; + +struct fec_ecc_metadata { + bool valid; + u32 roots; + u64 blocks; + u64 rounds; + u64 start; +}; + +struct bio_read { + struct page **page_io; + int number_of_pages; +}; + +extern struct target_type linear_target; + +extern void dm_linear_dtr(struct dm_target *ti); +extern int dm_linear_map(struct dm_target *ti, struct bio *bio); +extern int dm_linear_end_io(struct dm_target *ti, struct bio *bio, + blk_status_t *error); +extern void dm_linear_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int dm_linear_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); +extern int dm_linear_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv); +extern long dm_linear_dax_direct_access(struct dm_target *ti, sector_t sector, + void **kaddr, pfn_t *pfn, long size); +extern size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i); +#endif /* DM_ANDROID_VERITY_H */ diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc22..1e17e6421da3 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -386,9 +386,6 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { - unsigned noio_flag; - void *ptr; - if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, * all allocations done by this process (including pagetables) are done * as if GFP_NOIO was specified. 
*/ + if (gfp_mask & __GFP_NORETRY) { + unsigned noio_flag = memalloc_noio_save(); + void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - if (gfp_mask & __GFP_NORETRY) - noio_flag = memalloc_noio_save(); - - ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - - if (gfp_mask & __GFP_NORETRY) memalloc_noio_restore(noio_flag); + return ptr; + } - return ptr; + return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); } /* @@ -974,7 +970,8 @@ static void __get_memory_limit(struct dm_bufio_client *c, buffers = c->minimum_buffers; *limit_buffers = buffers; - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; + *threshold_buffers = mult_frac(buffers, + DM_BUFIO_WRITEBACK_PERCENT, 100); } /* @@ -1610,7 +1607,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, int l; struct dm_buffer *b, *tmp; unsigned long freed = 0; - unsigned long count = nr_to_scan; + unsigned long count = c->n_buffers[LIST_CLEAN] + + c->n_buffers[LIST_DIRTY]; unsigned long retain_target = get_retain_buffers(c); for (l = 0; l < LIST_SIZE; l++) { @@ -1646,8 +1644,11 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); + unsigned long count = ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + unsigned long retain_target = get_retain_buffers(c); - return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + return (count < retain_target) ? 0 : (count - retain_target); } /* @@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void) memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); - mem = (__u64)((totalram_pages - totalhigh_pages) * - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; if (mem > ULONG_MAX) mem = ULONG_MAX; #ifdef CONFIG_MMU - /* - * Get the size of vmalloc space the same way as VMALLOC_TOTAL - * in fs/proc/internal.h - */ - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100) - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100; + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); #endif dm_bufio_default_cache_size = mem; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 8785134c9f1f..71c3507df9a0 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache) /*----------------------------------------------------------------*/ +static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) +{ + return (bio_data_dir(bio) == WRITE) && + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); +} + +static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) +{ + return writeback_mode(&cache->features) && + (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); +} + static void quiesce(struct dm_cache_migration *mg, void (*continuation)(struct work_struct *)) { @@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws) } } +static void mg_full_copy(struct work_struct *ws) +{ + struct dm_cache_migration *mg = ws_to_mg(ws); + struct cache *cache = mg->cache; + struct policy_work *op = 
mg->op; + bool is_policy_promote = (op->op == POLICY_PROMOTE); + + if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || + is_discarded_oblock(cache, op->oblock)) { + mg_upgrade_lock(ws); + return; + } + + init_continuation(&mg->k, mg_upgrade_lock); + + if (copy(mg, is_policy_promote)) { + DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); + mg->k.input = BLK_STS_IOERR; + mg_complete(mg, false); + } +} + static void mg_copy(struct work_struct *ws) { - int r; struct dm_cache_migration *mg = ws_to_mg(ws); if (mg->overwrite_bio) { + /* + * No exclusive lock was held when we last checked if the bio + * was optimisable. So we have to check again in case things + * have changed (eg, the block may no longer be discarded). + */ + if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { + /* + * Fallback to a real full copy after doing some tidying up. + */ + bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); + BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */ + mg->overwrite_bio = NULL; + inc_io_migrations(mg->cache); + mg_full_copy(ws); + return; + } + /* * It's safe to do this here, even though it's new data * because all IO has been locked out of the block. @@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *ws) */ overwrite(mg, mg_update_metadata_after_copy); - } else { - struct cache *cache = mg->cache; - struct policy_work *op = mg->op; - bool is_policy_promote = (op->op == POLICY_PROMOTE); - - if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || - is_discarded_oblock(cache, op->oblock)) { - mg_upgrade_lock(ws); - return; - } - - init_continuation(&mg->k, mg_upgrade_lock); - - r = copy(mg, is_policy_promote); - if (r) { - DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); - mg->k.input = BLK_STS_IOERR; - mg_complete(mg, false); - } - } + } else + mg_full_copy(ws); } static int mg_lock_writes(struct dm_cache_migration *mg) @@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio) /*----------------------------------------------------------------*/ -static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) -{ - return (bio_data_dir(bio) == WRITE) && - (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); -} - -static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) -{ - return writeback_mode(&cache->features) && - (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); -} - static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, bool *commit_needed) { @@ -3534,18 +3554,18 @@ static int __init dm_cache_init(void) { int r; - r = dm_register_target(&cache_target); - if (r) { - DMERR("cache target registration failed: %d", r); - return r; - } - migration_cache = KMEM_CACHE(dm_cache_migration, 0); if (!migration_cache) { dm_unregister_target(&cache_target); return -ENOMEM; } + r = dm_register_target(&cache_target); + if (r) { + DMERR("cache target registration failed: %d", r); + return r; + } + return 0; } diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 203144762f36..6a14f945783c 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -29,7 +29,6 @@ struct dm_kobject_holder { * DM targets must _not_ deference a mapped_device to directly access its members! 
*/ struct mapped_device { - struct srcu_struct io_barrier; struct mutex suspend_lock; /* @@ -127,6 +126,8 @@ struct mapped_device { struct blk_mq_tag_set *tag_set; bool use_blk_mq:1; bool init_tio_pdu:1; + + struct srcu_struct io_barrier; }; void dm_init_md_queue(struct mapped_device *md); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 96ab46512e1f..554d60394c06 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc, BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); /* Reject unexpected unaligned bio. */ - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) return -EIO; dmreq = dmreq_of_req(cc, req); @@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc, int r = 0; /* Reject unexpected unaligned bio. */ - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) return -EIO; dmreq = dmreq_of_req(cc, req); @@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc) /* Ignore extra keys (which are used for IV etc) */ subkey_size = crypt_subkey_size(cc); - if (crypt_integrity_hmac(cc)) + if (crypt_integrity_hmac(cc)) { + if (subkey_size < cc->key_mac_size) + return -EINVAL; + crypt_copy_authenckey(cc->authenc_key, cc->key, subkey_size - cc->key_mac_size, cc->key_mac_size); + } + for (i = 0; i < cc->tfms_count; i++) { if (crypt_integrity_hmac(cc)) r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], @@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string ret = crypt_setkey(cc); - /* wipe the kernel key payload copy in each case */ - memset(cc->key, 0, cc->key_size * sizeof(u8)); - if (!ret) { set_bit(DM_CRYPT_KEY_VALID, &cc->flags); kzfree(cc->key_string); @@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) } } + /* wipe the kernel key payload copy */ + if (cc->key_string) + memset(cc->key, 0, cc->key_size * sizeof(u8)); + return ret; } @@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->tag_pool_max_sectors * cc->on_disk_tag_size); if (!cc->tag_pool) { ti->error = "Cannot allocate integrity tags mempool"; + ret = -ENOMEM; goto bad; } @@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) return ret; if (cc->iv_gen_ops && cc->iv_gen_ops->init) ret = cc->iv_gen_ops->init(cc); + /* wipe the kernel key payload copy */ + if (cc->key_string) + memset(cc->key, 0, cc->key_size * sizeof(u8)); return ret; } if (argc == 2 && !strcasecmp(argv[1], "wipe")) { @@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 18, 0}, + .version = {1, 18, 1}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 096fe9b66c50..3cc2052f972c 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio) struct bvec_iter iter; struct bio_vec bv; bio_for_each_segment(bv, bio, iter) { - if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { + if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { DMERR("Bio vector 
(%u,%u) is not aligned on %u-sector boundary", bv.bv_offset, bv.bv_len, ic->sectors_per_block); return DM_MAPIO_KILL; @@ -2558,7 +2558,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error) int r = 0; unsigned i; __u64 journal_pages, journal_desc_size, journal_tree_size; - unsigned char *crypt_data = NULL; + unsigned char *crypt_data = NULL, *crypt_iv = NULL; + struct skcipher_request *req = NULL; ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); @@ -2616,9 +2617,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error) if (blocksize == 1) { struct scatterlist *sg; - SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); - unsigned char iv[ivsize]; - skcipher_request_set_tfm(req, ic->journal_crypt); + + req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); + if (!req) { + *error = "Could not allocate crypt request"; + r = -ENOMEM; + goto bad; + } + + crypt_iv = kmalloc(ivsize, GFP_KERNEL); + if (!crypt_iv) { + *error = "Could not allocate iv"; + r = -ENOMEM; + goto bad; + } ic->journal_xor = dm_integrity_alloc_page_list(ic); if (!ic->journal_xor) { @@ -2640,9 +2652,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error) sg_set_buf(&sg[i], va, PAGE_SIZE); } sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); - memset(iv, 0x00, ivsize); + memset(crypt_iv, 0x00, ivsize); - skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); + skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv); init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) @@ -2658,10 +2670,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error) crypto_free_skcipher(ic->journal_crypt); ic->journal_crypt = NULL; } else { - SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); - unsigned char iv[ivsize]; unsigned crypt_len = roundup(ivsize, blocksize); + req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); + if (!req) { + *error = "Could not allocate crypt request"; + r = -ENOMEM; + goto bad; + } + + crypt_iv = kmalloc(ivsize, GFP_KERNEL); + if (!crypt_iv) { + *error = "Could not allocate iv"; + r = -ENOMEM; + goto bad; + } + crypt_data = kmalloc(crypt_len, GFP_KERNEL); if (!crypt_data) { *error = "Unable to allocate crypt data"; @@ -2669,8 +2693,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error) goto bad; } - skcipher_request_set_tfm(req, ic->journal_crypt); - ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); if (!ic->journal_scatterlist) { *error = "Unable to allocate sg list"; @@ -2694,12 +2716,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error) struct skcipher_request *section_req; __u32 section_le = cpu_to_le32(i); - memset(iv, 0x00, ivsize); + memset(crypt_iv, 0x00, ivsize); memset(crypt_data, 0x00, crypt_len); memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le))); sg_init_one(&sg, crypt_data, crypt_len); - skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); + skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv); init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) @@ -2757,6 +2779,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error) } bad: kfree(crypt_data); + kfree(crypt_iv); + skcipher_request_free(req); + return r; } diff --git a/drivers/md/dm-ioctl.c 
b/drivers/md/dm-ioctl.c index e52676fa9832..4a94d510aeff 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1992,6 +1992,45 @@ void dm_interface_exit(void) dm_hash_exit(); } + +/** + * dm_ioctl_export - Permanently export a mapped device via the ioctl interface + * @md: Pointer to mapped_device + * @name: Buffer (size DM_NAME_LEN) for name + * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired + */ +int dm_ioctl_export(struct mapped_device *md, const char *name, + const char *uuid) +{ + int r = 0; + struct hash_cell *hc; + + if (!md) { + r = -ENXIO; + goto out; + } + + /* The name and uuid can only be set once. */ + mutex_lock(&dm_hash_cells_mutex); + hc = dm_get_mdptr(md); + mutex_unlock(&dm_hash_cells_mutex); + if (hc) { + DMERR("%s: already exported", dm_device_name(md)); + r = -ENXIO; + goto out; + } + + r = dm_hash_insert(name, uuid, md); + if (r) { + DMERR("%s: could not bind to '%s'", dm_device_name(md), name); + goto out; + } + + /* Let udev know we've changed. */ + dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md)); +out: + return r; +} /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index d5f8eff7c11d..e6fd31b03c38 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -26,7 +26,7 @@ struct linear_c { /* * Construct a linear mapping: */ -static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) +int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct linear_c *lc; unsigned long long tmp; @@ -69,7 +69,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) return ret; } -static void linear_dtr(struct dm_target *ti) +void dm_linear_dtr(struct dm_target *ti) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -94,14 +94,14 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) linear_map_sector(ti, bio->bi_iter.bi_sector); } -static int linear_map(struct dm_target *ti, struct bio *bio) +int dm_linear_map(struct dm_target *ti, struct bio *bio) { linear_map_bio(ti, bio); return DM_MAPIO_REMAPPED; } -static int linear_end_io(struct dm_target *ti, struct bio *bio, +int dm_linear_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) { struct linear_c *lc = ti->private; @@ -111,8 +111,9 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio, return DM_ENDIO_DONE; } +EXPORT_SYMBOL_GPL(dm_linear_end_io); -static void linear_status(struct dm_target *ti, status_type_t type, +void dm_linear_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -129,7 +130,7 @@ static void linear_status(struct dm_target *ti, status_type_t type, } } -static int linear_prepare_ioctl(struct dm_target *ti, +int dm_linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -146,7 +147,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, return 0; } -static int linear_iterate_devices(struct dm_target *ti, +int dm_linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct linear_c *lc = ti->private; @@ -154,7 +155,7 @@ static int linear_iterate_devices(struct dm_target *ti, return fn(ti, lc->dev, lc->start, ti->len, data); } -static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, 
+long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { long ret; @@ -169,8 +170,9 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, return ret; return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); } +EXPORT_SYMBOL_GPL(dm_linear_dax_direct_access); -static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, +size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { struct linear_c *lc = ti->private; @@ -183,21 +185,22 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return 0; return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } +EXPORT_SYMBOL_GPL(dm_linear_dax_copy_from_iter); static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, .module = THIS_MODULE, - .ctr = linear_ctr, - .dtr = linear_dtr, - .map = linear_map, - .end_io = linear_end_io, - .status = linear_status, - .prepare_ioctl = linear_prepare_ioctl, - .iterate_devices = linear_iterate_devices, - .direct_access = linear_dax_direct_access, - .dax_copy_from_iter = linear_dax_copy_from_iter, + .ctr = dm_linear_ctr, + .dtr = dm_linear_dtr, + .map = dm_linear_map, + .status = dm_linear_status, + .end_io = dm_linear_end_io, + .prepare_ioctl = dm_linear_prepare_ioctl, + .iterate_devices = dm_linear_iterate_devices, + .direct_access = dm_linear_dax_direct_access, + .dax_copy_from_iter = dm_linear_dax_copy_from_iter, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 11f273d2f018..2704a55f8b6e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m, pgpath = path_to_pgpath(path); - if (unlikely(lockless_dereference(m->current_pg) != pg)) { + if (unlikely(READ_ONCE(m->current_pg) != pg)) { /* Only update current_pgpath if pg changed */ spin_lock_irqsave(&m->lock, flags); m->current_pgpath = pgpath; @@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) } /* Were we instructed to switch PG? */ - if (lockless_dereference(m->next_pg)) { + if (READ_ONCE(m->next_pg)) { spin_lock_irqsave(&m->lock, flags); pg = m->next_pg; if (!pg) { @@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) /* Don't change PG until it has no remaining paths */ check_current_pg: - pg = lockless_dereference(m->current_pg); + pg = READ_ONCE(m->current_pg); if (pg) { pgpath = choose_path_in_pg(m, pg, nr_bytes); if (!IS_ERR_OR_NULL(pgpath)) @@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, struct request *clone; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, nr_bytes); @@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, if (IS_ERR(clone)) { /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ bool queue_dying = blk_queue_dying(q); - DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing", - PTR_ERR(clone), queue_dying ? 
" (path offline)" : ""); if (queue_dying) { atomic_inc(&m->pg_init_in_progress); activate_or_offline_path(pgpath); @@ -535,7 +533,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m bool queue_io; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); if (!pgpath || !queue_io) pgpath = choose_pgpath(m, nr_bytes); @@ -1804,7 +1802,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct pgpath *current_pgpath; int r; - current_pgpath = lockless_dereference(m->current_pgpath); + current_pgpath = READ_ONCE(m->current_pgpath); if (!current_pgpath) current_pgpath = choose_pgpath(m, 0); @@ -1826,7 +1824,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, } if (r == -ENOTCONN) { - if (!lockless_dereference(m->current_pg)) { + if (!READ_ONCE(m->current_pg)) { /* Path status changed, redo selection */ (void) choose_pgpath(m, 0); } @@ -1895,9 +1893,9 @@ static int multipath_busy(struct dm_target *ti) return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); /* Guess which priority_group will be used at next mapping time */ - pg = lockless_dereference(m->current_pg); - next_pg = lockless_dereference(m->next_pg); - if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg)) + pg = READ_ONCE(m->current_pg); + next_pg = READ_ONCE(m->next_pg); + if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) pg = next_pg; if (!pg) { @@ -1943,8 +1941,9 @@ static int multipath_busy(struct dm_target *ti) *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 12, 0}, - .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, + .version = {1, 13, 0}, + .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | + DM_TARGET_PASSES_INTEGRITY, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, @@ -1967,13 +1966,6 @@ static int __init dm_multipath_init(void) { int r; - r = dm_register_target(&multipath_target); - if (r < 0) { - DMERR("request-based register failed %d", r); - r = -EINVAL; - goto bad_register_target; - } - kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); if (!kmultipathd) { DMERR("failed to create workqueue kmpathd"); @@ -1995,13 +1987,20 @@ static int __init dm_multipath_init(void) goto bad_alloc_kmpath_handlerd; } + r = dm_register_target(&multipath_target); + if (r < 0) { + DMERR("request-based register failed %d", r); + r = -EINVAL; + goto bad_register_target; + } + return 0; +bad_register_target: + destroy_workqueue(kmpath_handlerd); bad_alloc_kmpath_handlerd: destroy_workqueue(kmultipathd); bad_alloc_kmultipathd: - dm_unregister_target(&multipath_target); -bad_register_target: return r; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2245d06d2045..33834db7c0a0 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -675,15 +675,11 @@ static struct raid_type *get_raid_type_by_ll(const int level, const int layout) return NULL; } -/* - * Conditionally change bdev capacity of @rs - * in case of a disk add/remove reshape - */ -static void rs_set_capacity(struct raid_set *rs) +/* Adjust rdev sectors */ +static void rs_set_rdev_sectors(struct raid_set *rs) { struct mddev *mddev = &rs->md; struct md_rdev *rdev; - struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); /* * raid10 sets rdev->sector to the device size, which @@ -692,8 +688,16 @@ static void 
rs_set_capacity(struct raid_set *rs) rdev_for_each(rdev, mddev) if (!test_bit(Journal, &rdev->flags)) rdev->sectors = mddev->dev_sectors; +} + +/* + * Change bdev capacity of @rs in case of a disk add/remove reshape + */ +static void rs_set_capacity(struct raid_set *rs) +{ + struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); - set_capacity(gendisk, mddev->array_sectors); + set_capacity(gendisk, rs->md.array_sectors); revalidate_disk(gendisk); } @@ -1674,8 +1678,11 @@ static void do_table_event(struct work_struct *ws) struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); smp_rmb(); /* Make sure we access most actual mddev properties */ - if (!rs_is_reshaping(rs)) + if (!rs_is_reshaping(rs)) { + if (rs_is_raid10(rs)) + rs_set_rdev_sectors(rs); rs_set_capacity(rs); + } dm_table_event(rs->ti->table); } @@ -2143,13 +2150,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) struct dm_raid_superblock *refsb; uint64_t events_sb, events_refsb; - rdev->sb_start = 0; - rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); - if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { - DMERR("superblock size of a logical block is no longer valid"); - return -EINVAL; - } - r = read_disk_sb(rdev, rdev->sb_size, false); if (r) return r; @@ -2494,6 +2494,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (test_bit(Journal, &rdev->flags)) continue; + if (!rdev->meta_bdev) + continue; + + /* Set superblock offset/size for metadata device. */ + rdev->sb_start = 0; + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); + if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) { + DMERR("superblock size of a logical block is no longer valid"); + return -EINVAL; + } + /* * Skipping super_load due to CTR_FLAG_SYNC will cause * the array to undergo initialization again as @@ -2506,9 +2517,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) continue; - if (!rdev->meta_bdev) - continue; - r = super_load(rdev, freshest); switch (r) { @@ -3844,11 +3852,10 @@ static int raid_preresume(struct dm_target *ti) mddev->resync_min = mddev->recovery_cp; } - rs_set_capacity(rs); - /* Check for any reshape request unless new raid set */ if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { /* Initiate a reshape. */ + rs_set_rdev_sectors(rs); mddev_lock_nointr(mddev); r = rs_start_reshape(rs); mddev_unlock(mddev); @@ -3877,6 +3884,10 @@ static void raid_resume(struct dm_target *ti) mddev->ro = 0; mddev->in_sync = 0; + /* Only reduce raid set size before running a disk removing reshape. */ + if (mddev->delta_disks < 0) + rs_set_capacity(rs); + /* * Keep the RAID set frozen if reshape/rebuild flags are set. 
* The RAID set is unfrozen once the next table load/resume, diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 1113b42e1eda..a0613bd8ed00 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void) return r; } - r = dm_register_target(&snapshot_target); - if (r < 0) { - DMERR("snapshot target register failed %d", r); - goto bad_register_snapshot_target; - } - - r = dm_register_target(&origin_target); - if (r < 0) { - DMERR("Origin target register failed %d", r); - goto bad_register_origin_target; - } - - r = dm_register_target(&merge_target); - if (r < 0) { - DMERR("Merge target register failed %d", r); - goto bad_register_merge_target; - } - r = init_origin_hash(); if (r) { DMERR("init_origin_hash failed."); @@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void) goto bad_pending_cache; } + r = dm_register_target(&snapshot_target); + if (r < 0) { + DMERR("snapshot target register failed %d", r); + goto bad_register_snapshot_target; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Origin target register failed %d", r); + goto bad_register_origin_target; + } + + r = dm_register_target(&merge_target); + if (r < 0) { + DMERR("Merge target register failed %d", r); + goto bad_register_merge_target; + } + return 0; -bad_pending_cache: - kmem_cache_destroy(exception_cache); -bad_exception_cache: - exit_origin_hash(); -bad_origin_hash: - dm_unregister_target(&merge_target); bad_register_merge_target: dm_unregister_target(&origin_target); bad_register_origin_target: dm_unregister_target(&snapshot_target); bad_register_snapshot_target: + kmem_cache_destroy(pending_cache); +bad_pending_cache: + kmem_cache_destroy(exception_cache); +bad_exception_cache: + exit_origin_hash(); +bad_origin_hash: dm_exception_store_exit(); return r; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ef7b8f201f73..a02672047d66 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -1758,13 +1759,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t) return true; } - -static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_discard(q); + return q && !blk_queue_discard(q); } static bool dm_table_supports_discards(struct dm_table *t) @@ -1772,28 +1772,24 @@ static bool dm_table_supports_discards(struct dm_table *t) struct dm_target *ti; unsigned i; - /* - * Unless any target used by the table set discards_supported, - * require at least one underlying device to support discards. - * t->devices includes internal dm devices such as mirror logs - * so we need to use iterate_devices here, which targets - * supporting discard selectively must provide. - */ for (i = 0; i < dm_table_get_num_targets(t); i++) { ti = dm_table_get_target(t, i); if (!ti->num_discard_bios) - continue; - - if (ti->discards_supported) - return true; + return false; - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_discard_capable, NULL)) - return true; + /* + * Either the target provides discard support (as implied by setting + * 'discards_supported') or it relies on _all_ data devices having + * discard support. 
+ */ + if (!ti->discards_supported && + (!ti->type->iterate_devices || + ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) + return false; } - return false; + return true; } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index d31d18d9727c..36ef284ad086 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -80,10 +80,14 @@ #define SECTOR_TO_BLOCK_SHIFT 3 /* + * For btree insert: * 3 for btree insert + * 2 for btree lookup used within space map + * For btree remove: + * 2 for shadow spine + + * 4 for rebalance 3 child node */ -#define THIN_MAX_CONCURRENT_LOCKS 5 +#define THIN_MAX_CONCURRENT_LOCKS 6 /* This should be plenty */ #define SPACE_MAP_ROOT_SIZE 128 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c2..02e42ba2ecbc 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -4355,30 +4355,28 @@ static struct target_type thin_target = { static int __init dm_thin_init(void) { - int r; + int r = -ENOMEM; pool_table_init(); + _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); + if (!_new_mapping_cache) + return r; + r = dm_register_target(&thin_target); if (r) - return r; + goto bad_new_mapping_cache; r = dm_register_target(&pool_target); if (r) - goto bad_pool_target; - - r = -ENOMEM; - - _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); - if (!_new_mapping_cache) - goto bad_new_mapping_cache; + goto bad_thin_target; return 0; -bad_new_mapping_cache: - dm_unregister_target(&pool_target); -bad_pool_target: +bad_thin_target: dm_unregister_target(&thin_target); +bad_new_mapping_cache: + kmem_cache_destroy(_new_mapping_cache); return r; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index e13f90832b6b..776a4f77f76c 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -11,6 +11,7 @@ #include "dm-verity-fec.h" #include +#include #define DM_MSG_PREFIX "verity-fec" @@ -175,9 +176,11 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); - else if (r > 0) + else if (r > 0) { DMWARN_LIMIT("%s: FEC %llu: corrected %d errors", v->data_dev->name, (unsigned long long)rsb, r); + atomic_add_unless(&v->fec->corrected, 1, INT_MAX); + } return r; } @@ -545,6 +548,7 @@ unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz, void verity_fec_dtr(struct dm_verity *v) { struct dm_verity_fec *f = v->fec; + struct kobject *kobj = &f->kobj_holder.kobj; if (!verity_fec_is_enabled(v)) goto out; @@ -561,6 +565,12 @@ void verity_fec_dtr(struct dm_verity *v) if (f->dev) dm_put_device(v->ti, f->dev); + + if (kobj->state_initialized) { + kobject_put(kobj); + wait_for_completion(dm_get_completion_from_kobject(kobj)); + } + out: kfree(f); v->fec = NULL; @@ -649,6 +659,28 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, return 0; } +static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec, + kobj_holder.kobj); + + return sprintf(buf, "%d\n", atomic_read(&f->corrected)); +} + +static struct kobj_attribute attr_corrected = __ATTR_RO(corrected); + +static struct attribute *fec_attrs[] = { + &attr_corrected.attr, + NULL +}; + +static struct kobj_type fec_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + 
.default_attrs = fec_attrs, + .release = dm_kobject_release +}; + /* * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr. */ @@ -672,8 +704,10 @@ int verity_fec_ctr_alloc(struct dm_verity *v) */ int verity_fec_ctr(struct dm_verity *v) { + int r; struct dm_verity_fec *f = v->fec; struct dm_target *ti = v->ti; + struct mapped_device *md = dm_table_get_md(ti->table); u64 hash_blocks; if (!verity_fec_is_enabled(v)) { @@ -681,6 +715,16 @@ int verity_fec_ctr(struct dm_verity *v) return 0; } + /* Create a kobject and sysfs attributes */ + init_completion(&f->kobj_holder.completion); + + r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype, + &disk_to_dev(dm_disk(md))->kobj, "%s", "fec"); + if (r) { + ti->error = "Cannot create kobject"; + return r; + } + /* * FEC is computed over data blocks, possible metadata, and * hash blocks. In other words, FEC covers total of fec_blocks diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index bb31ce87a933..4db0cae262eb 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -12,6 +12,8 @@ #ifndef DM_VERITY_FEC_H #define DM_VERITY_FEC_H +#include "dm.h" +#include "dm-core.h" #include "dm-verity.h" #include @@ -51,6 +53,8 @@ struct dm_verity_fec { mempool_t *extra_pool; /* mempool for extra buffers */ mempool_t *output_pool; /* mempool for output */ struct kmem_cache *cache; /* cache for buffers */ + atomic_t corrected; /* corrected errors */ + struct dm_kobject_holder kobj_holder; /* for sysfs attributes */ }; /* per-bio data */ diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bda3caca23ca..5c6d441a8a8a 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -582,6 +582,7 @@ static void verity_prefetch_io(struct work_struct *work) container_of(work, struct dm_verity_prefetch_work, work); struct dm_verity *v = pw->v; int i; + sector_t prefetch_size; for (i = v->levels - 2; i >= 0; i--) { sector_t hash_block_start; @@ -604,8 +605,14 @@ static void verity_prefetch_io(struct work_struct *work) hash_block_end = v->hash_blocks - 1; } no_prefetch_cluster: + // for emmc, it is more efficient to send bigger read + prefetch_size = max((sector_t)CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE, + hash_block_end - hash_block_start + 1); + if ((hash_block_start + prefetch_size) >= (v->hash_start + v->hash_blocks)) { + prefetch_size = hash_block_end - hash_block_start + 1; + } dm_bufio_prefetch(v->bufio, hash_block_start, - hash_block_end - hash_block_start + 1); + prefetch_size); } kfree(pw); @@ -632,7 +639,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) * Bio map function. It allocates dm_verity_io structure and bio vector and * fills them. Then it issues prefetches and the I/O. 
*/ -static int verity_map(struct dm_target *ti, struct bio *bio) +int verity_map(struct dm_target *ti, struct bio *bio) { struct dm_verity *v = ti->private; struct dm_verity_io *io; @@ -677,7 +684,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) /* * Status: V (valid) or C (corruption found) */ -static void verity_status(struct dm_target *ti, status_type_t type, +void verity_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; @@ -737,7 +744,7 @@ static void verity_status(struct dm_target *ti, status_type_t type, } } -static int verity_prepare_ioctl(struct dm_target *ti, +int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct dm_verity *v = ti->private; @@ -750,7 +757,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, return 0; } -static int verity_iterate_devices(struct dm_target *ti, +int verity_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dm_verity *v = ti->private; @@ -758,7 +765,7 @@ static int verity_iterate_devices(struct dm_target *ti, return fn(ti, v->data_dev, v->data_start, ti->len, data); } -static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) +void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct dm_verity *v = ti->private; @@ -771,7 +778,7 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, limits->logical_block_size); } -static void verity_dtr(struct dm_target *ti) +void verity_dtr(struct dm_target *ti) { struct dm_verity *v = ti->private; @@ -898,7 +905,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) * * Hex string or "-" if no salt. 
*/ -static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) { struct dm_verity *v; struct dm_arg_set as; diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index a59e0ada6fd3..29831d66d9c2 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -131,4 +131,14 @@ extern int verity_hash(struct dm_verity *v, struct ahash_request *req, extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); +extern void verity_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int verity_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); +extern int verity_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits); +extern void verity_dtr(struct dm_target *ti); +extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv); +extern int verity_map(struct dm_target *ti, struct bio *bio); #endif /* DM_VERITY_H */ diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index b87c1741da4b..6d7bda6f8190 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) struct dmz_target *dmz = ti->private; struct request_queue *q; struct dmz_dev *dev; + sector_t aligned_capacity; int ret; /* Get the target device */ @@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) goto err; } + q = bdev_get_queue(dev->bdev); dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (ti->begin || (ti->len != dev->capacity)) { + aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1); + if (ti->begin || + ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) { ti->error = "Partial mapping not supported"; ret = -EINVAL; goto err; } - q = bdev_get_queue(dev->bdev); - dev->zone_nr_sectors = q->limits.chunk_sectors; + dev->zone_nr_sectors = blk_queue_zone_sectors(q); dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors); dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); @@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dmz_target *dmz = ti->private; + struct dmz_dev *dev = dmz->dev; + sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1); - return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data); + return fn(ti, dmz->ddev, 0, capacity, data); } static struct target_type dmz_type = { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44d..1dfc855ac708 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -815,7 +815,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) queue_io(md, bio); } else { /* done with normal IO or empty flush */ - bio->bi_status = io_error; + if (io_error) + bio->bi_status = io_error; bio_endio(bio); } } @@ -1695,7 +1696,7 @@ static struct mapped_device *alloc_dev(int minor) struct mapped_device *md; void *old_md; - md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); + md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; @@ -1795,7 +1796,7 @@ static struct mapped_device *alloc_dev(int minor) bad_minor: module_put(THIS_MODULE); bad_module_get: - kfree(md); 
+ kvfree(md); return NULL; } @@ -1814,7 +1815,7 @@ static void free_dev(struct mapped_device *md) free_minor(minor); module_put(THIS_MODULE); - kfree(md); + kvfree(md); } static void __bind_mempools(struct mapped_device *md, struct dm_table *t) @@ -2709,11 +2710,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) md = container_of(kobj, struct mapped_device, kobj_holder.kobj); - if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) - return NULL; - + spin_lock(&_minor_lock); + if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { + md = NULL; + goto out; + } dm_get(md); +out: + spin_unlock(&_minor_lock); + return md; } diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 38c84c0a35d4..ab289ce9c3cd 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -80,8 +80,6 @@ void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type); enum dm_queue_mode dm_get_md_type(struct mapped_device *md); struct target_type *dm_get_immutable_target_type(struct mapped_device *md); -int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); - /* * To check the return value from dm_table_find_target(). */ diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e..e058c209bbcf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6362,7 +6362,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) break; } } - if (has_journal) { + if (has_journal || mddev->bitmap) { export_rdev(rdev); return -EBUSY; } @@ -7468,8 +7468,8 @@ void md_wakeup_thread(struct md_thread *thread) { if (thread) { pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); - if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags)) - wake_up(&thread->wqueue); + set_bit(THREAD_WAKEUP, &thread->flags); + wake_up(&thread->wqueue); } } EXPORT_SYMBOL(md_wakeup_thread); @@ -8039,7 +8039,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || + mddev->suspended); if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { percpu_ref_put(&mddev->writes_pending); return false; @@ -8110,7 +8111,6 @@ void md_allow_write(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); /* wait for the dirty state to be recorded in the metadata */ wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) && !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } else spin_unlock(&mddev->lock); @@ -8522,6 +8522,10 @@ static int remove_and_add_spares(struct mddev *mddev, int removed = 0; bool remove_some = false; + if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + /* Mustn't remove devices when resync thread is running */ + return 0; + rdev_for_each(rdev, mddev) { if ((this == NULL || rdev == this) && rdev->raid_disk >= 0 && diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index f21ce6a3d4cf..58b319757b1e 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) pn->keys[1] = rn->keys[0]; memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); - /* - * rejig the spine. 
This is ugly, since it knows too - * much about the spine - */ - if (s->nodes[0] != new_parent) { - unlock_block(s->info, s->nodes[0]); - s->nodes[0] = new_parent; - } - if (key < le64_to_cpu(rn->keys[0])) { - unlock_block(s->info, right); - s->nodes[1] = left; - } else { - unlock_block(s->info, left); - s->nodes[1] = right; - } - s->count = 2; - + unlock_block(s->info, left); + unlock_block(s->info, right); return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f3f3e40dc9d8..788fc0800465 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -810,11 +810,15 @@ static void flush_pending_writes(struct r1conf *conf) spin_lock_irq(&conf->device_lock); if (conf->pending_bio_list.head) { + struct blk_plug plug; struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); conf->pending_count = 0; spin_unlock_irq(&conf->device_lock); + blk_start_plug(&plug); flush_bio_list(conf, bio); + blk_finish_plug(&plug); } else spin_unlock_irq(&conf->device_lock); } @@ -990,14 +994,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr) _wait_barrier(conf, idx); } -static void wait_all_barriers(struct r1conf *conf) -{ - int idx; - - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) - _wait_barrier(conf, idx); -} - static void _allow_barrier(struct r1conf *conf, int idx) { atomic_dec(&conf->nr_pending[idx]); @@ -1011,14 +1007,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr) _allow_barrier(conf, idx); } -static void allow_all_barriers(struct r1conf *conf) -{ - int idx; - - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) - _allow_barrier(conf, idx); -} - /* conf->resync_lock should be held */ static int get_unqueued_pending(struct r1conf *conf) { @@ -1325,12 +1313,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); - if (bio_end_sector(bio) <= mddev->suspend_lo || - bio->bi_iter.bi_sector >= mddev->suspend_hi || - (mddev_is_clustered(mddev) && + if ((bio_end_sector(bio) <= mddev->suspend_lo || + bio->bi_iter.bi_sector >= mddev->suspend_hi) && + (!mddev_is_clustered(mddev) || !md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, - bio_end_sector(bio)))) + bio->bi_iter.bi_sector, + bio_end_sector(bio)))) break; sigfillset(&full); sigprocmask(SIG_BLOCK, &full, &old); @@ -1654,8 +1642,12 @@ static void print_conf(struct r1conf *conf) static void close_sync(struct r1conf *conf) { - wait_all_barriers(conf); - allow_all_barriers(conf); + int idx; + + for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) { + _wait_barrier(conf, idx); + _allow_barrier(conf, idx); + } mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 374df5796649..0d18d3b95201 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -890,10 +890,13 @@ static void flush_pending_writes(struct r10conf *conf) spin_lock_irq(&conf->device_lock); if (conf->pending_bio_list.head) { + struct blk_plug plug; struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); conf->pending_count = 0; spin_unlock_irq(&conf->device_lock); + blk_start_plug(&plug); /* flush any pending bitmap writes to disk * before proceeding w/ I/O */ bitmap_unplug(conf->mddev->bitmap); @@ -914,6 +917,7 @@ static void flush_pending_writes(struct r10conf *conf) generic_make_request(bio); bio = next; } + blk_finish_plug(&plug); } else spin_unlock_irq(&conf->device_lock); } diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 
0b7406ac8ce1..9a340728b846 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2571,31 +2571,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) int r5c_journal_mode_set(struct mddev *mddev, int mode) { struct r5conf *conf; - int err; if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || mode > R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; - err = mddev_lock(mddev); - if (err) - return err; conf = mddev->private; - if (!conf || !conf->log) { - mddev_unlock(mddev); + if (!conf || !conf->log) return -ENODEV; - } if (raid5_calc_degraded(conf) > 0 && - mode == R5C_JOURNAL_MODE_WRITE_BACK) { - mddev_unlock(mddev); + mode == R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; - } mddev_suspend(mddev); conf->log->r5c_journal_mode = mode; mddev_resume(mddev); - mddev_unlock(mddev); pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", mdname(mddev), mode, r5c_journal_mode_str[mode]); @@ -2608,6 +2599,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, { int mode = ARRAY_SIZE(r5c_journal_mode_str); size_t len = length; + int ret; if (len < 2) return -EINVAL; @@ -2619,8 +2611,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, if (strlen(r5c_journal_mode_str[mode]) == len && !strncmp(page, r5c_journal_mode_str[mode], len)) break; - - return r5c_journal_mode_set(mddev, mode) ?: length; + ret = mddev_lock(mddev); + if (ret) + return ret; + ret = r5c_journal_mode_set(mddev, mode); + mddev_unlock(mddev); + return ret ?: length; } struct md_sysfs_entry diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index cd026c88f7ef..702b76008886 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -758,7 +758,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, (unsigned long long)sector); rdev = conf->disks[dd_idx].rdev; - if (!rdev) { + if (!rdev || (!test_bit(In_sync, &rdev->flags) && + sector >= rdev->recovery_offset)) { pr_debug("%s:%*s data member disk %d missing\n", __func__, indent, "", dd_idx); update_parity = false; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a07133..7ec822ced80b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1818,8 +1818,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref) struct r5dev *dev = &sh->dev[i]; if (dev->written || i == pd_idx || i == qd_idx) { - if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) + if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) { set_bit(R5_UPTODATE, &dev->flags); + if (test_bit(STRIPE_EXPAND_READY, &sh->state)) + set_bit(R5_Expanded, &dev->flags); + } if (fua) set_bit(R5_WantFUA, &dev->flags); if (sync) @@ -2675,13 +2678,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) pr_debug("raid456: error called\n"); spin_lock_irqsave(&conf->device_lock, flags); + set_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); mddev->degraded = raid5_calc_degraded(conf); spin_unlock_irqrestore(&conf->device_lock, flags); set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); - set_bit(Faulty, &rdev->flags); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" @@ -7156,6 +7159,13 @@ static int raid5_run(struct mddev *mddev) min_offset_diff = diff; } + if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && + (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { + pr_notice("md/raid:%s: array cannot have both journal and bitmap\n", + mdname(mddev)); + 
return -EINVAL; + } + if (mddev->reshape_position != MaxSector) { /* Check that we can continue the reshape. * Difficulties arise if the stripe we would write to diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 9139d01ba7ed..33d844fe2e70 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; - if (!fepriv) - return; - - dvb_free_device(fepriv->dvbdev); + if (fepriv) + dvb_free_device(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); - kfree(fepriv); - fe->frontend_priv = NULL; + if (fepriv) + kfree(fepriv); } static void dvb_frontend_free(struct kref *ref) diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c index 0ee0df53b91b..79d5d89bc95e 100644 --- a/drivers/media/dvb-frontends/ascot2e.c +++ b/drivers/media/dvb-frontends/ascot2e.c @@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv, static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val) { - return ascot2e_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return ascot2e_write_regs(priv, reg, &tmp, 1); } static int ascot2e_read_regs(struct ascot2e_priv *priv, diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 48ee9bc00c06..ccbd84fd6428 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -257,7 +257,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv, static int cxd2841er_write_reg(struct cxd2841er_priv *priv, u8 addr, u8 reg, u8 val) { - return cxd2841er_write_regs(priv, addr, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return cxd2841er_write_regs(priv, addr, reg, &tmp, 1); } static int cxd2841er_read_regs(struct cxd2841er_priv *priv, diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c index 4bf5a551ba40..2ab8d83e5576 100644 --- a/drivers/media/dvb-frontends/helene.c +++ b/drivers/media/dvb-frontends/helene.c @@ -331,7 +331,9 @@ static int helene_write_regs(struct helene_priv *priv, static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val) { - return helene_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return helene_write_regs(priv, reg, &tmp, 1); } static int helene_read_regs(struct helene_priv *priv, diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c index 68d759c4c52e..5c8b405f2ddc 100644 --- a/drivers/media/dvb-frontends/horus3a.c +++ b/drivers/media/dvb-frontends/horus3a.c @@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv, static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val) { - return horus3a_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return horus3a_write_regs(priv, reg, &tmp, 1); } static int horus3a_enter_power_save(struct horus3a_priv *priv) diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c index 5bb1e73a10b4..ce7c443d3eac 100644 --- a/drivers/media/dvb-frontends/itd1000.c +++ b/drivers/media/dvb-frontends/itd1000.c @@ -95,8 +95,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg) static inline int 
itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) { - int ret = itd1000_write_regs(state, r, &v, 1); - state->shadow[r] = v; + u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + int ret = itd1000_write_regs(state, r, &tmp, 1); + state->shadow[r] = tmp; return ret; } diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index 50bce68ffd66..65d157fe76d1 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan) * New users must use I2C client binding directly! */ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, - struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) + struct i2c_adapter *i2c, + struct i2c_adapter **tuner_i2c_adapter) { struct i2c_client *client; struct i2c_board_info board_info; - struct m88ds3103_platform_data pdata; + struct m88ds3103_platform_data pdata = {}; pdata.clk = cfg->clock; pdata.i2c_wr_max = cfg->i2c_wr_max; @@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client, case M88DS3103_CHIP_ID: break; default: + ret = -ENODEV; + dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id); goto err_kfree; } diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index 961b9a2508e0..0b23cbc021b8 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state, static inline int mt312_writereg(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 val) { - return mt312_write(state, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + + return mt312_write(state, reg, &tmp, 1); } static inline u32 mt312_div(u32 a, u32 b) diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 172fc367ccaa..24840a2e5a75 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -14,6 +14,8 @@ * GNU General Public License for more details. 
*/ +#include + #include "si2168_priv.h" static const struct dvb_frontend_ops si2168_ops; @@ -435,6 +437,7 @@ static int si2168_init(struct dvb_frontend *fe) if (ret) goto err; + udelay(100); memcpy(cmd.args, "\x85", 1); cmd.wlen = 1; cmd.rlen = 1; diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 02347598277a..db5dde3215f0 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -539,7 +539,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data) { - return stb0899_write_regs(state, reg, &data, 1); + u8 tmp = data; + return stb0899_write_regs(state, reg, &tmp, 1); } /* diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 17a955d0031b..75509bec66e4 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data) { + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + if (unlikely(reg >= STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg); return -EREMOTEIO; } - data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set; - return stb6100_write_reg_range(state, &data, reg, 1); + tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set; + return stb6100_write_reg_range(state, &tmp, reg, 1); } diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index f3529df8211d..1a726196c126 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -166,7 +166,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) { - return stv0367_writeregs(state, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv0367_writeregs(state, reg, &tmp, 1); } static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c index 7ef469c0c866..2695e1eb6d9c 100644 --- a/drivers/media/dvb-frontends/stv090x.c +++ b/drivers/media/dvb-frontends/stv090x.c @@ -755,7 +755,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data) { - return stv090x_write_regs(state, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv090x_write_regs(state, reg, &tmp, 1); } static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable) diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index 66eba38f1014..7e8e01389c55 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data) { - return stv6110x_write_regs(stv6110x, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv6110x_write_regs(stv6110x, reg, &tmp, 1); } static int stv6110x_init(struct 
dvb_frontend *fe) diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 931e5c98da8a..b879e1571469 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -368,7 +368,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, gain2 = clamp_t(long, gain2, 0, 13); v_agc = clamp_t(long, v_agc, 400, 1100); - *_gain = -(gain1 * 2330 + + *_gain = -((__s64)gain1 * 2330 + gain2 * 3500 + v_agc * 24 / 10 * 10 + 10000); @@ -386,7 +386,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, gain3 = clamp_t(long, gain3, 0, 6); v_agc = clamp_t(long, v_agc, 600, 1600); - *_gain = -(gain1 * 2650 + + *_gain = -((__s64)gain1 * 2650 + gain2 * 3380 + gain3 * 2850 + v_agc * 176 / 100 * 10 - diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c index 623355fc2666..3208b866d1cb 100644 --- a/drivers/media/dvb-frontends/zl10039.c +++ b/drivers/media/dvb-frontends/zl10039.c @@ -134,7 +134,9 @@ static inline int zl10039_writereg(struct zl10039_state *state, const enum zl10039_reg_addr reg, const u8 val) { - return zl10039_write(state, reg, &val, 1); + const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return zl10039_write(state, reg, &tmp, 1); } static int zl10039_init(struct dvb_frontend *fe) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 94153895fcd4..3bdc34deae7b 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -660,6 +660,7 @@ config VIDEO_OV13858 tristate "OmniVision OV13858 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on MEDIA_CAMERA_SUPPORT + select V4L2_FWNODE ---help--- This is a Video4Linux2 sensor-level driver for the OmniVision OV13858 camera. diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c index 9fd254a8e20d..13c10b5e2b45 100644 --- a/drivers/media/i2c/s5k6aa.c +++ b/drivers/media/i2c/s5k6aa.c @@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client) /** * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration + * @s5k6aa: pointer to &struct s5k6aa describing the device * * Configure the internal ISP PLL for the required output frequency. * Locking: called with s5k6aa.lock mutex held. @@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa) /** * s5k6aa_configure_video_bus - configure the video output interface + * @s5k6aa: pointer to &struct s5k6aa describing the device * @bus_type: video bus type: parallel or MIPI-CSI * @nlanes: number of MIPI lanes to be used (MIPI-CSI only) * @@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout, /** * s5k6aa_set_prev_config - write user preview register set + * @s5k6aa: pointer to &struct s5k6aa describing the device + * @preset: s5kaa preset to be applied * * Configure output resolution and color fromat, pixel clock * frequency range, device frame rate type and frame period range. @@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa, /** * s5k6aa_initialize_isp - basic ISP MCU initialization + * @sd: pointer to V4L2 sub-device descriptor * * Configure AHB addresses for registers read/write; configure PLLs for * required output pixel clock. 
The ISP power supply needs to be already diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index a5f52137d306..d4bc78b4fcb5 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) bt878_num); if (bt878_num >= BT878_MAX) { printk(KERN_ERR "bt878: Too many devices inserted\n"); - result = -ENOMEM; - goto fail0; + return -ENOMEM; } if (pci_enable_device(dev)) return -EIO; diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 0ef36cec21d1..dc8fc2120b63 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1397,9 +1397,9 @@ static int vpif_async_bound(struct v4l2_async_notifier *notifier, vpif_obj.config->chan_config->inputs[i].subdev_name = (char *)to_of_node(subdev->fwnode)->full_name; vpif_dbg(2, debug, - "%s: setting input %d subdev_name = %pOF\n", + "%s: setting input %d subdev_name = %s\n", __func__, i, - to_of_node(subdev->fwnode)); + vpif_obj.config->chan_config->inputs[i].subdev_name); return 0; } } @@ -1545,6 +1545,8 @@ vpif_capture_get_pdata(struct platform_device *pdev) sizeof(*chan->inputs) * VPIF_CAPTURE_NUM_CHANNELS, GFP_KERNEL); + if (!chan->inputs) + return NULL; chan->input_count++; chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c index 46768c056193..0c28d0b995cc 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c @@ -115,3 +115,6 @@ struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev) return ctx; } EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Mediatek video codec driver"); diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c index b22d2dfcd3c2..55232a912950 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c @@ -622,6 +622,9 @@ static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + } else { + /* On current devices output->wm_num is always <= 2 */ + break; } if (output->wm_idx[i] % 2 == 1) diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index cba092bcb76d..a0fe80df0cbd 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -194,7 +194,6 @@ struct venus_buffer { * @fh: a holder of v4l file handle structure * @streamon_cap: stream on flag for capture queue * @streamon_out: stream on flag for output queue - * @cmd_stop: a flag to signal encoder/decoder commands * @width: current capture width * @height: current capture height * @out_width: current output width @@ -258,7 +257,6 @@ struct venus_inst { } controls; struct v4l2_fh fh; unsigned int streamon_cap, streamon_out; - bool cmd_stop; u32 width; u32 height; u32 out_width; diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 9b2a401a4891..0ce9559a2924 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ 
b/drivers/media/platform/qcom/venus/helpers.c @@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) mutex_lock(&inst->lock); - if (inst->cmd_stop) { - vbuf->flags |= V4L2_BUF_FLAG_LAST; - v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); - inst->cmd_stop = false; - goto unlock; - } - v4l2_m2m_buf_queue(m2m_ctx, vbuf); if (!(inst->streamon_out & inst->streamon_cap)) diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c index c09490876516..ba29fd4d4984 100644 --- a/drivers/media/platform/qcom/venus/hfi.c +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -484,6 +484,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd) return -EINVAL; } +EXPORT_SYMBOL_GPL(hfi_session_process_buf); irqreturn_t hfi_isr_thread(int irq, void *dev_id) { diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index 1caae8feaa36..734ce11b0ed0 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc, desc->attrs = DMA_ATTR_WRITE_COMBINE; desc->size = ALIGN(size, SZ_4K); - desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL, + desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL, desc->attrs); if (!desc->kva) return -ENOMEM; @@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev) if (ret) return ret; - hdev->ifaceq_table.kva = desc.kva; - hdev->ifaceq_table.da = desc.da; - hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE; - offset = hdev->ifaceq_table.size; + hdev->ifaceq_table = desc; + offset = IFACEQ_TABLE_SIZE; for (i = 0; i < IFACEQ_NUM; i++) { queue = &hdev->queues[i]; @@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev) if (ret) { hdev->sfr.da = 0; } else { - hdev->sfr.da = desc.da; - hdev->sfr.kva = desc.kva; - hdev->sfr.size = ALIGNED_SFR_SIZE; + hdev->sfr = desc; sfr = hdev->sfr.kva; sfr->buf_size = ALIGNED_SFR_SIZE; } diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index da611a5eb670..c9e9576bb08a 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh, static int vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) { - if (cmd->cmd != V4L2_DEC_CMD_STOP) + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK) + return -EINVAL; + break; + default: return -EINVAL; + } return 0; } @@ -479,6 +485,7 @@ static int vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) { struct venus_inst *inst = to_inst(file); + struct hfi_frame_data fdata = {0}; int ret; ret = vdec_try_decoder_cmd(file, fh, cmd); @@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) return ret; mutex_lock(&inst->lock); - inst->cmd_stop = true; - mutex_unlock(&inst->lock); - hfi_session_flush(inst); + /* + * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder + * input to signal EOS. 
+ */ + if (!(inst->streamon_out & inst->streamon_cap)) + goto unlock; + + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.flags |= HFI_BUFFERFLAG_EOS; + fdata.device_addr = 0xdeadbeef; - return 0; + ret = hfi_session_process_buf(inst, &fdata); + +unlock: + mutex_unlock(&inst->lock); + return ret; } static const struct v4l2_ioctl_ops vdec_ioctl_ops = { @@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) inst->reconfig = false; inst->sequence_cap = 0; inst->sequence_out = 0; - inst->cmd_stop = false; ret = vdec_init_session(inst); if (ret) @@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type, vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; - if (inst->cmd_stop) { - vbuf->flags |= V4L2_BUF_FLAG_LAST; - inst->cmd_stop = false; - } - if (vbuf->flags & V4L2_BUF_FLAG_LAST) { const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 6f123a387cf9..3fcf0e9b7b29 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type, if (!vbuf) return; - vb = &vbuf->vb2_buf; - vb->planes[0].bytesused = bytesused; - vb->planes[0].data_offset = data_offset; - vbuf->flags = flags; if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb = &vbuf->vb2_buf; + vb2_set_plane_payload(vb, 0, bytesused + data_offset); + vb->planes[0].data_offset = data_offset; vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; } else { diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 1afde5021ca6..8e9531f7f83f 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1315,6 +1315,12 @@ static int s5p_mfc_probe(struct platform_device *pdev) goto err_dma; } + /* + * Load fails if fs isn't mounted. Try loading anyway. + * _open() will load it, it it fails now. Ignore failure. + */ + s5p_mfc_load_firmware(dev); + mutex_init(&dev->mfc_mutex); init_waitqueue_head(&dev->queue); dev->hw_lock = 0; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 4220914529b2..76119a8cc477 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -290,6 +290,8 @@ struct s5p_mfc_priv_buf { * @mfc_cmds: cmd structure holding HW commands function pointers * @mfc_regs: structure holding MFC registers * @fw_ver: loaded firmware sub-version + * @fw_get_done flag set when request_firmware() is complete and + * copied into fw_buf * risc_on: flag indicates RISC is on or off * */ @@ -336,6 +338,7 @@ struct s5p_mfc_dev { struct s5p_mfc_hw_cmds *mfc_cmds; const struct s5p_mfc_regs *mfc_regs; enum s5p_mfc_fw_ver fw_ver; + bool fw_get_done; bool risc_on; /* indicates if RISC is on or off */ }; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 69ef9c23a99a..d94e59e79fe9 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -55,6 +55,9 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev) * into kernel. 
*/ mfc_debug_enter(); + if (dev->fw_get_done) + return 0; + for (i = MFC_FW_MAX_VERSIONS - 1; i >= 0; i--) { if (!dev->variant->fw_name[i]) continue; @@ -82,6 +85,7 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev) } memcpy(dev->fw_buf.virt, fw_blob->data, fw_blob->size); wmb(); + dev->fw_get_done = true; release_firmware(fw_blob); mfc_debug_leave(); return 0; @@ -93,6 +97,7 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev) /* Before calling this function one has to make sure * that MFC is no longer processing */ s5p_mfc_release_priv_buf(dev, &dev->fw_buf); + dev->fw_get_done = false; return 0; } diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c index 0116097c0c0f..092c73f24589 100644 --- a/drivers/media/platform/soc_camera/soc_scale_crop.c +++ b/drivers/media/platform/soc_camera/soc_scale_crop.c @@ -419,3 +419,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd, mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); } EXPORT_SYMBOL(soc_camera_calc_client_output); + +MODULE_DESCRIPTION("soc-camera scaling-cropping functions"); +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 59280ac31937..23d0cedf4d9d 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei) static void channel_swdemux_tsklet(unsigned long data) { struct channel_info *channel = (struct channel_info *)data; - struct c8sectpfei *fei = channel->fei; + struct c8sectpfei *fei; unsigned long wp, rp; int pos, num_packets, n, size; u8 *buf; @@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data) if (unlikely(!channel || !channel->irec)) return; + fei = channel->fei; + wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0)); rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0)); diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index 8b5cbb6b7a70..e5d8d99e124c 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -508,7 +508,8 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm) return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD); else - return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR)); + return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) + & VI6_CMD_UPDHDR); } static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl) diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 962e4c304076..eed9516e25e1 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -571,7 +571,13 @@ static int __maybe_unused vsp1_pm_suspend(struct device *dev) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - vsp1_pipelines_suspend(vsp1); + /* + * When used as part of a display pipeline, the VSP is stopped and + * restarted explicitly by the DU. 
+ */ + if (!vsp1->drm) + vsp1_pipelines_suspend(vsp1); + pm_runtime_force_suspend(vsp1->dev); return 0; @@ -582,7 +588,13 @@ static int __maybe_unused vsp1_pm_resume(struct device *dev) struct vsp1_device *vsp1 = dev_get_drvdata(dev); pm_runtime_force_resume(vsp1->dev); - vsp1_pipelines_resume(vsp1); + + /* + * When used as part of a display pipeline, the VSP is stopped and + * restarted explicitly by the DU. + */ + if (!vsp1->drm) + vsp1_pipelines_resume(vsp1); return 0; } diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 7b3f31cc63d2..0c46155a8e9d 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -2517,6 +2517,11 @@ static int imon_probe(struct usb_interface *interface, mutex_lock(&driver_lock); first_if = usb_ifnum_to_if(usbdev, 0); + if (!first_if) { + ret = -ENODEV; + goto fail; + } + first_if_ctx = usb_get_intfdata(first_if); if (ifnum == 0) { diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index d2223c04e9ad..4c8f456238bc 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->max_timeout) return -ENOTTY; + /* Check for multiply overflow */ + if (val > U32_MAX / 1000) + return -EINVAL; + tmp = val * 1000; - if (tmp < dev->min_timeout || - tmp > dev->max_timeout) - return -EINVAL; + if (tmp < dev->min_timeout || tmp > dev->max_timeout) + return -EINVAL; if (dev->s_timeout) ret = dev->s_timeout(dev, tmp); diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c index 817c18f2ddd1..a95d09acc22a 100644 --- a/drivers/media/rc/ir-nec-decoder.c +++ b/drivers/media/rc/ir-nec-decoder.c @@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) data->state = STATE_BIT_PULSE; return 0; } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) { - rc_repeat(dev); - IR_dprintk(1, "Repeat last key\n"); data->state = STATE_TRAILER_PULSE; return 0; } @@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2)) break; - address = bitrev8((data->bits >> 24) & 0xff); - not_address = bitrev8((data->bits >> 16) & 0xff); - command = bitrev8((data->bits >> 8) & 0xff); - not_command = bitrev8((data->bits >> 0) & 0xff); + if (data->count == NEC_NBITS) { + address = bitrev8((data->bits >> 24) & 0xff); + not_address = bitrev8((data->bits >> 16) & 0xff); + command = bitrev8((data->bits >> 8) & 0xff); + not_command = bitrev8((data->bits >> 0) & 0xff); + + scancode = ir_nec_bytes_to_scancode(address, + not_address, + command, + not_command, + &rc_proto); - scancode = ir_nec_bytes_to_scancode(address, not_address, - command, not_command, - &rc_proto); + if (data->is_nec_x) + data->necx_repeat = true; - if (data->is_nec_x) - data->necx_repeat = true; + rc_keydown(dev, rc_proto, scancode, 0); + } else { + rc_repeat(dev); + } - rc_keydown(dev, rc_proto, scancode, 0); data->state = STATE_INACTIVE; return 0; } diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 981cccd6b988..72f381522cb2 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -38,41 +38,41 @@ static const struct { [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 }, [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 }, [RC_PROTO_RC5] = { .name = "rc-5", - .scancode_bits = 0x1f7f, .repeat_period = 164 }, + .scancode_bits = 0x1f7f, 
.repeat_period = 250 }, [RC_PROTO_RC5X_20] = { .name = "rc-5x-20", - .scancode_bits = 0x1f7f3f, .repeat_period = 164 }, + .scancode_bits = 0x1f7f3f, .repeat_period = 250 }, [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz", - .scancode_bits = 0x2fff, .repeat_period = 164 }, + .scancode_bits = 0x2fff, .repeat_period = 250 }, [RC_PROTO_JVC] = { .name = "jvc", .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_SONY12] = { .name = "sony-12", - .scancode_bits = 0x1f007f, .repeat_period = 100 }, + .scancode_bits = 0x1f007f, .repeat_period = 250 }, [RC_PROTO_SONY15] = { .name = "sony-15", - .scancode_bits = 0xff007f, .repeat_period = 100 }, + .scancode_bits = 0xff007f, .repeat_period = 250 }, [RC_PROTO_SONY20] = { .name = "sony-20", - .scancode_bits = 0x1fff7f, .repeat_period = 100 }, + .scancode_bits = 0x1fff7f, .repeat_period = 250 }, [RC_PROTO_NEC] = { .name = "nec", - .scancode_bits = 0xffff, .repeat_period = 160 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_NECX] = { .name = "nec-x", - .scancode_bits = 0xffffff, .repeat_period = 160 }, + .scancode_bits = 0xffffff, .repeat_period = 250 }, [RC_PROTO_NEC32] = { .name = "nec-32", - .scancode_bits = 0xffffffff, .repeat_period = 160 }, + .scancode_bits = 0xffffffff, .repeat_period = 250 }, [RC_PROTO_SANYO] = { .name = "sanyo", .scancode_bits = 0x1fffff, .repeat_period = 250 }, [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd", - .scancode_bits = 0xffff, .repeat_period = 150 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse", - .scancode_bits = 0x1fffff, .repeat_period = 150 }, + .scancode_bits = 0x1fffff, .repeat_period = 250 }, [RC_PROTO_RC6_0] = { .name = "rc-6-0", - .scancode_bits = 0xffff, .repeat_period = 164 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20", - .scancode_bits = 0xfffff, .repeat_period = 164 }, + .scancode_bits = 0xfffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24", - .scancode_bits = 0xffffff, .repeat_period = 164 }, + .scancode_bits = 0xffffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32", - .scancode_bits = 0xffffffff, .repeat_period = 164 }, + .scancode_bits = 0xffffffff, .repeat_period = 250 }, [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce", - .scancode_bits = 0xffff7fff, .repeat_period = 164 }, + .scancode_bits = 0xffff7fff, .repeat_period = 250 }, [RC_PROTO_SHARP] = { .name = "sharp", .scancode_bits = 0x1fff, .repeat_period = 250 }, [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 }, diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index bc906fb128d5..d59918878eb2 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c @@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val); static irqreturn_t sir_interrupt(int irq, void *dev_id); static void send_space(unsigned long len); static void send_pulse(unsigned long len); -static void init_hardware(void); +static int init_hardware(void); static void drop_hardware(void); /* Initialisation */ @@ -263,11 +263,36 @@ static void send_pulse(unsigned long len) } } -static void init_hardware(void) +static int init_hardware(void) { + u8 scratch, scratch2, scratch3; unsigned long flags; spin_lock_irqsave(&hardware_lock, flags); + + /* + * This is a simple port existence test, borrowed from the autoconfig + * function in drivers/tty/serial/8250/8250_port.c + */ + scratch = sinp(UART_IER); + soutp(UART_IER, 0); +#ifdef __i386__ + outb(0xff, 0x080); +#endif + scratch2 = 
sinp(UART_IER) & 0x0f; + soutp(UART_IER, 0x0f); +#ifdef __i386__ + outb(0x00, 0x080); +#endif + scratch3 = sinp(UART_IER) & 0x0f; + soutp(UART_IER, scratch); + if (scratch2 != 0 || scratch3 != 0x0f) { + /* we fail, there's nothing here */ + spin_unlock_irqrestore(&hardware_lock, flags); + pr_err("port existence test failed, cannot continue\n"); + return -ENODEV; + } + /* reset UART */ outb(0, io + UART_MCR); outb(0, io + UART_IER); @@ -285,6 +310,8 @@ static void init_hardware(void) /* turn on UART */ outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR); spin_unlock_irqrestore(&hardware_lock, flags); + + return 0; } static void drop_hardware(void) @@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev) pr_err("IRQ %d already in use.\n", irq); return retval; } + + retval = init_hardware(); + if (retval) { + del_timer_sync(&timerlist); + return retval; + } + pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq); retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev); if (retval < 0) return retval; - init_hardware(); - return 0; } diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index ba80376a3b86..d097eb04a0e9 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val, return 0; } -static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) +static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) { - return r820t_write(priv, reg, &val, 1); + u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */ + + return r820t_write(priv, reg, &tmp, 1); } static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) @@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) return -EINVAL; } -static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, +static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, u8 bit_mask) { + u8 tmp = val; int rc = r820t_read_cache_reg(priv, reg); if (rc < 0) return rc; - val = (rc & ~bit_mask) | (val & bit_mask); + tmp = (rc & ~bit_mask) | (tmp & bit_mask); - return r820t_write(priv, reg, &val, 1); + return r820t_write(priv, reg, &tmp, 1); } static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c index 5a28ce3a1d49..38dbc128340d 100644 --- a/drivers/media/usb/as102/as102_fw.c +++ b/drivers/media/usb/as102/as102_fw.c @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { - struct as10x_fw_pkt_t fw_pkt; + struct as10x_fw_pkt_t *fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL); + if (!fw_pkt) + return -ENOMEM; + + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), - fw_pkt.raw.address, - fw_pkt.raw.data, + fw_pkt->raw.address, + fw_pkt->raw.data, &data_len, &addr_has_changed); @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, /* detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x03; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x03; /* send EOF 
command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, 2, 0); + fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x01; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x01; - data_len += sizeof(fw_pkt.u.request); - data_len += sizeof(fw_pkt.raw.address); + data_len += sizeof(fw_pkt->u.request); + data_len += sizeof(fw_pkt->raw.address); /* send cmd to device */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, + fw_pkt, data_len, 0); if (errno < 0) @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, } } error: + kfree(fw_pkt); return (errno == 0) ? total_read_bytes : errno; } diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 3dedd83f0b19..a1c59f19cf2d 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -808,7 +808,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct camera_data *cam = video_drvdata(file); if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; @@ -859,7 +859,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; DBG("QBUF #%d\n", buf->index); diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index e0daa9b6c2a0..9b742d569fb5 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1684,7 +1684,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface, nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; - if (assoc_desc->bFirstInterface != ifnum) { + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 5e320fa4a795..be26c029546b 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, static int lme2510_return_status(struct dvb_usb_device *d) { - int ret = 0; + int ret; u8 *data; - data = kzalloc(10, GFP_KERNEL); + data = kzalloc(6, GFP_KERNEL); if (!data) return -ENOMEM; - ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), - 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); - info("Firmware Status: %x (%x)", ret , data[2]); + ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), + 0x06, 0x80, 0x0302, 0x00, + data, 0x6, 200); + if (ret != 6) + ret = -EINVAL; + else + ret = data[2]; + + info("Firmware Status: %6ph", data); - ret = (ret < 0) ? 
-ENODEV : data[2]; kfree(data); return ret; } @@ -1071,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) if (adap->fe[0]) { info("FE Found M88RS2000"); - dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config, - &d->i2c_adap); st->i2c_tuner_gate_w = 5; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0x60; @@ -1138,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) ret = st->tuner_config; break; case TUNER_RS2000: - ret = st->tuner_config; + if (dvb_attach(ts2020_attach, adap->fe[0], + &ts2020_config, &d->i2c_adap)) + ret = st->tuner_config; break; default: break; } - if (ret) + if (ret) { info("TUN Found %s tuner", tun_msg[ret]); - else { - info("TUN No tuner found --- resetting device"); - lme_coldreset(d); + } else { + info("TUN No tuner found"); return -ENODEV; } @@ -1189,6 +1193,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d) static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) { struct lme2510_state *st = d->priv; + int status; usb_reset_configuration(d->udev); @@ -1197,12 +1202,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; - if (lme2510_return_status(d) == 0x44) { + status = lme2510_return_status(d); + if (status == 0x44) { *name = lme_firmware_switch(d, 0); return COLD; } - return 0; + if (status != 0x47) + return -EINVAL; + + return WARM; } static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 37dea0adc695..cfe86b4864b3 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -677,6 +677,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component, case XC2028_RESET_CLK: deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); break; + case XC2028_I2C_FLUSH: + break; default: deb_info("%s: unknown command %d, arg %d\n", __func__, command, arg); diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 6020170fe99a..9be1e658ef47 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -430,6 +430,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component, state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); break; case XC2028_RESET_CLK: + case XC2028_I2C_FLUSH: break; default: err("%s: unknown command %d, arg %d\n", __func__, @@ -478,7 +479,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1010,7 +1011,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1068,7 +1069,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3056,7 +3057,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); @@ -3109,7 +3110,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3139,7 +3140,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3214,7 +3215,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, @@ -3309,7 +3310,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3384,7 +3385,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -3620,7 +3621,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? 
*/ - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index 8207e6900656..bcacb0f22028 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo); int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) { - u8 wbuf[1] = { offs }; - return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1); + u8 *buf; + int rc; + + buf = kmalloc(2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = offs; + + rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1); + *val = buf[1]; + kfree(buf); + + return rc; } EXPORT_SYMBOL(dibusb_read_eeprom_byte); diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index dbe29c6c4d8b..1e8cbaf36896 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interface *interface, /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); - goto error; + goto error_free_dev; } mutex_init(&dev->io_mutex); @@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interface *interface, dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); - goto error; + goto error_v4l2_unregister; } init_waitqueue_head(&dev->wait_buffer); @@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interface *interface, } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); - goto error; + goto error_put_usb; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); - goto error; + goto error_put_usb; } mutex_lock(&dev->io_mutex); @@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interface *interface, mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); - goto error; + goto error_put_usb; } mutex_unlock(&dev->io_mutex); @@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interface *interface, retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); - goto error; + goto error_free_buffers; } client = hdpvr_register_ir_rx_i2c(dev); @@ -394,13 +394,17 @@ static int hdpvr_probe(struct usb_interface *interface, reg_fail: #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); +error_free_buffers: #endif + hdpvr_free_buffers(dev); +error_put_usb: + usb_put_dev(dev->udev); + kfree(dev->usbc_buf); +error_v4l2_unregister: + v4l2_device_unregister(&dev->v4l2_dev); +error_free_dev: + kfree(dev); error: - if (dev) { - flush_work(&dev->worker); - /* this frees allocated memory */ - hdpvr_delete(dev); - } return retval; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index ad5b25b89699..44975061b953 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -3642,6 +3642,12 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw, hdw); hdw->ctl_write_urb->actual_length = 0; hdw->ctl_write_pend_flag = !0; + if (usb_urb_ep_type_check(hdw->ctl_write_urb)) { + pvr2_trace( + PVR2_TRACE_ERROR_LEGS, + "Invalid write control endpoint"); + return -EINVAL; + } status = 
usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL); if (status < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, @@ -3666,6 +3672,12 @@ status); hdw); hdw->ctl_read_urb->actual_length = 0; hdw->ctl_read_pend_flag = !0; + if (usb_urb_ep_type_check(hdw->ctl_read_urb)) { + pvr2_trace( + PVR2_TRACE_ERROR_LEGS, + "Invalid read control endpoint"); + return -EINVAL; + } status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL); if (status < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index f06f09a0876e..68df16b3ce72 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -112,6 +112,8 @@ static int usbtv_probe(struct usb_interface *intf, return 0; usbtv_audio_fail: + /* we must not free at this point */ + usb_get_dev(usbtv->udev); usbtv_video_free(usbtv); usbtv_video_fail: @@ -144,6 +146,7 @@ static void usbtv_disconnect(struct usb_interface *intf) static const struct usb_device_id usbtv_id_table[] = { { USB_DEVICE(0x1b71, 0x3002) }, + { USB_DEVICE(0x1f71, 0x3301) }, {} }; MODULE_DEVICE_TABLE(usb, usbtv_id_table); diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 95b5f4319ec2..3668a04359e8 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -718,8 +718,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) */ if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) { ret = usb_control_msg(usbtv->udev, - usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, USBTV_BASE + 0x0244, (void *)data, 3, 0); if (ret < 0) goto error; diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 821f2aa299ae..6730fd08ef03 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -18,8 +18,18 @@ #include #include #include +#include +#include #include +/* Use the same argument order as copy_in_user */ +#define assign_in_user(to, from) \ +({ \ + typeof(*from) __assign_tmp; \ + \ + get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \ +}) + static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -ENOIOCTLCMD; @@ -46,135 +56,77 @@ struct v4l2_window32 { __u8 global_alpha; }; -static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) -{ - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || - copy_from_user(&kp->w, &up->w, sizeof(up->w)) || - get_user(kp->field, &up->field) || - get_user(kp->chromakey, &up->chromakey) || - get_user(kp->clipcount, &up->clipcount) || - get_user(kp->global_alpha, &up->global_alpha)) - return -EFAULT; - if (kp->clipcount > 2048) - return -EINVAL; - if (kp->clipcount) { - struct v4l2_clip32 __user *uclips; - struct v4l2_clip __user *kclips; - int n = kp->clipcount; - compat_caddr_t p; - - if (get_user(p, &up->clips)) - return -EFAULT; - uclips = compat_ptr(p); - kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip)); - kp->clips = kclips; - while (--n >= 0) { - if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) - return -EFAULT; - if (put_user(n ? 
kclips + 1 : NULL, &kclips->next)) - return -EFAULT; - uclips += 1; - kclips += 1; - } - } else - kp->clips = NULL; - return 0; -} - -static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) -{ - if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || - put_user(kp->field, &up->field) || - put_user(kp->chromakey, &up->chromakey) || - put_user(kp->clipcount, &up->clipcount) || - put_user(kp->global_alpha, &up->global_alpha)) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) +static int get_v4l2_window32(struct v4l2_window __user *kp, + struct v4l2_window32 __user *up, + void __user *aux_buf, u32 aux_space) { - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format))) + struct v4l2_clip32 __user *uclips; + struct v4l2_clip __user *kclips; + compat_caddr_t p; + u32 clipcount; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + copy_in_user(&kp->w, &up->w, sizeof(up->w)) || + assign_in_user(&kp->field, &up->field) || + assign_in_user(&kp->chromakey, &up->chromakey) || + assign_in_user(&kp->global_alpha, &up->global_alpha) || + get_user(clipcount, &up->clipcount) || + put_user(clipcount, &kp->clipcount)) return -EFAULT; - return 0; -} + if (clipcount > 2048) + return -EINVAL; + if (!clipcount) + return put_user(NULL, &kp->clips); -static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format))) + if (get_user(p, &up->clips)) return -EFAULT; - return 0; -} - -static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format))) + uclips = compat_ptr(p); + if (aux_space < clipcount * sizeof(*kclips)) return -EFAULT; - return 0; -} - -static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format))) + kclips = aux_buf; + if (put_user(kclips, &kp->clips)) return -EFAULT; - return 0; -} -static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format))) - return -EFAULT; + while (clipcount--) { + if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) + return -EFAULT; + if (put_user(clipcount ? 
kclips + 1 : NULL, &kclips->next)) + return -EFAULT; + uclips++; + kclips++; + } return 0; } -static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) +static int put_v4l2_window32(struct v4l2_window __user *kp, + struct v4l2_window32 __user *up) { - if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format))) + struct v4l2_clip __user *kclips; + struct v4l2_clip32 __user *uclips; + compat_caddr_t p; + u32 clipcount; + + if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) || + assign_in_user(&up->field, &kp->field) || + assign_in_user(&up->chromakey, &kp->chromakey) || + assign_in_user(&up->global_alpha, &kp->global_alpha) || + get_user(clipcount, &kp->clipcount) || + put_user(clipcount, &up->clipcount)) return -EFAULT; - return 0; -} + if (!clipcount) + return 0; -static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format))) + if (get_user(kclips, &kp->clips)) return -EFAULT; - return 0; -} - -static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format))) + if (get_user(p, &up->clips)) return -EFAULT; + uclips = compat_ptr(p); + while (clipcount--) { + if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) + return -EFAULT; + uclips++; + kclips++; + } return 0; } @@ -209,101 +161,164 @@ struct v4l2_create_buffers32 { __u32 reserved[8]; }; -static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) { - if (get_user(kp->type, &up->type)) + u32 type; + + if (get_user(type, &up->type)) return -EFAULT; - switch (kp->type) { + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: { + u32 clipcount; + + if (get_user(clipcount, &up->fmt.win.clipcount)) + return -EFAULT; + if (clipcount > 2048) + return -EINVAL; + *size = clipcount * sizeof(struct v4l2_clip); + return 0; + } + default: + *size = 0; + return 0; + } +} + +static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) +{ + if (!access_ok(VERIFY_READ, up, sizeof(*up))) + return -EFAULT; + return __bufsize_v4l2_format(up, size); +} + +static int __get_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up, + void __user *aux_buf, u32 aux_space) +{ + u32 type; + + if (get_user(type, &up->type) || put_user(type, &kp->type)) + return -EFAULT; + + switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); + return copy_in_user(&kp->fmt.pix, &up->fmt.pix, + sizeof(kp->fmt.pix)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp, - &up->fmt.pix_mp); + return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); + return get_v4l2_window32(&kp->fmt.win, &up->fmt.win, + aux_buf, aux_space); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); + return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi, + sizeof(kp->fmt.vbi)) ? 
-EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); + return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced, + sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); + return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); + return copy_in_user(&kp->fmt.meta, &up->fmt.meta, + sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); return -EINVAL; } } -static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int get_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up, + void __user *aux_buf, u32 aux_space) +{ + if (!access_ok(VERIFY_READ, up, sizeof(*up))) + return -EFAULT; + return __get_v4l2_format32(kp, up, aux_buf, aux_space); +} + +static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up, + u32 *size) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) + if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; - return __get_v4l2_format32(kp, up); + return __bufsize_v4l2_format(&up->format, size); } -static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) +static int get_v4l2_create32(struct v4l2_create_buffers __user *kp, + struct v4l2_create_buffers32 __user *up, + void __user *aux_buf, u32 aux_space) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + copy_in_user(kp, up, + offsetof(struct v4l2_create_buffers32, format))) return -EFAULT; - return __get_v4l2_format32(&kp->format, &up->format); + return __get_v4l2_format32(&kp->format, &up->format, + aux_buf, aux_space); } -static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int __put_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up) { - if (put_user(kp->type, &up->type)) + u32 type; + + if (get_user(type, &kp->type)) return -EFAULT; - switch (kp->type) { + switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); + return copy_in_user(&up->fmt.pix, &kp->fmt.pix, + sizeof(kp->fmt.pix)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp, - &up->fmt.pix_mp); + return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); + return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi, + sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); + return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced, + sizeof(kp->fmt.sliced)) ? 
-EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); + return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); + return copy_in_user(&up->fmt.meta, &kp->fmt.meta, + sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); return -EINVAL; } } -static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int put_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) + if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) return -EFAULT; return __put_v4l2_format32(kp, up); } -static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) +static int put_v4l2_create32(struct v4l2_create_buffers __user *kp, + struct v4l2_create_buffers32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + copy_in_user(up, kp, + offsetof(struct v4l2_create_buffers32, format)) || + copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; return __put_v4l2_format32(&kp->format, &up->format); } @@ -317,25 +332,28 @@ struct v4l2_standard32 { __u32 reserved[4]; }; -static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) +static int get_v4l2_standard32(struct v4l2_standard __user *kp, + struct v4l2_standard32 __user *up) { /* other fields are not set by the user, nor used by the driver */ - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || - get_user(kp->index, &up->index)) + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->index, &up->index)) return -EFAULT; return 0; } -static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) +static int put_v4l2_standard32(struct v4l2_standard __user *kp, + struct v4l2_standard32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || - put_user(kp->index, &up->index) || - put_user(kp->id, &up->id) || - copy_to_user(up->name, kp->name, 24) || - copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || - put_user(kp->framelines, &up->framelines) || - copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) - return -EFAULT; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->index, &kp->index) || + assign_in_user(&up->id, &kp->id) || + copy_in_user(up->name, kp->name, sizeof(up->name)) || + copy_in_user(&up->frameperiod, &kp->frameperiod, + sizeof(up->frameperiod)) || + assign_in_user(&up->framelines, &kp->framelines) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; return 0; } @@ -374,136 +392,186 @@ struct v4l2_buffer32 { __u32 reserved; }; -static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, - enum v4l2_memory memory) +static int get_v4l2_plane32(struct v4l2_plane __user *up, + struct v4l2_plane32 __user *up32, + enum v4l2_memory memory) { - void __user *up_pln; - compat_long_t p; + compat_ulong_t p; if (copy_in_user(up, up32, 2 * sizeof(__u32)) || - 
copy_in_user(&up->data_offset, &up32->data_offset, - sizeof(__u32))) + copy_in_user(&up->data_offset, &up32->data_offset, + sizeof(up->data_offset))) return -EFAULT; - if (memory == V4L2_MEMORY_USERPTR) { - if (get_user(p, &up32->m.userptr)) - return -EFAULT; - up_pln = compat_ptr(p); - if (put_user((unsigned long)up_pln, &up->m.userptr)) + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, + sizeof(up32->m.mem_offset))) return -EFAULT; - } else if (memory == V4L2_MEMORY_DMABUF) { - if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int))) + break; + case V4L2_MEMORY_USERPTR: + if (get_user(p, &up32->m.userptr) || + put_user((unsigned long)compat_ptr(p), &up->m.userptr)) return -EFAULT; - } else { - if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, - sizeof(__u32))) + break; + case V4L2_MEMORY_DMABUF: + if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd))) return -EFAULT; + break; } return 0; } -static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, - enum v4l2_memory memory) +static int put_v4l2_plane32(struct v4l2_plane __user *up, + struct v4l2_plane32 __user *up32, + enum v4l2_memory memory) { + unsigned long p; + if (copy_in_user(up32, up, 2 * sizeof(__u32)) || - copy_in_user(&up32->data_offset, &up->data_offset, - sizeof(__u32))) + copy_in_user(&up32->data_offset, &up->data_offset, + sizeof(up->data_offset))) return -EFAULT; - /* For MMAP, driver might've set up the offset, so copy it back. - * USERPTR stays the same (was userspace-provided), so no copying. */ - if (memory == V4L2_MEMORY_MMAP) + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, - sizeof(__u32))) + sizeof(up->m.mem_offset))) return -EFAULT; - /* For DMABUF, driver might've set up the fd, so copy it back. 
*/ - if (memory == V4L2_MEMORY_DMABUF) - if (copy_in_user(&up32->m.fd, &up->m.fd, - sizeof(int))) + break; + case V4L2_MEMORY_USERPTR: + if (get_user(p, &up->m.userptr) || + put_user((compat_ulong_t)ptr_to_compat((__force void *)p), + &up32->m.userptr)) + return -EFAULT; + break; + case V4L2_MEMORY_DMABUF: + if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd))) return -EFAULT; + break; + } + + return 0; +} + +static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size) +{ + u32 type; + u32 length; + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(type, &up->type) || + get_user(length, &up->length)) + return -EFAULT; + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + if (length > VIDEO_MAX_PLANES) + return -EINVAL; + + /* + * We don't really care if userspace decides to kill itself + * by passing a very big length value + */ + *size = length * sizeof(struct v4l2_plane); + } else { + *size = 0; + } return 0; } -static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) +static int get_v4l2_buffer32(struct v4l2_buffer __user *kp, + struct v4l2_buffer32 __user *up, + void __user *aux_buf, u32 aux_space) { + u32 type; + u32 length; + enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; compat_caddr_t p; int ret; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || - get_user(kp->index, &up->index) || - get_user(kp->type, &up->type) || - get_user(kp->flags, &up->flags) || - get_user(kp->memory, &up->memory) || - get_user(kp->length, &up->length)) - return -EFAULT; + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->index, &up->index) || + get_user(type, &up->type) || + put_user(type, &kp->type) || + assign_in_user(&kp->flags, &up->flags) || + get_user(memory, &up->memory) || + put_user(memory, &kp->memory) || + get_user(length, &up->length) || + put_user(length, &kp->length)) + return -EFAULT; - if (V4L2_TYPE_IS_OUTPUT(kp->type)) - if (get_user(kp->bytesused, &up->bytesused) || - get_user(kp->field, &up->field) || - get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - get_user(kp->timestamp.tv_usec, - &up->timestamp.tv_usec)) + if (V4L2_TYPE_IS_OUTPUT(type)) + if (assign_in_user(&kp->bytesused, &up->bytesused) || + assign_in_user(&kp->field, &up->field) || + assign_in_user(&kp->timestamp.tv_sec, + &up->timestamp.tv_sec) || + assign_in_user(&kp->timestamp.tv_usec, + &up->timestamp.tv_usec)) return -EFAULT; - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - unsigned int num_planes; + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + u32 num_planes = length; - if (kp->length == 0) { - kp->m.planes = NULL; - /* num_planes == 0 is legal, e.g. when userspace doesn't - * need planes array on DQBUF*/ - return 0; - } else if (kp->length > VIDEO_MAX_PLANES) { - return -EINVAL; + if (num_planes == 0) { + /* + * num_planes == 0 is legal, e.g. 
when userspace doesn't + * need planes array on DQBUF + */ + return put_user(NULL, &kp->m.planes); } + if (num_planes > VIDEO_MAX_PLANES) + return -EINVAL; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); if (!access_ok(VERIFY_READ, uplane32, - kp->length * sizeof(struct v4l2_plane32))) + num_planes * sizeof(*uplane32))) return -EFAULT; - /* We don't really care if userspace decides to kill itself - * by passing a very big num_planes value */ - uplane = compat_alloc_user_space(kp->length * - sizeof(struct v4l2_plane)); - kp->m.planes = (__force struct v4l2_plane *)uplane; + /* + * We don't really care if userspace decides to kill itself + * by passing a very big num_planes value + */ + if (aux_space < num_planes * sizeof(*uplane)) + return -EFAULT; + + uplane = aux_buf; + if (put_user((__force struct v4l2_plane *)uplane, + &kp->m.planes)) + return -EFAULT; - for (num_planes = 0; num_planes < kp->length; num_planes++) { - ret = get_v4l2_plane32(uplane, uplane32, kp->memory); + while (num_planes--) { + ret = get_v4l2_plane32(uplane, uplane32, memory); if (ret) return ret; - ++uplane; - ++uplane32; + uplane++; + uplane32++; } } else { - switch (kp->memory) { + switch (memory) { case V4L2_MEMORY_MMAP: - if (get_user(kp->m.offset, &up->m.offset)) + case V4L2_MEMORY_OVERLAY: + if (assign_in_user(&kp->m.offset, &up->m.offset)) return -EFAULT; break; - case V4L2_MEMORY_USERPTR: - { - compat_long_t tmp; + case V4L2_MEMORY_USERPTR: { + compat_ulong_t userptr; - if (get_user(tmp, &up->m.userptr)) - return -EFAULT; - - kp->m.userptr = (unsigned long)compat_ptr(tmp); - } - break; - case V4L2_MEMORY_OVERLAY: - if (get_user(kp->m.offset, &up->m.offset)) + if (get_user(userptr, &up->m.userptr) || + put_user((unsigned long)compat_ptr(userptr), + &kp->m.userptr)) return -EFAULT; break; + } case V4L2_MEMORY_DMABUF: - if (get_user(kp->m.fd, &up->m.fd)) + if (assign_in_user(&kp->m.fd, &up->m.fd)) return -EFAULT; break; } @@ -512,65 +580,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user return 0; } -static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) +static int put_v4l2_buffer32(struct v4l2_buffer __user *kp, + struct v4l2_buffer32 __user *up) { + u32 type; + u32 length; + enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; compat_caddr_t p; - int num_planes; int ret; - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || - put_user(kp->index, &up->index) || - put_user(kp->type, &up->type) || - put_user(kp->flags, &up->flags) || - put_user(kp->memory, &up->memory)) - return -EFAULT; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->index, &kp->index) || + get_user(type, &kp->type) || + put_user(type, &up->type) || + assign_in_user(&up->flags, &kp->flags) || + get_user(memory, &kp->memory) || + put_user(memory, &up->memory)) + return -EFAULT; - if (put_user(kp->bytesused, &up->bytesused) || - put_user(kp->field, &up->field) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || - copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->reserved2, &up->reserved2) || - put_user(kp->reserved, &up->reserved) || - put_user(kp->length, &up->length)) - return -EFAULT; + if (assign_in_user(&up->bytesused, &kp->bytesused) || + assign_in_user(&up->field, &kp->field) || + 
assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || + assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) || + copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || + assign_in_user(&up->sequence, &kp->sequence) || + assign_in_user(&up->reserved2, &kp->reserved2) || + assign_in_user(&up->reserved, &kp->reserved) || + get_user(length, &kp->length) || + put_user(length, &up->length)) + return -EFAULT; + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + u32 num_planes = length; - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - num_planes = kp->length; if (num_planes == 0) return 0; - uplane = (__force struct v4l2_plane __user *)kp->m.planes; + if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes))) + return -EFAULT; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); - while (--num_planes >= 0) { - ret = put_v4l2_plane32(uplane, uplane32, kp->memory); + while (num_planes--) { + ret = put_v4l2_plane32(uplane, uplane32, memory); if (ret) return ret; ++uplane; ++uplane32; } } else { - switch (kp->memory) { + switch (memory) { case V4L2_MEMORY_MMAP: - if (put_user(kp->m.offset, &up->m.offset)) + case V4L2_MEMORY_OVERLAY: + if (assign_in_user(&up->m.offset, &kp->m.offset)) return -EFAULT; break; case V4L2_MEMORY_USERPTR: - if (put_user(kp->m.userptr, &up->m.userptr)) - return -EFAULT; - break; - case V4L2_MEMORY_OVERLAY: - if (put_user(kp->m.offset, &up->m.offset)) + if (assign_in_user(&up->m.userptr, &kp->m.userptr)) return -EFAULT; break; case V4L2_MEMORY_DMABUF: - if (put_user(kp->m.fd, &up->m.fd)) + if (assign_in_user(&up->m.fd, &kp->m.fd)) return -EFAULT; break; } @@ -595,30 +668,33 @@ struct v4l2_framebuffer32 { } fmt; }; -static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) +static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, + struct v4l2_framebuffer32 __user *up) { - u32 tmp; - - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || - get_user(tmp, &up->base) || - get_user(kp->capability, &up->capability) || - get_user(kp->flags, &up->flags) || - copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) - return -EFAULT; - kp->base = (__force void *)compat_ptr(tmp); + compat_caddr_t tmp; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(tmp, &up->base) || + put_user((__force void *)compat_ptr(tmp), &kp->base) || + assign_in_user(&kp->capability, &up->capability) || + assign_in_user(&kp->flags, &up->flags) || + copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt))) + return -EFAULT; return 0; } -static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) +static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, + struct v4l2_framebuffer32 __user *up) { - u32 tmp = (u32)((unsigned long)kp->base); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || - put_user(tmp, &up->base) || - put_user(kp->capability, &up->capability) || - put_user(kp->flags, &up->flags) || - copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) - return -EFAULT; + void *base; + + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + get_user(base, &kp->base) || + put_user(ptr_to_compat(base), &up->base) || + assign_in_user(&up->capability, &kp->capability) || + assign_in_user(&up->flags, &kp->flags) || + copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt))) + return -EFAULT; return 0; } @@ -634,18 +710,22 @@ struct v4l2_input32 { __u32 reserved[3]; }; -/* The 64-bit v4l2_input struct has extra padding at 
the end of the struct. - Otherwise it is identical to the 32-bit version. */ -static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) +/* + * The 64-bit v4l2_input struct has extra padding at the end of the struct. + * Otherwise it is identical to the 32-bit version. + */ +static inline int get_v4l2_input32(struct v4l2_input __user *kp, + struct v4l2_input32 __user *up) { - if (copy_from_user(kp, up, sizeof(struct v4l2_input32))) + if (copy_in_user(kp, up, sizeof(*up))) return -EFAULT; return 0; } -static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) +static inline int put_v4l2_input32(struct v4l2_input __user *kp, + struct v4l2_input32 __user *up) { - if (copy_to_user(up, kp, sizeof(struct v4l2_input32))) + if (copy_in_user(up, kp, sizeof(*up))) return -EFAULT; return 0; } @@ -669,60 +749,95 @@ struct v4l2_ext_control32 { }; } __attribute__ ((packed)); -/* The following function really belong in v4l2-common, but that causes - a circular dependency between modules. We need to think about this, but - for now this will do. */ - -/* Return non-zero if this control is a pointer type. Currently only - type STRING is a pointer type. */ -static inline int ctrl_is_pointer(u32 id) +/* Return true if this control is a pointer type. */ +static inline bool ctrl_is_pointer(struct file *file, u32 id) { - switch (id) { - case V4L2_CID_RDS_TX_PS_NAME: - case V4L2_CID_RDS_TX_RADIO_TEXT: - return 1; - default: - return 0; + struct video_device *vdev = video_devdata(file); + struct v4l2_fh *fh = NULL; + struct v4l2_ctrl_handler *hdl = NULL; + struct v4l2_query_ext_ctrl qec = { id }; + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + fh = file->private_data; + + if (fh && fh->ctrl_handler) + hdl = fh->ctrl_handler; + else if (vdev->ctrl_handler) + hdl = vdev->ctrl_handler; + + if (hdl) { + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); + + return ctrl && ctrl->is_ptr; } + + if (!ops || !ops->vidioc_query_ext_ctrl) + return false; + + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); } -static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) +static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up, + u32 *size) +{ + u32 count; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(count, &up->count)) + return -EFAULT; + if (count > V4L2_CID_MAX_CTRLS) + return -EINVAL; + *size = count * sizeof(struct v4l2_ext_control); + return 0; +} + +static int get_v4l2_ext_controls32(struct file *file, + struct v4l2_ext_controls __user *kp, + struct v4l2_ext_controls32 __user *up, + void __user *aux_buf, u32 aux_space) { struct v4l2_ext_control32 __user *ucontrols; struct v4l2_ext_control __user *kcontrols; - unsigned int n; + u32 count; + u32 n; compat_caddr_t p; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || - get_user(kp->which, &up->which) || - get_user(kp->count, &up->count) || - get_user(kp->error_idx, &up->error_idx) || - copy_from_user(kp->reserved, up->reserved, - sizeof(kp->reserved))) - return -EFAULT; - if (kp->count == 0) { - kp->controls = NULL; - return 0; - } else if (kp->count > V4L2_CID_MAX_CTRLS) { + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->which, &up->which) || + get_user(count, &up->count) || + put_user(count, &kp->count) || + assign_in_user(&kp->error_idx, &up->error_idx) || + 
copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + return -EFAULT; + + if (count == 0) + return put_user(NULL, &kp->controls); + if (count > V4L2_CID_MAX_CTRLS) return -EINVAL; - } if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_READ, ucontrols, - kp->count * sizeof(struct v4l2_ext_control32))) + if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) return -EFAULT; - kcontrols = compat_alloc_user_space(kp->count * - sizeof(struct v4l2_ext_control)); - kp->controls = (__force struct v4l2_ext_control *)kcontrols; - for (n = 0; n < kp->count; n++) { + if (aux_space < count * sizeof(*kcontrols)) + return -EFAULT; + kcontrols = aux_buf; + if (put_user((__force struct v4l2_ext_control *)kcontrols, + &kp->controls)) + return -EFAULT; + + for (n = 0; n < count; n++) { u32 id; if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) return -EFAULT; + if (get_user(id, &kcontrols->id)) return -EFAULT; - if (ctrl_is_pointer(id)) { + + if (ctrl_is_pointer(file, id)) { void __user *s; if (get_user(p, &ucontrols->string)) @@ -737,43 +852,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext return 0; } -static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) +static int put_v4l2_ext_controls32(struct file *file, + struct v4l2_ext_controls __user *kp, + struct v4l2_ext_controls32 __user *up) { struct v4l2_ext_control32 __user *ucontrols; - struct v4l2_ext_control __user *kcontrols = - (__force struct v4l2_ext_control __user *)kp->controls; - int n = kp->count; + struct v4l2_ext_control __user *kcontrols; + u32 count; + u32 n; compat_caddr_t p; - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || - put_user(kp->which, &up->which) || - put_user(kp->count, &up->count) || - put_user(kp->error_idx, &up->error_idx) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) - return -EFAULT; - if (!kp->count) - return 0; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->which, &kp->which) || + get_user(count, &kp->count) || + put_user(count, &up->count) || + assign_in_user(&up->error_idx, &kp->error_idx) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) || + get_user(kcontrols, &kp->controls)) + return -EFAULT; + if (!count) + return 0; if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_WRITE, ucontrols, - n * sizeof(struct v4l2_ext_control32))) + if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) return -EFAULT; - while (--n >= 0) { - unsigned size = sizeof(*ucontrols); + for (n = 0; n < count; n++) { + unsigned int size = sizeof(*ucontrols); u32 id; - if (get_user(id, &kcontrols->id)) + if (get_user(id, &kcontrols->id) || + put_user(id, &ucontrols->id) || + assign_in_user(&ucontrols->size, &kcontrols->size) || + copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2, + sizeof(ucontrols->reserved2))) return -EFAULT; - /* Do not modify the pointer when copying a pointer control. - The contents of the pointer was changed, not the pointer - itself. */ - if (ctrl_is_pointer(id)) + + /* + * Do not modify the pointer when copying a pointer control. + * The contents of the pointer was changed, not the pointer + * itself. 
+ */ + if (ctrl_is_pointer(file, id)) size -= sizeof(ucontrols->value64); + if (copy_in_user(ucontrols, kcontrols, size)) return -EFAULT; + ucontrols++; kcontrols++; } @@ -793,18 +920,19 @@ struct v4l2_event32 { __u32 reserved[8]; }; -static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) +static int put_v4l2_event32(struct v4l2_event __user *kp, + struct v4l2_event32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || - put_user(kp->type, &up->type) || - copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || - put_user(kp->pending, &up->pending) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || - put_user(kp->id, &up->id) || - copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) - return -EFAULT; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->type, &kp->type) || + copy_in_user(&up->u, &kp->u, sizeof(kp->u)) || + assign_in_user(&up->pending, &kp->pending) || + assign_in_user(&up->sequence, &kp->sequence) || + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || + assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) || + assign_in_user(&up->id, &kp->id) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; return 0; } @@ -816,32 +944,35 @@ struct v4l2_edid32 { compat_caddr_t edid; }; -static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) +static int get_v4l2_edid32(struct v4l2_edid __user *kp, + struct v4l2_edid32 __user *up) { - u32 tmp; - - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || - get_user(kp->pad, &up->pad) || - get_user(kp->start_block, &up->start_block) || - get_user(kp->blocks, &up->blocks) || - get_user(tmp, &up->edid) || - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) - return -EFAULT; - kp->edid = (__force u8 *)compat_ptr(tmp); + compat_uptr_t tmp; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->pad, &up->pad) || + assign_in_user(&kp->start_block, &up->start_block) || + assign_in_user(&kp->blocks, &up->blocks) || + get_user(tmp, &up->edid) || + put_user(compat_ptr(tmp), &kp->edid) || + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + return -EFAULT; return 0; } -static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) +static int put_v4l2_edid32(struct v4l2_edid __user *kp, + struct v4l2_edid32 __user *up) { - u32 tmp = (u32)((unsigned long)kp->edid); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || - put_user(kp->pad, &up->pad) || - put_user(kp->start_block, &up->start_block) || - put_user(kp->blocks, &up->blocks) || - put_user(tmp, &up->edid) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) - return -EFAULT; + void *edid; + + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->pad, &kp->pad) || + assign_in_user(&up->start_block, &kp->start_block) || + assign_in_user(&up->blocks, &kp->blocks) || + get_user(edid, &kp->edid) || + put_user(ptr_to_compat(edid), &up->edid) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; return 0; } @@ -873,22 +1004,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) +static int alloc_userspace(unsigned int size, u32 aux_space, + void __user 
**up_native) +{ + *up_native = compat_alloc_user_space(size + aux_space); + if (!*up_native) + return -ENOMEM; + if (clear_user(*up_native, size)) + return -EFAULT; + return 0; +} + static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - union { - struct v4l2_format v2f; - struct v4l2_buffer v2b; - struct v4l2_framebuffer v2fb; - struct v4l2_input v2i; - struct v4l2_standard v2s; - struct v4l2_ext_controls v2ecs; - struct v4l2_event v2ev; - struct v4l2_create_buffers v2crt; - struct v4l2_edid v2edid; - unsigned long vx; - int vi; - } karg; void __user *up = compat_ptr(arg); + void __user *up_native = NULL; + void __user *aux_buf; + u32 aux_space; int compatible_arg = 1; long err = 0; @@ -927,30 +1059,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_STREAMOFF: case VIDIOC_S_INPUT: case VIDIOC_S_OUTPUT: - err = get_user(karg.vi, (s32 __user *)up); + err = alloc_userspace(sizeof(unsigned int), 0, &up_native); + if (!err && assign_in_user((unsigned int __user *)up_native, + (compat_uint_t __user *)up)) + err = -EFAULT; compatible_arg = 0; break; case VIDIOC_G_INPUT: case VIDIOC_G_OUTPUT: + err = alloc_userspace(sizeof(unsigned int), 0, &up_native); compatible_arg = 0; break; case VIDIOC_G_EDID: case VIDIOC_S_EDID: - err = get_v4l2_edid32(&karg.v2edid, up); + err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native); + if (!err) + err = get_v4l2_edid32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_FMT: case VIDIOC_S_FMT: case VIDIOC_TRY_FMT: - err = get_v4l2_format32(&karg.v2f, up); + err = bufsize_v4l2_format(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_format), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_format); + err = get_v4l2_format32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_CREATE_BUFS: - err = get_v4l2_create32(&karg.v2crt, up); + err = bufsize_v4l2_create(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_create_buffers), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_create_buffers); + err = get_v4l2_create32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; @@ -958,36 +1112,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: - err = get_v4l2_buffer32(&karg.v2b, up); + err = bufsize_v4l2_buffer(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_buffer), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_buffer); + err = get_v4l2_buffer32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_S_FBUF: - err = get_v4l2_framebuffer32(&karg.v2fb, up); + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, + &up_native); + if (!err) + err = get_v4l2_framebuffer32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_FBUF: + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, + &up_native); compatible_arg = 0; break; case VIDIOC_ENUMSTD: - err = get_v4l2_standard32(&karg.v2s, up); + err = alloc_userspace(sizeof(struct v4l2_standard), 0, + &up_native); + if (!err) + err = get_v4l2_standard32(up_native, up); compatible_arg = 0; break; case VIDIOC_ENUMINPUT: - err = get_v4l2_input32(&karg.v2i, up); + err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native); + if (!err) + err = get_v4l2_input32(up_native, up); compatible_arg = 0; 
break; case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - err = get_v4l2_ext_controls32(&karg.v2ecs, up); + err = bufsize_v4l2_ext_controls(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_ext_controls), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_ext_controls); + err = get_v4l2_ext_controls32(file, up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_DQEVENT: + err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native); compatible_arg = 0; break; } @@ -996,26 +1177,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar if (compatible_arg) err = native_ioctl(file, cmd, (unsigned long)up); - else { - mm_segment_t old_fs = get_fs(); + else + err = native_ioctl(file, cmd, (unsigned long)up_native); - set_fs(KERNEL_DS); - err = native_ioctl(file, cmd, (unsigned long)&karg); - set_fs(old_fs); - } + if (err == -ENOTTY) + return err; - /* Special case: even after an error we need to put the - results back for these ioctls since the error_idx will - contain information on which control failed. */ + /* + * Special case: even after an error we need to put the + * results back for these ioctls since the error_idx will + * contain information on which control failed. + */ switch (cmd) { case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - if (put_v4l2_ext_controls32(&karg.v2ecs, up)) + if (put_v4l2_ext_controls32(file, up_native, up)) err = -EFAULT; break; case VIDIOC_S_EDID: - if (put_v4l2_edid32(&karg.v2edid, up)) + if (put_v4l2_edid32(up_native, up)) err = -EFAULT; break; } @@ -1027,43 +1208,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_S_OUTPUT: case VIDIOC_G_INPUT: case VIDIOC_G_OUTPUT: - err = put_user(((s32)karg.vi), (s32 __user *)up); + if (assign_in_user((compat_uint_t __user *)up, + ((unsigned int __user *)up_native))) + err = -EFAULT; break; case VIDIOC_G_FBUF: - err = put_v4l2_framebuffer32(&karg.v2fb, up); + err = put_v4l2_framebuffer32(up_native, up); break; case VIDIOC_DQEVENT: - err = put_v4l2_event32(&karg.v2ev, up); + err = put_v4l2_event32(up_native, up); break; case VIDIOC_G_EDID: - err = put_v4l2_edid32(&karg.v2edid, up); + err = put_v4l2_edid32(up_native, up); break; case VIDIOC_G_FMT: case VIDIOC_S_FMT: case VIDIOC_TRY_FMT: - err = put_v4l2_format32(&karg.v2f, up); + err = put_v4l2_format32(up_native, up); break; case VIDIOC_CREATE_BUFS: - err = put_v4l2_create32(&karg.v2crt, up); + err = put_v4l2_create32(up_native, up); break; + case VIDIOC_PREPARE_BUF: case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: - err = put_v4l2_buffer32(&karg.v2b, up); + err = put_v4l2_buffer32(up_native, up); break; case VIDIOC_ENUMSTD: - err = put_v4l2_standard32(&karg.v2s, up); + err = put_v4l2_standard32(up_native, up); break; case VIDIOC_ENUMINPUT: - err = put_v4l2_input32(&karg.v2i, up); + err = put_v4l2_input32(up_native, up); break; } return err; diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index dd1db678718c..8033d6f73501 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, } EXPORT_SYMBOL(v4l2_ctrl_fill); +static u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + static 
void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev->reserved, 0, sizeof(ev->reserved)); @@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; - ev->u.ctrl.flags = ctrl->flags; + ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else @@ -2577,10 +2587,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr else qc->id = ctrl->id; strlcpy(qc->name, ctrl->name, sizeof(qc->name)); - qc->flags = ctrl->flags; + qc->flags = user_flags(ctrl); qc->type = ctrl->type; - if (ctrl->is_ptr) - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index b60a6b0841d1..d06941cc6a55 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1308,52 +1308,50 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_fmtdesc *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret = -EINVAL; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + ret = -EINVAL; switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap)) + if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) break; ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane)) break; ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) break; ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out)) + if (unlikely(!ops->vidioc_enum_fmt_vid_out)) break; ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane)) break; ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) break; ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out)) + if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) break; ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); break; case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap)) + if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) break; ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); break; @@ -1367,13 +1365,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; 
- struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; /* * fmt can't be cleared for these overlay types due to the 'clips' @@ -1401,7 +1396,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap)) + if (unlikely(!ops->vidioc_g_fmt_vid_cap)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); @@ -1409,23 +1404,15 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane)) - break; return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay)) - break; return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap)) - break; return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap)) - break; return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out)) + if (unlikely(!ops->vidioc_g_fmt_vid_out)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); @@ -1433,32 +1420,18 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane)) - break; return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay)) - break; return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out)) - break; return ops->vidioc_g_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out)) - break; return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap)) - break; return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out)) - break; return ops->vidioc_g_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap)) - break; return ops->vidioc_g_fmt_meta_cap(file, fh, arg); } return -EINVAL; @@ -1484,12 +1457,10 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, { struct v4l2_format *p = arg; struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = 
check_fmt(file, p->type); + + if (ret) + return ret; ret = v4l_enable_media_source(vfd); if (ret) @@ -1498,37 +1469,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap)) + if (unlikely(!ops->vidioc_s_fmt_vid_cap)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; - if (is_tch) + if (vfd->vfl_type == VFL_TYPE_TOUCH) v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap)) + if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap)) + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out)) + if (unlikely(!ops->vidioc_s_fmt_vid_out)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); @@ -1536,37 +1507,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay)) + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out)) + if (unlikely(!ops->vidioc_s_fmt_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_s_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out)) + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out)) + if (unlikely(!ops->vidioc_s_fmt_sdr_out)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_s_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap)) 
+ if (unlikely(!ops->vidioc_s_fmt_meta_cap)) break; CLEAR_AFTER_FIELD(p, fmt.meta); return ops->vidioc_s_fmt_meta_cap(file, fh, arg); @@ -1578,19 +1549,16 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; v4l_sanitize_format(p); switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap)) + if (unlikely(!ops->vidioc_try_fmt_vid_cap)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); @@ -1598,27 +1566,27 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap)) + if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap)) + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out)) + if (unlikely(!ops->vidioc_try_fmt_vid_out)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); @@ -1626,37 +1594,37 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay)) + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out)) + if (unlikely(!ops->vidioc_try_fmt_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_try_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out)) + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if 
(unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out)) + if (unlikely(!ops->vidioc_try_fmt_sdr_out)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_try_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap)) + if (unlikely(!ops->vidioc_try_fmt_meta_cap)) break; CLEAR_AFTER_FIELD(p, fmt.meta); return ops->vidioc_try_fmt_meta_cap(file, fh, arg); @@ -2924,8 +2892,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, /* Handles IOCTL */ err = func(file, cmd, parg); - if (err == -ENOIOCTLCMD) + if (err == -ENOTTY || err == -ENOIOCTLCMD) { err = -ENOTTY; + goto out; + } + if (err == 0) { if (cmd == VIDIOC_DQBUF) trace_v4l2_dqbuf(video_devdata(file)->minor, parg); diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 0b5c43f7e020..f412429cf5ba 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", data, size, dma->nr_pages); - err = get_user_pages(data & PAGE_MASK, dma->nr_pages, + err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages, flags, dma->pages, NULL); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? err : 0; - dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages); + dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err, + dma->nr_pages); return err < 0 ? 
err : -EINVAL; } return 0; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index cb115ba6a1d2..6d9adcaa26ba 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -332,6 +332,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, struct vb2_buffer *vb; int ret; + /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ + num_buffers = min_t(unsigned int, num_buffers, + VB2_MAX_FRAME - q->num_buffers); + for (buffer = 0; buffer < num_buffers; ++buffer) { /* Allocate videobuf buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c index 5fe12961cfe5..e77d3c1a5310 100644 --- a/drivers/mfd/arizona-i2c.c +++ b/drivers/mfd/arizona-i2c.c @@ -23,6 +23,144 @@ #include "arizona.h" +/************************************************************/ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD1", "0-001a"), + REGULATOR_SUPPLY("LDOVDD", "0-001a"), + REGULATOR_SUPPLY("CPVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD2", "0-001a"), + REGULATOR_SUPPLY("DBVDD3", "0-001a"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "0-001a"), + REGULATOR_SUPPLY("SPKVDDR", "0-001a"), +}; +#else +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD1", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("LDOVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("CPVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD2", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD3", "i2c-INT34C1:00"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("SPKVDDR", "i2c-INT34C1:00"), +}; +#endif + +static struct regulator_init_data vflorida1_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida1_consumer), + .consumer_supplies = vflorida1_consumer, +}; + +static struct fixed_voltage_config vflorida1_config = { + .supply_name = "DC_1V8", + .microvolts = 1800000, + .gpio = -EINVAL, + .init_data = &vflorida1_data, +}; + +static struct platform_device vflorida1_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida1_config, + }, +}; + +static struct regulator_init_data vflorida2_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida2_consumer), + .consumer_supplies = vflorida2_consumer, +}; + +static struct fixed_voltage_config vflorida2_config = { + .supply_name = "DC_5V", + .microvolts = 3700000, + .gpio = -EINVAL, + .init_data = &vflorida2_data, +}; + +static struct platform_device vflorida2_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida2_config, + }, +}; + +/***********WM8280 Codec Driver platform data*************/ +static const struct arizona_micd_range micd_ctp_ranges[] = { + { .max = 11, .key = BTN_0 }, + { .max = 28, .key = BTN_1 }, + { .max = 54, .key = BTN_2 }, + { 
.max = 100, .key = BTN_3 }, + { .max = 186, .key = BTN_4 }, + { .max = 430, .key = BTN_5 }, +}; + +static struct arizona_micd_config micd_modes[] = { + /*{Acc Det on Micdet1, Use Micbias2 for detection, + * Set GPIO to 1 to selecte this polarity}*/ + { 0, 2, 1 }, +}; + +static struct arizona_pdata __maybe_unused florida_pdata = { + .reset = 0, /*No Reset GPIO from AP, use SW reset*/ + .ldo1 = { + .ldoena = 0, /*TODO: Add actual GPIO for LDOEN, use SW Control for now*/ + }, + .irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT, + .clk32k_src = ARIZONA_32KZ_MCLK2, /*Onboard OSC provides 32K on MCLK2*/ + /* + * IN1 uses both MICBIAS1 and MICBIAS2 based on jack polarity, + * the below values in dmic_ref only has meaning for DMIC's and not + * AMIC's + */ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, ARIZONA_DMIC_MICBIAS3, 0, 0}, + .inmode = {ARIZONA_INMODE_DIFF, ARIZONA_INMODE_DMIC, 0, 0}, +#else + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, 0, ARIZONA_DMIC_MICVDD, 0}, + .inmode = {ARIZONA_INMODE_SE, 0, ARIZONA_INMODE_DMIC, 0}, +#endif + .gpio_base = 0, /* Base allocated by gpio core */ + .micd_pol_gpio = 2, /* GPIO3 (offset 2 from gpio_base) of the codec */ + .micd_configs = micd_modes, + .num_micd_configs = ARRAY_SIZE(micd_modes), + .micd_force_micbias = true, +}; + +/************************************************************/ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct i2c_board_info arizona_i2c_device = { + I2C_BOARD_INFO("wm8280", 0x1A), + .platform_data = &florida_pdata, +}; +#endif + static int arizona_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -31,10 +169,16 @@ static int arizona_i2c_probe(struct i2c_client *i2c, unsigned long type; int ret; + pr_debug("%s:%d\n", __func__, __LINE__); if (i2c->dev.of_node) type = arizona_of_get_type(&i2c->dev); +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + else + type = WM8280; +#else else type = id->driver_data; +#endif switch (type) { case WM5102: @@ -105,6 +249,13 @@ static const struct i2c_device_id arizona_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, arizona_i2c_id); +#ifndef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct acpi_device_id __maybe_unused arizona_acpi_match[] = { + { "INT34C1", WM8280 }, + { } +}; +#endif + static struct i2c_driver arizona_i2c_driver = { .driver = { .name = "arizona", @@ -116,7 +267,53 @@ static struct i2c_driver arizona_i2c_driver = { .id_table = arizona_i2c_id, }; -module_i2c_driver(arizona_i2c_driver); +static int __init arizona_modinit(void) +{ + int ret = 0; +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + struct i2c_adapter *adapter; + struct i2c_client *client; +#endif + + pr_debug("%s Entry\n", __func__); + /***********WM8280 Register Regulator*************/ + platform_device_register(&vflorida1_device); + platform_device_register(&vflorida2_device); + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + adapter = i2c_get_adapter(0); + pr_debug("%s:%d\n", __func__, __LINE__); + if (adapter) { + client = i2c_new_device(adapter, &arizona_i2c_device); + pr_debug("%s:%d\n", __func__, __LINE__); + if (!client) { + pr_err("can't create i2c device %s\n", + arizona_i2c_device.type); + i2c_put_adapter(adapter); + pr_debug("%s:%d\n", __func__, __LINE__); + return -ENODEV; + } + } else { + pr_err("adapter is NULL\n"); + return -ENODEV; + } +#endif + pr_debug("%s:%d\n", __func__, __LINE__); + ret = i2c_add_driver(&arizona_i2c_driver); + + pr_debug("%s Exit\n", __func__); + + return ret; +} + +module_init(arizona_modinit); + +static void __exit arizona_modexit(void) +{ + 
i2c_del_driver(&arizona_i2c_driver); +} + +module_exit(arizona_modexit); MODULE_DESCRIPTION("Arizona I2C bus interface"); MODULE_AUTHOR("Mark Brown "); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index 09cf3699e354..32d4154fb87b 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -375,7 +375,8 @@ int arizona_irq_init(struct arizona *arizona) ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread, flags, "arizona", arizona); - if (ret != 0) { + /* FPGA board doesn't have irq line */ + if (ret != 0 && !IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA)) { dev_err(arizona->dev, "Failed to request primary IRQ %d: %d\n", arizona->irq, ret); goto err_main_irq; diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index c9714072e224..a14196e95e9b 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c @@ -667,6 +667,7 @@ static int cros_ec_spi_probe(struct spi_device *spi) sizeof(struct ec_response_get_protocol_info); ec_dev->dout_size = sizeof(struct ec_host_request); + ec_spi->last_transfer_ns = ktime_get_ns(); err = cros_ec_register(ec_dev); if (err) { diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index b3767c3141e5..461b0990b56f 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -180,6 +180,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev) return devm_of_platform_populate(dev); } +static int mx25_tsadc_remove(struct platform_device *pdev) +{ + struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (irq) { + irq_set_chained_handler_and_data(irq, NULL, NULL); + irq_domain_remove(tsadc->domain); + } + + return 0; +} + static const struct of_device_id mx25_tsadc_ids[] = { { .compatible = "fsl,imx25-tsadc" }, { /* Sentinel */ } @@ -192,6 +205,7 @@ static struct platform_driver mx25_tsadc_driver = { .of_match_table = of_match_ptr(mx25_tsadc_ids), }, .probe = mx25_tsadc_probe, + .remove = mx25_tsadc_remove, }; module_platform_driver(mx25_tsadc_driver); diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index d1c46de89eb4..4890549f909e 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -134,13 +134,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x0ab6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x0ab8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x0aba), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x0abc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x0abe), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x0ac0), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x0ac2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info }, /* BXT B-Step */ { PCI_VDEVICE(INTEL, 0x1aac), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x1aae), (kernel_ulong_t)&bxt_i2c_info }, @@ -150,13 +146,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ab6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x1ab8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x1aba), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x1ac0), 
(kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info }, @@ -166,10 +158,6 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x31ee), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x31c2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x31c4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x31c6), (kernel_ulong_t)&bxt_info }, @@ -182,16 +170,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x5ab6), (kernel_ulong_t)&apl_i2c_info }, { PCI_VDEVICE(INTEL, 0x5ab8), (kernel_ulong_t)&apl_i2c_info }, { PCI_VDEVICE(INTEL, 0x5aba), (kernel_ulong_t)&apl_i2c_info }, - { PCI_VDEVICE(INTEL, 0x5abc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x5abe), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x5ac0), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x5ac2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info }, /* SPT-LP */ - { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_i2c_info }, @@ -200,46 +182,33 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info }, /* CNL-LP */ - { PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info }, /* SPT-H */ - { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa160), 
(kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa162), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, /* KBL-H */ - { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, /* CNL-H */ - { PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info }, @@ -258,7 +227,74 @@ static struct pci_driver intel_lpss_pci_driver = { }, }; -module_pci_driver(intel_lpss_pci_driver); +static int __init intel_lpss_pci_driver_init(void) +{ + return pci_register_driver(&intel_lpss_pci_driver); +} +fs_initcall_sync(intel_lpss_pci_driver_init); + +static void __exit intel_lpss_pci_driver_exit(void) +{ + pci_unregister_driver(&intel_lpss_pci_driver); +} +module_exit(intel_lpss_pci_driver_exit); + + +static const struct pci_device_id intel_lpss_pci_uart_ids[] = { + /* BXT A-Step */ + { PCI_VDEVICE(INTEL, 0x0abc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x0abe), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x0ac0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info }, + /* BXT B-Step */ + { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* GLK */ + { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x31ee), (kernel_ulong_t)&bxt_uart_info }, + /* APL */ + { PCI_VDEVICE(INTEL, 0x5abc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x5abe), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x5ac0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info }, + /* SPT-LP */ + { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info }, + /* CNL-LP */ + { PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info }, + /* SPT-H */ + { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa128), 
(kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, + /* KBL-H */ + { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, + /* CNL-H */ + { PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info }, + { } +}; +MODULE_DEVICE_TABLE(pci, intel_lpss_pci_uart_ids); + +static struct pci_driver intel_lpss_pci_uart_driver = { + .name = "intel-lpss-uart", + .id_table = intel_lpss_pci_uart_ids, + .probe = intel_lpss_pci_probe, + .remove = intel_lpss_pci_remove, + .driver = { + .pm = &intel_lpss_pci_pm_ops, + }, +}; +module_pci_driver(intel_lpss_pci_uart_driver); MODULE_AUTHOR("Andy Shevchenko "); MODULE_AUTHOR("Mika Westerberg "); diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 450ae36645aa..cf1120abbf52 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = { .name = "Avoton SoC", .iTCO_version = 3, .gpio_version = AVOTON_GPIO, + .spi_type = INTEL_SPI_BYT, }, [LPC_BAYTRAIL] = { .name = "Bay Trail SoC", diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c index 630bd19b2c0a..98e732a7ae96 100644 --- a/drivers/mfd/mxs-lradc.c +++ b/drivers/mfd/mxs-lradc.c @@ -196,8 +196,10 @@ static int mxs_lradc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, lradc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENOMEM; + if (!res) { + ret = -ENOMEM; + goto err_clk; + } switch (lradc->soc) { case IMX23_LRADC: diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index da16bf45fab4..dc94ffc6321a 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c @@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void) EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk); static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata, - struct device_node *node) + struct device_node *parent) { + struct device_node *node; + if (pdata && pdata->codec) return true; - if (of_find_node_by_name(node, "codec")) + node = of_get_child_by_name(parent, "codec"); + if (node) { + of_node_put(node); return true; + } return false; } diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index d66502d36ba0..dd19f17a1b63 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = { }; -static bool twl6040_has_vibra(struct device_node *node) +static bool twl6040_has_vibra(struct device_node *parent) { -#ifdef CONFIG_OF - if (of_find_node_by_name(node, "vibra")) + struct device_node *node; + + node = of_get_child_by_name(parent, "vibra"); + if (node) { + of_node_put(node); return true; -#endif + } + return false; } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8136dc7e863d..ce2449ad1688 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -506,6 +506,27 @@ config PCI_ENDPOINT_TEST Enable this configuration option to enable the host side test driver for PCI Endpoint. 
+config UID_SYS_STATS
+	bool "Per-UID statistics"
+	depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
+	help
+	  Per-UID CPU time statistics exported to /proc/uid_cputime
+	  Per-UID I/O statistics exported to /proc/uid_io
+	  Per-UID procstat control in /proc/uid_procstat
+
+config UID_SYS_STATS_DEBUG
+	bool "Per-TASK statistics"
+	depends on UID_SYS_STATS
+	default n
+	help
+	  Per-task I/O statistics exported to /proc/uid_io
+
+config MEMORY_STATE_TIME
+	tristate "Memory freq/bandwidth time statistics"
+	depends on PROFILING
+	help
+	  Memory time statistics exported to /sys/kernel/memory_state_time
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index ad0e64fdba34..63e802aebb03 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,6 +57,9 @@ obj-$(CONFIG_ASPEED_LPC_CTRL)	+= aspeed-lpc-ctrl.o
 obj-$(CONFIG_ASPEED_LPC_SNOOP)	+= aspeed-lpc-snoop.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)	+= pci_endpoint_test.o
+obj-$(CONFIG_UID_SYS_STATS)	+= uid_sys_stats.o
+obj-$(CONFIG_MEMORY_STATE_TIME)	+= memory_state_time.o
+
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_bugs.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_heap.o
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 1922cb8f6b88..1c5b7aec13d4 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,7 +15,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name,
 		return ERR_PTR(-EINVAL);
 
 	c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
-	kmemcheck_annotate_bitfield(c2dev, flags);
 	if (unlikely(!c2dev))
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3ba04f371380..81093f8157a9 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -2043,6 +2043,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
 	/* There should only be one entry, but go through the list
 	 * anyway
 	 */
+	if (afu->phb == NULL)
+		return result;
+
 	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 		if (!afu_dev->driver)
 			continue;
@@ -2084,8 +2087,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 		 * Tell the AFU drivers; but we don't care what they
 		 * say, we're going away.
 		 */
-		if (afu->phb != NULL)
-			cxl_vphb_error_detected(afu, state);
+		cxl_vphb_error_detected(afu, state);
 	}
 	return PCI_ERS_RESULT_DISCONNECT;
 }
@@ -2225,6 +2227,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 		if (cxl_afu_select_best_mode(afu))
 			goto err;
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			/* Reset the device context.
 			 * TODO: make this less disruptive
@@ -2287,6 +2292,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			if (afu_dev->driver && afu_dev->driver->err_handler &&
 			    afu_dev->driver->err_handler->resume)
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 764ff5df0dbc..4cc0b42f2acc 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
 	memset(msg, 0, sizeof(msg));
 	msg[0].addr = client->addr;
 	msg[0].buf = addrbuf;
-	addrbuf[0] = 0x90 + offset;
+	/* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+	addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
 	msg[0].len = 1;
 	msg[1].addr = client->addr;
 	msg[1].flags = I2C_M_RD;
@@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 	if (unlikely(!count))
 		return count;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Read data from chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 	if (unlikely(!count))
 		return -EINVAL;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Write data to chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -631,6 +638,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	dev_warn(&client->dev, "page_size looks suspicious (no power of 2)!\n");
 
+	/*
+	 * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+	 * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+	 *
+	 * Eventually we'll get rid of the magic values altogether in favor of
+	 * real structs, but for now just manually set the right size.
+	 */
+	if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+		chip.byte_len = 6;
+
 	/* Use I2C operations unless we're stuck with SMBus extensions.
*/ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { if (chip.flags & AT24_FLAG_ADDR16) @@ -759,7 +776,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->nvmem_config.reg_read = at24_read; at24->nvmem_config.reg_write = at24_write; at24->nvmem_config.priv = at24; - at24->nvmem_config.stride = 4; + at24->nvmem_config.stride = 1; at24->nvmem_config.word_size = 1; at24->nvmem_config.size = chip.byte_len; diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c49e1d2269af..242b6c7b351e 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -43,3 +43,6 @@ config INTEL_MEI_TXE Supported SoCs: Intel Bay Trail + +source "drivers/misc/mei/dal/Kconfig" +source "drivers/misc/mei/spd/Kconfig" diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index cd6825afa8e1..66b218c67b65 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -9,6 +9,7 @@ mei-objs += hbm.o mei-objs += interrupt.o mei-objs += client.o mei-objs += main.o +mei-objs += dma-ring.o mei-objs += bus.o mei-objs += bus-fixup.o mei-$(CONFIG_DEBUG_FS) += debugfs.o @@ -23,3 +24,6 @@ mei-txe-objs += hw-txe.o mei-$(CONFIG_EVENT_TRACING) += mei-trace.o CFLAGS_mei-trace.o = -I$(src) + +obj-$(CONFIG_INTEL_MEI_DAL) += dal/ +obj-$(CONFIG_INTEL_MEI_SPD) += spd/ diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index be64969d986a..81a09e8cd28d 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -460,7 +460,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, if (length == 0) return cb; - cb->buf.data = kmalloc(length, GFP_KERNEL); + cb->buf.data = kmalloc(roundup(length, MEI_DMA_SLOT_SIZE), GFP_KERNEL); if (!cb->buf.data) { mei_io_cb_free(cb); return NULL; @@ -1513,6 +1513,22 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) return rets; } +static inline void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, + struct mei_cl_cb *cb) +{ + mei_hdr->host_addr = mei_cl_host_addr(cb->cl); + mei_hdr->me_addr = mei_cl_me_id(cb->cl); + mei_hdr->reserved = 0; + mei_hdr->msg_complete = 0; + mei_hdr->dma_ring = 0; + mei_hdr->internal = cb->internal; +} + +struct mei_msg_hdr_ext { + struct mei_msg_hdr hdr; + u32 dma_len; +}; + /** * mei_cl_irq_write - write a message to device * from the interrupt thread context @@ -1528,12 +1544,15 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, { struct mei_device *dev; struct mei_msg_data *buf; - struct mei_msg_hdr mei_hdr; + struct mei_msg_hdr_ext ext_hdr; + struct mei_msg_hdr *mei_hdr = &ext_hdr.hdr; size_t len; u32 msg_slots; - int slots; + u32 dr_slots; + int hbuf_slots; int rets; bool first_chunk; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1553,40 +1572,48 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, return 0; } - slots = mei_hbuf_empty_slots(dev); + mei_msg_hdr_init(mei_hdr, cb); + + hbuf_slots = mei_hbuf_empty_slots(dev); + dr_slots = mei_dma_ring_empty_slots(dev); len = buf->size - cb->buf_idx; + data = buf->data + cb->buf_idx; msg_slots = mei_data2slots(len); - mei_hdr.host_addr = mei_cl_host_addr(cl); - mei_hdr.me_addr = mei_cl_me_id(cl); - mei_hdr.reserved = 0; - mei_hdr.internal = cb->internal; - - if (slots >= msg_slots) { - mei_hdr.length = len; - mei_hdr.msg_complete = 1; - /* Split the message only if we can write the whole host buffer */ - } else if (slots == dev->hbuf_depth) { - msg_slots = slots; - len = (slots * sizeof(u32)) - sizeof(struct 
mei_msg_hdr); - mei_hdr.length = len; - mei_hdr.msg_complete = 0; + if (hbuf_slots >= msg_slots) { + mei_hdr->length = len; + mei_hdr->msg_complete = 1; + } else if (dev->hbm_f_dr_supported && hbuf_slots > sizeof(ext_hdr) && + dr_slots) { + if (msg_slots < dr_slots) + mei_hdr->msg_complete = 1; + else + len = mei_slots2data(dr_slots); + + mei_hdr->dma_ring = 1; + mei_hdr->length = sizeof(ext_hdr.dma_len); + ext_hdr.dma_len = len; + data = &ext_hdr.dma_len; + + } else if (hbuf_slots == dev->hbuf_depth) { + len = mei_hbuf_max_len(dev); + mei_hdr->length = len; } else { /* wait for next time the host buffer is empty */ return 0; } - cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", - cb->buf.size, cb->buf_idx); + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data + cb->buf_idx, len); - rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); + rets = mei_write_message(dev, mei_hdr, data); if (rets) goto err; cl->status = 0; cl->writing_state = MEI_WRITING; - cb->buf_idx += mei_hdr.length; - cb->completed = mei_hdr.msg_complete == 1; + cb->buf_idx += len; + cb->completed = mei_hdr->msg_complete == 1; if (first_chunk) { if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { @@ -1595,7 +1622,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } } - if (mei_hdr.msg_complete) + if (mei_hdr->msg_complete) list_move_tail(&cb->list, &dev->write_waiting_list); return 0; @@ -1619,10 +1646,13 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) { struct mei_device *dev; struct mei_msg_data *buf; - struct mei_msg_hdr mei_hdr; - int size; + struct mei_msg_hdr_ext ext_hdr; + struct mei_msg_hdr *mei_hdr = &ext_hdr.hdr; int rets; bool blocking; + size_t len; + u32 hbuf_slots, dr_slots, msg_slots; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1633,10 +1663,11 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) dev = cl->dev; buf = &cb->buf; - size = buf->size; + len = buf->size; + data = buf->data; blocking = cb->blocking; - cl_dbg(dev, cl, "size=%d\n", size); + cl_dbg(dev, cl, "size = %zu\n", len); rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { @@ -1648,11 +1679,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) cb->buf_idx = 0; cl->writing_state = MEI_IDLE; - mei_hdr.host_addr = mei_cl_host_addr(cl); - mei_hdr.me_addr = mei_cl_me_id(cl); - mei_hdr.reserved = 0; - mei_hdr.msg_complete = 0; - mei_hdr.internal = cb->internal; + mei_msg_hdr_init(mei_hdr, cb); rets = mei_cl_tx_flow_ctrl_creds(cl); if (rets < 0) @@ -1660,25 +1687,41 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) if (rets == 0) { cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); - rets = size; goto out; } + if (!mei_hbuf_acquire(dev)) { cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); - rets = size; goto out; } - /* Check for a maximum length */ - if (size > mei_hbuf_max_len(dev)) { - mei_hdr.length = mei_hbuf_max_len(dev); - mei_hdr.msg_complete = 0; + hbuf_slots = mei_hbuf_empty_slots(dev); + dr_slots = mei_dma_ring_empty_slots(dev); + msg_slots = mei_data2slots(len); + + if (hbuf_slots >= msg_slots) { + mei_hdr->length = len; + mei_hdr->msg_complete = 1; + } else if (dev->hbm_f_dr_supported && hbuf_slots > sizeof(ext_hdr) && + dr_slots) { + if (msg_slots < dr_slots) + mei_hdr->msg_complete = 1; + else + len = mei_slots2data(dr_slots); + + mei_hdr->dma_ring = 1; + mei_hdr->length = sizeof(ext_hdr.dma_len); + ext_hdr.dma_len = len; + data = &ext_hdr.dma_len; } else { - mei_hdr.length = size; - 
mei_hdr.msg_complete = 1; + len = mei_hbuf_max_len(dev); + mei_hdr->length = len; } - rets = mei_write_message(dev, &mei_hdr, buf->data); + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data, len); + + rets = mei_write_message(dev, mei_hdr, data); if (rets) goto err; @@ -1687,11 +1730,13 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) goto err; cl->writing_state = MEI_WRITING; - cb->buf_idx = mei_hdr.length; - cb->completed = mei_hdr.msg_complete == 1; + cb->buf_idx = len; + cb->completed = mei_hdr->msg_complete == 1; + /* reset len to the original size for a function return value */ + len = buf->size; out: - if (mei_hdr.msg_complete) + if (mei_hdr->msg_complete) list_add_tail(&cb->list, &dev->write_waiting_list); else list_add_tail(&cb->list, &dev->write_list); @@ -1716,7 +1761,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) } } - rets = size; + rets = len; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); diff --git a/drivers/misc/mei/dal/Kconfig b/drivers/misc/mei/dal/Kconfig new file mode 100644 index 000000000000..8943d4f7f7e4 --- /dev/null +++ b/drivers/misc/mei/dal/Kconfig @@ -0,0 +1,15 @@ +config INTEL_MEI_DAL + tristate "Dynamic Application Loader for ME" + depends on INTEL_MEI + help + Dynamic Application Loader enables downloading java applets + to DAL FW and run it in a secure environment. + The DAL module exposes both user space api and kernel space api. + +config INTEL_MEI_DAL_TEST + tristate "Test Module for Dynamic Application Loader for ME" + depends on INTEL_MEI_DAL + help + Testing Module for Dynamic Application Loader, to test the + kernel space api from a user space client. The test module + calls the kernel space api functions of DAL module. diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile new file mode 100644 index 000000000000..636fb6027e2c --- /dev/null +++ b/drivers/misc/mei/dal/Makefile @@ -0,0 +1,12 @@ +ccflags-y += -D__CHECK_ENDIAN__ + +obj-$(CONFIG_INTEL_MEI_DAL) += mei_dal.o +mei_dal-objs := dal_class.o +mei_dal-objs += acp_parser.o +mei_dal-objs += bh_external.o +mei_dal-objs += bh_internal.o +mei_dal-objs += dal_cdev.o +mei_dal-objs += dal_kdi.o +mei_dal-objs += dal_ta_access.o + +obj-$(CONFIG_INTEL_MEI_DAL_TEST) += dal_test.o diff --git a/drivers/misc/mei/dal/acp_format.h b/drivers/misc/mei/dal/acp_format.h new file mode 100644 index 000000000000..6f9c1f5b7647 --- /dev/null +++ b/drivers/misc/mei/dal/acp_format.h @@ -0,0 +1,253 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. 
+ * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef _ACP_FORMAT_H +#define _ACP_FORMAT_H + +#include + +#define AC_MAX_INS_REASONS_LENGTH 1024 +#define AC_MAX_USED_SERVICES 20 +#define AC_MAX_PROPS_LENGTH 2048 +#define AC_MAX_PACK_HASH_LEN 32 + +/** + * enum ac_cmd_id - acp file command (acp type) + * + * @AC_CMD_INVALID: invalid command + * @AC_INSTALL_SD: install new sub security domain + * @AC_UNINSTALL_SD: uninstall sub security domain + * @AC_INSTALL_JTA: install java ta + * @AC_UNINSTALL_JTA: uninstall java ta + * @AC_INSTALL_NTA: install native ta (currently NOT SUPPORTED) + * @AC_UNINSTALL_NTA: uninstall native ta (currently NOT SUPPORTED) + * @AC_UPDATE_SVL: update the security version list + * @AC_INSTALL_JTA_PROP: ta properties for installation + * @AC_CMD_NUM: number of acp commands + */ +enum ac_cmd_id { + AC_CMD_INVALID, + AC_INSTALL_SD, + AC_UNINSTALL_SD, + AC_INSTALL_JTA, + AC_UNINSTALL_JTA, + AC_INSTALL_NTA, + AC_UNINSTALL_NTA, + AC_UPDATE_SVL, + AC_INSTALL_JTA_PROP, + AC_CMD_NUM +}; + +/** + * struct ac_pack_hash - ta pack hash + * + * @data: ta hash + */ +struct ac_pack_hash { + u8 data[AC_MAX_PACK_HASH_LEN]; +} __packed; + +/** + * struct ac_pack_header - admin comman pack header + * + * @magic: magic string which represents an ACP + * @version: package format version + * @byte_order: byte order of package (0 big endian, 1 little endian) + * @reserved: reserved bytes + * @size: total package size + * @cmd_id: acp command (acp file type) + * @svn: security version number + * + * @idx_num: the number of the indexed sections + * @idx_condition: condition section offset + * @idx_data: data section offset + */ +struct ac_pack_header { + /*ACP Header*/ + u8 magic[4]; + u8 version; + u8 byte_order; + u16 reserved; + u32 size; + u32 cmd_id; + u32 svn; + + /* Index Section */ + 
u32 idx_num; + u32 idx_condition; + u32 idx_data; +} __packed; + +/** + * struct ac_ta_id_list - A list of ta ids which the ta + * is allowed to communicate with. + * + * @num: ta ids count + * @list: ta ids list + */ +struct ac_ta_id_list { + u32 num; + uuid_t list[0]; +} __packed; + +/** + * struct ac_prop_list - TLV list of acp properties + * + * @num: number of properties + * @len: size of all properties + * @data: acp properties. TLV format is "type\0key\0value\0" + * (e.g. string\0name\0Tom\0int\0Age\013\0) + */ +struct ac_prop_list { + u32 num; + u32 len; + s8 data[0]; +} __packed; + +/** + * struct ac_ins_reasons - list of event codes that can be + * received or posted by ta + * + * @len: event codes count + * @data: event codes list + */ +struct ac_ins_reasons { + u32 len; + u32 data[0]; +} __packed; + +/** + * struct ac_pack - general struct to hold parsed acp content + * + * @head: acp pack header + * @data: acp parsed content + */ +struct ac_pack { + struct ac_pack_header *head; + char data[0]; +} __packed; + +/** + * struct ac_ins_ta_header - ta installation header + * + * @ta_id: ta id + * @ta_svn: ta security version number + * @hash_alg_type: ta hash algorithm type + * @ta_reserved: reserved bytes + * @hash: ta pack hash + */ +struct ac_ins_ta_header { + uuid_t ta_id; + u32 ta_svn; + u8 hash_alg_type; + u8 ta_reserved[3]; + struct ac_pack_hash hash; +} __packed; + +/** + * struct ac_ins_jta_pack - ta installation information + * + * @ins_cond: ta install conditions (contains some of the manifest data, + * including security.version, applet.version, applet.platform, + * applet.api.level) + * @head: ta installation header + */ +struct ac_ins_jta_pack { + struct ac_prop_list *ins_cond; + struct ac_ins_ta_header *head; +} __packed; + +/** + * struct ac_ins_jta_prop_header - ta manifest header + * + * @mem_quota: ta heap size + * @ta_encrypted: ta encrypted by provider flag + * @padding: padding + * @allowed_inter_session_num: allowed internal session count + * @ac_groups: ta permission groups + * @timeout: ta timeout in milliseconds + */ +struct ac_ins_jta_prop_header { + u32 mem_quota; + u8 ta_encrypted; + u8 padding; + u16 allowed_inter_session_num; + u64 ac_groups; + u32 timeout; +} __packed; + +/** + * struct ac_ins_jta_prop - ta manifest + * + * @head: manifest header + * @post_reasons: list of event codes that can be posted by ta + * @reg_reasons: list of event codes that can be received by ta + * @prop: all other manifest fields (acp properties) + * @used_service_list: list of ta ids which ta is allowed to communicate with + */ +struct ac_ins_jta_prop { + struct ac_ins_jta_prop_header *head; + struct ac_ins_reasons *post_reasons; + struct ac_ins_reasons *reg_reasons; + struct ac_prop_list *prop; + struct ac_ta_id_list *used_service_list; +} __packed; + +#endif /* _ACP_FORMAT_H */ diff --git a/drivers/misc/mei/dal/acp_parser.c b/drivers/misc/mei/dal/acp_parser.c new file mode 100644 index 000000000000..32e00cf8a74c --- /dev/null +++ b/drivers/misc/mei/dal/acp_parser.c @@ -0,0 +1,562 @@ +/****************************************************************************** + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. 
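Editor's note (not part of the patch): the "type\0key\0value\0" packing documented for struct ac_prop_list above is easy to misread, so here is a minimal sketch of how a consumer might walk it, assuming the blob is well formed and p->len really bounds p->data. The function name and the pr_debug output are illustrative only.

/* Illustrative only: iterate "type\0key\0value\0" triplets in an ac_prop_list. */
static void dump_acp_props(const struct ac_prop_list *p)
{
	const char *cur = (const char *)p->data;
	const char *end = cur + p->len;
	u32 i;

	for (i = 0; i < p->num && cur < end; i++) {
		const char *type = cur;
		const char *key, *val;

		key = type + strnlen(type, end - type) + 1;
		if (key >= end)
			break;
		val = key + strnlen(key, end - key) + 1;
		if (val >= end)
			break;
		pr_debug("prop %u: type=%s key=%s value=%s\n", i, type, key, val);
		cur = val + strnlen(val, end - val) + 1;
	}
}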
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include + +#include "acp_format.h" +#include "acp_parser.h" + +#define PR_ALIGN 4 + +/* CSS Header + CSS Crypto Block + * Prefixes each signed ACP package + */ +#define AC_CSS_HEADER_LENGTH (128 + 520) + +/** + * struct ac_pr_state - admin command pack reader state + * + * @cur : current read position + * @head : acp file head + * @total : size of acp file + */ +struct ac_pr_state { + const char *cur; + const char *head; + unsigned int total; +}; + +/** + * ac_pr_init - init pack reader + * + * @pr: pack reader + * @data: acp file content (without CSS header) + * @n: acp file size (without CSS header) + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_init(struct ac_pr_state *pr, const char *data, + unsigned int n) +{ + /* check integer overflow */ + if ((size_t)data > SIZE_MAX - n) + return -EINVAL; + + pr->cur = data; + pr->head = data; + pr->total = n; + return 0; +} + +/** + * ac_pr_8b_align_move - update pack reader cur pointer after reading n_move + * bytes. Leave cur aligned to 8 bytes. + * (e.g. 
when n_move is 3, increase cur by 8) + * + * @pr: pack reader + * @n_move: number of bytes to move cur pointer ahead + * will be rounded up to keep cur 8 bytes aligned + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_8b_align_move(struct ac_pr_state *pr, size_t n_move) +{ + unsigned long offset; + const char *new_cur = pr->cur + n_move; + size_t len_from_head = new_cur - pr->head; + + if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head) + return -EINVAL; + + offset = ((8 - (len_from_head & 7)) & 7); + if ((size_t)new_cur > SIZE_MAX - offset) + return -EINVAL; + + new_cur = new_cur + offset; + if (new_cur > pr->head + pr->total) + return -EINVAL; + + pr->cur = new_cur; + return 0; +} + +/** + * ac_pr_align_move - update pack reader cur pointer after reading n_move bytes + * Leave cur aligned to 4 bytes. + * (e.g. when n_move is 1, increase cur by 4) + * + * @pr: pack reader + * @n_move: number of bytes to move cur pointer ahead + * will be rounded up to keep cur 4 bytes aligned + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_align_move(struct ac_pr_state *pr, size_t n_move) +{ + const char *new_cur = pr->cur + n_move; + size_t len_from_head = new_cur - pr->head; + size_t offset; + + if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head) + return -EINVAL; + + offset = ((4 - (len_from_head & 3)) & 3); + if ((size_t)new_cur > SIZE_MAX - offset) + return -EINVAL; + + new_cur = new_cur + offset; + if (new_cur > pr->head + pr->total) + return -EINVAL; + + pr->cur = new_cur; + return 0; +} + +/** + * ac_pr_move - update pack reader cur pointer after reading n_move bytes + * + * @pr: pack reader + * @n_move: number of bytes to move cur pointer ahead + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_move(struct ac_pr_state *pr, size_t n_move) +{ + const char *new_cur = pr->cur + n_move; + + /* integer overflow or out of acp pkg size */ + if ((size_t)pr->cur > SIZE_MAX - n_move || + new_cur > pr->head + pr->total) + return -EINVAL; + + pr->cur = new_cur; + + return 0; +} + +/** + * ac_pr_is_safe_to_read - check whether it is safe to read more n_move + * bytes from the acp file + * + * @pr: pack reader + * @n_move: number of bytes to check if it is safe to read + * + * Return: true when it is safe to read more n_move bytes + * false otherwise + */ +static bool ac_pr_is_safe_to_read(const struct ac_pr_state *pr, size_t n_move) +{ + /* pointer overflow */ + if ((size_t)pr->cur > SIZE_MAX - n_move) + return false; + + if (pr->cur + n_move > pr->head + pr->total) + return false; + + return true; +} + +/** + * ac_pr_is_end - check if cur is at the end of the acp file + * + * @pr: pack reader + * + * Return: true when cur is at the end of the acp + * false otherwise + */ +static bool ac_pr_is_end(struct ac_pr_state *pr) +{ + return (pr->cur == pr->head + pr->total); +} + +/** + * acp_load_reasons - load list of event codes that can be + * received or posted by ta + * + * @pr: pack reader + * @reasons: out param to hold the list of event codes + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_reasons(struct ac_pr_state *pr, + struct ac_ins_reasons **reasons) +{ + size_t len; + struct ac_ins_reasons *r; + + if (!ac_pr_is_safe_to_read(pr, sizeof(*r))) + return -EINVAL; + + r = (struct ac_ins_reasons *)pr->cur; + + if (r->len > AC_MAX_INS_REASONS_LENGTH) + return -EINVAL; + + len = sizeof(*r) + r->len * sizeof(r->data[0]); + if 
(!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *reasons = r; + return ac_pr_align_move(pr, len); +} + +/** + * acp_load_taid_list - load list of ta ids which ta is allowed + * to communicate with + * + * @pr: pack reader + * @taid_list: out param to hold the loaded ta ids + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_taid_list(struct ac_pr_state *pr, + struct ac_ta_id_list **taid_list) +{ + size_t len; + struct ac_ta_id_list *t; + + if (!ac_pr_is_safe_to_read(pr, sizeof(*t))) + return -EINVAL; + + t = (struct ac_ta_id_list *)pr->cur; + if (t->num > AC_MAX_USED_SERVICES) + return -EINVAL; + + len = sizeof(*t) + t->num * sizeof(t->list[0]); + + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *taid_list = t; + return ac_pr_align_move(pr, len); +} + +/** + * acp_load_prop - load property from acp + * + * @pr: pack reader + * @prop: out param to hold the loaded property + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_prop(struct ac_pr_state *pr, struct ac_prop_list **prop) +{ + size_t len; + struct ac_prop_list *p; + + if (!ac_pr_is_safe_to_read(pr, sizeof(*p))) + return -EINVAL; + + p = (struct ac_prop_list *)pr->cur; + if (p->len > AC_MAX_PROPS_LENGTH) + return -EINVAL; + + len = sizeof(*p) + p->len * sizeof(p->data[0]); + + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *prop = p; + return ac_pr_align_move(pr, len); +} + +/** + * acp_load_ta_pack - load ta pack from acp + * + * @pr: pack reader + * @ta_pack: out param to hold the ta pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ta_pack(struct ac_pr_state *pr, char **ta_pack) +{ + size_t len; + char *t; + + /*8 byte align to obey jeff rule*/ + if (ac_pr_8b_align_move(pr, 0)) + return -EINVAL; + + t = (char *)pr->cur; + + /* + *assume ta pack is the last item of one package, + *move cursor to the end directly + */ + if (pr->cur > pr->head + pr->total) + return -EINVAL; + + len = pr->head + pr->total - pr->cur; + if (!ac_pr_is_safe_to_read(pr, len)) + return -EINVAL; + + *ta_pack = t; + return ac_pr_move(pr, len); +} + +/** + * acp_load_ins_jta_prop_head - load ta manifest header + * + * @pr: pack reader + * @head: out param to hold manifest header + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta_prop_head(struct ac_pr_state *pr, + struct ac_ins_jta_prop_header **head) +{ + if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) + return -EINVAL; + + *head = (struct ac_ins_jta_prop_header *)pr->cur; + return ac_pr_align_move(pr, sizeof(**head)); +} + +/** + * acp_load_ins_jta_prop - load ta properties information (ta manifest) + * + * @pr: pack reader + * @pack: out param to hold ta manifest + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta_prop(struct ac_pr_state *pr, + struct ac_ins_jta_prop *pack) +{ + int ret; + + ret = acp_load_ins_jta_prop_head(pr, &pack->head); + if (ret) + return ret; + + ret = acp_load_reasons(pr, &pack->post_reasons); + if (ret) + return ret; + + ret = acp_load_reasons(pr, &pack->reg_reasons); + if (ret) + return ret; + + ret = acp_load_prop(pr, &pack->prop); + if (ret) + return ret; + + ret = acp_load_taid_list(pr, &pack->used_service_list); + + return ret; +} + +/** + * acp_load_ins_jta_head - load ta installation header + * + * @pr: pack reader + * @head: out param to hold the installation header + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ 
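Editor's note (not part of the patch): a concrete walk-through of the align-and-move helpers above may help. ac_pr_align_move() rounds the new read position up to the next 4-byte boundary measured from pr->head: if cur - head is 4 and n_move is 1, then len_from_head = 5, offset = (4 - (5 & 3)) & 3 = 3, and cur ends up at head + 8. ac_pr_8b_align_move() does the same against an 8-byte boundary, which is why a 3-byte move from an aligned position advances cur by 8, as the kernel-doc example above states.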
+static int acp_load_ins_jta_head(struct ac_pr_state *pr, + struct ac_ins_ta_header **head) +{ + if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) + return -EINVAL; + + *head = (struct ac_ins_ta_header *)pr->cur; + return ac_pr_align_move(pr, sizeof(**head)); +} + +/** + * acp_load_ins_jta - load ta installation information from acp + * + * @pr: pack reader + * @pack: out param to hold install information + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_ins_jta(struct ac_pr_state *pr, + struct ac_ins_jta_pack *pack) +{ + int ret; + + ret = acp_load_prop(pr, &pack->ins_cond); + if (ret) + return ret; + + ret = acp_load_ins_jta_head(pr, &pack->head); + + return ret; +} + +/** + * acp_load_pack_head - load acp pack header + * + * @pr: pack reader + * @head: out param to hold the acp header + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_pack_head(struct ac_pr_state *pr, + struct ac_pack_header **head) +{ + if (!ac_pr_is_safe_to_read(pr, sizeof(**head))) + return -EINVAL; + + *head = (struct ac_pack_header *)pr->cur; + return ac_pr_align_move(pr, sizeof(**head)); +} + +/** + * acp_load_pack - load and parse pack from acp file + * + * @raw_pack: acp file content, without the acp CSS header + * @size: acp file size (without CSS header) + * @cmd_id: command id + * @pack: out param to hold the loaded pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int acp_load_pack(const char *raw_pack, unsigned int size, + unsigned int cmd_id, struct ac_pack *pack) +{ + int ret; + struct ac_pr_state pr; + struct ac_ins_jta_pack_ext *pack_ext; + struct ac_ins_jta_prop_ext *prop_ext; + + ret = ac_pr_init(&pr, raw_pack, size); + if (ret) + return ret; + + if (cmd_id != AC_INSTALL_JTA_PROP) { + ret = acp_load_pack_head(&pr, &pack->head); + if (ret) + return ret; + } + + if (cmd_id != AC_INSTALL_JTA_PROP && cmd_id != pack->head->cmd_id) + return -EINVAL; + + switch (cmd_id) { + case AC_INSTALL_JTA: + pack_ext = (struct ac_ins_jta_pack_ext *)pack; + ret = acp_load_ins_jta(&pr, &pack_ext->cmd_pack); + if (ret) + break; + ret = acp_load_ta_pack(&pr, &pack_ext->ta_pack); + break; + case AC_INSTALL_JTA_PROP: + prop_ext = (struct ac_ins_jta_prop_ext *)pack; + ret = acp_load_ins_jta_prop(&pr, &prop_ext->cmd_pack); + if (ret) + break; + /* Note: the next section is JEFF file, + * and not ta_pack(JTA_properties+JEFF file), + * but we could reuse the ACP_load_ta_pack() here. 
+ */ + ret = acp_load_ta_pack(&pr, &prop_ext->jeff_pack); + break; + default: + return -EINVAL; + } + + if (!ac_pr_is_end(&pr)) + return -EINVAL; + + return ret; +} + +/** + * acp_pload_ins_jta - load and parse ta pack from acp file + * + * Exported function in acp parser API + * + * @raw_data: acp file content + * @size: acp file size + * @pack: out param to hold the ta pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +int acp_pload_ins_jta(const void *raw_data, unsigned int size, + struct ac_ins_jta_pack_ext *pack) +{ + int ret; + + if (!raw_data || size <= AC_CSS_HEADER_LENGTH || !pack) + return -EINVAL; + + ret = acp_load_pack((const char *)raw_data + AC_CSS_HEADER_LENGTH, + size - AC_CSS_HEADER_LENGTH, + AC_INSTALL_JTA, (struct ac_pack *)pack); + + return ret; +} diff --git a/drivers/misc/mei/dal/acp_parser.h b/drivers/misc/mei/dal/acp_parser.h new file mode 100644 index 000000000000..11b104ffa71f --- /dev/null +++ b/drivers/misc/mei/dal/acp_parser.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef _ACP_PARSER_H +#define _ACP_PARSER_H + +#include "acp_format.h" + +/** + * struct ac_ins_jta_pack_ext - parsed ta pack from acp file + * + * @head: acp pack header + * @cmd_pack: ta installation information pack + * @ta_pack: raw ta pack + */ +struct ac_ins_jta_pack_ext { + struct ac_pack_header *head; + struct ac_ins_jta_pack cmd_pack; + char *ta_pack; +} __packed; + +/** + * struct ac_ins_jta_prop_ext - parsed ta properties information + * from acp file + * + * @cmd_pack: ta installation properties pack + * @jeff_pack: ta jeff pack + */ +struct ac_ins_jta_prop_ext { + struct ac_ins_jta_prop cmd_pack; + char *jeff_pack; +} __packed; + +int acp_pload_ins_jta(const void *raw_data, unsigned int size, + struct ac_ins_jta_pack_ext *pack); + +#endif /* _ACP_PARSER_H */ diff --git a/drivers/misc/mei/dal/bh_cmd_defs.h b/drivers/misc/mei/dal/bh_cmd_defs.h new file mode 100644 index 000000000000..aa86f522628e --- /dev/null +++ b/drivers/misc/mei/dal/bh_cmd_defs.h @@ -0,0 +1,293 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
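Editor's note (not part of the patch): the sketch below shows the parser entry point acp_pload_ins_jta() being driven by a caller that already holds a signed ACP image in memory; the CSS header is included in the buffer and skipped internally via AC_CSS_HEADER_LENGTH. The wrapper name and the pr_debug line are illustrative; the return values follow the 0 / -EINVAL contract documented above.

/* Illustrative only: parse a signed ACP image (CSS header included). */
static int parse_acp_blob(const void *acp, unsigned int acp_size)
{
	struct ac_ins_jta_pack_ext pack;
	int ret;

	ret = acp_pload_ins_jta(acp, acp_size, &pack);
	if (ret)
		return ret;	/* -EINVAL on short, malformed or mis-typed input */

	/*
	 * All pointers in 'pack' (header, install conditions, raw ta pack)
	 * point back into the caller's buffer; nothing is copied.
	 */
	pr_debug("ACP parsed: ta pack at %p, svn %u\n",
		 pack.ta_pack, pack.cmd_pack.head->ta_svn);
	return 0;
}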
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __BH_DAL_H_ +#define __BH_DAL_H_ + +#include +#include + +/** + * enum bh_command_id - bh command ids + * + * @BHP_CMD_INIT: init command + * @BHP_CMD_DEINIT: deinit command + * @BHP_CMD_VERIFY_JAVATA: verify ta + * @BHP_CMD_DOWNLOAD_JAVATA: download ta to DAL + * @BHP_CMD_OPEN_JTASESSION: open session to ta + * @BHP_CMD_CLOSE_JTASESSION: close session with ta + * @BHP_CMD_FORCECLOSE_JTASESSION: force close session + * @BHP_CMD_SENDANDRECV: send and receive messages to ta + * @BHP_CMD_SENDANDRECV_INTERNAL: internal send and receive + * @BHP_CMD_RUN_NATIVETA: run native trusted application + * (currently NOT SUPPORTED) + * @BHP_CMD_STOP_NATIVETA: stop running native ta (currently NOT SUPPORTED) + * @BHP_CMD_OPEN_SDSESSION: open security domain session + * @BHP_CMD_CLOSE_SDSESSION: close security domain session + * @BHP_CMD_INSTALL_SD: install new sub security domain + * @BHP_CMD_UNINSTALL_SD: uninstall sub security domain + * @BHP_CMD_INSTALL_JAVATA: install java ta + * @BHP_CMD_UNINSTALL_JAVATA: uninstall java ta + * @BHP_CMD_INSTALL_NATIVETA: install native ta (currently NOT SUPPORTED) + * @BHP_CMD_UNINSTALL_NATIVETA: uninstall native ta (currently NOT SUPPORTED) + * @BHP_CMD_LIST_SD: get list of all security domains + * @BHP_CMD_LIST_TA: get list of all installed trusted applications + * @BHP_CMD_RESET: reset command + * @BHP_CMD_LIST_TA_PROPERTIES: get list of all ta properties (ta manifest) + * @BHP_CMD_QUERY_TA_PROPERTY: query specified ta property + * @BHP_CMD_LIST_JTA_SESSIONS: get list of all opened ta sessions + * @BHP_CMD_LIST_TA_PACKAGES: get list of all ta packages in DAL + * @BHP_CMD_GET_ISD: get Intel security domain uuid + * @BHP_CMD_GET_SD_BY_TA: get security domain id of ta + * @BHP_CMD_LAUNCH_VM: launch IVM + * @BHP_CMD_CLOSE_VM: close IVM + * @BHP_CMD_QUERY_NATIVETA_STATUS: query specified native ta status + * (currently NOT SUPPORTED) + * @BHP_CMD_QUERY_SD_STATUS: query specified security domain status + * @BHP_CMD_LIST_DOWNLOADED_NTA: get list of all native trusted applications + * (currently NOT SUPPORTED) + * @BHP_CMD_UPDATE_SVL: update security version list + * @BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE: check if ta security version is blocked + * @BHP_CMD_QUERY_TEE_METADATA: get DAL metadata (including api_level, + * library_version, dal_key_hash and more) + * + * @BHP_CMD_MAX: max command id + */ + +enum bh_command_id { + BHP_CMD_INIT = 0, + BHP_CMD_DEINIT, + BHP_CMD_VERIFY_JAVATA, + BHP_CMD_DOWNLOAD_JAVATA, + BHP_CMD_OPEN_JTASESSION, + BHP_CMD_CLOSE_JTASESSION, + BHP_CMD_FORCECLOSE_JTASESSION, + BHP_CMD_SENDANDRECV, + BHP_CMD_SENDANDRECV_INTERNAL, + BHP_CMD_RUN_NATIVETA, +
BHP_CMD_STOP_NATIVETA, + BHP_CMD_OPEN_SDSESSION, + BHP_CMD_CLOSE_SDSESSION, + BHP_CMD_INSTALL_SD, + BHP_CMD_UNINSTALL_SD, + BHP_CMD_INSTALL_JAVATA, + BHP_CMD_UNINSTALL_JAVATA, + BHP_CMD_INSTALL_NATIVETA, + BHP_CMD_UNINSTALL_NATIVETA, + BHP_CMD_LIST_SD, + BHP_CMD_LIST_TA, + BHP_CMD_RESET, + BHP_CMD_LIST_TA_PROPERTIES, + BHP_CMD_QUERY_TA_PROPERTY, + BHP_CMD_LIST_JTA_SESSIONS, + BHP_CMD_LIST_TA_PACKAGES, + BHP_CMD_GET_ISD, + BHP_CMD_GET_SD_BY_TA, + BHP_CMD_LAUNCH_VM, + BHP_CMD_CLOSE_VM, + BHP_CMD_QUERY_NATIVETA_STATUS, + BHP_CMD_QUERY_SD_STATUS, + BHP_CMD_LIST_DOWNLOADED_NTA, + BHP_CMD_UPDATE_SVL, + BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE, + BHP_CMD_QUERY_TEE_METADATA, + BHP_CMD_MAX +}; + +#define BH_MSG_RESP_MAGIC 0x55aaa5ff +#define BH_MSG_CMD_MAGIC 0x55aaa3ff + +/** + * struct bh_msg_header - transport header + * + * @magic: BH_MSG_RESP/CMD_MAGIC + * @length: overall message length + */ +struct bh_msg_header { + u32 magic; + u32 length; +}; + +/** + * struct bh_command_header - bh command header + * + * @h: transport header + * @seq: message sequence number + * @id: the command id (enum bh_command_id) + * @pad: padded for 64 bit + * @cmd: command buffer + */ +struct bh_command_header { + struct bh_msg_header h; + u64 seq; + u32 id; + u8 pad[4]; + s8 cmd[0]; +} __packed; + +/** + * struct bh_response_header - response header (from the DAL) + * + * @h: transport header + * @seq: message sequence number + * @ta_session_id: session id (DAL firmware address) + * @code: response code + * @pad: padded for 64 bit + * @data: response buffer + */ +struct bh_response_header { + struct bh_msg_header h; + u64 seq; + u64 ta_session_id; + s32 code; + u8 pad[4]; + s8 data[0]; +} __packed; + +/** + * struct bh_download_jta_cmd - download java trusted application. + * + * @ta_id: trusted application (ta) id + * @ta_blob: trusted application blob + */ +struct bh_download_jta_cmd { + uuid_t ta_id; + s8 ta_blob[0]; +} __packed; + +/** + * struct bh_open_jta_session_cmd - open session to TA command + * + * @ta_id: trusted application (ta) id + * @buffer: session initial parameters (optional) + */ +struct bh_open_jta_session_cmd { + uuid_t ta_id; + s8 buffer[0]; +} __packed; + +/** + * struct bh_close_jta_session_cmd - close session to TA command + * + * @ta_session_id: session id + */ +struct bh_close_jta_session_cmd { + u64 ta_session_id; +} __packed; + +/** + * struct bh_cmd - bh command + * + * @ta_session_id: session id + * @command: command id to ta + * @outlen: length of output buffer + * @buffer: data to send + */ +struct bh_cmd { + u64 ta_session_id; + s32 command; + u32 outlen; + s8 buffer[0]; +} __packed; + +/** + * struct bh_check_svl_ta_blocked_state_cmd - command to check if + * the trusted application security version is blocked + * + * @ta_id: trusted application id + */ +struct bh_check_svl_jta_blocked_state_cmd { + uuid_t ta_id; +} __packed; + +/** + * struct bh_resp - bh response + * + * @response: response code. Originated from java in big endian format + * @buffer: response buffer + */ +struct bh_resp { + __be32 response; + s8 buffer[0]; +} __packed; + +/** + * struct bh_resp_bof - response when output buffer is too small + * + * @response: response code. 
Originated from java in big endian format + * @request_length: the needed output buffer length + */ +struct bh_resp_bof { + __be32 response; + __be32 request_length; +} __packed; + +/** + * struct bh_resp_list_ta_packages - list of ta packages from DAL + * + * @count: count of ta packages + * @ta_ids: ta packages ids + */ +struct bh_resp_list_ta_packages { + u32 count; + uuid_t ta_ids[0]; +} __packed; + +#endif /* __BH_DAL_H_*/ diff --git a/drivers/misc/mei/dal/bh_errcode.h b/drivers/misc/mei/dal/bh_errcode.h new file mode 100644 index 000000000000..127324230c3a --- /dev/null +++ b/drivers/misc/mei/dal/bh_errcode.h @@ -0,0 +1,208 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __BH_SHARED_ERRCODE_H +#define __BH_SHARED_ERRCODE_H + +/* + * BH Error codes numbers across Beihai Host and Firmware. 
+ */ + +#define BH_SUCCESS 0x000 + +/* BHP specific error code section */ + +#define BPE_NOT_INIT 0x001 +#define BPE_SERVICE_UNAVAILABLE 0x002 +#define BPE_INTERNAL_ERROR 0x003 +#define BPE_COMMS_ERROR 0x004 +#define BPE_OUT_OF_MEMORY 0x005 +#define BPE_INVALID_PARAMS 0x006 +#define BPE_MESSAGE_TOO_SHORT 0x007 +#define BPE_MESSAGE_ILLEGAL 0x008 +#define BPE_NO_CONNECTION_TO_FIRMWARE 0x009 +#define BPE_NOT_IMPLEMENT 0x00A +#define BPE_OUT_OF_RESOURCE 0x00B +#define BPE_INITIALIZED_ALREADY 0x00C +#define BPE_CONNECT_FAILED 0x00D + +/* General error code section for Beihai on FW: 0x100 */ + +#define BHE_OUT_OF_MEMORY 0x101 +#define BHE_BAD_PARAMETER 0x102 +#define BHE_INSUFFICIENT_BUFFER 0x103 +#define BHE_MUTEX_INIT_FAIL 0x104 +#define BHE_COND_INIT_FAIL 0x105 +#define BHE_WD_TIMEOUT 0x106 +#define BHE_FAILED 0x107 +#define BHE_INVALID_HANDLE 0x108 +#define BHE_IPC_ERR_DEFAULT 0x109 +#define BHE_IPC_ERR_PLATFORM 0x10A +#define BHE_IPC_SRV_INIT_FAIL 0x10B + +/* VM communication error code section: 0x200 */ + +#define BHE_MAILBOX_NOT_FOUND 0x201 +#define BHE_APPLET_CRASHED BHE_MAILBOX_NOT_FOUND +#define BHE_MSG_QUEUE_IS_FULL 0x202 +#define BHE_MAILBOX_DENIED 0x203 + +/* VM InternalAppletCommunication error 0x240 */ + +#define BHE_IAC_INTERNAL_SESSION_NUM_EXCEED 0x241 +#define BHE_IAC_CLIENT_SLOT_FULL 0x242 +#define BHE_IAC_SERVICETA_EXITED 0x243 +#define BHE_IAC_EXIST_INTERNAL_SESSION 0x244 +#define BHE_IAC_SERVICETA_UNCAUGHT_EXCEPTION 0x245 +#define BHE_IAC_SERVICE_SESSION_NOT_FOUND 0x246 +#define BHE_IAC_SERVICE_HOST_SESSION_NUM_EXCEED 0x247 + +/* Firmware thread/mutex error code section: 0x280 */ +#define BHE_THREAD_ERROR 0x281 +#define BHE_THREAD_TIMED_OUT 0x282 + +/* Applet manager error code section: 0x300 */ + +#define BHE_LOAD_JEFF_FAIL 0x303 +#define BHE_PACKAGE_NOT_FOUND 0x304 +#define BHE_EXIST_LIVE_SESSION 0x305 +#define BHE_VM_INSTANCE_INIT_FAIL 0x306 +#define BHE_QUERY_PROP_NOT_SUPPORT 0x307 +#define BHE_INVALID_BPK_FILE 0x308 +#define BHE_PACKAGE_EXIST 0x309 +#define BHE_VM_INSTNACE_NOT_FOUND 0x312 +#define BHE_STARTING_JDWP_FAIL 0x313 +#define BHE_GROUP_CHECK_FAIL 0x314 +#define BHE_SDID_UNMATCH 0x315 +#define BHE_APPPACK_UNINITED 0x316 +#define BHE_SESSION_NUM_EXCEED 0x317 +#define BHE_TA_PACKAGE_HASH_VERIFY_FAIL 0x318 +#define BHE_SWITCH_ISD 0x319 +#define BHE_OPERATION_NOT_PERMITTED 0x31A + +/* VM Applet instance error code section: 0x400 */ +#define BHE_APPLET_GENERIC 0x400 +#define BHE_UNCAUGHT_EXCEPTION 0x401 +/* Bad parameters to applet */ +#define BHE_APPLET_BAD_PARAMETER 0x402 +/* Small response buffer */ +#define BHE_APPLET_SMALL_BUFFER 0x403 +/* Bad state */ +#define BHE_BAD_STATE 0x404 + +/*TODO: Should be removed these UI error code when integrate with ME 9 */ +#define BHE_UI_EXCEPTION 0x501 +#define BHE_UI_ILLEGAL_USE 0x502 +#define BHE_UI_ILLEGAL_PARAMETER 0x503 +#define BHE_UI_NOT_INITIALIZED 0x504 +#define BHE_UI_NOT_SUPPORTED 0x505 +#define BHE_UI_OUT_OF_RESOURCES 0x506 + +/* BeiHai VMInternalError code section: 0x600 */ +#define BHE_UNKNOWN 0x602 +#define BHE_MAGIC_UNMATCH 0x603 +#define BHE_UNIMPLEMENTED 0x604 +#define BHE_INTR 0x605 +#define BHE_CLOSED 0x606 +/* TODO: no used error, should remove*/ +#define BHE_BUFFER_OVERFLOW 0x607 +#define BHE_NOT_SUPPORTED 0x608 +#define BHE_WEAR_OUT_VIOLATION 0x609 +#define BHE_NOT_FOUND 0x610 +#define BHE_INVALID_PARAMS 0x611 +#define BHE_ACCESS_DENIED 0x612 +#define BHE_INVALID 0x614 +#define BHE_TIMEOUT 0x615 + +/* SDM specific error code section: 0x800 */ +#define BHE_SDM_FAILED 0x800 +#define BHE_SDM_NOT_FOUND 
0x801 +#define BHE_SDM_ALREADY_EXIST 0x803 +#define BHE_SDM_TATYPE_MISMATCH 0x804 +#define BHE_SDM_TA_NUMBER_LIMIT 0x805 +#define BHE_SDM_SIGNAGURE_VERIFY_FAIL 0x806 +#define BHE_SDM_PERMGROUP_CHECK_FAIL 0x807 +#define BHE_SDM_INSTALL_CONDITION_FAIL 0x808 +#define BHE_SDM_SVN_CHECK_FAIL 0x809 +#define BHE_SDM_TA_DB_NO_FREE_SLOT 0x80A +#define BHE_SDM_SD_DB_NO_FREE_SLOT 0x80B +#define BHE_SDM_SVL_DB_NO_FREE_SLOT 0x80C +#define BHE_SDM_SVL_CHECK_FAIL 0x80D +#define BHE_SDM_DB_READ_FAIL 0x80E +#define BHE_SDM_DB_WRITE_FAIL 0x80F + +/* Launcher specific error code section: 0x900 */ +#define BHE_LAUNCHER_INIT_FAILED 0x901 +#define BHE_SD_NOT_INSTALLED 0x902 +#define BHE_NTA_NOT_INSTALLED 0x903 +#define BHE_PROCESS_SPAWN_FAILED 0x904 +#define BHE_PROCESS_KILL_FAILED 0x905 +#define BHE_PROCESS_ALREADY_RUNNING 0x906 +#define BHE_PROCESS_IN_TERMINATING 0x907 +#define BHE_PROCESS_NOT_EXIST 0x908 +#define BHE_PLATFORM_API_ERR 0x909 +#define BHE_PROCESS_NUM_EXCEED 0x09A + +/* + * BeihaiHAL Layer error code section: + * 0x1000,0x2000 reserved here, defined in CSG BeihaiStatusHAL.h + */ + +#endif /* __BH_SHARED_ERRCODE_H */ diff --git a/drivers/misc/mei/dal/bh_external.c b/drivers/misc/mei/dal/bh_external.c new file mode 100644 index 000000000000..282f9c395b11 --- /dev/null +++ b/drivers/misc/mei/dal/bh_external.c @@ -0,0 +1,582 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include "bh_errcode.h" +#include "bh_external.h" +#include "bh_internal.h" + +/* BH initialization state */ +static atomic_t bh_state = ATOMIC_INIT(0); + +/** + * uuid_is_valid_hyphenless - check if uuid is valid in hyphenless format + * + * @uuid_str: uuid string + * + * Return: true when uuid is valid in hyphenless format + * false when uuid is invalid + */ +static bool uuid_is_valid_hyphenless(const char *uuid_str) +{ + unsigned int i; + + /* exclude (i == 8 || i == 13 || i == 18 || i == 23) */ + for (i = 0; i < UUID_STRING_LEN - 4; i++) + if (!isxdigit(uuid_str[i])) + return false; + + return true; +} + +/** + * uuid_normalize_hyphenless - convert uuid from hyphenless format + * to standard format + * + * @uuid_hl: uuid string in hyphenless format + * @uuid_str: output param to hold uuid string in standard format + */ +static void uuid_normalize_hyphenless(const char *uuid_hl, char *uuid_str) +{ + unsigned int i; + + for (i = 0; i < UUID_STRING_LEN; i++) { + if (i == 8 || i == 13 || i == 18 || i == 23) + uuid_str[i] = '-'; + else + uuid_str[i] = *uuid_hl++; + } + uuid_str[i] = '\0'; +} + +/** + * dal_uuid_parse - convert uuid string to binary form + * + * Input uuid is in either hyphenless or standard format + * + * @uuid_str: uuid string + * @uuid: output param to hold uuid bin + * + * Return: 0 on success + * <0 on failure + */ +int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) +{ + char __uuid_str[UUID_STRING_LEN + 1]; + + if (!uuid_str || !uuid) + return -EINVAL; + + if (uuid_is_valid_hyphenless(uuid_str)) { + uuid_normalize_hyphenless(uuid_str, __uuid_str); + uuid_str = __uuid_str; + } + + return uuid_parse(uuid_str, uuid); +} +EXPORT_SYMBOL(dal_uuid_parse); + +/** + * bh_msg_is_response - check if message is response + * + * @msg: message + * @len: message length + * + * Return: true when message is response + * false otherwise + */ +bool bh_msg_is_response(const void *msg, size_t len) +{ + const struct bh_response_header *r = msg; + + return (len >= sizeof(*r) && r->h.magic == BH_MSG_RESP_MAGIC); +} + +/** + * bh_msg_is_cmd - check if message is command + * + * @msg: message + * @len: message length + * + * Return: true when message is command + * false otherwise + */ +bool bh_msg_is_cmd(const void *msg, size_t len) +{ + const struct bh_command_header *c = msg; + + return (len >= sizeof(*c) && c->h.magic == BH_MSG_CMD_MAGIC); +} + +/** + * bh_msg_cmd_hdr - get the command header if message is command + * + * @msg: message + * @len: message length + * + * Return: pointer to the command header when message is command + * NULL otherwise + */ +const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len) +{ + if (!bh_msg_is_cmd(msg, len)) + return NULL; + + return msg; +} + +/** + * bh_msg_is_cmd_open_session - check if command is open session command + * + * @hdr: message header + * + * Return: true 
when command is open session command + * false otherwise + */ +bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr) +{ + return hdr->id == BHP_CMD_OPEN_JTASESSION; +} + +/** + * bh_open_session_ta_id - get ta id from open session command + * + * @hdr: message header + * @count: message size + * + * Return: pointer to ta id when command is valid + * NULL otherwise + */ +const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, + size_t count) +{ + struct bh_open_jta_session_cmd *open_cmd; + + if (count < sizeof(*hdr) + sizeof(*open_cmd)) + return NULL; + + open_cmd = (struct bh_open_jta_session_cmd *)hdr->cmd; + + return &open_cmd->ta_id; +} + +/** + * bh_session_is_killed - check if session is killed + * + * @code: the session return code + * + * Return: true when the session is killed + * false otherwise + */ +static bool bh_session_is_killed(int code) +{ + return (code == BHE_WD_TIMEOUT || + code == BHE_UNCAUGHT_EXCEPTION || + code == BHE_APPLET_CRASHED); +} + +/** + * bh_ta_session_open - open session to ta + * + * This function will block until VM replied the response + * + * @host_id: out param to hold the session host_id + * @ta_id: trusted application (ta) id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * @init_param: init parameters to the session (optional) + * @init_len: length of the init parameters + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_ta_session_open(u64 *host_id, const char *ta_id, + const u8 *ta_pkg, size_t pkg_len, + const u8 *init_param, size_t init_len) +{ + int ret; + uuid_t bin_ta_id; + unsigned int conn_idx; + unsigned int count; + bool found; + uuid_t *ta_ids = NULL; + int i; + + if (!ta_id || !host_id) + return -EINVAL; + + if (!ta_pkg || !pkg_len) + return -EINVAL; + + if (!init_param && init_len != 0) + return -EINVAL; + + if (dal_uuid_parse(ta_id, &bin_ta_id)) + return -EINVAL; + + *host_id = 0; + + ret = bh_proxy_check_svl_jta_blocked_state(&bin_ta_id); + if (ret) + return ret; + + /* 1: vm conn_idx is IVM dal FW client */ + conn_idx = CONN_IDX_IVM; + + /* 2.1: check whether the ta pkg existed in VM or not */ + count = 0; + ret = bh_proxy_list_jta_packages(conn_idx, &count, &ta_ids); + if (ret) + return ret; + + found = false; + for (i = 0; i < count; i++) { + if (uuid_equal(&bin_ta_id, &ta_ids[i])) { + found = true; + break; + } + } + kfree(ta_ids); + + /* 2.2: download ta pkg if not already present. */ + if (!found) { + ret = bh_proxy_dnload_jta(conn_idx, &bin_ta_id, + ta_pkg, pkg_len); + if (ret && ret != BHE_PACKAGE_EXIST) + return ret; + } + + /* 3: send open session command to VM */ + ret = bh_proxy_open_jta_session(conn_idx, &bin_ta_id, + init_param, init_len, + host_id, ta_pkg, pkg_len); + return ret; +} + +/** + * bh_ta_session_command - send and receive data to/from ta + * + * This function will block until VM replied the response + * + * @host_id: session host id + * @command_id: command id + * @input: message to be sent + * @length: sent message size + * @output: output param to hold pointer to the buffer which + * will contain received message. 
+ * This buffer is allocated by Beihai and freed by the user + * @output_length: input and output param - + * - input: the expected maximum length of the received message + * - output: size of the received message + * @response_code: output param to hold the return value from the applet + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_ta_session_command(u64 host_id, int command_id, + const void *input, size_t length, + void **output, size_t *output_length, + int *response_code) +{ + int ret; + struct bh_command_header *h; + struct bh_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + unsigned int resp_len; + struct bh_session_record *session; + struct bh_resp *resp; + unsigned int conn_idx = CONN_IDX_IVM; + unsigned int len; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + if (!bh_is_initialized()) + return -EFAULT; + + if (!input && length != 0) + return -EINVAL; + + if (!output_length) + return -EINVAL; + + if (output) + *output = NULL; + + session = bh_session_find(conn_idx, host_id); + if (!session) + return -EINVAL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_cmd *)h->cmd; + h->id = BHP_CMD_SENDANDRECV; + cmd->ta_session_id = session->ta_session_id; + cmd->command = command_id; + cmd->outlen = *output_length; + + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), input, length, + host_id, (void **)&resp_hdr); + if (!resp_hdr) + return ret ? ret : -EFAULT; + + if (!ret) + ret = resp_hdr->code; + + session->ta_session_id = resp_hdr->ta_session_id; + resp_len = resp_hdr->h.length - sizeof(*resp_hdr); + + if (ret == BHE_APPLET_SMALL_BUFFER && + resp_len == sizeof(struct bh_resp_bof)) { + struct bh_resp_bof *bof = + (struct bh_resp_bof *)resp_hdr->data; + + if (response_code) + *response_code = be32_to_cpu(bof->response); + + *output_length = be32_to_cpu(bof->request_length); + } + + if (ret) + goto out; + + if (resp_len < sizeof(struct bh_resp)) { + ret = -EBADMSG; + goto out; + } + + resp = (struct bh_resp *)resp_hdr->data; + + if (response_code) + *response_code = be32_to_cpu(resp->response); + + len = resp_len - sizeof(*resp); + + if (*output_length < len) { + ret = -EMSGSIZE; + goto out; + } + + if (len && output) { + *output = kmemdup(resp->buffer, len, GFP_KERNEL); + if (!*output) { + ret = -ENOMEM; + goto out; + } + } + + *output_length = len; + +out: + if (bh_session_is_killed(resp_hdr->code)) + bh_session_remove(conn_idx, session->host_id); + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_ta_session_close - close ta session + * + * This function will block until VM replied the response + * + * @host_id: session host id + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_ta_session_close(u64 host_id) +{ + int ret; + struct bh_command_header *h; + struct bh_close_jta_session_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + struct bh_session_record *session; + unsigned int conn_idx = CONN_IDX_IVM; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + session = bh_session_find(conn_idx, host_id); + if (!session) + return -EINVAL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_close_jta_session_cmd *)h->cmd; + h->id = BHP_CMD_CLOSE_JTASESSION; + cmd->ta_session_id = session->ta_session_id; + + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), NULL, 0, host_id, + (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + /* + * An internal session 
exists, so the session record is not removed; + * the host app should call this API again at an appropriate time. + */ + if (ret != BHE_IAC_EXIST_INTERNAL_SESSION) + bh_session_remove(conn_idx, host_id); + + return ret; +} + +/** + * bh_filter_hdr - filter the message to be sent + * + * Only valid messages are allowed to be sent. + * The filtering is done using the given table of filter functions. + * + * @hdr: message header + * @count: message size + * @ctx: context to send to the filter functions + * @tbl: filter functions table + * + * Return: 0 when message is valid + * <0 otherwise + */ +int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, + const bh_filter_func tbl[]) +{ + int i; + int ret; + + for (i = 0; tbl[i]; i++) { + ret = tbl[i](hdr, count, ctx); + if (ret < 0) + return ret; + } + return 0; +} + +/** + * bh_prep_access_denied_response - prepare package with 'access denied' + * response code. + * + * This function is used to return an in-band error to a user who tries to + * send a message without the needed permissions. + * + * @cmd: the invalid command message + * @res: out param to hold the response header + */ +void bh_prep_access_denied_response(const char *cmd, + struct bh_response_header *res) +{ + struct bh_command_header *cmd_hdr = (struct bh_command_header *)cmd; + + res->h.magic = BH_MSG_RESP_MAGIC; + res->h.length = sizeof(*res); + res->code = BHE_OPERATION_NOT_PERMITTED; + res->seq = cmd_hdr->seq; +} + +/** + * bh_is_initialized - check if bhp is initialized + * + * Return: true when bhp is initialized + * false when bhp is not initialized + */ +bool bh_is_initialized(void) +{ + return atomic_read(&bh_state) == 1; +} + +/** + * bh_init_internal - Beihai init function + * + * The plugin initialization includes initializing the session lists of all + * dal devices (dal fw clients). + */ +void bh_init_internal(void) +{ + unsigned int i; + + if (atomic_add_unless(&bh_state, 1, 1)) + for (i = CONN_IDX_START; i < MAX_CONNECTIONS; i++) + bh_session_list_init(i); +} + +/** + * bh_deinit_internal - Beihai plugin deinit function + * + * The plugin deinitialization includes freeing the session lists of all + * dal devices (dal fw clients). + */ +void bh_deinit_internal(void) +{ + unsigned int i; + + if (atomic_add_unless(&bh_state, -1, 0)) + for (i = CONN_IDX_START; i < MAX_CONNECTIONS; i++) + bh_session_list_free(i); +} diff --git a/drivers/misc/mei/dal/bh_external.h b/drivers/misc/mei/dal/bh_external.h new file mode 100644 index 000000000000..c56809df2560 --- /dev/null +++ b/drivers/misc/mei/dal/bh_external.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL.
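Editor's note (not part of the patch): bh_filter_hdr() above walks a NULL-terminated table of bh_filter_func callbacks and stops at the first one that returns a negative value. The in-tree policy lives in dal_ta_access (listed in the Makefile earlier in this patch); the filter below is a made-up example only, to show the expected shape of a table entry.

/* Illustrative only: permit open-session commands, reject everything else. */
static int allow_open_session_only(const struct bh_command_header *hdr,
				   size_t count, void *ctx)
{
	return bh_msg_is_cmd_open_session(hdr) ? 0 : -EPERM;
}

static const bh_filter_func demo_filter_tbl[] = {
	allow_open_session_only,
	NULL,	/* bh_filter_hdr() stops at the NULL terminator */
};

/* Typical call site: ret = bh_filter_hdr(hdr, count, NULL, demo_filter_tbl); */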
+ * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __BH_EXTERNAL_H +#define __BH_EXTERNAL_H + +#include +#include "bh_cmd_defs.h" + +# define MSG_SEQ_START_NUMBER BIT_ULL(32) + +bool bh_is_initialized(void); +void bh_init_internal(void); +void bh_deinit_internal(void); + +int bh_ta_session_open(u64 *host_id, const char *ta_id, const u8 *ta_pkg, + size_t pkg_len, const u8 *init_param, size_t init_len); + +int bh_ta_session_close(u64 host_id); + +int bh_ta_session_command(u64 host_id, int command_id, const void *input, + size_t length, void **output, size_t *output_length, + int *response_code); + +const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len); + +typedef int (*bh_filter_func)(const struct bh_command_header *hdr, + size_t count, void *ctx); + +int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, + const bh_filter_func tbl[]); + +bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr); + +const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, + size_t count); + +void bh_prep_access_denied_response(const char *cmd, + struct bh_response_header *res); + +bool bh_msg_is_cmd(const void *msg, size_t len); +bool bh_msg_is_response(const void *msg, size_t len); + +#endif /* __BH_EXTERNAL_H */ diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c new file mode 100644 index 000000000000..d9a2c76e8a0b --- /dev/null +++ b/drivers/misc/mei/dal/bh_internal.c @@ -0,0 +1,660 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
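Editor's note (not part of the patch): taken together, the declarations just above describe a simple life cycle: bh_ta_session_open() downloads the TA to the firmware if it is not already present and opens a session, bh_ta_session_command() exchanges one request/response, and bh_ta_session_close() tears the session down. The sketch below strings them together; the TA id string, blob, command id 0 and "ping" payload are placeholders.

/*
 * Illustrative only: the typical open -> send/recv -> close flow.
 * ta_id_str, ta_blob/ta_blob_len and the command payload are placeholders.
 */
static int demo_ta_roundtrip(const char *ta_id_str,
			     const u8 *ta_blob, size_t ta_blob_len)
{
	u64 host_id;
	void *out = NULL;
	size_t out_len = 64;	/* expected maximum response size */
	int applet_status = 0;
	int ret;

	ret = bh_ta_session_open(&host_id, ta_id_str, ta_blob, ta_blob_len,
				 NULL, 0);
	if (ret)
		return ret;	/* <0: host-side error, >0: DAL FW error code */

	ret = bh_ta_session_command(host_id, 0, "ping", 4,
				    &out, &out_len, &applet_status);
	if (!ret) {
		pr_debug("applet replied %d, %zu bytes\n",
			 applet_status, out_len);
		kfree(out);	/* response buffer is allocated for the caller */
	}

	bh_ta_session_close(host_id);
	return ret;
}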
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include "bh_errcode.h" +#include "bh_external.h" +#include "bh_internal.h" +#include "dal_dev.h" + +static u64 bh_host_id_number = MSG_SEQ_START_NUMBER; + +/* + * dal device session records list (array of list per dal device) + * represents opened sessions to dal fw client + */ +static struct list_head dal_dev_session_list[MAX_CONNECTIONS]; + +/** + * bh_get_msg_host_id - increase the shared variable bh_host_id_number by 1 + * and wrap around if needed + * + * Return: the updated host id number + */ +u64 bh_get_msg_host_id(void) +{ + bh_host_id_number++; + /* wrap around. 
sequence_number must + * not be 0, as required by Firmware VM + */ + if (bh_host_id_number == 0) + bh_host_id_number = MSG_SEQ_START_NUMBER; + + return bh_host_id_number; +} + +/** + * bh_session_find - find session record by host id + * + * @conn_idx: DAL client connection idx + * @host_id: session host id + * + * Return: pointer to bh_session_record if found + * NULL if the session wasn't found + */ +struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id) +{ + struct bh_session_record *pos; + struct list_head *session_list = &dal_dev_session_list[conn_idx]; + + list_for_each_entry(pos, session_list, link) { + if (pos->host_id == host_id) + return pos; + } + + return NULL; +} + +/** + * bh_session_add - add session record to list + * + * @conn_idx: fw client connection idx + * @session: session record + */ +void bh_session_add(unsigned int conn_idx, struct bh_session_record *session) +{ + list_add_tail(&session->link, &dal_dev_session_list[conn_idx]); +} + +/** + * bh_session_remove - remove session record from list, and release its memory + * + * @conn_idx: fw client connection idx + * @host_id: session host id + */ +void bh_session_remove(unsigned int conn_idx, u64 host_id) +{ + struct bh_session_record *session; + + session = bh_session_find(conn_idx, host_id); + + if (session) { + list_del(&session->link); + kfree(session); + } +} + +static char skip_buffer[DAL_MAX_BUFFER_SIZE] = {0}; +/** + * bh_transport_recv - receive message from DAL FW, using kdi callback + * 'dal_kdi_recv' + * + * @conn_idx: fw client connection idx + * @buffer: output buffer to hold the received message + * @size: output buffer size + * + * Return: 0 on success + * <0 on failure + */ +static int bh_transport_recv(unsigned int conn_idx, void *buffer, size_t size) +{ + size_t got; + unsigned int count; + char *buf = buffer; + int ret; + + if (conn_idx > DAL_MEI_DEVICE_MAX) + return -ENODEV; + + for (count = 0; count < size; count += got) { + got = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE); + if (buf) + ret = dal_kdi_recv(conn_idx, buf + count, &got); + else + ret = dal_kdi_recv(conn_idx, skip_buffer, &got); + + if (!got) + return -EFAULT; + + if (ret) + return ret; + } + + if (count != size) + return -EFAULT; + + return 0; +} + +/** + * bh_transport_send - send message to DAL FW, + * using kdi callback 'dal_kdi_send' + * + * @conn_idx: fw client connection idx + * @buffer: message to send + * @size: message size + * @host_id: message host id + * + * Return: 0 on success + * <0 on failure + */ +static int bh_transport_send(unsigned int conn_idx, const void *buffer, + unsigned int size, u64 host_id) +{ + size_t chunk_sz; + unsigned int count; + const char *buf = buffer; + int ret; + + if (conn_idx > DAL_MEI_DEVICE_MAX) + return -ENODEV; + + for (count = 0; count < size; count += chunk_sz) { + chunk_sz = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE); + ret = dal_kdi_send(conn_idx, buf + count, chunk_sz, host_id); + if (ret) + return ret; + } + + return 0; +} + +/** + * bh_send_message - build and send command message to DAL + * + * @conn_idx: fw client connection idx + * @hdr: command header + * @hdr_len: command header length + * @data: command data (message content) + * @data_len: data length + * @host_id: message host id + * + * Return: 0 on success + * <0 on failure + */ +static int bh_send_message(unsigned int conn_idx, + void *hdr, unsigned int hdr_len, + const void *data, unsigned int data_len, + u64 host_id) +{ + int ret; + struct bh_command_header *h; + + if (hdr_len <
sizeof(*h) || !hdr) + return -EINVAL; + + h = hdr; + h->h.magic = BH_MSG_CMD_MAGIC; + h->h.length = hdr_len + data_len; + h->seq = host_id; + + ret = bh_transport_send(conn_idx, hdr, hdr_len, host_id); + if (!ret && data_len > 0) + ret = bh_transport_send(conn_idx, data, data_len, host_id); + + return ret; +} + +/** + * bh_recv_message - receive and process a message from DAL + * + * @conn_idx: fw client connection idx + * @response: output param to hold the response + * @out_host_id: output param to hold the received message host id + * it should be identical to the sent message host id + * + * Return: 0 on success + * <0 on failure + */ +static int bh_recv_message(unsigned int conn_idx, void **response, + u64 *out_host_id) +{ + int ret; + char *data; + struct bh_response_header hdr; + + if (!response) + return -EINVAL; + + *response = NULL; + + memset(&hdr, 0, sizeof(hdr)); + ret = bh_transport_recv(conn_idx, &hdr, sizeof(hdr)); + if (ret) + return ret; + + if (hdr.h.length < sizeof(hdr)) + return -EBADMSG; + + /* check magic */ + if (hdr.h.magic != BH_MSG_RESP_MAGIC) + return -EBADMSG; + + data = kzalloc(hdr.h.length, GFP_KERNEL); + if (!data) + return -ENOMEM; + + memcpy(data, &hdr, sizeof(hdr)); + + /* message contains hdr only */ + if (hdr.h.length == sizeof(hdr)) + goto out; + + ret = bh_transport_recv(conn_idx, data + sizeof(hdr), + hdr.h.length - sizeof(hdr)); +out: + if (out_host_id) + *out_host_id = hdr.seq; + + *response = data; + + return ret; +} + +#define MAX_RETRY_COUNT 3 +/** + * bh_request - send request to DAL FW and receive response back + * + * @conn_idx: fw client connection idx + * @cmd_hdr: command header + * @cmd_hdr_len: command header length + * @cmd_data: command data (message content) + * @cmd_data_len: data length + * @host_id: message host id + * @response: output param to hold the response + * + * Return: 0 on success + * <0 on failure + */ +int bh_request(unsigned int conn_idx, void *cmd_hdr, unsigned int cmd_hdr_len, + const void *cmd_data, unsigned int cmd_data_len, + u64 host_id, void **response) +{ + int ret; + u32 retry_count; + u64 res_host_id; + + if (!cmd_hdr || !response) + return -EINVAL; + + ret = bh_send_message(conn_idx, cmd_hdr, cmd_hdr_len, cmd_data, + cmd_data_len, host_id); + if (ret) + return ret; + + for (retry_count = 0; retry_count < MAX_RETRY_COUNT; retry_count++) { + ret = bh_recv_message(conn_idx, response, &res_host_id); + if (ret) { + pr_debug("failed to recv msg = %d\n", ret); + continue; + } + + if (res_host_id != host_id) { + pr_debug("recv message with host_id=%llu != sent host_id=%llu\n", + res_host_id, host_id); + continue; + } + + pr_debug("recv message with try=%d host_id=%llu\n", + retry_count, host_id); + break; + } + + if (retry_count == MAX_RETRY_COUNT) { + pr_err("out of retry attempts\n"); + return -EFAULT; + } + + return ret; +} + +/** + * bh_session_list_free - free session list of given dal fw client + * + * @conn_idx: fw client connection idx + */ +void bh_session_list_free(unsigned int conn_idx) +{ + struct bh_session_record *pos, *next; + struct list_head *session_list = &dal_dev_session_list[conn_idx]; + + list_for_each_entry_safe(pos, next, session_list, link) { + list_del(&pos->link); + kfree(pos); + } + + INIT_LIST_HEAD(session_list); +} + +/** + * bh_session_list_init - initialize session list of given dal fw client + * + * @conn_idx: fw client connection idx + */ +void bh_session_list_init(unsigned int conn_idx) +{ + INIT_LIST_HEAD(&dal_dev_session_list[conn_idx]); +} + +/** + *
bh_proxy_check_svl_jta_blocked_state - check if ta security version + * is blocked + * + * When installing a ta, a minimum security version is given, + * so DAL will block installation of this ta with a lower version + * (even after the ta is uninstalled). + * + * @ta_id: trusted application (ta) id + * + * Return: 0 when ta security version isn't blocked + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id) +{ + int ret; + struct bh_command_header *h; + struct bh_check_svl_jta_blocked_state_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + u64 host_id; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_check_svl_jta_blocked_state_cmd *)h->cmd; + h->id = BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE; + cmd->ta_id = *ta_id; + + host_id = bh_get_msg_host_id(); + ret = bh_request(CONN_IDX_SDM, h, CMD_BUF_SIZE(*cmd), NULL, 0, + host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_proxy_list_jta_packages - get list of ta packages in DAL + * + * @conn_idx: fw client connection idx + * @count: out param to hold the count of ta packages in DAL + * @ta_ids: out param to hold pointer to the ids of ta packages in DAL + * The buffer which holds the ids is allocated in this function + * and freed by the caller + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_list_jta_packages(unsigned int conn_idx, unsigned int *count, + uuid_t **ta_ids) +{ + int ret; + struct bh_command_header h; + struct bh_response_header *resp_hdr; + unsigned int resp_len; + struct bh_resp_list_ta_packages *resp; + uuid_t *outbuf; + unsigned int i; + u64 host_id; + + memset(&h, 0, sizeof(h)); + resp_hdr = NULL; + + if (!bh_is_initialized()) + return -EFAULT; + + if (!count || !ta_ids) + return -EINVAL; + + *ta_ids = NULL; + *count = 0; + + h.id = BHP_CMD_LIST_TA_PACKAGES; + + host_id = bh_get_msg_host_id(); + ret = bh_request(conn_idx, &h, sizeof(h), NULL, 0, host_id, + (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + if (ret) + goto out; + + resp_len = resp_hdr->h.length - sizeof(*resp_hdr); + if (resp_len < sizeof(*resp)) { + ret = -EBADMSG; + goto out; + } + + resp = (struct bh_resp_list_ta_packages *)resp_hdr->data; + if (!resp->count) { + ret = -EBADMSG; + goto out; + } + + if (resp_len != sizeof(uuid_t) * resp->count + sizeof(*resp)) { + ret = -EBADMSG; + goto out; + } + + outbuf = kcalloc(resp->count, sizeof(uuid_t), GFP_KERNEL); + + if (!outbuf) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < resp->count; i++) + outbuf[i] = resp->ta_ids[i]; + + *ta_ids = outbuf; + *count = resp->count; + +out: + kfree(resp_hdr); + return ret; +} + +/** + * bh_proxy_dnload_jta - download ta package to DAL + * + * @conn_idx: fw client connection idx + * @ta_id: trusted application (ta) id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, + const char *ta_pkg, unsigned int pkg_len) +{ + struct bh_command_header *h; + struct bh_download_jta_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + u64 host_id; + int ret; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + if (!ta_pkg || !pkg_len) + return -EINVAL; + + h =
(struct bh_command_header *)cmdbuf; + cmd = (struct bh_download_jta_cmd *)h->cmd; + h->id = BHP_CMD_DOWNLOAD_JAVATA; + cmd->ta_id = *ta_id; + + host_id = bh_get_msg_host_id(); + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), ta_pkg, pkg_len, + host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_proxy_open_jta_session - send open session command + * + * @conn_idx: fw client connection idx + * @ta_id: trusted application (ta) id + * @init_buffer: init parameters to the session (optional) + * @init_len: length of the init parameters + * @host_id: out param to hold the session host id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_open_jta_session(unsigned int conn_idx, + uuid_t *ta_id, + const char *init_buffer, + unsigned int init_len, + u64 *host_id, + const char *ta_pkg, + unsigned int pkg_len) +{ + int ret; + struct bh_command_header *h; + struct bh_open_jta_session_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + struct bh_session_record *session; + + if (!host_id) + return -EINVAL; + + if (!init_buffer && init_len > 0) + return -EINVAL; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_open_jta_session_cmd *)h->cmd; + + session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + session->host_id = bh_get_msg_host_id(); + bh_session_add(conn_idx, session); + + h->id = BHP_CMD_OPEN_JTASESSION; + cmd->ta_id = *ta_id; + + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, + init_len, session->host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + if (ret == BHE_PACKAGE_NOT_FOUND) { + /* + * VM might delete the TA pkg when no live session. + * Download the TA pkg and open session again + */ + ret = bh_proxy_dnload_jta(conn_idx, ta_id, ta_pkg, pkg_len); + if (ret) + goto out_err; + + kfree(resp_hdr); + resp_hdr = NULL; + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, + init_len, session->host_id, + (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + } + + if (ret) + goto out_err; + + session->ta_session_id = resp_hdr->ta_session_id; + + kfree(resp_hdr); + + *host_id = session->host_id; + + return 0; + +out_err: + bh_session_remove(conn_idx, session->host_id); + + return ret; +} diff --git a/drivers/misc/mei/dal/bh_internal.h b/drivers/misc/mei/dal/bh_internal.h new file mode 100644 index 000000000000..b15113986257 --- /dev/null +++ b/drivers/misc/mei/dal/bh_internal.h @@ -0,0 +1,133 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
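To show how the proxy helpers above are meant to be consumed, here is a minimal kernel-space sketch that enumerates the installed TA packages; the function name, the pr_info() loop and the choice of the IVM connection are illustrative assumptions, while the ownership rule (the caller frees the returned id buffer) follows the kernel-doc above.

	/* illustrative only; assumes bh_internal.h and <linux/slab.h> */
	static void example_dump_ta_packages(void)
	{
		unsigned int i, count;
		uuid_t *ta_ids;

		if (bh_proxy_list_jta_packages(CONN_IDX_IVM, &count, &ta_ids))
			return;

		for (i = 0; i < count; i++)
			pr_info("ta[%u]: %pUl\n", i, &ta_ids[i]);

		kfree(ta_ids);		/* id buffer is owned by the caller */
	}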
+ * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __BH_INTERNAL_H +#define __BH_INTERNAL_H + +#include +#include +#include +#include + +#include "bh_cmd_defs.h" + +/** + * struct bh_session_record - session record + * + * @link: link in dal_dev_session_list of dal fw client + * @host_id: message/session host id + * @ta_session_id: session id + */ +struct bh_session_record { + struct list_head link; + u64 host_id; + u64 ta_session_id; +}; + +/* command buffer size */ +#define CMD_BUF_SIZE(cmd) (sizeof(struct bh_command_header) + sizeof(cmd)) + +/** + * enum bh_connection_index - connection index to dal fw clients + * + * @CONN_IDX_START: start idx + * + * @CONN_IDX_IVM: Intel/Issuer Virtual Machine + * @CONN_IDX_SDM: Security Domain Manager + * @CONN_IDX_LAUNCHER: Run Time Manager (Launcher) + * + * @MAX_CONNECTIONS: max connection idx + */ +enum bh_connection_index { + CONN_IDX_START = 0, + + CONN_IDX_IVM = 0, + CONN_IDX_SDM = 1, + CONN_IDX_LAUNCHER = 2, + + MAX_CONNECTIONS +}; + +u64 bh_get_msg_host_id(void); + +struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id); +void bh_session_list_init(unsigned int conn_idx); +void bh_session_list_free(unsigned int conn_idx); +void bh_session_add(unsigned int conn_idx, struct bh_session_record *session); +void bh_session_remove(unsigned int conn_idx, u64 host_id); + +int bh_request(unsigned int conn_idx, + void *hdr, unsigned int hdr_len, + const void *data, unsigned int data_len, + u64 host_id, void **response); + +int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id); + +int bh_proxy_list_jta_packages(unsigned int conn_idx, + unsigned int *count, uuid_t **ta_ids); + +int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, + const char *ta_pkg, unsigned int pkg_len); + +int bh_proxy_open_jta_session(unsigned int conn_idx, uuid_t *ta_id, + const char *init_buffer, unsigned int init_len, + u64 *host_id, const char *ta_pkg, + unsigned int pkg_len); + +#endif /* __BH_INTERNAL_H */ diff --git a/drivers/misc/mei/dal/dal_cdev.c b/drivers/misc/mei/dal/dal_cdev.c new file mode 100644 index 000000000000..c5d6e967d636 --- /dev/null +++ b/drivers/misc/mei/dal/dal_cdev.c @@ -0,0 +1,300 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. 
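The session helpers declared in bh_internal.h above tie the host-side id that tags outgoing messages to the TA session id returned by DAL FW. A minimal sketch of that bookkeeping, with a hypothetical caller and no error handling beyond allocation, assuming the declarations above:

	static int example_track_session(unsigned int conn_idx, u64 ta_session_id)
	{
		struct bh_session_record *s;

		s = kzalloc(sizeof(*s), GFP_KERNEL);
		if (!s)
			return -ENOMEM;

		s->host_id = bh_get_msg_host_id();	/* tags the outgoing messages */
		s->ta_session_id = ta_session_id;	/* handed back by DAL FW */
		bh_session_add(conn_idx, s);

		/* ... later the record is found by host id and released ... */
		if (bh_session_find(conn_idx, s->host_id))
			bh_session_remove(conn_idx, s->host_id);

		return 0;
	}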
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dal_dev.h" +#include "dal_cdev.h" + +/* KDI user space devices major and minor numbers */ +static dev_t dal_devt; + +/** + * dal_dev_open - dal cdev open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_dev_open(struct inode *inode, struct file *fp) +{ + int ret; + struct dal_device *ddev; + + ddev = container_of(inode->i_cdev, struct dal_device, cdev); + if (!ddev) + return -ENODEV; + + /* single open */ + if (test_and_set_bit(DAL_DEV_OPENED, &ddev->status)) + return -EBUSY; + + ret = dal_dc_setup(ddev, DAL_INTF_CDEV); + if (ret) + goto err; + + fp->private_data = ddev->clients[DAL_INTF_CDEV]; + + return nonseekable_open(inode, fp); + +err: + clear_bit(DAL_DEV_OPENED, &ddev->status); + return ret; +} + +/** + * dal_dev_release - dal cdev release function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_dev_release(struct inode *inode, struct file *fp) +{ + struct dal_client *dc = fp->private_data; + struct dal_device *ddev = dc->ddev; + + if (mutex_lock_interruptible(&ddev->context_lock)) { + dev_dbg(&ddev->dev, "signal interrupted\n"); + return -ERESTARTSYS; + } + + dal_dc_destroy(ddev, dc->intf); + + mutex_unlock(&ddev->context_lock); + + clear_bit(DAL_DEV_OPENED, &ddev->status); + + return 0; +} + +/** + * dal_dev_read - dal cdev read function + * + * @fp: pointer to file structure + * @buf: pointer to user buffer + * @count: buffer length + * @off: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_dev_read(struct file *fp, char __user *buf, + size_t count, 
loff_t *off) +{ + struct dal_client *dc = fp->private_data; + struct dal_device *ddev = dc->ddev; + int ret; + size_t len; + unsigned int copied; + + ret = dal_wait_for_read(dc); + + if (ret != 0) + return ret; + + if (kfifo_is_empty(&dc->read_queue)) + return 0; + + ret = kfifo_out(&dc->read_queue, &len, sizeof(len)); + if (len > count) { + dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %zu\n", + len, count); + return -EFAULT; + } + + ret = kfifo_to_user(&dc->read_queue, buf, count, &copied); + if (ret) { + dev_dbg(&ddev->dev, "copy_to_user() failed\n"); + return -EFAULT; + } + + /*FIXME: need to drop rest of the data */ + + return copied; +} + +/** + * dal_dev_write - dal cdev write function + * + * @fp: pointer to file structure + * @buff: pointer to user buffer + * @count: buffer length + * @off: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_dev_write(struct file *fp, const char __user *buff, + size_t count, loff_t *off) +{ + struct dal_device *ddev; + struct dal_client *dc = fp->private_data; + + ddev = dc->ddev; + + if (count > DAL_MAX_BUFFER_SIZE) { + dev_dbg(&ddev->dev, "count is too big, count = %zu\n", count); + return -EMSGSIZE; + } + + if (count == 0) + return 0; + + if (!buff) + return -EINVAL; + + if (copy_from_user(dc->write_buffer, buff, count)) + return -EFAULT; + + return dal_write(dc, count, 0); +} + +static const struct file_operations mei_dal_fops = { + .owner = THIS_MODULE, + .open = dal_dev_open, + .release = dal_dev_release, + .read = dal_dev_read, + .write = dal_dev_write, + .llseek = no_llseek, +}; + +/** + * dal_dev_del - delete dal cdev + * + * @ddev: dal device + */ +void dal_dev_del(struct dal_device *ddev) +{ + cdev_del(&ddev->cdev); +} + +/** + * dal_dev_setup - initialize dal cdev + * + * @ddev: dal device + */ +void dal_dev_setup(struct dal_device *ddev) +{ + dev_t devno; + + cdev_init(&ddev->cdev, &mei_dal_fops); + devno = MKDEV(MAJOR(dal_devt), ddev->device_id); + ddev->cdev.owner = THIS_MODULE; + ddev->dev.devt = devno; + ddev->cdev.kobj.parent = &ddev->dev.kobj; +} + +/** + * dal_dev_add - add dal cdev + * + * @ddev: dal device + * + * Return: 0 on success + * <0 on failure + */ +int dal_dev_add(struct dal_device *ddev) +{ + return cdev_add(&ddev->cdev, ddev->dev.devt, 1); +} + +/** + * dal_dev_init - allocate dev_t number + * + * Return: 0 on success + * <0 on failure + */ +int __init dal_dev_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&dal_devt, 0, DAL_MEI_DEVICE_MAX, "dal"); + if (ret < 0) + pr_err("failed allocate chrdev region = %d\n", ret); + + return ret; +} + +/** + * dal_dev_exit - unregister allocated dev_t number + */ +void dal_dev_exit(void) +{ + unregister_chrdev_region(dal_devt, DAL_MEI_DEVICE_MAX); +} diff --git a/drivers/misc/mei/dal/dal_cdev.h b/drivers/misc/mei/dal/dal_cdev.h new file mode 100644 index 000000000000..16b0cc6fb0a4 --- /dev/null +++ b/drivers/misc/mei/dal/dal_cdev.h @@ -0,0 +1,68 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
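From user space, the character device implemented above is expected to surface as one node per DAL client (for example /dev/dal0 for the IVM client; the node path and udev policy are assumptions, not part of this patch). A client writes one complete BH command message, bounded by DAL_MAX_BUFFER_SIZE, and then reads back the queued response; a hedged user-space sketch:

	#include <fcntl.h>
	#include <unistd.h>

	int example_roundtrip(const void *cmd, size_t cmd_len,
			      void *resp, size_t resp_sz)
	{
		int fd = open("/dev/dal0", O_RDWR);	/* assumed node name */
		ssize_t n;

		if (fd < 0)
			return -1;

		/* whole command in a single write, as the driver expects */
		if (write(fd, cmd, cmd_len) != (ssize_t)cmd_len) {
			close(fd);
			return -1;
		}

		n = read(fd, resp, resp_sz);	/* blocks until DAL FW answers */
		close(fd);

		return n < 0 ? -1 : 0;
	}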
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __MEI_DAL_DEV_H__ +#define __MEI_DAL_DEV_H__ +void dal_dev_del(struct dal_device *ddev); +void dal_dev_setup(struct dal_device *ddev); +int dal_dev_add(struct dal_device *ddev); +int __init dal_dev_init(void); +void dal_dev_exit(void); +#endif /* __MEI_DAL_DEV_H__ */ diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c new file mode 100644 index 000000000000..af1a1a31e9ef --- /dev/null +++ b/drivers/misc/mei/dal/dal_class.c @@ -0,0 +1,888 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bh_external.h" +#include "bh_cmd_defs.h" +#include "bh_errcode.h" +#include "dal_dev.h" +#include "dal_cdev.h" + +/* + * this class contains the 3 mei_cl_device, ivm, sdm, rtm. + * it is initialized during dal_probe and is used by the kernel space kdi + * to send/recv data to/from mei. + * + * this class must be initialized before the kernel space kdi uses it. + */ +struct class *dal_class; + +/** + * dal_dc_print - print client data for debug purpose + * + * @dev: device structure + * @dc: dal client + */ +void dal_dc_print(struct device *dev, struct dal_client *dc) +{ + if (!dc) { + dev_dbg(dev, "dc is null\n"); + return; + } + + dev_dbg(dev, "dc: intf = %d. expected to send: %d, sent: %d. 
expected to receive: %d, received: %d\n", + dc->intf, + dc->expected_msg_size_to_fw, + dc->bytes_sent_to_fw, + dc->expected_msg_size_from_fw, + dc->bytes_rcvd_from_fw); +} + +/** + * dal_dc_update_read_state - update client read state + * + * @dc : dal client + * @len: received message length + * + * Locking: called under "ddev->context_lock" lock + */ +static void dal_dc_update_read_state(struct dal_client *dc, ssize_t len) +{ + struct dal_device *ddev = dc->ddev; + + /* check BH msg magic; if it exists, this is the header */ + if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { + struct bh_response_header *hdr = + (struct bh_response_header *)dc->ddev->bh_fw_msg.msg; + + dc->expected_msg_size_from_fw = hdr->h.length; + dev_dbg(&ddev->dev, "expected_msg_size_from_fw = %d bytes read = %zd\n", + dc->expected_msg_size_from_fw, len); + + /* clear data from the past. */ + dc->bytes_rcvd_from_fw = 0; + } + + /* update number of bytes rcvd */ + dc->bytes_rcvd_from_fw += len; +} + +/** + * dal_get_client_by_sequence_number - find the client interface to which + * the received message should be routed + * + * @ddev : dal device + * + * Return: kernel space interface or user space interface + */ +static enum dal_intf dal_get_client_by_sequence_number(struct dal_device *ddev) +{ + struct bh_response_header *head; + + if (!ddev->clients[DAL_INTF_KDI]) + return DAL_INTF_CDEV; + + head = (struct bh_response_header *)ddev->bh_fw_msg.msg; + + dev_dbg(&ddev->dev, "msg seq = %llu\n", head->seq); + + if (head->seq == ddev->clients[DAL_INTF_KDI]->seq) + return DAL_INTF_KDI; + + return DAL_INTF_CDEV; +} + +/** + * dal_recv_cb - callback to receive message from DAL FW over mei + * + * @cldev : mei client device + */ +static void dal_recv_cb(struct mei_cl_device *cldev) +{ + struct dal_device *ddev; + struct dal_client *dc; + enum dal_intf intf; + ssize_t len; + ssize_t ret; + bool is_unexpected_msg = false; + + ddev = mei_cldev_get_drvdata(cldev); + + /* + * read the msg from MEI + */ + len = mei_cldev_recv(cldev, ddev->bh_fw_msg.msg, DAL_MAX_BUFFER_SIZE); + if (len < 0) { + dev_err(&cldev->dev, "recv failed %zd\n", len); + return; + } + + /* + * lock to prevent read from MEI while writing to MEI and to + * deal with just one msg at the same time + */ + mutex_lock(&ddev->context_lock); + + /* save msg len */ + ddev->bh_fw_msg.len = len; + + /* set to which interface the msg should be sent */ + if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) { + intf = dal_get_client_by_sequence_number(ddev); + dev_dbg(&ddev->dev, "recv_cb(): Client set by sequence number\n"); + dc = ddev->clients[intf]; + } else if (!ddev->current_read_client) { + intf = DAL_INTF_CDEV; + dev_dbg(&ddev->dev, "recv_cb(): EXTRA msg received - curr == NULL\n"); + dc = ddev->clients[intf]; + is_unexpected_msg = true; + } else { + dc = ddev->current_read_client; + dev_dbg(&ddev->dev, "recv_cb(): FRAGMENT msg received - curr != NULL\n"); + } + + /* save the current read client */ + ddev->current_read_client = dc; + dev_dbg(&cldev->dev, "read client type %d data from mei client seq = %llu\n", + dc->intf, dc->seq); + + /* + * save new msg in queue; + * if the queue is full, new messages will be dropped + */ + ret = kfifo_in(&dc->read_queue, &ddev->bh_fw_msg.len, sizeof(len)); + ret += kfifo_in(&dc->read_queue, ddev->bh_fw_msg.msg, len); + if (ret < len + sizeof(len)) + dev_dbg(&ddev->dev, "queue is full - MSG THROWN\n"); + + dal_dc_update_read_state(dc, len); + + /* + * To clear the current client we check if the whole msg was received + * for the current client + */ +
if (is_unexpected_msg || + dc->bytes_rcvd_from_fw == dc->expected_msg_size_from_fw) { + dev_dbg(&ddev->dev, "recv_cb(): setting CURRENT_READER to NULL\n"); + ddev->current_read_client = NULL; + } + /* wake up all clients waiting for read or write */ + wake_up_interruptible(&ddev->wq); + + mutex_unlock(&ddev->context_lock); + dev_dbg(&cldev->dev, "recv_cb(): unlock\n"); +} + +/** + * dal_mei_enable - enable mei cldev + * + * @ddev: dal device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_mei_enable(struct dal_device *ddev) +{ + int ret; + + ret = mei_cldev_enable(ddev->cldev); + if (ret < 0) { + dev_err(&ddev->cldev->dev, "mei_cl_enable_device() failed with ret = %d\n", + ret); + return ret; + } + + /* save pointer to the context in the device */ + mei_cldev_set_drvdata(ddev->cldev, ddev); + + /* register to mei bus callbacks */ + ret = mei_cldev_register_rx_cb(ddev->cldev, dal_recv_cb); + if (ret) { + dev_err(&ddev->cldev->dev, "mei_cl_register_event_cb() failed ret = %d\n", + ret); + mei_cldev_disable(ddev->cldev); + } + + return ret; +} + +/** + * dal_wait_for_write - wait until the dal client is the first writer + * in writers queue + * + * @ddev: dal device + * @dc: dal client + * + * Return: 0 on success + * -ERESTARTSYS when wait was interrupted + * -ENODEV when the device was removed + */ +static int dal_wait_for_write(struct dal_device *ddev, struct dal_client *dc) +{ + if (wait_event_interruptible(ddev->wq, + list_first_entry(&ddev->writers, + struct dal_client, + wrlink) == dc || + ddev->is_device_removed)) { + return -ERESTARTSYS; + } + + /* if the device was removed indicate that to the caller */ + if (ddev->is_device_removed) + return -ENODEV; + + return 0; +} + +/** + * dal_send_error_access_denied - put 'access denied' message + * into the client read queue. In-band error message. + * + * @dc: dal client + * + * Return: 0 on success + * -ENOMEM when client read queue is full + * + * Locking: called under "ddev->write_lock" lock + */ +static int dal_send_error_access_denied(struct dal_client *dc) +{ + struct dal_device *ddev = dc->ddev; + struct bh_response_header res; + size_t len; + int ret; + + mutex_lock(&ddev->context_lock); + + bh_prep_access_denied_response(dc->write_buffer, &res); + len = sizeof(res); + + ret = kfifo_in(&dc->read_queue, &len, sizeof(len)); + if (ret < sizeof(len)) { + ret = -ENOMEM; + goto out; + } + + ret = kfifo_in(&dc->read_queue, &res, len); + if (ret < len) { + ret = -ENOMEM; + goto out; + } + ret = 0; + +out: + mutex_unlock(&ddev->context_lock); + return ret; +} + +/** + * dal_validate_access - validate that the access is permitted. 
+ * + * in case of open session command, validate that the client has the permissions + * to open session to the requested ta + * + * @hdr: command header + * @count: message size + * @ctx: context (not used) + * + * Return: 0 when command is permitted + * -EINVAL when message is invalid + * -EPERM when access is not permitted + * + * Locking: called under "ddev->write_lock" lock + */ +static int dal_validate_access(const struct bh_command_header *hdr, + size_t count, void *ctx) +{ + struct dal_client *dc = ctx; + struct dal_device *ddev = dc->ddev; + const uuid_t *ta_id; + + if (!bh_msg_is_cmd_open_session(hdr)) + return 0; + + ta_id = bh_open_session_ta_id(hdr, count); + if (!ta_id) + return -EINVAL; + + return dal_access_policy_allowed(ddev, ta_id, dc); +} + +/** + * dal_is_kdi_msg - check if sequence is in kernel space sequence range + * + * Each interface (kernel space and user space) has different range of + * sequence number. This function checks if given number is in kernel space + * sequence range + * + * @hdr: command header + * + * Return: true when seq fits kernel space intf + * false when seq fits user space intf + */ +static bool dal_is_kdi_msg(const struct bh_command_header *hdr) +{ + return hdr->seq >= MSG_SEQ_START_NUMBER; +} + +/** + * dal_validate_seq - validate that message sequence fits client interface, + * prevent user space client to use kernel space sequence + * + * @hdr: command header + * @count: message size + * @ctx: context - dal client + * + * Return: 0 when sequence match + * -EPERM when user space client uses kernel space sequence + * + * Locking: called under "ddev->write_lock" lock + */ +static int dal_validate_seq(const struct bh_command_header *hdr, + size_t count, void *ctx) +{ + struct dal_client *dc = ctx; + + if (dc->intf != DAL_INTF_KDI && dal_is_kdi_msg(hdr)) + return -EPERM; + + return 0; +} + +/* + * dal_write_filter_tbl - filter functions to validate that the message + * is being sent is valid, and the user client + * has the permissions to send it + */ +static const bh_filter_func dal_write_filter_tbl[] = { + dal_validate_access, + dal_validate_seq, + NULL, +}; + +/** + * dal_write - write message to DAL FW over mei + * + * @dc: dal client + * @count: message size + * @seq: message sequence (if client is kernel space client) + * + * Return: >=0 data length on success + * <0 on failure + */ +ssize_t dal_write(struct dal_client *dc, size_t count, u64 seq) +{ + struct dal_device *ddev = dc->ddev; + struct device *dev; + ssize_t wr; + ssize_t ret; + enum dal_intf intf = dc->intf; + + dev = &ddev->dev; + + dev_dbg(dev, "client interface %d\n", intf); + dal_dc_print(dev, dc); + + /* lock for adding new client that want to write to fifo */ + mutex_lock(&ddev->write_lock); + /* update client on latest msg seq number*/ + dc->seq = seq; + dev_dbg(dev, "current_write_client seq = %llu\n", dc->seq); + + /* put dc in the writers queue if not already */ + if (list_first_entry_or_null(&ddev->writers, + struct dal_client, wrlink) != dc) { + /* adding client to write queue - this is the first fragment */ + const struct bh_command_header *hdr; + + hdr = bh_msg_cmd_hdr(dc->write_buffer, count); + if (!hdr) { + dev_dbg(dev, "expected cmd hdr at first fragment\n"); + ret = -EINVAL; + goto out; + } + ret = bh_filter_hdr(hdr, count, dc, dal_write_filter_tbl); + if (ret == -EPERM) { + ret = dal_send_error_access_denied(dc); + ret = ret ? 
ret : count; + } + if (ret) + goto out; + + dc->bytes_sent_to_fw = 0; + dc->expected_msg_size_to_fw = hdr->h.length; + + list_add_tail(&dc->wrlink, &ddev->writers); + } + + /* wait for current writer to finish his write session */ + mutex_unlock(&ddev->write_lock); + ret = dal_wait_for_write(ddev, dc); + mutex_lock(&ddev->write_lock); + if (ret < 0) + goto out; + + dev_dbg(dev, "before mei_cldev_send - client type %d\n", intf); + + /* send msg via MEI */ + wr = mei_cldev_send(ddev->cldev, dc->write_buffer, count); + if (wr != count) { + /* ENODEV can be issued upon internal reset */ + if (wr != -ENODEV) { + dev_err(dev, "mei_cl_send() failed, write_bytes != count (%zd != %zu)\n", + wr, count); + ret = -EFAULT; + goto out; + } + /* if DAL FW client is disconnected, try to reconnect */ + dev_dbg(dev, "try to reconnect to DAL FW cl\n"); + ret = mei_cldev_disable(ddev->cldev); + if (ret < 0) { + dev_err(&ddev->cldev->dev, "failed to disable mei cl [%zd]\n", + ret); + goto out; + } + ret = dal_mei_enable(ddev); + if (ret < 0) + dev_err(&ddev->cldev->dev, "failed to reconnect to DAL FW client [%zd]\n", + ret); + else + ret = -EAGAIN; + + goto out; + } + + dev_dbg(dev, "wrote %zu bytes to fw - client type %d\n", wr, intf); + + /* update client byte sent */ + dc->bytes_sent_to_fw += count; + ret = wr; + + if (dc->bytes_sent_to_fw != dc->expected_msg_size_to_fw) { + dev_dbg(dev, "expecting to write more data to DAL FW - client type %d\n", + intf); + goto write_more; + } +out: + /* remove current dc from the queue */ + list_del_init(&dc->wrlink); + if (list_empty(&ddev->writers)) + wake_up_interruptible(&ddev->wq); + +write_more: + mutex_unlock(&ddev->write_lock); + return ret; +} + +/** + * dal_wait_for_read - wait until the client (dc) will have data + * in his read queue + * + * @dc: dal client + * + * Return: 0 on success + * -ENODEV when the device was removed + */ +int dal_wait_for_read(struct dal_client *dc) +{ + struct dal_device *ddev = dc->ddev; + struct device *dev = &ddev->dev; + + dal_dc_print(dev, dc); + + dev_dbg(dev, "before wait_for_data_to_read() - client type %d kfifo status %d\n", + dc->intf, kfifo_is_empty(&dc->read_queue)); + + /* wait until there is data in the read_queue */ + wait_event_interruptible(ddev->wq, !kfifo_is_empty(&dc->read_queue) || + ddev->is_device_removed); + + dev_dbg(dev, "after wait_for_data_to_read() - client type %d\n", + dc->intf); + + /* FIXME: use reference counter */ + if (ddev->is_device_removed) { + dev_dbg(dev, "woke up, device was removed\n"); + return -ENODEV; + } + + return 0; +} + +/** + * dal_dc_destroy - destroy dal client + * + * @ddev: dal device + * @intf: device interface + * + * Locking: called under "ddev->context_lock" lock + */ +void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf) +{ + struct dal_client *dc; + + dc = ddev->clients[intf]; + if (!dc) + return; + + kfifo_free(&dc->read_queue); + kfree(dc); + ddev->clients[intf] = NULL; +} + +/** + * dal_dc_setup - initialize dal client + * + * @ddev: dal device + * @intf: device interface + * + * Return: 0 on success + * -EINVAL when client is already initialized + * -ENOMEM on memory allocation failure + */ +int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf) +{ + int ret; + struct dal_client *dc; + size_t readq_sz; + + if (ddev->clients[intf]) { + dev_err(&ddev->dev, "client already set\n"); + return -EINVAL; + } + + dc = kzalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) + return -ENOMEM; + + /* each buffer contains data and length */ + readq_sz = (DAL_MAX_BUFFER_SIZE 
+ sizeof(ddev->bh_fw_msg.len)) * + DAL_BUFFERS_PER_CLIENT; + ret = kfifo_alloc(&dc->read_queue, readq_sz, GFP_KERNEL); + if (ret) { + kfree(dc); + return ret; + } + + dc->intf = intf; + dc->ddev = ddev; + INIT_LIST_HEAD(&dc->wrlink); + ddev->clients[intf] = dc; + return 0; +} + +/** + * dal_dev_match - match function to find dal device + * + * Used to get dal device from dal_class by device id + * + * @dev: device structure + * @data: the device id + * + * Return: 1 on match + * 0 on mismatch + */ +static int dal_dev_match(struct device *dev, const void *data) +{ + struct dal_device *ddev; + const enum dal_dev_type *device_id = + (enum dal_dev_type *)data; + + ddev = container_of(dev, struct dal_device, dev); + + return ddev->device_id == *device_id; +} + +/** + * dal_find_dev - get dal device from dal_class by device id + * + * @device_id: device id + * + * Return: pointer to the requested device + * NULL if the device wasn't found + */ +struct device *dal_find_dev(enum dal_dev_type device_id) +{ + return class_find_device(dal_class, NULL, &device_id, dal_dev_match); +} + +/** + * dal_remove - dal remove callback in mei_cl_driver + * + * @cldev: mei client device + * + * Return: 0 + */ +static int dal_remove(struct mei_cl_device *cldev) +{ + struct dal_device *ddev = mei_cldev_get_drvdata(cldev); + + if (!ddev) + return 0; + + dal_dev_del(ddev); + + ddev->is_device_removed = true; + /* make sure the above is set */ + smp_mb(); + /* wakeup write waiters so we can unload */ + if (waitqueue_active(&ddev->wq)) + wake_up_interruptible(&ddev->wq); + + device_del(&ddev->dev); + + mei_cldev_set_drvdata(cldev, NULL); + + mei_cldev_disable(cldev); + + put_device(&ddev->dev); + + return 0; +} +/** + * dal_device_release - dal release callback in dev structure + * + * @dev: device structure + */ +static void dal_device_release(struct device *dev) +{ + struct dal_device *ddev = to_dal_device(dev); + + dal_access_list_free(ddev); + kfree(ddev->bh_fw_msg.msg); + kfree(ddev); +} + +/** + * dal_probe - dal probe callback in mei_cl_driver + * + * @cldev: mei client device + * @id: mei client device id + * + * Return: 0 on success + * <0 on failure + */ +static int dal_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct dal_device *ddev; + struct device *pdev = &cldev->dev; + int ret; + + ddev = kzalloc(sizeof(*ddev), GFP_KERNEL); + if (!ddev) + return -ENOMEM; + + /* initialize the mutex and wait queue */ + mutex_init(&ddev->context_lock); + mutex_init(&ddev->write_lock); + init_waitqueue_head(&ddev->wq); + INIT_LIST_HEAD(&ddev->writers); + ddev->cldev = cldev; + ddev->device_id = id->driver_info; + + ddev->dev.parent = pdev; + ddev->dev.class = dal_class; + ddev->dev.release = dal_device_release; + dev_set_name(&ddev->dev, "dal%d", ddev->device_id); + + dal_dev_setup(ddev); + + ret = device_register(&ddev->dev); + if (ret) { + dev_err(pdev, "unable to register device\n"); + goto err; + } + + ddev->bh_fw_msg.msg = kzalloc(DAL_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!ddev->bh_fw_msg.msg) { + ret = -ENOMEM; + goto err; + } + + ret = dal_access_list_init(ddev); + if (ret) + goto err; + + ret = dal_mei_enable(ddev); + if (ret < 0) + goto err; + + ret = dal_dev_add(ddev); + if (ret) + goto err; + + return 0; + +err: + dal_remove(cldev); + + return ret; +} + +/* DAL FW HECI client GUIDs */ +#define IVM_UUID UUID_LE(0x3c4852d6, 0xd47b, 0x4f46, \ + 0xb0, 0x5e, 0xb5, 0xed, 0xc1, 0xaa, 0x44, 0x0e) +#define SDM_UUID UUID_LE(0xdba4d603, 0xd7ed, 0x4931, \ + 0x88, 0x23, 0x17, 0xad, 0x58, 
0x57, 0x05, 0xd5) +#define RTM_UUID UUID_LE(0x5565a099, 0x7fe2, 0x45c1, \ + 0xa2, 0x2b, 0xd7, 0xe9, 0xdf, 0xea, 0x9a, 0x2e) + +#define DAL_DEV_ID(__uuid, __device_type) \ + {.uuid = __uuid, \ + .version = MEI_CL_VERSION_ANY, \ + .driver_info = __device_type} + +/* + * dal_device_id - ids of dal FW devices, + * for all 3 dal FW clients (IVM, SDM and RTM) + */ +static const struct mei_cl_device_id dal_device_id[] = { + DAL_DEV_ID(IVM_UUID, DAL_MEI_DEVICE_IVM), + DAL_DEV_ID(SDM_UUID, DAL_MEI_DEVICE_SDM), + DAL_DEV_ID(RTM_UUID, DAL_MEI_DEVICE_RTM), + /* required last entry */ + { } +}; +MODULE_DEVICE_TABLE(mei, dal_device_id); + +static struct mei_cl_driver dal_driver = { + .id_table = dal_device_id, + .name = KBUILD_MODNAME, + + .probe = dal_probe, + .remove = dal_remove, +}; + +/** + * mei_dal_exit - module exit function + */ +static void __exit mei_dal_exit(void) +{ + mei_cldev_driver_unregister(&dal_driver); + + dal_dev_exit(); + + dal_kdi_exit(); + + class_destroy(dal_class); +} + +/** + * mei_dal_init - module init function + * + * Return: 0 on success + * <0 on failure + */ +static int __init mei_dal_init(void) +{ + int ret; + + dal_class = class_create(THIS_MODULE, "dal"); + if (IS_ERR(dal_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(dal_class); + } + + ret = dal_dev_init(); + if (ret < 0) + goto err_class; + + ret = dal_kdi_init(); + if (ret) + goto err_dev; + + ret = mei_cldev_driver_register(&dal_driver); + if (ret < 0) { + pr_err("mei_cl_driver_register failed with status = %d\n", ret); + goto err; + } + + return 0; + +err: + dal_kdi_exit(); +err_dev: + dal_dev_exit(); +err_class: + class_destroy(dal_class); + return ret; +} + +module_init(mei_dal_init); +module_exit(mei_dal_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) MEI Dynamic Application Loader (DAL)"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h new file mode 100644 index 000000000000..2ddb490ad560 --- /dev/null +++ b/drivers/misc/mei/dal/dal_dev.h @@ -0,0 +1,219 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _DAL_KDI_H_ +#define _DAL_KDI_H_ + +#include +#include +#include +#include + +#define DAL_MAX_BUFFER_SIZE 4096 +#define DAL_BUFFERS_PER_CLIENT 10 + +#define DAL_CLIENTS_PER_DEVICE 2 + +extern struct class *dal_class; + +/** + * enum dal_intf - dal interface type + * + * @DAL_INTF_KDI: (kdi) kernel space interface + * @DAL_INTF_CDEV: char device interface + */ +enum dal_intf { + DAL_INTF_KDI, + DAL_INTF_CDEV, +}; + +/** + * enum dal_dev_type - devices that are exposed to userspace + * + * @DAL_MEI_DEVICE_IVM: IVM - Intel/Issuer Virtual Machine + * @DAL_MEI_DEVICE_SDM: SDM - Security Domain Manager + * @DAL_MEI_DEVICE_RTM: RTM - Run Time Manager (Launcher) + * + * @DAL_MEI_DEVICE_MAX: max dal device type + */ +enum dal_dev_type { + DAL_MEI_DEVICE_IVM, + DAL_MEI_DEVICE_SDM, + DAL_MEI_DEVICE_RTM, + + DAL_MEI_DEVICE_MAX +}; + +/** + * struct dal_client - host client + * + * @ddev: dal parent device + * @wrlink: link in the writers list + * @read_queue: queue of received messages from DAL FW + * @write_buffer: buffer to send to DAL FW + * @intf: client interface - user space or kernel space + * + * @seq: the sequence number of the last message sent (in kernel space API only) + * When a message is received from DAL FW, we use this sequence number + * to decide which client should get the message. If the sequence + * number of the message is equals to the kernel space sequence number, + * the kernel space client should get the message. + * Otherwise the user space client will get it. + * @expected_msg_size_from_fw: the expected msg size from DALFW + * @expected_msg_size_to_fw: the expected msg size that will be sent to DAL FW + * @bytes_rcvd_from_fw: number of bytes that were received from DAL FW + * @bytes_sent_to_fw: number of bytes that were sent to DAL FW + */ +struct dal_client { + struct dal_device *ddev; + struct list_head wrlink; + struct kfifo read_queue; + char write_buffer[DAL_MAX_BUFFER_SIZE]; + enum dal_intf intf; + + u64 seq; + u32 expected_msg_size_from_fw; + u32 expected_msg_size_to_fw; + u32 bytes_rcvd_from_fw; + u32 bytes_sent_to_fw; +}; + +/** + * struct dal_bh_msg - msg received from DAL FW. 
+ * + * @len: message length + * @msg: message buffer + */ +struct dal_bh_msg { + size_t len; + char *msg; +}; + +/** + * struct dal_device - DAL private device struct. + * each DAL device has a context (i.e IVM, SDM, RTM) + * + * @dev: device on a bus + * @cdev: character device + * @status: dal device status + * + * @context_lock: big device lock + * @write_lock: lock over write list + * @wq: dal clients wait queue. When client wants to send or receive message, + * he waits in this queue until he is ready + * @writers: write pending list + * @clients: clients on this device (userspace and kernel space) + * @bh_fw_msg: message which was received from DAL FW + * @current_read_client: current reading client (which receives message from + * DAL FW) + * + * @cldev: the MEI CL device which corresponds to a single DAL FW HECI client + * + * @is_device_removed: device removed flag + * + * @device_id: DAL device type + */ +struct dal_device { + struct device dev; + struct cdev cdev; +#define DAL_DEV_OPENED 0 + unsigned long status; + + struct mutex context_lock; /* device lock */ + struct mutex write_lock; /* write lock */ + wait_queue_head_t wq; + struct list_head writers; + struct dal_client *clients[DAL_CLIENTS_PER_DEVICE]; + struct dal_bh_msg bh_fw_msg; + struct dal_client *current_read_client; + + struct mei_cl_device *cldev; + + bool is_device_removed; + + int device_id; +}; + +#define to_dal_device(d) container_of(d, struct dal_device, dev) + +ssize_t dal_write(struct dal_client *dc, size_t count, u64 seq); +int dal_wait_for_read(struct dal_client *dc); + +struct device *dal_find_dev(enum dal_dev_type device_id); + +void dal_dc_print(struct device *dev, struct dal_client *dc); +int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf); +void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf); + +int dal_kdi_send(unsigned int handle, const unsigned char *buf, + size_t len, u64 seq); +int dal_kdi_recv(unsigned int handle, unsigned char *buf, size_t *count); +int dal_kdi_init(void); +void dal_kdi_exit(void); + +int dal_access_policy_add(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +int dal_access_policy_remove(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +int dal_access_policy_allowed(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +void dal_access_list_free(struct dal_device *ddev); +int dal_access_list_init(struct dal_device *ddev); + +#endif /* _DAL_KDI_H_ */ diff --git a/drivers/misc/mei/dal/dal_kdi.c b/drivers/misc/mei/dal/dal_kdi.c new file mode 100644 index 000000000000..bed43c96b80b --- /dev/null +++ b/drivers/misc/mei/dal/dal_kdi.c @@ -0,0 +1,610 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bh_external.h" +#include "bh_errcode.h" +#include "acp_parser.h" +#include "dal_dev.h" + +static DEFINE_MUTEX(dal_kdi_lock); + +/** + * to_kdi_err- converts error number to kdi error + * + * Beihai errors (>0) are converted to DAL_KDI errors (those errors came + * from DAL FW) + * system errors and success value (<=0) stay as is + * + * @err: error code to convert (either bh err or system err) + * + * Return: the converted kdi error number or system error + */ +static int to_kdi_err(int err) +{ + if (err) + pr_debug("got error: %d\n", err); + + if (err <= 0) + return err; + + /* err > 0: is error from DAL FW */ + switch (err) { + case BPE_INTERNAL_ERROR: + return DAL_KDI_STATUS_INTERNAL_ERROR; + case BPE_INVALID_PARAMS: + case BHE_INVALID_PARAMS: + return DAL_KDI_STATUS_INVALID_PARAMS; + case BHE_INVALID_HANDLE: + return DAL_KDI_STATUS_INVALID_HANDLE; + case BPE_NOT_INIT: + return DAL_KDI_STATUS_NOT_INITIALIZED; + case BPE_OUT_OF_MEMORY: + case BHE_OUT_OF_MEMORY: + return DAL_KDI_STATUS_OUT_OF_MEMORY; + case BHE_INSUFFICIENT_BUFFER: + case BHE_APPLET_SMALL_BUFFER: + return DAL_KDI_STATUS_BUFFER_TOO_SMALL; + case BPE_OUT_OF_RESOURCE: + case BHE_VM_INSTANCE_INIT_FAIL: + return DAL_KDI_STATUS_OUT_OF_RESOURCE; + case BHE_SESSION_NUM_EXCEED: + return DAL_KDI_STATUS_MAX_SESSIONS_REACHED; + case BHE_UNCAUGHT_EXCEPTION: + return DAL_KDI_STATUS_UNCAUGHT_EXCEPTION; + case BHE_WD_TIMEOUT: + return DAL_KDI_STATUS_WD_TIMEOUT; + case BHE_APPLET_CRASHED: + return DAL_KDI_STATUS_APPLET_CRASHED; + case BHE_TA_PACKAGE_HASH_VERIFY_FAIL: + return DAL_KDI_STATUS_INVALID_ACP; + case BHE_PACKAGE_NOT_FOUND: + return DAL_KDI_STATUS_TA_NOT_FOUND; + case BHE_PACKAGE_EXIST: + return DAL_KDI_STATUS_TA_EXIST; + default: + return DAL_KDI_STATUS_INTERNAL_ERROR; + } +} + +/** + * dal_kdi_send - a callback which is called from bhp to send msg over mei + * + * @dev_idx: DAL device type + * @buf: message buffer + * @len: buffer length + * @seq: message sequence + * + * Return: 0 on success + * -EINVAL on incorrect input + * -ENODEV when the device can't be found + * -EFAULT if client is NULL + * <0 on dal_write failure + */ +int dal_kdi_send(unsigned int dev_idx, const unsigned char *buf, + size_t len, u64 seq) +{ + enum dal_dev_type mei_device; + struct dal_device *ddev; + struct dal_client *dc; + struct device *dev; + ssize_t wr; + int ret; + + if (!buf) + return -EINVAL; + + if (dev_idx >= DAL_MEI_DEVICE_MAX) + return -EINVAL; + + if (!len) + return 0; + + if (len > DAL_MAX_BUFFER_SIZE) + return -EMSGSIZE; + + mei_device = (enum dal_dev_type)dev_idx; + dev = dal_find_dev(mei_device); + if (!dev) { + dev_dbg(dev, "can't find device\n"); + return -ENODEV; + } + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + if (!dc) { + dev_dbg(dev, "client is NULL\n"); + ret = -EFAULT; + goto out; + } + + /* copy data to client object */ + memcpy(dc->write_buffer, buf, len); + wr = dal_write(dc, len, seq); + if (wr > 0) + ret = 0; + else + ret = wr; +out: + put_device(dev); + return ret; +} + +/** + * dal_kdi_recv - a callback which is called from bhp to recv msg from DAL FW + * + * @dev_idx: DAL device type + * @buf: buffer of received message + * @count: input and output param - + * - input: buffer length + * - output: size of the received message + * + * Return: 0 on success + * 
-EINVAL on incorrect input + * -ENODEV when the device can't be found + * -EFAULT when client is NULL or copy failed + * -EMSGSIZE when buffer is too small + * <0 on dal_wait_for_read failure + */ +int dal_kdi_recv(unsigned int dev_idx, unsigned char *buf, size_t *count) +{ + enum dal_dev_type mei_device; + struct dal_device *ddev; + struct dal_client *dc; + struct device *dev; + int ret; + size_t len; + + if (!buf || !count) + return -EINVAL; + + if (dev_idx >= DAL_MEI_DEVICE_MAX) + return -EINVAL; + + mei_device = (enum dal_dev_type)dev_idx; + dev = dal_find_dev(mei_device); + if (!dev) + return -ENODEV; + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + if (!dc) { + dev_dbg(dev, "client is NULL\n"); + ret = -EFAULT; + goto out; + } + + ret = dal_wait_for_read(dc); + + if (ret) + goto out; + + if (kfifo_is_empty(&dc->read_queue)) { + *count = 0; + goto out; + } + + ret = kfifo_out(&dc->read_queue, &len, sizeof(len)); + if (ret != sizeof(len)) { + dev_err(&ddev->dev, "could not copy buffer: cannot fetch size\n"); + ret = -EFAULT; + goto out; + } + + if (len > *count) { + dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd > dest size = %zd\n", + len, *count); + ret = -EMSGSIZE; + goto out; + } + + ret = kfifo_out(&dc->read_queue, buf, len); + if (ret != len) { + dev_err(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %d\n", + len, ret); + ret = -EFAULT; + } + + *count = len; + ret = 0; +out: + put_device(dev); + return ret; +} + +/** + * dal_create_session - create session to an installed trusted application. + * + * @session_handle: output param to hold the session handle + * @ta_id: trusted application (ta) id + * @acp_pkg: acp file of the ta + * @acp_pkg_len: acp file length + * @init_param: init parameters to the session (optional) + * @init_param_len: length of the init parameters + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int dal_create_session(u64 *session_handle, const char *ta_id, + const u8 *acp_pkg, size_t acp_pkg_len, + const u8 *init_param, size_t init_param_len) +{ + struct ac_ins_jta_pack_ext pack; + char *ta_pkg; + int ta_pkg_size; + int ret; + + if (!ta_id || !acp_pkg || !acp_pkg_len || !session_handle) + return -EINVAL; + + /* init_param are optional, if they exists the length shouldn't be 0 */ + if (!init_param && init_param_len != 0) { + pr_debug("INVALID_PARAMS init_param %p init_param_len %zu\n", + init_param, init_param_len); + return -EINVAL; + } + + mutex_lock(&dal_kdi_lock); + + ret = acp_pload_ins_jta(acp_pkg, acp_pkg_len, &pack); + if (ret) { + pr_debug("acp_pload_ins_jta() return %d\n", ret); + goto out; + } + + ta_pkg = pack.ta_pack; + if (!ta_pkg) { + ret = -EINVAL; + goto out; + } + + ta_pkg_size = ta_pkg - (char *)acp_pkg; + + if (ta_pkg_size < 0 || (unsigned int)ta_pkg_size > acp_pkg_len) { + ret = -EINVAL; + goto out; + } + + ta_pkg_size = acp_pkg_len - ta_pkg_size; + + ret = bh_ta_session_open(session_handle, ta_id, ta_pkg, ta_pkg_size, + init_param, init_param_len); + + if (ret) + pr_debug("bh_ta_session_open failed = %d\n", ret); + +out: + mutex_unlock(&dal_kdi_lock); + + return to_kdi_err(ret); +} +EXPORT_SYMBOL(dal_create_session); + +/** + * dal_send_and_receive - send and receive data to/from ta + * + * @session_handle: session handle + * @command_id: command id + * @input: message to be sent + * @input_len: sent message size + * @output: output param to hold a pointer to the buffer which + * will contain the received message. 
+ * This buffer is allocated by DAL KDI module and freed by the user + * @output_len: input and output param - + * - input: the expected maximum length of the received message + * - output: size of the received message + * @response_code: output param to hold the return value from the applet + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int dal_send_and_receive(u64 session_handle, int command_id, const u8 *input, + size_t input_len, u8 **output, size_t *output_len, + int *response_code) +{ + int ret; + + mutex_lock(&dal_kdi_lock); + + ret = bh_ta_session_command(session_handle, command_id, + input, input_len, + (void **)output, output_len, + response_code); + + if (ret) + pr_debug("bh_ta_session_command failed, status = %d\n", ret); + + mutex_unlock(&dal_kdi_lock); + + return to_kdi_err(ret); +} +EXPORT_SYMBOL(dal_send_and_receive); + +/** + * dal_close_session - close ta session + * + * @session_handle: session handle + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int dal_close_session(u64 session_handle) +{ + int ret; + + mutex_lock(&dal_kdi_lock); + + ret = bh_ta_session_close(session_handle); + + if (ret) + pr_debug("hp_close_ta_session failed = %d\n", ret); + + mutex_unlock(&dal_kdi_lock); + + return to_kdi_err(ret); +} +EXPORT_SYMBOL(dal_close_session); + +/** + * dal_set_ta_exclusive_access - set client to be owner of the ta, + * so no one else (especially user space client) + * will be able to open session to it + * + * @ta_id: trusted application (ta) id + * + * Return: 0 on success + * -ENODEV when the device can't be found + * -ENOMEM on memory allocation failure + * -EPERM when ta is owned by another client + * -EEXIST when ta is already owned by current client + */ +int dal_set_ta_exclusive_access(const uuid_t *ta_id) +{ + struct dal_device *ddev; + struct device *dev; + struct dal_client *dc; + int ret; + + mutex_lock(&dal_kdi_lock); + + dev = dal_find_dev(DAL_MEI_DEVICE_IVM); + if (!dev) { + dev_dbg(dev, "can't find device\n"); + ret = -ENODEV; + goto unlock; + } + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + + ret = dal_access_policy_add(ddev, ta_id, dc); + + put_device(dev); +unlock: + mutex_unlock(&dal_kdi_lock); + return ret; +} +EXPORT_SYMBOL(dal_set_ta_exclusive_access); + +/** + * dal_unset_ta_exclusive_access - unset client from owning ta + * + * @ta_id: trusted application (ta) id + * + * Return: 0 on success + * -ENODEV when the device can't be found + * -ENOENT when ta isn't found in exclusiveness ta list + * -EPERM when ta is owned by another client + */ +int dal_unset_ta_exclusive_access(const uuid_t *ta_id) +{ + struct dal_device *ddev; + struct device *dev; + struct dal_client *dc; + int ret; + + mutex_lock(&dal_kdi_lock); + + dev = dal_find_dev(DAL_MEI_DEVICE_IVM); + if (!dev) { + dev_dbg(dev, "can't find device\n"); + ret = -ENODEV; + goto unlock; + } + + ddev = to_dal_device(dev); + dc = ddev->clients[DAL_INTF_KDI]; + + ret = dal_access_policy_remove(ddev, ta_id, dc); + + put_device(dev); +unlock: + mutex_unlock(&dal_kdi_lock); + return ret; +} +EXPORT_SYMBOL(dal_unset_ta_exclusive_access); + +#define KDI_MAJOR_VER "1" +#define KDI_MINOR_VER "0" +#define KDI_HOTFIX_VER "0" + +#define KDI_VERSION KDI_MAJOR_VER "." \ + KDI_MINOR_VER "." 
\ + KDI_HOTFIX_VER + +/** + * dal_get_version_info - return DAL driver version + * + * @version_info: output param to hold DAL driver version information + * + * Return: 0 on success + * -EINVAL on incorrect input + */ +int dal_get_version_info(struct dal_version_info *version_info) +{ + if (!version_info) + return -EINVAL; + + memset(version_info, 0, sizeof(*version_info)); + snprintf(version_info->version, DAL_VERSION_LEN, "%s", KDI_VERSION); + + return 0; +} +EXPORT_SYMBOL(dal_get_version_info); + +/** + * dal_kdi_add_dev - add new dal device (one of dal_dev_type) + * + * @dev: device object which is associated with dal device + * @class_intf: class interface + * + * Return: 0 on success + * <0 on failure + * + * When new dal device is added, a new client is created for + * this device in kernel space interface + */ +static int dal_kdi_add_dev(struct device *dev, + struct class_interface *class_intf) +{ + int ret; + struct dal_device *ddev; + + ddev = to_dal_device(dev); + mutex_lock(&ddev->context_lock); + ret = dal_dc_setup(ddev, DAL_INTF_KDI); + mutex_unlock(&ddev->context_lock); + return ret; +} + +/** + * dal_kdi_rm_dev - rm dal device (one of dal_dev_type) + * + * @dev: device object which is associated with dal device + * @class_intf: class interface + * + * Return: 0 on success + * <0 on failure + */ +static void dal_kdi_rm_dev(struct device *dev, + struct class_interface *class_intf) +{ + struct dal_device *ddev; + + ddev = to_dal_device(dev); + mutex_lock(&ddev->context_lock); + dal_dc_destroy(ddev, DAL_INTF_KDI); + mutex_unlock(&ddev->context_lock); +} + +/* + * dal_kdi_interface handles addition/removal of dal devices + */ +static struct class_interface dal_kdi_interface __refdata = { + .add_dev = dal_kdi_add_dev, + .remove_dev = dal_kdi_rm_dev, +}; + +/** + * dal_kdi_init - initialize dal kdi + * + * Return: 0 on success + * <0 on failure + */ +int dal_kdi_init(void) +{ + int ret; + + bh_init_internal(); + + dal_kdi_interface.class = dal_class; + ret = class_interface_register(&dal_kdi_interface); + if (ret) { + pr_err("failed to register class interface = %d\n", ret); + goto err; + } + + return 0; + +err: + bh_deinit_internal(); + return ret; +} + +/** + * dal_kdi_exit - dal kdi exit function + */ +void dal_kdi_exit(void) +{ + bh_deinit_internal(); + class_interface_unregister(&dal_kdi_interface); +} diff --git a/drivers/misc/mei/dal/dal_ta_access.c b/drivers/misc/mei/dal/dal_ta_access.c new file mode 100644 index 000000000000..1a8f27b6a95d --- /dev/null +++ b/drivers/misc/mei/dal/dal_ta_access.c @@ -0,0 +1,288 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. 
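Illustrative sketch (not part of the patch): the entry points exported above with EXPORT_SYMBOL (dal_create_session(), dal_send_and_receive(), dal_close_session() and the exclusive-access helpers) form the kernel-space KDI API. A minimal example of how another kernel module might drive it could look as follows; the TA id string, the command id and the ACP buffer are placeholders, not values defined by this patch.

/* Illustrative only: ta_id, command id 1 and the ACP buffer are made up. */
static int example_kdi_consumer(const u8 *acp_pkg, size_t acp_pkg_len)
{
	static const char *ta_id = "01234567-89ab-cdef-0123-456789abcdef";
	u64 handle;
	u8 *out = NULL;
	size_t out_len = 64;	/* expected maximum response size */
	int response_code;
	int ret;

	ret = dal_create_session(&handle, ta_id, acp_pkg, acp_pkg_len,
				 NULL, 0);
	if (ret)
		return ret;

	/* per the kernel-doc, the output buffer is allocated by the KDI
	 * module and must be freed by the caller (kfree() assumed here)
	 */
	ret = dal_send_and_receive(handle, 1, NULL, 0,
				   &out, &out_len, &response_code);
	if (!ret)
		kfree(out);

	dal_close_session(handle);
	return ret;
}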
+ * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include +#include + +#include +#include "dal_dev.h" + +/* Spooler UUID */ +static const uuid_t spooler_ta_id = UUID_INIT(0xba8d1643, 0x50b6, 0x49cc, + 0x86, 0x1d, 0x2c, 0x01, + 0xbe, 0xd1, 0x4b, 0xe8); + +/** + * struct dal_access_policy - ta access information node + * + * @list: link in access list + * @ta_id: trusted application id + * @owner: owner of ta + */ +struct dal_access_policy { + struct list_head list; + uuid_t ta_id; + void *owner; +}; + +/** + * dal_dev_get_access_list - get access list of dal device + * + * @ddev: dal device + * + * Return: pointer to access list + */ +static struct list_head *dal_dev_get_access_list(struct dal_device *ddev) +{ + return dev_get_drvdata(&ddev->dev); +} + +/** + * dal_access_policy_alloc - allocate memory and initialize access list node + * + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: pointer to the new initialized access list node + * + * Locking: called under "kdi_lock" lock + */ +static struct dal_access_policy * +dal_access_policy_alloc(const uuid_t *ta_id, void *owner) +{ + struct dal_access_policy *e; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return NULL; + + INIT_LIST_HEAD(&e->list); + e->ta_id = *ta_id; + e->owner = owner; + + return e; +} + +/** + * dal_access_policy_find - find ta id in access list + * + * @access_list: access list + * @ta_id: trusted application id + * + * Return: pointer to access list node of ta + * NULL if ta is not found in access list + */ +static struct dal_access_policy * +dal_access_policy_find(struct list_head *access_list, const uuid_t *ta_id) +{ + struct dal_access_policy *e; + + list_for_each_entry(e, access_list, list) { + if (uuid_equal(&e->ta_id, ta_id)) + return e; + } + return NULL; +} + +/** + * 
dal_access_policy_add - add access information of ta and its owner + * + * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: 0 on success + * -ENOMEM on memory allocation failure + * -EPERM when ta already has another owner + * -EEXIST when access information already exists (same ta and owner) + * + * Locking: called under "kdi_lock" lock + */ +int dal_access_policy_add(struct dal_device *ddev, + const uuid_t *ta_id, void *owner) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + + e = dal_access_policy_find(access_list, ta_id); + if (e) { + if (!e->owner) + return -EPERM; + + return -EEXIST; + } + + e = dal_access_policy_alloc(ta_id, owner); + if (!e) + return -ENOMEM; + + list_add_tail(&e->list, access_list); + return 0; +} + +/** + * dal_access_policy_remove - remove access information of ta and its owner + * + * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: 0 on success + * -ENOENT when ta isn't found in access list + * -EPERM when ta has another owner + * + * Locking: called under "kdi_lock" lock + */ +int dal_access_policy_remove(struct dal_device *ddev, + const uuid_t *ta_id, void *owner) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + + e = dal_access_policy_find(access_list, ta_id); + if (!e) + return -ENOENT; + + if (!e->owner || e->owner != owner) + return -EPERM; + + list_del(&e->list); + kfree(e); + return 0; +} + +/** + * dal_access_policy_allowed - check if owner is allowed to use ta + * + * @ddev: dal device + * @ta_id: trusted application id + * @owner: owner + * + * Return: 0 on success + * -EPERM when owner is not allowed to use ta + * + * Locking: called under "ddev->write_lock" lock + */ +int dal_access_policy_allowed(struct dal_device *ddev, + const uuid_t *ta_id, void *owner) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e; + + e = dal_access_policy_find(access_list, ta_id); + if (!e) + return 0; + + if (e->owner && e->owner != owner) + return -EPERM; + + return 0; +} + +/** + * dal_access_list_free - free memory of access list + * + * @ddev: dal device + */ +void dal_access_list_free(struct dal_device *ddev) +{ + struct list_head *access_list = dal_dev_get_access_list(ddev); + struct dal_access_policy *e, *n; + + if (!access_list) + return; + + list_for_each_entry_safe(e, n, access_list, list) { + list_del(&e->list); + kfree(e); + } + + kfree(access_list); + dev_set_drvdata(&ddev->dev, NULL); +} + +/** + * dal_access_list_init - initialize an empty access list + * + * @ddev: dal device + * + * Note: Add spooler ta id with blank owner to the list. + * This will prevent any user from setting itself owner of the spooler, + * which will block others from openning session to it. 
+ * + * Return: 0 on success + * -ENOMEM on memory allocation failure + */ +int dal_access_list_init(struct dal_device *ddev) +{ + struct list_head *access_list; + + access_list = kzalloc(sizeof(*access_list), GFP_KERNEL); + if (!access_list) + return -ENOMEM; + + INIT_LIST_HEAD(access_list); + dev_set_drvdata(&ddev->dev, access_list); + + /* Nobody can own SPOOLER TA */ + dal_access_policy_add(ddev, &spooler_ta_id, NULL); + + return 0; +} diff --git a/drivers/misc/mei/dal/dal_test.c b/drivers/misc/mei/dal/dal_test.c new file mode 100644 index 000000000000..baa3decab59c --- /dev/null +++ b/drivers/misc/mei/dal/dal_test.c @@ -0,0 +1,829 @@ +/****************************************************************************** + * Intel dal test Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
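Illustrative sketch (not part of the patch): the access-policy helpers above keep a simple per-device ownership list. Assuming ddev went through dal_access_list_init() and owner_a/owner_b stand for two distinct dal_client pointers, the expected return values are:

/* Illustrative only: owner_a/owner_b and ta are placeholders. */
static void example_access_policy(struct dal_device *ddev, const uuid_t *ta,
				  void *owner_a, void *owner_b)
{
	int ret;

	ret = dal_access_policy_add(ddev, ta, owner_a);     /* 0: ta now owned */
	ret = dal_access_policy_add(ddev, ta, owner_a);     /* -EEXIST: entry already present */
	ret = dal_access_policy_allowed(ddev, ta, owner_b); /* -EPERM: owned by someone else */
	ret = dal_access_policy_remove(ddev, ta, owner_b);  /* -EPERM: not the owner */
	ret = dal_access_policy_remove(ddev, ta, owner_a);  /* 0 */
	ret = dal_access_policy_allowed(ddev, ta, owner_b); /* 0: no entry, unrestricted */
	/* adding the spooler TA id would return -EPERM, because
	 * dal_access_list_init() registered it with a NULL owner
	 */
	(void)ret;
}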
+ * + *****************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "uapi/kdi_cmd_defs.h" +#define KDI_MODULE "mei_dal" + +/** + * this is the max data size possible: + * there is no actually max size for acp file, + * but for testing 512k is good enough + */ +#define MAX_DATA_SIZE SZ_512K + +#define KDI_TEST_OPENED 0 + +/** + * struct dal_test_data - dal test cmd and response data + * + * @cmd_data_size: size of cmd got from user space + * @cmd_data: the cmd got from user space + * @cmd_lock: protects cmd_data buffer + * + * @resp_data_size: size of response from kdi + * @resp_data: the response from kdi + * @resp_lock: protects resp_data buffer + */ +struct dal_test_data { + u32 cmd_data_size; + u8 *cmd_data; + struct mutex cmd_lock; /* protects cmd_data buffer */ + + u32 resp_data_size; + u8 *resp_data; + struct mutex resp_lock; /* protects resp_data buffer */ +}; + +/** + * struct dal_test_device - dal test private data + * + * @dev: the device structure + * @cdev: character device + * + * @kdi_test_status: status of test module + * @data: cmd and response data + */ +static struct dal_test_device { + struct device *dev; + struct cdev cdev; + + unsigned long kdi_test_status; + struct dal_test_data *data; +} dal_test_dev; + +#ifdef CONFIG_MODULES +/** + * dal_test_find_module - find the given module + * + * @mod_name: the module name to find + * + * Return: pointer to the module if it is found + * NULL otherwise + */ +static struct module *dal_test_find_module(const char *mod_name) +{ + struct module *mod; + + mutex_lock(&module_mutex); + mod = find_module(mod_name); + mutex_unlock(&module_mutex); + + return mod; +} + +/** + * dal_test_load_kdi - load kdi module + * + * @dev: dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_load_kdi(struct dal_test_device *dev) +{ + struct module *mod; + + /* load KDI if it wasn't loaded */ + request_module(KDI_MODULE); + + mod = dal_test_find_module(KDI_MODULE); + if (!mod) { + dev_err(dev->dev, "failed to find KDI module: %s\n", + KDI_MODULE); + return -ENODEV; + } + + if (!try_module_get(mod)) { + dev_err(dev->dev, "failed to get KDI module\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dal_test_unload_kdi - unload kdi module + * + * @dev: dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_unload_kdi(struct dal_test_device *dev) +{ + struct module *mod; + + mod = dal_test_find_module(KDI_MODULE); + if (!mod) { + dev_err(dev->dev, "failed to find KDI module: %s\n", + KDI_MODULE); + return -ENODEV; + } + module_put(mod); + + return 0; +} +#else +static inline int dal_test_load_kdi(struct dal_test_device *dev) { return 0; } +static inline int dal_test_unload_kdi(struct dal_test_device *dev) { return 0; } +#endif + +/** + * dal_test_result_set - set data to the result buffer + * + * @test_data: test command and response buffers + * @data: new data + * @size: size of the data buffer + */ +static void dal_test_result_set(struct dal_test_data *test_data, + void *data, u32 size) +{ + memcpy(test_data->resp_data, data, size); + test_data->resp_data_size = size; +} + +/** + * dal_test_result_append - append data to the result buffer + * + * @test_data: test command and response buffers + * @data: new data + * @size: size of the data buffer + */ +static void 
dal_test_result_append(struct dal_test_data *test_data, + void *data, u32 size) +{ + size_t offset = test_data->resp_data_size; + + memcpy(test_data->resp_data + offset, data, size); + test_data->resp_data_size += size; +} + +/** + * dal_test_send_and_recv - call send and receive function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_send_and_recv(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct send_and_rcv_cmd *cmd; + struct send_and_rcv_resp resp; + ssize_t data_size; + size_t output_len; + s32 response_code; + u8 *input; + u8 *output; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct send_and_rcv_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + if (data_size < 0) { + dev_dbg(dev->dev, "malformed command struct: data_size = %zu\n", + data_size); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + response_code = 0; + output = NULL; + input = (data_size) ? cmd->input : NULL; + output_len = (cmd->is_output_len_ptr) ? cmd->output_buf_len : 0; + + dev_dbg(dev->dev, "call dal_send_and_receive: handle=%llu command_id=%d input_len=%zd\n", + cmd->session_handle, cmd->command_id, data_size); + + status = dal_send_and_receive(cmd->session_handle, cmd->command_id, + input, data_size, + cmd->is_output_buf ? &output : NULL, + cmd->is_output_len_ptr ? + &output_len : NULL, + cmd->is_response_code_ptr ? + &response_code : NULL); + + dev_dbg(dev->dev, "dal_send_and_receive return: status=%d output_len=%zu response_code=%d\n", + status, output_len, response_code); + + resp.output_len = (u32)output_len; + resp.response_code = response_code; + resp.status = status; + resp.test_mod_status = 0; + + /* in case the call failed we don't copy the data */ + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + if (output && resp.output_len) + dal_test_result_append(t_data, output, resp.output_len); + mutex_unlock(&t_data->resp_lock); + + kfree(output); +} + +/** + * dal_test_create_session - call create session function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_create_session(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct session_create_cmd *cmd; + struct session_create_resp resp; + u32 data_size; + u64 handle; + char *app_id; + u8 *acp_pkg; + u8 *init_params; + u32 offset; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct session_create_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + + if (cmd->app_id_len + cmd->acp_pkg_len + cmd->init_param_len != + data_size) { + dev_dbg(dev->dev, "malformed command struct: data_size = %d\n", + data_size); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + handle = 0; + + offset = 0; + app_id = (cmd->app_id_len) ? cmd->data + offset : NULL; + offset += cmd->app_id_len; + + acp_pkg = (cmd->acp_pkg_len) ? cmd->data + offset : NULL; + offset += cmd->acp_pkg_len; + + init_params = (cmd->init_param_len) ? 
cmd->data + offset : NULL; + offset += cmd->init_param_len; + + dev_dbg(dev->dev, "call dal_create_session params: app_id = %s, app_id len = %d, acp pkg len = %d, init params len = %d\n", + app_id, cmd->app_id_len, cmd->acp_pkg_len, cmd->init_param_len); + + status = dal_create_session(cmd->is_session_handle_ptr ? + &handle : NULL, + app_id, acp_pkg, + cmd->acp_pkg_len, + init_params, + cmd->init_param_len); + dev_dbg(dev->dev, "dal_create_session return: status = %d, handle = %llu\n", + status, handle); + + resp.session_handle = handle; + resp.status = status; + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_close_session - call close session function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_close_session(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct session_close_cmd *cmd; + struct session_close_resp resp; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct session_close_cmd *)t_cmd->data; + if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + resp.status = dal_close_session(cmd->session_handle); + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_version_info - call get version function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_version_info(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct version_get_info_cmd *cmd; + struct version_get_info_resp resp; + struct dal_version_info *version; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct version_get_info_cmd *)t_cmd->data; + if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + version = (cmd->is_version_ptr) ? 
+ (struct dal_version_info *)resp.kdi_version : NULL; + + resp.status = dal_get_version_info(version); + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_set_ex_access - call set/remove access function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + * @set_access: true when calling set access function + * false when calling remove access function + */ +static void dal_test_set_ex_access(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data, + bool set_access) +{ + struct ta_access_set_remove_cmd *cmd; + struct ta_access_set_remove_resp resp; + u32 data_size; + uuid_t app_uuid; + char *app_id; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct ta_access_set_remove_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + + if (cmd->app_id_len != data_size) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + app_id = (cmd->app_id_len) ? cmd->data : NULL; + + status = dal_uuid_parse(app_id, &app_uuid); + if (status < 0) + goto out; + + if (set_access) + status = dal_set_ta_exclusive_access(&app_uuid); + else + status = dal_unset_ta_exclusive_access(&app_uuid); + +out: + resp.status = status; + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_kdi_command - parse and invoke the requested command + * + * @dev: dal test device + */ +static void dal_test_kdi_command(struct dal_test_device *dev) +{ + struct dal_test_data *test_data; + struct kdi_test_command *cmd; + s32 status; + + test_data = dev->data; + cmd = (struct kdi_test_command *)test_data->cmd_data; + + if (test_data->cmd_data_size < sizeof(cmd->cmd_id)) { + dev_dbg(dev->dev, "malformed command struct\n"); + status = -EINVAL; + goto prep_err_test_mod; + } + + switch (cmd->cmd_id) { + case KDI_SESSION_CREATE: { + dev_dbg(dev->dev, "KDI_CREATE_SESSION[%d]\n", cmd->cmd_id); + dal_test_create_session(dev, cmd, test_data); + break; + } + case KDI_SESSION_CLOSE: { + dev_dbg(dev->dev, "KDI_CLOSE_SESSION[%d]\n", cmd->cmd_id); + dal_test_close_session(dev, cmd, test_data); + break; + } + case KDI_SEND_AND_RCV: { + dev_dbg(dev->dev, "KDI_SEND_AND_RCV[%d]\n", cmd->cmd_id); + dal_test_send_and_recv(dev, cmd, test_data); + break; + } + case KDI_VERSION_GET_INFO: { + dev_dbg(dev->dev, "KDI_GET_VERSION_INFO[%d]\n", cmd->cmd_id); + dal_test_version_info(dev, cmd, test_data); + break; + } + case KDI_EXCLUSIVE_ACCESS_SET: + case KDI_EXCLUSIVE_ACCESS_REMOVE: { + dev_dbg(dev->dev, "KDI_SET_EXCLUSIVE_ACCESS or KDI_REMOVE_EXCLUSIVE_ACCESS[%d]\n", + cmd->cmd_id); + dal_test_set_ex_access(dev, cmd, test_data, + cmd->cmd_id == KDI_EXCLUSIVE_ACCESS_SET); + break; + } + default: + dev_dbg(dev->dev, "unknown command %d\n", cmd->cmd_id); + status = -EINVAL; + goto prep_err_test_mod; + } + + return; + +prep_err_test_mod: + mutex_lock(&test_data->resp_lock); + dal_test_result_set(test_data, &status, sizeof(status)); + mutex_unlock(&test_data->resp_lock); +} + +/** + * dal_test_read - dal test read function + * + * @filp: pointer to file structure + * @buff: pointer to user buffer + * 

@count: buffer length + * @offp: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_test_read(struct file *filp, char __user *buff, size_t count, + loff_t *offp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + int ret; + + dev = filp->private_data; + test_data = dev->data; + + mutex_lock(&test_data->resp_lock); + + if (test_data->resp_data_size > count) { + ret = -EMSGSIZE; + goto unlock; + } + + dev_dbg(dev->dev, "copying %d bytes to userspace\n", + test_data->resp_data_size); + if (copy_to_user(buff, test_data->resp_data, + test_data->resp_data_size)) { + dev_dbg(dev->dev, "copy_to_user failed\n"); + ret = -EFAULT; + goto unlock; + } + ret = test_data->resp_data_size; + +unlock: + mutex_unlock(&test_data->resp_lock); + + return ret; +} + +/** + * dal_test_write - dal test write function + * + * @filp: pointer to file structure + * @buff: pointer to user buffer + * @count: buffer length + * @offp: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_test_write(struct file *filp, const char __user *buff, + size_t count, loff_t *offp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + + dev = filp->private_data; + test_data = dev->data; + + if (count > MAX_DATA_SIZE) + return -EMSGSIZE; + + mutex_lock(&test_data->cmd_lock); + + if (copy_from_user(test_data->cmd_data, buff, count)) { + mutex_unlock(&test_data->cmd_lock); + dev_dbg(dev->dev, "copy_from_user failed\n"); + return -EFAULT; + } + + test_data->cmd_data_size = count; + dev_dbg(dev->dev, "write %zu bytes\n", count); + + dal_test_kdi_command(dev); + + mutex_unlock(&test_data->cmd_lock); + + return count; +} + +/** + * dal_test_open - dal test open function + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_open(struct inode *inode, struct file *filp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + int ret; + + dev = container_of(inode->i_cdev, struct dal_test_device, cdev); + if (!dev) + return -ENODEV; + + /* single open */ + if (test_and_set_bit(KDI_TEST_OPENED, &dev->kdi_test_status)) + return -EBUSY; + + test_data = kzalloc(sizeof(*test_data), GFP_KERNEL); + if (!test_data) { + ret = -ENOMEM; + goto err_clear_bit; + } + + test_data->cmd_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); + test_data->resp_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); + if (!test_data->cmd_data || !test_data->resp_data) { + ret = -ENOMEM; + goto err_free; + } + + mutex_init(&test_data->cmd_lock); + mutex_init(&test_data->resp_lock); + + ret = dal_test_load_kdi(dev); + if (ret) + goto err_free; + + dev->data = test_data; + filp->private_data = dev; + + return nonseekable_open(inode, filp); + +err_free: + kfree(test_data->cmd_data); + kfree(test_data->resp_data); + kfree(test_data); + +err_clear_bit: + clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status); + + return ret; +} + +/** + * dal_test_release - dal test release function + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_release(struct inode *inode, struct file *filp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + + dev = filp->private_data; + if (!dev) + return -ENODEV; + + dal_test_unload_kdi(dev); + + test_data = dev->data; + if (test_data) { + kfree(test_data->cmd_data); + kfree(test_data->resp_data); + 
		kfree(test_data);
+	}
+
+	clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status);
+
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations dal_test_fops = {
+	.owner = THIS_MODULE,
+	.open = dal_test_open,
+	.release = dal_test_release,
+	.read = dal_test_read,
+	.write = dal_test_write,
+	.llseek = no_llseek,
+};
+
+/**
+ * dal_test_exit - destroy dal test device
+ */
+static void __exit dal_test_exit(void)
+{
+	struct dal_test_device *dev = &dal_test_dev;
+	struct class *dal_test_class;
+	static dev_t devt;
+
+	dal_test_class = dev->dev->class;
+	devt = dev->dev->devt;
+
+	cdev_del(&dev->cdev);
+	unregister_chrdev_region(devt, 1);
+	device_destroy(dal_test_class, devt);
+	class_destroy(dal_test_class);
+}
+
+/**
+ * dal_test_init - initialize dal test device
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+static int __init dal_test_init(void)
+{
+	struct dal_test_device *dev = &dal_test_dev;
+	struct class *dal_test_class;
+	static dev_t devt;
+	int ret;
+
+	ret = alloc_chrdev_region(&devt, 0, 1, "mei_dal_test");
+	if (ret)
+		return ret;
+
+	dal_test_class = class_create(THIS_MODULE, "mei_dal_test");
+	if (IS_ERR(dal_test_class)) {
+		ret = PTR_ERR(dal_test_class);
+		dal_test_class = NULL;
+		goto err_unregister_cdev;
+	}
+
+	dev->dev = device_create(dal_test_class, NULL, devt, dev, "dal_test0");
+	if (IS_ERR(dev->dev)) {
+		ret = PTR_ERR(dev->dev);
+		goto err_class_destroy;
+	}
+
+	cdev_init(&dev->cdev, &dal_test_fops);
+	dev->cdev.owner = THIS_MODULE;
+	ret = cdev_add(&dev->cdev, devt, 1);
+	if (ret)
+		goto err_device_destroy;
+
+	return 0;
+
+err_device_destroy:
+	device_destroy(dal_test_class, devt);
+err_class_destroy:
+	class_destroy(dal_test_class);
+err_unregister_cdev:
+	unregister_chrdev_region(devt, 1);
+
+	return ret;
+}
+
+module_init(dal_test_init);
+module_exit(dal_test_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) DAL test");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h b/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h
new file mode 100644
index 000000000000..ed7b8b707b73
--- /dev/null
+++ b/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+ * Intel mei_dal test Linux driver
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef KDI_CMD_DEFS_H +#define KDI_CMD_DEFS_H + +/** + * enum kdi_command_id - cmd id to invoke in kdi module + * + * @KDI_SESSION_CREATE: call kdi "create session" function + * @KDI_SESSION_CLOSE: call kdi "close session" function + * @KDI_SEND_AND_RCV: call kdi "send and receive" function + * @KDI_VERSION_GET_INFO: call kdi "get version" function + * @KDI_EXCLUSIVE_ACCESS_SET: call kdi "set exclusive access" function + * @KDI_EXCLUSIVE_ACCESS_REMOVE: call kdi "unset exclusive access" function + */ +enum kdi_command_id { + KDI_SESSION_CREATE, + KDI_SESSION_CLOSE, + KDI_SEND_AND_RCV, + KDI_VERSION_GET_INFO, + KDI_EXCLUSIVE_ACCESS_SET, + KDI_EXCLUSIVE_ACCESS_REMOVE +}; + +/** + * struct kdi_test_command - contains the command received from user space + * + * @cmd_id: the command id + * @data: the command data + */ +struct kdi_test_command { + __u8 cmd_id; + unsigned char data[0]; +} __packed; + +/** + * struct session_create_cmd - create session cmd data + * + * @app_id_len: length of app_id arg + * @acp_pkg_len: length of the acp_pkg arg + * @init_param_len: length of init param arg + * @is_session_handle_ptr: either send kdi a valid ptr to hold the + * session handle or NULL + * @data: buffer to hold the cmd arguments + */ +struct session_create_cmd { + __u32 app_id_len; + __u32 acp_pkg_len; + __u32 init_param_len; + __u8 is_session_handle_ptr; + unsigned char data[0]; +} __packed; + +/** + * struct session_create_resp - create session response + * + * @session_handle: the session handle + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct session_create_resp { + __u64 session_handle; + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct session_close_cmd - close session cmd + * + * @session_handle: the session handle to close + */ +struct session_close_cmd { + __u64 session_handle; +} __packed; + +/** + * struct session_close_resp - close 
session response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct session_close_resp { + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct send_and_rcv_cmd - send and receive cmd + * + * @session_handle: the session handle + * @command_id: the cmd id to send the applet + * @output_buf_len: the size of the output buffer + * @is_output_buf: either send kdi a valid ptr to hold the output buffer or NULL + * @is_output_len_ptr: either send kdi a valid ptr to hold + * the output len or NULL + * @is_response_code_ptr: either send kdi a valid ptr to hold + * the applet response code or NULL + * @input: the input data to send the applet + */ +struct send_and_rcv_cmd { + __u64 session_handle; + __u32 command_id; + __u32 output_buf_len; + __u8 is_output_buf; + __u8 is_output_len_ptr; + __u8 is_response_code_ptr; + unsigned char input[0]; +} __packed; + +/** + * struct send_and_rcv_resp - send and receive response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + * @response_code: response code returned from the applet + * @output_len: length of output from the applet + * @output: the output got from the applet + */ +struct send_and_rcv_resp { + __s32 test_mod_status; + __s32 status; + __s32 response_code; + __u32 output_len; + unsigned char output[0]; +} __packed; + +/** + * struct version_get_info_cmd - get version cmd + * + * @is_version_ptr: either send kdi a valid ptr to hold the version info or NULL + */ +struct version_get_info_cmd { + __u8 is_version_ptr; +} __packed; + +/** + * struct version_get_info_resp - get version response + * + * @kdi_version: kdi version + * @reserved: reserved bytes + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct version_get_info_resp { + char kdi_version[32]; + __u32 reserved[4]; + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct ta_access_set_remove_cmd - set/remove access cmd + * + * @app_id_len: length of app_id arg + * @data: the cmd data. contains the app_id + */ +struct ta_access_set_remove_cmd { + __u32 app_id_len; + unsigned char data[0]; +} __packed; + +/** + * struct ta_access_set_remove_resp - set/remove access response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct ta_access_set_remove_resp { + __s32 test_mod_status; + __s32 status; +} __packed; + +#endif /* KDI_CMD_DEFS_H */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index a617aa5a3ad8..407718faf32f 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -182,6 +182,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, dev->hbm_f_fa_supported); pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n", dev->hbm_f_os_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n", + dev->hbm_f_dr_supported); } pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c new file mode 100644 index 000000000000..f841b9f8c054 --- /dev/null +++ b/drivers/misc/mei/dma-ring.c @@ -0,0 +1,233 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2016, Intel Corporation. 
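Illustrative sketch (not part of the patch): the structures in kdi_cmd_defs.h above define the wire format the dal_test character device expects, a write() of a packed kdi_test_command followed by a read() of the matching response struct. A rough user-space example, assuming the node created by device_create() shows up as /dev/dal_test0 and that the session handle was obtained from an earlier KDI_SESSION_CREATE exchange:

/* Illustrative user-space example only; /dev/dal_test0 and the session
 * handle value are assumptions, not defined by this patch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[1 + sizeof(uint64_t)];	/* kdi_test_command + session_close_cmd */
	int32_t resp[2];			/* session_close_resp: test_mod_status, status */
	uint64_t handle = 1;			/* placeholder session handle */
	int fd;

	fd = open("/dev/dal_test0", O_RDWR);
	if (fd < 0)
		return 1;

	buf[0] = 1;				/* KDI_SESSION_CLOSE in enum kdi_command_id */
	memcpy(buf + 1, &handle, sizeof(handle));

	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf) ||
	    read(fd, resp, sizeof(resp)) != (ssize_t)sizeof(resp)) {
		close(fd);
		return 1;
	}

	printf("test_mod_status=%d status=%d\n", resp[0], resp[1]);
	close(fd);
	return 0;
}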
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include +#include + +#include "mei_dev.h" + +static int mei_dmam_dscr_alloc(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->size) + return 0; + + if (dscr->vaddr) + return 0; + + dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, + GFP_KERNEL); + if (!dscr->vaddr) + return -ENOMEM; + + return 0; +} + +static void mei_dmam_dscr_free(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->vaddr) + return; + + dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); + dscr->vaddr = NULL; +} + +/** + * mei_dmam_ring_free - free dma ring buffers + * + * @dev: mei device + */ +void mei_dmam_ring_free(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + mei_dmam_dscr_free(dev, &dev->dr_dscr[i]); +} + +/** + * mei_dmam_ring_alloc - allocate dma ring buffers + * + * @dev: mei device + * + * Return: -ENOMEM on allocation failure 0 otherwise + */ +int mei_dmam_ring_alloc(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i])) + goto err; + + return 0; + +err: + mei_dmam_ring_free(dev); + return -ENOMEM; +} + +/** + * mei_dma_ring_is_allocated - check if dma ring is allocated + * + * @dev: mei device + * + * Return: true if dma ring is allocated + */ +bool mei_dma_ring_is_allocated(struct mei_device *dev) +{ + return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr; +} + +static inline +struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev) +{ + return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr; +} + +void mei_dma_ring_reset(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + + if (!ctrl) + return; + + memset(ctrl, 0, sizeof(*ctrl)); +} + +static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(buf, dbuf + b_offset, b_n); + + return b_n; +} + +static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(hbuf + b_offset, buf, b_n); + + return b_n; +} +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 dbuf_depth; + u32 rd_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "reading from dma %u bytes\n", len); + dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2; + rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1); + slots = DIV_ROUND_UP(len, MEI_DMA_SLOT_SIZE); + if (!buf) + goto out; + + if (rd_idx + slots > dbuf_depth) { + buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx); + rem = slots - (dbuf_depth - rd_idx); + rd_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_from(dev, buf, rd_idx, rem); +out: + WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots); 
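/*
 * Editorial note, not part of the patch: a worked example of the slot
 * arithmetic used here and in mei_dma_ring_write() below. Buffer depths
 * and indices are expressed in 4-byte slots (MEI_DMA_SLOT_SIZE is assumed
 * to be 4, matching the "<< 2" / ">> 2" conversions). With a 4096-byte
 * device buffer the depth is 4096 >> 2 = 1024 slots; reading a 70-byte
 * message takes DIV_ROUND_UP(70, 4) = 18 slots. If dbuf_rd_idx is 1020,
 * the copy wraps: 4 slots (16 bytes) come from the tail of the buffer and
 * the remaining 14 slots (56 bytes) from its start, after which
 * dbuf_rd_idx advances to 1038 and is masked back to 14 on the next read.
 */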
+} + +static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev) +{ + return dev->dr_dscr[DMA_DSCR_HOST].size >> 2; +} + +/** + * mei_dma_ring_empty_slots - calculate number of empty slots in dma ring + * + * @dev: mei_device + * + * Return: number of empty slots + */ +u32 mei_dma_ring_empty_slots(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 wr_idx, rd_idx, hbuf_depth, empty; + + if (!mei_dma_ring_is_allocated(dev)) + return 0; + + if (WARN_ON(!ctrl)) + return 0; + + /* easier to work in slots */ + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + rd_idx = READ_ONCE(ctrl->hbuf_rd_idx); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx); + + if (rd_idx > wr_idx) + empty = rd_idx - wr_idx; + else + empty = hbuf_depth - (wr_idx - rd_idx); + + return empty; +} + +/** + * mei_dma_ring_write - write data to dma ring host buffer + * + * @dev: mei_device + * @buf: data to be written + * @len: data length + */ +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 hbuf_depth; + u32 wr_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "writing to dma %u bytes\n", len); + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1); + slots = DIV_ROUND_UP(len, MEI_DMA_SLOT_SIZE); + + if (wr_idx + slots > hbuf_depth) { + buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx); + rem = slots - (hbuf_depth - wr_idx); + wr_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_to(dev, buf, wr_idx, rem); + + WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots); +} diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index fe6595fe94f1..81543bfbe194 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -65,6 +65,7 @@ const char *mei_hbm_state_str(enum mei_hbm_state state) MEI_HBM_STATE(IDLE); MEI_HBM_STATE(STARTING); MEI_HBM_STATE(STARTED); + MEI_HBM_STATE(DR_SETUP); MEI_HBM_STATE(ENUM_CLIENTS); MEI_HBM_STATE(CLIENT_PROPERTIES); MEI_HBM_STATE(STOPPED); @@ -131,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) hdr->me_addr = 0; hdr->length = length; hdr->msg_complete = 1; + hdr->dma_ring = 0; hdr->reserved = 0; hdr->internal = 0; } @@ -280,6 +282,49 @@ int mei_hbm_start_req(struct mei_device *dev) return 0; } +/** + * mei_hbm_dma_setup_req - setup DMA request + * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_dma_setup_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_dma_setup_request req; + const size_t len = sizeof(struct hbm_dma_setup_request); + int i; + int ret; + + mei_hbm_hdr(&mei_hdr, len); + + memset(&req, 0, len); + req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD; + for (i = 0; i < DMA_DSCR_NUM; i++) { + phys_addr_t paddr; + + paddr = dev->dr_dscr[i].daddr; + req.dma_dscr[i].addr_hi = upper_32_bits(paddr); + req.dma_dscr[i].addr_lo = lower_32_bits(paddr); + req.dma_dscr[i].size = dev->dr_dscr[i].size; + } + + mei_dma_ring_reset(dev); + + ret = mei_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "dma setup request write failed: ret = %d.\n", + ret); + return ret; + } + + dev->hbm_state = MEI_HBM_DR_SETUP; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + /** + * mei_hbm_enum_clients_req - sends enumeration client request message.
* @@ -992,6 +1037,12 @@ static void mei_hbm_config_features(struct mei_device *dev) /* OS ver message Support */ if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) dev->hbm_f_os_supported = 1; + + /* DMA Ring Support */ + if (dev->version.major_version > HBM_MAJOR_VERSION_DR || + (dev->version.major_version == HBM_MAJOR_VERSION_DR && + dev->version.minor_version >= HBM_MINOR_VERSION_DR)) + dev->hbm_f_dr_supported = 1; } /** @@ -1023,6 +1074,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) struct hbm_host_version_response *version_res; struct hbm_props_response *props_res; struct hbm_host_enum_response *enum_res; + struct hbm_dma_setup_response *dma_setup_res; struct hbm_add_client_request *add_cl_req; int ret; @@ -1087,14 +1139,52 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EPROTO; } - if (mei_hbm_enum_clients_req(dev)) { - dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); - return -EIO; + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if (mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + + wake_up(&dev->wait_hbm_start); + break; + } } + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + wake_up(&dev->wait_hbm_start); break; + case MEI_HBM_DMA_SETUP_RES_CMD: + dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_DR_SETUP) { + dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; + + if (dma_setup_res->status) { + dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + dma_setup_res->status, + mei_hbm_status_str(dma_setup_res->status)); + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + } + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + case CLIENT_CONNECT_RES_CMD: dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index a2025a5083a3..0171a7e79bab 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -26,6 +26,7 @@ struct mei_cl; * * @MEI_HBM_IDLE : protocol not started * @MEI_HBM_STARTING : start request message was sent + * @MEI_HBM_DR_SETUP : dma ring setup request message was sent * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties * @MEI_HBM_STARTED : enumeration was completed @@ -34,6 +35,7 @@ struct mei_cl; enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_STARTING, + MEI_HBM_DR_SETUP, MEI_HBM_ENUM_CLIENTS, MEI_HBM_CLIENT_PROPERTIES, MEI_HBM_STARTED, diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 0ccccbaf530d..79f6dc63449c 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -126,12 +126,24 @@ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ +#define MEI_DEV_ID_DNV 0x19D3 /* Denverton (BXT) */ #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ +#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */ +#define MEI_DEV_ID_CNP_LP_2 0x9DE1 /* Cannon Point LP 2 */ +#define 
MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */ +#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ +#define MEI_DEV_ID_CNP_H_2 0xA361 /* Cannon Point H 2 */ +#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ + +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */ +#define MEI_DEV_ID_ICP_H 0x3DE0 /* Ice Lake Point H */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 10dcf4ff99a5..d6183537bdf3 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "mei_dev.h" #include "hbm.h" @@ -1375,6 +1376,11 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) .fw_status.status[4] = PCI_CFG_HFS_5, \ .fw_status.status[5] = PCI_CFG_HFS_6 +#define MEI_CFG_DMA_128 \ + .dma_size[DMA_DSCR_HOST] = SZ_128K, \ + .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ + .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE + /* ICH Legacy devices */ static const struct mei_cfg mei_me_ich_cfg = { MEI_CFG_ICH_HFS, @@ -1407,6 +1413,12 @@ static const struct mei_cfg mei_me_pch8_sps_cfg = { MEI_CFG_FW_SPS, }; +/* Cannon Lake and newer devices */ +static const struct mei_cfg mei_me_pch12_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_DMA_128, +}; + /* * mei_cfg_list - A list of platform platform specific configurations. * Note: has to be synchronized with enum mei_cfg_idx. @@ -1419,6 +1431,7 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, }; const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) @@ -1444,15 +1457,21 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, { struct mei_device *dev; struct mei_me_hw *hw; + int i; dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + sizeof(struct mei_me_hw), GFP_KERNEL); if (!dev) return NULL; + hw = to_me_hw(dev); + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; + return dev; } diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 67892533576e..5f0fd8934db7 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -31,10 +31,12 @@ * * @fw_status: FW status * @quirk_probe: device exclusion quirk + * @dma_size: device DMA buffers size */ struct mei_cfg { const struct mei_fw_status fw_status; bool (*quirk_probe)(struct pci_dev *pdev); + size_t dma_size[DMA_DSCR_NUM]; }; @@ -78,6 +80,7 @@ struct mei_me_hw { * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer * servers platforms with quirk for * SPS firmware exclusion. + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer * @MEI_ME_NUM_CFG: Upper Sentinel. 
*/ enum mei_cfg_idx { @@ -88,6 +91,7 @@ enum mei_cfg_idx { MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH12_CFG, MEI_ME_NUM_CFG, }; diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 5c8286b40b62..7ed3fe4dbf52 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -37,7 +37,7 @@ /* * MEI Version */ -#define HBM_MINOR_VERSION 0 +#define HBM_MINOR_VERSION 1 #define HBM_MAJOR_VERSION 2 /* @@ -82,6 +82,12 @@ #define HBM_MINOR_VERSION_OS 0 #define HBM_MAJOR_VERSION_OS 2 +/* + * MEI version with dma ring support + */ +#define HBM_MINOR_VERSION_DR 1 +#define HBM_MAJOR_VERSION_DR 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ @@ -124,6 +130,9 @@ #define MEI_HBM_NOTIFY_RES_CMD 0x90 #define MEI_HBM_NOTIFICATION_CMD 0x11 +#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12 +#define MEI_HBM_DMA_SETUP_RES_CMD 0x92 + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -189,19 +198,29 @@ enum mei_cl_disconnect_status { MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS }; -/* - * MEI BUS Interface Section +/** + * struct mei_msg_hdr - MEI BUS Interface Section + * + * @me_addr: device address + * @host_addr: host address + * @length: message length + * @reserved: reserved + * @dma_ring: message is on dma ring + * @internal: message is internal + * @msg_complete: last packet of the message + * @dr_length: [OPTIONAL] length of the message on dma ring */ struct mei_msg_hdr { u32 me_addr:8; u32 host_addr:8; u32 length:9; - u32 reserved:5; + u32 reserved:4; + u32 dma_ring:1; u32 internal:1; u32 msg_complete:1; + u32 dr_length[0]; } __packed; - struct mei_bus_message { u8 hbm_cmd; u8 data[0]; @@ -451,4 +470,75 @@ struct hbm_notification { u8 reserved[1]; } __packed; +/** + * struct hbm_dma_mem_dscr - dma ring + * + * @addr_hi: the high 32bits of 64 bit address + * @addr_lo: the low 32bits of 64 bit address + * @size : size in bytes + */ +struct hbm_dma_mem_dscr { + u32 addr_hi; + u32 addr_lo; + u32 size; +} __packed; + +enum { + DMA_DSCR_HOST = 0, + DMA_DSCR_DEVICE = 1, + DMA_DSCR_CTRL = 2, + DMA_DSCR_NUM, +}; + +/** + * struct hbm_dma_setup_request - dma setup request + * + * @hbm_cmd: bus message command header + * @reserved: reserved for alignment + * @dma_dscr: dma descriptor for HOST, DEVICE, and CTRL + */ +struct hbm_dma_setup_request { + u8 hbm_cmd; + u8 reserved[3]; + struct hbm_dma_mem_dscr dma_dscr[DMA_DSCR_NUM]; +} __packed; + +/** + * struct hbm_dma_setup_response - dma setup response + * + * @hbm_cmd: bus message command header + * @status: 0 on success; otherwise DMA setup failed. 
+ * @reserved: reserved for alignment + */ +struct hbm_dma_setup_response { + u8 hbm_cmd; + u8 status; + u8 reserved[2]; +} __packed; + +#define MEI_DMA_SLOT_SIZE 4 + +/** + * struct mei_dma_ring_ctrl - dma ring control block + * + * @hbuf_wr_idx: host circular buffer write index in slots + * @reserved1: reserved for alignment + * @hbuf_rd_idx: host circular buffer read index in slots + * @reserved2: reserved for alignment + * @dbuf_wr_idx: device circular buffer write index in slots + * @reserved3: reserved for alignment + * @dbuf_rd_idx: device circular buffer read index in slots + * @reserved4: reserved for alignment + */ +struct hbm_dma_ring_ctrl { + u32 hbuf_wr_idx; + u32 reserved1; + u32 hbuf_rd_idx; + u32 reserved2; + u32 dbuf_wr_idx; + u32 reserved3; + u32 dbuf_rd_idx; + u32 reserved4; +}; + #endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index d2f691424dd1..fe89660762ee 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -151,7 +151,7 @@ int mei_reset(struct mei_device *dev) mei_hbm_reset(dev); - dev->rd_msg_hdr = 0; + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); if (ret) { dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index b0b8f18a85e3..8f2ea7f6d7d9 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -75,6 +75,8 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, */ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) { + if (hdr->dma_ring) + mei_dma_ring_read(dev, NULL, hdr->dr_length[0]); /* * no need to check for size as it is guarantied * that length fits into rd_msg_buf @@ -100,6 +102,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_device *dev = cl->dev; struct mei_cl_cb *cb; size_t buf_sz; + u32 length; cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); if (!cb) { @@ -119,25 +122,31 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, goto discard; } - buf_sz = mei_hdr->length + cb->buf_idx; + length = mei_hdr->dma_ring ? mei_hdr->dr_length[0] : mei_hdr->length; + + buf_sz = length + cb->buf_idx; /* catch for integer overflow */ if (buf_sz < cb->buf_idx) { cl_err(dev, cl, "message is too big len %d idx %zu\n", - mei_hdr->length, cb->buf_idx); + length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } if (cb->buf.size < buf_sz) { cl_dbg(dev, cl, "message overflow. 
size %zu len %d idx %zu\n", - cb->buf.size, mei_hdr->length, cb->buf_idx); + cb->buf.size, length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } + if (mei_hdr->dma_ring) + mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); + + /* for DMA read 0 length to generate an interrupt to the device */ mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length); - cb->buf_idx += mei_hdr->length; + cb->buf_idx += length; if (mei_hdr->msg_complete) { cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); @@ -152,6 +161,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, discard: if (cb) list_move_tail(&cb->list, cmpl_list); + mei_irq_discard_msg(dev, mei_hdr); return 0; } @@ -243,6 +253,9 @@ static inline int hdr_is_valid(u32 msg_hdr) if (!msg_hdr || mei_hdr->reserved) return -EBADMSG; + if (mei_hdr->dma_ring && mei_hdr->length != sizeof(u32)) + return -EBADMSG; + return 0; } @@ -263,20 +276,21 @@ int mei_irq_read_handler(struct mei_device *dev, struct mei_cl *cl; int ret; - if (!dev->rd_msg_hdr) { - dev->rd_msg_hdr = mei_read_hdr(dev); + if (!dev->rd_msg_hdr[0]) { + dev->rd_msg_hdr[0] = mei_read_hdr(dev); (*slots)--; dev_dbg(dev->dev, "slots =%08x.\n", *slots); - ret = hdr_is_valid(dev->rd_msg_hdr); + ret = hdr_is_valid(dev->rd_msg_hdr[0]); if (ret) { dev_err(dev->dev, "corrupted message header 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); goto end; } + dev_dbg(dev->dev, "slots = %08x.\n", *slots); } - mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr; + mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr; dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_slots2data(*slots) < mei_hdr->length) { @@ -287,6 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev, goto end; } + if (mei_hdr->dma_ring) { + dev->rd_msg_hdr[1] = mei_read_hdr(dev); + mei_hdr->length = 0; + } + /* HBM message */ if (hdr_is_hbm(mei_hdr)) { ret = mei_hbm_dispatch(dev, mei_hdr); @@ -317,7 +336,7 @@ int mei_irq_read_handler(struct mei_device *dev, goto reset_slots; } dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); ret = -EBADMSG; goto end; } @@ -327,9 +346,8 @@ int mei_irq_read_handler(struct mei_device *dev, reset_slots: /* reset the number of slots and header */ + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); *slots = mei_count_full_read_slots(dev); - dev->rd_msg_hdr = 0; - if (*slots == -EOVERFLOW) { /* overflow - reset */ dev_err(dev->dev, "resetting due to slots overflow.\n"); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index e825f013e54e..22efc039f302 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -507,7 +507,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; default: - dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd); rets = -ENOIOCTLCMD; } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index ebcd5132e447..2fa56598385e 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -121,6 +121,19 @@ struct mei_msg_data { unsigned char *data; }; +/** + * struct mei_dma_dscr - dma address descriptor + * + * @vaddr: dma buffer virtual address + * @daddr: dma buffer physical address + * @size : dma buffer size + */ +struct mei_dma_dscr { + void *vaddr; + dma_addr_t daddr; + size_t size; +}; + /* Maximum number of processed FW status registers */ #define MEI_FW_STATUS_MAX 6 /* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */ @@ -387,6 +400,7 @@ const char *mei_pg_state_str(enum 
mei_pg_state state); * * @hbuf_depth : depth of hardware host/write buffer is slots * @hbuf_is_ready : query if the host host/write buffer is ready + * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL * * @version : HBM protocol version in use * @hbm_f_pg_supported : hbm feature pgi protocol @@ -396,6 +410,7 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @hbm_f_fa_supported : hbm feature fixed address client * @hbm_f_ie_supported : hbm feature immediate reply to enum request * @hbm_f_os_supported : hbm feature support OS ver message + * @hbm_f_dr_supported : hbm feature dma ring supported * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients @@ -457,12 +472,14 @@ struct mei_device { #endif /* CONFIG_PM */ unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; - u32 rd_msg_hdr; + u32 rd_msg_hdr[2]; /* write buffer */ u8 hbuf_depth; bool hbuf_is_ready; + struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM]; + struct hbm_version version; unsigned int hbm_f_pg_supported:1; unsigned int hbm_f_dc_supported:1; @@ -471,6 +488,7 @@ struct mei_device { unsigned int hbm_f_fa_supported:1; unsigned int hbm_f_ie_supported:1; unsigned int hbm_f_os_supported:1; + unsigned int hbm_f_dr_supported:1; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; @@ -538,6 +556,14 @@ int mei_restart(struct mei_device *dev); void mei_stop(struct mei_device *dev); void mei_cancel_work(struct mei_device *dev); +int mei_dmam_ring_alloc(struct mei_device *dev); +void mei_dmam_ring_free(struct mei_device *dev); +bool mei_dma_ring_is_allocated(struct mei_device *dev); +void mei_dma_ring_reset(struct mei_device *dev); +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len); +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len); +u32 mei_dma_ring_empty_slots(struct mei_device *dev); + /* * MEI interrupt functions prototype */ @@ -675,10 +701,10 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {} int mei_register(struct mei_device *dev, struct device *parent); void mei_deregister(struct mei_device *dev); -#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d" #define MEI_HDR_PRM(hdr) \ (hdr)->host_addr, (hdr)->me_addr, \ - (hdr)->length, (hdr)->internal, (hdr)->msg_complete + (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len); /** diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 78b3172c8e6e..2a75dd544a1f 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -92,12 +92,24 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)}, + 
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_H, MEI_ME_PCH12_CFG)}, + /* required last entry */ {0, } }; @@ -238,8 +250,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ mei_me_set_pm_domain(dev); - if (mei_pg_is_enabled(dev)) + if (mei_pg_is_enabled(dev)) { pm_runtime_put_noidle(&pdev->dev); + if (hw->d0i3_supported) + pm_runtime_allow(&pdev->dev); + } dev_dbg(&pdev->dev, "initialization successful.\n"); diff --git a/drivers/misc/mei/spd/Kconfig b/drivers/misc/mei/spd/Kconfig new file mode 100644 index 000000000000..085f9caa8c66 --- /dev/null +++ b/drivers/misc/mei/spd/Kconfig @@ -0,0 +1,12 @@ +# +# Storage proxy device configuration +# +config INTEL_MEI_SPD + tristate "Intel MEI Host Storage Proxy Driver" + depends on INTEL_MEI && BLOCK && RPMB + help + A driver for the host storage proxy ME client. + The driver enables ME FW to store data on storage devices + that are accessible only from the host. + + To compile this driver as a module, choose M here. diff --git a/drivers/misc/mei/spd/Makefile b/drivers/misc/mei/spd/Makefile new file mode 100644 index 000000000000..5cbc149ceaa0 --- /dev/null +++ b/drivers/misc/mei/spd/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the Storage Proxy device driver. +# + +obj-$(CONFIG_INTEL_MEI_SPD) += mei_spd.o +mei_spd-objs := main.o +mei_spd-objs += cmd.o +mei_spd-objs += gpp.o +mei_spd-objs += rpmb.o +mei_spd-$(CONFIG_DEBUG_FS) += debugfs.o + +ccflags-y += -D__CHECK_ENDIAN__ + diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c new file mode 100644 index 000000000000..d03459650f91 --- /dev/null +++ b/drivers/misc/mei/spd/cmd.c @@ -0,0 +1,556 @@ +/* + * Intel Host Storage Proxy Interface Linux driver + * Copyright (c) 2015 - 2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ * + */ +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +#define spd_cmd_size(_cmd) \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_##_cmd)) +#define spd_cmd_rpmb_size(_cmd) \ + (spd_cmd_size(_cmd) + SPD_CLIENT_RPMB_DATA_MAX_SIZE) + +#define to_spd_hdr(_buf) (struct spd_cmd_hdr *)(_buf) +#define to_spd_cmd(_cmd, _buf) \ + (struct spd_cmd_##_cmd *)((_buf) + sizeof(struct spd_cmd_hdr)) + +const char *spd_cmd_str(enum spd_cmd_type cmd) +{ +#define __SPD_CMD(_cmd) SPD_##_cmd##_CMD +#define SPD_CMD(cmd) case __SPD_CMD(cmd): return #cmd + switch (cmd) { + SPD_CMD(NONE); + SPD_CMD(START_STOP); + SPD_CMD(RPMB_WRITE); + SPD_CMD(RPMB_READ); + SPD_CMD(RPMB_GET_COUNTER); + SPD_CMD(GPP_WRITE); + SPD_CMD(GPP_READ); + SPD_CMD(TRIM); + SPD_CMD(INIT); + SPD_CMD(STORAGE_STATUS); + SPD_CMD(MAX); + default: + return "unknown"; + } +#undef SPD_CMD +#undef __SPD_CMD +} + +const char *mei_spd_dev_str(enum spd_storage_type type) +{ +#define SPD_TYPE(type) case SPD_TYPE_##type: return #type + switch (type) { + SPD_TYPE(UNDEF); + SPD_TYPE(EMMC); + SPD_TYPE(UFS); + default: + return "unknown"; + } +#undef SPD_TYPE +} + +const char *mei_spd_state_str(enum mei_spd_state state) +{ +#define SPD_STATE(state) case MEI_SPD_STATE_##state: return #state + switch (state) { + SPD_STATE(INIT); + SPD_STATE(INIT_WAIT); + SPD_STATE(INIT_DONE); + SPD_STATE(RUNNING); + SPD_STATE(STOPPING); + default: + return "unknown"; + } +#undef SPD_STATE +} + +/** + * mei_spd_init_req - send init request + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_init_req(struct mei_spd *spd) +{ + const int req_len = sizeof(struct spd_cmd_hdr); + struct spd_cmd_hdr *hdr; + u32 cmd_type = SPD_INIT_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state != MEI_SPD_STATE_INIT) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + spd->state = MEI_SPD_STATE_INIT_WAIT; + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "start send failed ret = %zd\n", ret); + return ret; + } + + return 0; +} + +/** + * mei_spd_cmd_init_rsp - handle init response message + * + * @spd: spd device + * @cmd: received spd command + * @cmd_sz: received command size + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t cmd_sz) +{ + int type; + int gpp_id; + int i; + + if (cmd_sz < spd_cmd_size(init_resp)) { + spd_err(spd, "Wrong init response size\n"); + return -EINVAL; + } + + if (spd->state != MEI_SPD_STATE_INIT_WAIT) + return -EPROTO; + + type = cmd->init_rsp.type; + gpp_id = cmd->init_rsp.gpp_partition_id; + + switch (type) { + case SPD_TYPE_EMMC: + if (gpp_id < 1 || gpp_id > 4) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + case SPD_TYPE_UFS: + if (gpp_id < 1 || gpp_id > 6) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + default: + spd_err(spd, "unsupported storage type %d\n", + cmd->init_rsp.type); + return -EINVAL; + } + + spd->dev_type = type; + spd->gpp_partition_id = gpp_id; + + if (cmd->init_rsp.serial_no_sz != 0) { + if (cmd->init_rsp.serial_no_sz != + cmd_sz - 
spd_cmd_size(init_resp)) { + spd_err(spd, "wrong serial no size %u?=%zu\n", + cmd->init_rsp.serial_no_sz, + cmd_sz - spd_cmd_size(init_resp)); + return -EMSGSIZE; + } + + if (cmd->init_rsp.serial_no_sz > 256) { + spd_err(spd, "serial no is too large %u\n", + cmd->init_rsp.serial_no_sz); + return -EMSGSIZE; + } + + spd->dev_id = kzalloc(cmd->init_rsp.serial_no_sz, GFP_KERNEL); + if (!spd->dev_id) + return -ENOMEM; + + spd->dev_id_sz = cmd->init_rsp.serial_no_sz; + if (type == SPD_TYPE_EMMC) { + /* FW has this in be32 format */ + __be32 *sno = (__be32 *)cmd->init_rsp.serial_no; + u32 *dev_id = (u32 *)spd->dev_id; + + for (i = 0; i < spd->dev_id_sz / sizeof(u32); i++) + dev_id[i] = be32_to_cpu(sno[i]); + } else { + memcpy(spd->dev_id, &cmd->init_rsp.serial_no, + cmd->init_rsp.serial_no_sz); + } + } + + spd->state = MEI_SPD_STATE_INIT_DONE; + + return 0; +} + +/** + * mei_spd_cmd_storage_status_req - send storage status message + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_storage_status_req(struct mei_spd *spd) +{ + struct spd_cmd_hdr *hdr; + struct spd_cmd_storage_status_req *req; + const int req_len = spd_cmd_size(storage_status_req); + u32 cmd_type = SPD_STORAGE_STATUS_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_INIT_DONE) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + req = to_spd_cmd(storage_status_req, spd->buf); + req->gpp_on = mei_spd_gpp_is_open(spd); + req->rpmb_on = mei_spd_rpmb_is_open(spd); + + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "send storage status failed ret = %zd\n", ret); + return ret; + } + + if (req->gpp_on || req->rpmb_on) + spd->state = MEI_SPD_STATE_RUNNING; + else + spd->state = MEI_SPD_STATE_INIT_DONE; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + return 0; +} + +static int mei_spd_cmd_gpp_write(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len = SPD_GPP_WRITE_DATA_LEN(*cmd); + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_write_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_write(spd, cmd->gpp_write_req.offset, + cmd->gpp_write_req.data, len); + if (ret) { + spd_err(spd, "Failed to write to gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "wrote %zu bytes of data\n", len); + + cmd->header.len = spd_cmd_size(gpp_write_rsp); + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_gpp_read(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len; + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_read_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + len = cmd->gpp_read_req.size_to_read; + if (len > SPD_CLIENT_GPP_DATA_MAX_SIZE) { + spd_err(spd, "Block is too large to read\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_read(spd, cmd->gpp_read_req.offset, + cmd->gpp_read_resp.data, len); + + if (ret) { + spd_err(spd, "Failed to read from gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "read %zu bytes of data\n", len); + + cmd->header.len = 
spd_cmd_size(gpp_read_rsp) + len; + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_read(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_read.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_read)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_READ_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "read RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_write(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_write.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_write)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "write RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_get_counter(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_get_counter.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_get_counter)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "get RPMB counter performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "rsp [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + switch (spd_cmd) { + case SPD_INIT_CMD: + ret = mei_spd_cmd_init_rsp(spd, cmd, out_buf_sz); + if (ret) + break; + mutex_unlock(&spd->lock); + mei_spd_rpmb_init(spd); + mei_spd_gpp_init(spd); + mutex_lock(&spd->lock); + break; + default: + ret = -EINVAL; + spd_err(spd, "Wrong response command %d\n", spd_cmd); + break; + } + + return ret; +} + +/** + * mei_spd_cmd_request - dispatch command requests from the SPD device + * + * @spd: spd device + * @out_buf_sz: buffer size + * + * Return: (TBD) + */ +static int mei_spd_cmd_request(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t written; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "req [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_RUNNING) { + spd_err(spd, "Wrong state %d\n", spd->state); + ret = SPD_STATUS_INVALID_COMMAND; + goto reply; + } + + switch (spd_cmd) { + case SPD_RPMB_WRITE_CMD: + ret = mei_spd_cmd_rpmb_write(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_READ_CMD: + ret = mei_spd_cmd_rpmb_read(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_GET_COUNTER_CMD: + ret = mei_spd_cmd_rpmb_get_counter(spd, cmd, out_buf_sz); + break; + case SPD_GPP_WRITE_CMD: + ret = mei_spd_cmd_gpp_write(spd, cmd, out_buf_sz); + break; + case SPD_GPP_READ_CMD: + ret = mei_spd_cmd_gpp_read(spd, cmd, out_buf_sz); + break; + case SPD_TRIM_CMD: + spd_err(spd, "Command %d is not supported\n", spd_cmd); + ret = SPD_STATUS_NOT_SUPPORTED; + break; + default: + spd_err(spd, "Wrong request command %d\n", spd_cmd); + ret = SPD_STATUS_INVALID_COMMAND; + break; + } +reply: + cmd->header.is_response = 1; + 
cmd->header.status = ret; + if (ret != SPD_STATUS_SUCCESS) + cmd->header.len = sizeof(struct spd_cmd_hdr); + + written = mei_cldev_send(spd->cldev, spd->buf, cmd->header.len); + if (written != cmd->header.len) { + ret = SPD_STATUS_GENERAL_FAILURE; + spd_err(spd, "Failed to send reply written = %zd\n", written); + } + + /* FIXME: translate ret to errno */ + if (ret) + return -EINVAL; + + return 0; +} + +ssize_t mei_spd_cmd(struct mei_spd *spd) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t out_buf_sz; + int ret; + + out_buf_sz = mei_cldev_recv(spd->cldev, spd->buf, spd->buf_sz); + if (out_buf_sz < 0) { + spd_err(spd, "failure in receive ret = %zd\n", out_buf_sz); + return out_buf_sz; + } + + if (out_buf_sz == 0) { + spd_err(spd, "received empty msg\n"); + return 0; + } + + /* check that we've received at least sizeof(header) */ + if (out_buf_sz < sizeof(struct spd_cmd_hdr)) { + spd_err(spd, "Request is too short\n"); + return -EFAULT; + } + + if (cmd->header.is_response) + ret = mei_spd_cmd_response(spd, out_buf_sz); + else + ret = mei_spd_cmd_request(spd, out_buf_sz); + + return ret; +} + +static void mei_spd_status_send_work(struct work_struct *work) +{ + struct mei_spd *spd = + container_of(work, struct mei_spd, status_send_w); + + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); +} + +void mei_spd_free(struct mei_spd *spd) +{ + if (!spd) + return; + + cancel_work_sync(&spd->status_send_w); + + kfree(spd->buf); + kfree(spd); +} + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev) +{ + struct mei_spd *spd; + u8 *buf; + + spd = kzalloc(sizeof(*spd), GFP_KERNEL); + if (!spd) + return NULL; + + spd->buf_sz = sizeof(struct spd_cmd) + SPD_CLIENT_GPP_DATA_MAX_SIZE; + buf = kmalloc(spd->buf_sz, GFP_KERNEL); + if (!buf) + goto free; + + spd->cldev = cldev; + spd->buf = buf; + spd->state = MEI_SPD_STATE_INIT; + mutex_init(&spd->lock); + INIT_WORK(&spd->status_send_w, mei_spd_status_send_work); + + return spd; +free: + kfree(spd); + return NULL; +} diff --git a/drivers/misc/mei/spd/cmd.h b/drivers/misc/mei/spd/cmd.h new file mode 100644 index 000000000000..c7138a3d57c7 --- /dev/null +++ b/drivers/misc/mei/spd/cmd.h @@ -0,0 +1,285 @@ +/****************************************************************************** + * Intel Host Storage Proxy Interface Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _SPD_CMD_H +#define _SPD_CMD_H + +#include + +/** + * enum spd_cmd_type - available commands + * + * @SPD_NONE_CMD : Lower command sentinel. + * @SPD_START_STOP_CMD : start stop command (deprecated). [Host -> TEE] + * @SPD_RPMB_WRITE_CMD : RPMB write request. [TEE -> Host] + * @SPD_RPMB_READ_CMD : RPMB read request. [TEE -> Host] + * @SPD_RPMB_GET_COUNTER_CMD: get counter request [TEE -> Host] + * @SPD_GPP_WRITE_CMD : GPP write request. [TEE -> Host] + * @SPD_GPP_READ_CMD : GPP read request. [TEE -> Host] + * @SPD_TRIM_CMD : TRIM command [TEE -> Host] + * @SPD_INIT_CMD : initial handshake between host and fw. [Host -> TEE] + * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] + * @SPD_MAX_CMD: Upper command sentinel. 
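+ * + * Note: commands marked [TEE -> Host] arrive through the MEI RX callback and are answered in place by this driver; commands marked [Host -> TEE] are initiated by the driver itself.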
+ */ +enum spd_cmd_type { + SPD_NONE_CMD = 0, + SPD_START_STOP_CMD, + SPD_RPMB_WRITE_CMD, + SPD_RPMB_READ_CMD, + SPD_RPMB_GET_COUNTER_CMD, + SPD_GPP_WRITE_CMD, + SPD_GPP_READ_CMD, + SPD_TRIM_CMD, + SPD_INIT_CMD, + SPD_STORAGE_STATUS_CMD, + SPD_MAX_CMD, +}; + +enum spd_status { + SPD_STATUS_SUCCESS = 0, + SPD_STATUS_GENERAL_FAILURE = 1, + SPD_STATUS_NOT_READY = 2, + SPD_STATUS_NOT_SUPPORTED = 3, + SPD_STATUS_INVALID_COMMAND = 4, +}; + +/** + * enum spd_storage_type - storage device type + * + * @SPD_TYPE_UNDEF: lower enum sentinel + * @SPD_TYPE_EMMC: emmc device + * @SPD_TYPE_UFS: ufs device + * @SPD_TYPE_MAX: upper enum sentinel + */ +enum spd_storage_type { + SPD_TYPE_UNDEF = 0, + SPD_TYPE_EMMC = 1, + SPD_TYPE_UFS = 2, + SPD_TYPE_MAX +}; + +/** + * struct spd_cmd_hdr - Host storage Command Header + * + * @command_type: SPD_TYPES + * @is_response: 1 == Response, 0 == Request + * @len: command length + * @status: command status + * @reserved: reserved + */ +struct spd_cmd_hdr { + u32 command_type : 7; + u32 is_response : 1; + u32 len : 13; + u32 status : 8; + u32 reserved : 3; +} __packed; + +/** + * RPMB Frame Size as defined by the JEDEC spec + */ +#define SPD_CLIENT_RPMB_DATA_MAX_SIZE (512) + +/** + * struct spd_cmd_init_resp + * commandType == HOST_STORAGE_INIT_CMD + * + * @gpp_partition_id: gpp_partition: + * UFS: LUN Number (0-7) + * EMMC: 1-4. + * 0xff: GPP not supported + * @type: storage hw type + * SPD_TYPE_EMMC + * SPD_TYPE_UFS + * @serial_no_sz: serial_no size + * @serial_no: device serial number + */ +struct spd_cmd_init_resp { + u32 gpp_partition_id; + u32 type; + u32 serial_no_sz; + u8 serial_no[0]; +}; + +/** + * struct spd_cmd_storage_status_req + * commandType == SPD_STORAGE_STATUS_CMD + * + * @gpp_on: availability of the gpp backing storage + * 0 - GP partition is not accessible + * 1 - GP partition is accessible + * @rpmb_on: availability of the rpmb backing storage + * 0 - RPMB partition is not accessible + * 1 - RPMB partition is accessible + */ +struct spd_cmd_storage_status_req { + u32 gpp_on; + u32 rpmb_on; +} __packed; + +/** + * struct spd_cmd_rpmb_write + * command_type == SPD_RPMB_WRITE_CMD + * + * @rpmb_frame: RPMB frames are a constant size (512) + */ +struct spd_cmd_rpmb_write { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_rpmb_read + * command_type == SPD_RPMB_READ_CMD + * + * @rpmb_frame: RPMB frames are a constant size (512) + */ +struct spd_cmd_rpmb_read { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_rpmb_get_counter + * command_type == SPD_RPMB_GET_COUNTER_CMD + * + * @rpmb_frame: frame containing frame counter + */ +struct spd_cmd_rpmb_get_counter { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_gpp_write_req + * command_type == SPD_GPP_WRITE_CMD + * + * @offset: frame offset in partition + * @data: 4K page + */ +struct spd_cmd_gpp_write_req { + u32 offset; + u8 data[0]; +} __packed; + +/** + * struct spd_cmd_gpp_write_rsp + * command_type == SPD_GPP_WRITE_CMD + * + * @reserved: reserved + */ +struct spd_cmd_gpp_write_rsp { + u32 reserved[2]; +} __packed; + +/** + * struct spd_cmd_gpp_read_req + * command_type == SPD_GPP_READ_CMD + * + * @offset: offset of a frame on GPP partition + * @size_to_read: data length to read (must be ) + */ +struct spd_cmd_gpp_read_req { + u32 offset; + u32 size_to_read; +} __packed; + +/** + * struct spd_cmd_gpp_read_rsp + * command_type == SPD_GPP_READ_CMD + * + * @reserved: reserved + * @data: data + */ +struct spd_cmd_gpp_read_rsp { + u32 reserved; + u8 data[0]; +} __packed; + +#define 
SPD_GPP_READ_DATA_LEN(cmd) ((cmd).header.len - \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_gpp_read_rsp))) + +#define SPD_GPP_WRITE_DATA_LEN(cmd) ((cmd).header.len - \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_gpp_write_req))) + +struct spd_cmd { + struct spd_cmd_hdr header; + + union { + struct spd_cmd_rpmb_write rpmb_write; + struct spd_cmd_rpmb_read rpmb_read; + struct spd_cmd_rpmb_get_counter rpmb_get_counter; + + struct spd_cmd_gpp_write_req gpp_write_req; + struct spd_cmd_gpp_write_rsp gpp_write_rsp; + + struct spd_cmd_gpp_read_req gpp_read_req; + struct spd_cmd_gpp_read_rsp gpp_read_resp; + + struct spd_cmd_init_resp init_rsp; + struct spd_cmd_storage_status_req status_req; + }; +} __packed; + +/* GPP Max data 4K */ +#define SPD_CLIENT_GPP_DATA_MAX_SIZE (4096) + +const char *spd_cmd_str(enum spd_cmd_type cmd); +const char *mei_spd_dev_str(enum spd_storage_type type); + +#endif /* _SPD_CMD_H */ diff --git a/drivers/misc/mei/spd/debugfs.c b/drivers/misc/mei/spd/debugfs.c new file mode 100644 index 000000000000..9ac1a559778c --- /dev/null +++ b/drivers/misc/mei/spd/debugfs.c @@ -0,0 +1,88 @@ +/* + * Intel Host Storage Proxy Interface Linux driver + * Copyright (c) 2015 - 2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static ssize_t mei_spd_dbgfs_read_info(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_spd *spd = fp->private_data; + size_t bufsz = 4095; + char *buf; + int pos = 0; + int ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "DEV STATE: [%d] %s\n", + spd->state, mei_spd_state_str(spd->state)); + pos += scnprintf(buf + pos, bufsz - pos, "DEV TYPE : [%d] %s\n", + spd->dev_type, mei_spd_dev_str(spd->dev_type)); + pos += scnprintf(buf + pos, bufsz - pos, " ID SIZE : %d\n", + spd->dev_id_sz); + pos += scnprintf(buf + pos, bufsz - pos, " ID : '%s'\n", "N/A"); + pos += scnprintf(buf + pos, bufsz - pos, "GPP\n"); + pos += scnprintf(buf + pos, bufsz - pos, " id : %d\n", + spd->gpp_partition_id); + pos += scnprintf(buf + pos, bufsz - pos, " opened : %1d\n", + mei_spd_gpp_is_open(spd)); + + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} + +static const struct file_operations mei_spd_dbgfs_fops_info = { + .open = simple_open, + .read = mei_spd_dbgfs_read_info, + .llseek = generic_file_llseek, +}; + +void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ + if (!spd->dbgfs_dir) + return; + debugfs_remove_recursive(spd->dbgfs_dir); + spd->dbgfs_dir = NULL; +} + +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + struct dentry *dir, *f; + + dir = debugfs_create_dir(name, NULL); + if (!dir) + return -ENOMEM; + + spd->dbgfs_dir = dir; + + f = debugfs_create_file("info", 0400, dir, + spd, &mei_spd_dbgfs_fops_info); + if (!f) { + spd_err(spd, "info: registration failed\n"); + goto err; + } + + return 0; +err: + mei_spd_dbgfs_deregister(spd); + return -ENODEV; +} diff --git 
a/drivers/misc/mei/spd/gpp.c b/drivers/misc/mei/spd/gpp.c new file mode 100644 index 000000000000..4ac480b278a6 --- /dev/null +++ b/drivers/misc/mei/spd/gpp.c @@ -0,0 +1,310 @@ +/** + * Intel Host Storage Proxy Interface Linux driver + * Copyright (c) 2015 - 2017, Intel Corporation. + * + * based on drivers/mtd/devices/block2mtd.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include +#include +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static struct page *page_read(struct address_space *mapping, int index) +{ + return read_mapping_page(mapping, index, NULL); +} + +static int mei_spd_bd_read(struct mei_spd *spd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct page *page; + int index = from >> PAGE_SHIFT; + int offset = from & (PAGE_SIZE - 1); + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(spd->gpp->bd_inode->i_mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + memcpy(buf, page_address(page) + offset, cpylen); + put_page(page); + + if (retlen) + *retlen += cpylen; + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int _mei_spd_bd_write(struct block_device *dev, const u_char *buf, + loff_t to, size_t len, size_t *retlen) +{ + struct page *page; + struct address_space *mapping = dev->bd_inode->i_mapping; + int index = to >> PAGE_SHIFT; /* page index */ + int offset = to & ~PAGE_MASK; /* page offset */ + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (memcmp(page_address(page) + offset, buf, cpylen)) { + lock_page(page); + memcpy(page_address(page) + offset, buf, cpylen); + set_page_dirty(page); + unlock_page(page); + balance_dirty_pages_ratelimited(mapping); + } + put_page(page); + + if (retlen) + *retlen += cpylen; + + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int mei_spd_bd_write(struct mei_spd *spd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + int ret; + + ret = _mei_spd_bd_write(spd->gpp, buf, to, len, retlen); + if (ret > 0) + ret = 0; + + sync_blockdev(spd->gpp); + + return ret; +} + +static void mei_spd_bd_sync(struct mei_spd *spd) +{ + sync_blockdev(spd->gpp); +} + +#define GPP_FMODE (FMODE_WRITE | FMODE_READ | FMODE_EXCL) + +bool mei_spd_gpp_is_open(struct mei_spd *spd) +{ + struct request_queue *q; + + if (!spd->gpp) + return false; + + q = spd->gpp->bd_queue; + if (q && !blk_queue_stopped(q)) + return true; + + return false; +} + +static int mei_spd_gpp_open(struct mei_spd *spd, struct device *dev) +{ + int ret; + + if (spd->gpp) + return 0; + + spd->gpp = blkdev_get_by_dev(dev->devt, GPP_FMODE, spd); + if (IS_ERR(spd->gpp)) { + ret = PTR_ERR(spd->gpp); + spd->gpp = NULL; + spd_dbg(spd, "Can't get GPP block device %s ret = %d\n", + dev_name(dev), ret); + return ret; + } + + spd_dbg(spd, "gpp partition created\n"); + return 0; +} + +static int 
mei_spd_gpp_close(struct mei_spd *spd) +{ + if (!spd->gpp) + return 0; + + mei_spd_bd_sync(spd); + blkdev_put(spd->gpp, GPP_FMODE); + spd->gpp = NULL; + + spd_dbg(spd, "gpp partition removed\n"); + return 0; +} + +#define UFSHCD "ufshcd" +static bool mei_spd_lun_ufs_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + struct scsi_device *sdev; + + switch (disk->major) { + case SCSI_DISK0_MAJOR: + case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: + case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: + break; + default: + return false; + } + + sdev = to_scsi_device(dev->parent); + + if (!sdev->host || + strncmp(sdev->host->hostt->name, UFSHCD, strlen(UFSHCD))) + return false; + + return sdev->lun == spd->gpp_partition_id; +} + +static bool mei_spd_gpp_mmc_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + int idx, part_id; + + if (disk->major != MMC_BLOCK_MAJOR) + return false; + + if (sscanf(disk->disk_name, "mmcblk%dgp%d", &idx, &part_id) != 2) + return false; + + return part_id == spd->gpp_partition_id - 1; +} + +static bool mei_spd_gpp_match(struct mei_spd *spd, struct device *dev) +{ + /* we are only interested in physical partitions */ + if (strncmp(dev->type->name, "disk", sizeof("disk"))) + return false; + + if (spd->dev_type == SPD_TYPE_EMMC) + return mei_spd_gpp_mmc_match(spd, dev); + else if (spd->dev_type == SPD_TYPE_UFS) + return mei_spd_lun_ufs_match(spd, dev); + else + return false; +} + +static int gpp_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_open(spd, dev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void gpp_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_close(spd)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP read offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_read(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP read failed ret = %d\n", ret); + + return ret; +} + +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP write offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_write(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP write failed ret = %d\n", ret); + + return ret; +} + +void mei_spd_gpp_prepare(struct mei_spd *spd) +{ + spd->gpp_interface.add_dev = gpp_add_device; + spd->gpp_interface.remove_dev = gpp_remove_device; + spd->gpp_interface.class = &block_class; +} + +int mei_spd_gpp_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->gpp_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +void mei_spd_gpp_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->gpp_interface); +} diff --git 
a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c new file mode 100644 index 000000000000..24a597ee5582 --- /dev/null +++ b/drivers/misc/mei/spd/main.c @@ -0,0 +1,129 @@ +/* + * Intel Host Storage Proxy Interface Linux driver + * Copyright (c) 2015 - 2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include + +#include "spd.h" + +static void mei_spd_rx_cb(struct mei_cl_device *cldev) +{ + struct mei_spd *spd = mei_cldev_get_drvdata(cldev); + + mutex_lock(&spd->lock); + mei_spd_cmd(spd); + mutex_unlock(&spd->lock); +} + +static int mei_spd_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct mei_spd *spd; + u8 ver = mei_cldev_ver(cldev); + int ret; + + dev_dbg(&cldev->dev, "probing mei spd ver = %d\n", ver); + + if (ver < 2) { + dev_warn(&cldev->dev, "unsupported protocol version %d\n", ver); + return -ENODEV; + } + + spd = mei_spd_alloc(cldev); + if (!spd) + return -ENOMEM; + + mei_cldev_set_drvdata(cldev, spd); + + ret = mei_spd_dbgfs_register(spd, "spd"); + if (ret) + goto free; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "Could not enable device ret = %d\n", ret); + goto free; + } + + ret = mei_cldev_register_rx_cb(cldev, mei_spd_rx_cb); + if (ret) { + dev_err(&cldev->dev, "Error registering rx callback %d\n", ret); + goto disable; + } + + spd_dbg(spd, "protocol version %d\n", ver); + mei_spd_gpp_prepare(spd); + mei_spd_rpmb_prepare(spd); + mutex_lock(&spd->lock); + ret = mei_spd_cmd_init_req(spd); + mutex_unlock(&spd->lock); + if (ret) { + dev_err(&cldev->dev, "Could not start ret = %d\n", ret); + goto disable; + } + + return 0; + +disable: + mei_cldev_disable(cldev); + +free: + mei_spd_dbgfs_deregister(spd); + mei_cldev_set_drvdata(cldev, NULL); + mei_spd_free(spd); + return ret; +} + +static int mei_spd_remove(struct mei_cl_device *cldev) +{ + struct mei_spd *spd = mei_cldev_get_drvdata(cldev); + + if (spd->state == MEI_SPD_STATE_RUNNING) { + spd->state = MEI_SPD_STATE_STOPPING; + mei_spd_gpp_exit(spd); + mei_spd_rpmb_exit(spd); + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); + } + + mei_cldev_disable(cldev); + mei_spd_dbgfs_deregister(spd); + mei_cldev_set_drvdata(cldev, NULL); + mei_spd_free(spd); + + return 0; +} + +#define MEI_SPD_UUID UUID_LE(0x2a39291f, 0x5551, 0x482f, \ + 0x99, 0xcb, 0x9e, 0x22, 0x74, 0x97, 0x8c, 0xa8) + +static struct mei_cl_device_id mei_spd_tbl[] = { + { .uuid = MEI_SPD_UUID, .version = MEI_CL_VERSION_ANY}, + /* required last entry */ + { } +}; +MODULE_DEVICE_TABLE(mei, mei_spd_tbl); + +static struct mei_cl_driver mei_spd_driver = { + .id_table = mei_spd_tbl, + .name = "mei_spd", + + .probe = mei_spd_probe, + .remove = mei_spd_remove, +}; + +module_mei_cl_driver(mei_spd_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Storage Proxy driver based on mei bus"); diff --git a/drivers/misc/mei/spd/rpmb.c b/drivers/misc/mei/spd/rpmb.c new file mode 100644 index 000000000000..8e18db0f5139 --- /dev/null +++ b/drivers/misc/mei/spd/rpmb.c @@ -0,0 +1,183 @@ +/* + * Intel Host Storage Interface Linux driver + * Copyright 
(c) 2015 - 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include "cmd.h" +#include "spd.h" + +static int mei_spd_rpmb_start(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->rdev == rdev) + return 0; + + if (spd->rdev) { + spd_warn(spd, "rpmb device already registered\n"); + return -EEXIST; + } + + spd->rdev = rpmb_dev_get(rdev); + spd_dbg(spd, "rpmb partition created\n"); + return 0; +} + +static int mei_spd_rpmb_stop(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (!spd->rdev) { + spd_dbg(spd, "Already stopped\n"); + return -EPROTO; + } + + if (rdev && spd->rdev != rdev) { + spd_dbg(spd, "Wrong RPMB on stop\n"); + return -EINVAL; + } + + rpmb_dev_put(spd->rdev); + spd->rdev = NULL; + + spd_dbg(spd, "rpmb partition removed\n"); + return 0; +} + +static int mei_spd_rpmb_match(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->dev_id_sz && rdev->ops->dev_id) { + if (rdev->ops->dev_id_len != spd->dev_id_sz || + memcmp(rdev->ops->dev_id, spd->dev_id, + rdev->ops->dev_id_len)) { + spd_dbg(spd, "ignore request for another rpmb\n"); + /* return 0; FW sends garbage now, ignore it */ + } + } + + switch (rdev->ops->type) { + case RPMB_TYPE_EMMC: + if (spd->dev_type != SPD_TYPE_EMMC) + return 0; + break; + case RPMB_TYPE_UFS: + if (spd->dev_type != SPD_TYPE_UFS) + return 0; + break; + default: + return 0; + } + + return 1; +} + +static int rpmb_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_start(spd, rdev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void rpmb_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_stop(spd, rdev)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +void mei_spd_rpmb_prepare(struct mei_spd *spd) +{ + spd->rpmb_interface.add_dev = rpmb_add_device; + spd->rpmb_interface.remove_dev = rpmb_remove_device; + spd->rpmb_interface.class = &rpmb_class; +} + +/** + * mei_spd_rpmb_init - init RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + * Returns: 0 if initialized successfully, <0 otherwise + */ +int mei_spd_rpmb_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->rpmb_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +/** + * mei_spd_rpmb_exit - clean RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + */ +void mei_spd_rpmb_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->rpmb_interface); +} + +int 
mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req_type, void *buf) +{ + struct rpmb_data d; + struct rpmb_frame *frame = buf; + int ret; + + if (!spd->rdev) { + spd_err(spd, "RPMB not ready\n"); + return -ENODEV; + } + + d.req_type = req_type; + d.icmd.nframes = 1; + d.icmd.frames = frame; + d.ocmd.nframes = 1; + d.ocmd.frames = frame; + ret = rpmb_cmd_req(spd->rdev, &d); + if (ret) + spd_err(spd, "RPMB req failed ret = %d\n", ret); + return ret; +} + +bool mei_spd_rpmb_is_open(struct mei_spd *spd) +{ + return !!spd->rdev; +} diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h new file mode 100644 index 000000000000..85e8220a5972 --- /dev/null +++ b/drivers/misc/mei/spd/spd.h @@ -0,0 +1,157 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef _MEI_SPD_H +#define _MEI_SPD_H + +#include +#include +#include + +enum mei_spd_state { + MEI_SPD_STATE_INIT, + MEI_SPD_STATE_INIT_WAIT, + MEI_SPD_STATE_INIT_DONE, + MEI_SPD_STATE_RUNNING, + MEI_SPD_STATE_STOPPING, +}; + +/** + * struct mei_spd - spd device struct + * + * @cldev: client bus device + * @gpp: GPP partition block device + * @gpp_partition_id: GPP partition id (1-6) + * @gpp_interface: gpp class interface for discovery + * @dev_type: storage device type + * @dev_id_sz: device id size + * @dev_id: device id string + * @rdev: RPMB device + * @rpmb_interface: gpp class interface for discovery + * @lock: mutex to sync request processing + * @state: driver state + * @status_send_w: workitem for sending status to the FW + * @buf_sz: receive/transmit buffer allocated size + * @buf: receive/transmit buffer + * @dbgfs_dir: debugfs directory entry + */ +struct mei_spd { + struct mei_cl_device *cldev; + struct block_device *gpp; + u32 gpp_partition_id; + struct class_interface gpp_interface; + u32 dev_type; + u32 dev_id_sz; + u8 *dev_id; + struct rpmb_dev *rdev; + struct class_interface rpmb_interface; + struct mutex lock; + enum mei_spd_state state; + struct work_struct status_send_w; + size_t buf_sz; + u8 *buf; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ +}; + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev); +void mei_spd_free(struct mei_spd *spd); + +int mei_spd_cmd_init_req(struct mei_spd *spd); +int mei_spd_cmd_storage_status_req(struct mei_spd *spd); +ssize_t mei_spd_cmd(struct mei_spd *spd); + +void mei_spd_gpp_prepare(struct mei_spd *spd); +bool mei_spd_gpp_is_open(struct mei_spd *spd); +int mei_spd_gpp_init(struct mei_spd *spd); +void mei_spd_gpp_exit(struct mei_spd *spd); +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size); +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size); + +void mei_spd_rpmb_prepare(struct mei_spd *spd); +bool mei_spd_rpmb_is_open(struct mei_spd *spd); +int mei_spd_rpmb_init(struct mei_spd *spd); +void mei_spd_rpmb_exit(struct mei_spd *spd); +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req_type, void *buf); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name); +void mei_spd_dbgfs_deregister(struct mei_spd *spd); +#else +static inline int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + return 0; +} + +static inline void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +const char *mei_spd_state_str(enum mei_spd_state state); + +#define spd_err(spd, fmt, ...) \ + dev_err(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_warn(spd, fmt, ...) \ + dev_warn(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_dbg(spd, fmt, ...) \ + dev_dbg(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) + +#endif /* _MEI_SPD_H */ diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c new file mode 100644 index 000000000000..ba94dcf09169 --- /dev/null +++ b/drivers/misc/memory_state_time.c @@ -0,0 +1,462 @@ +/* drivers/misc/memory_state_time.c + * + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KERNEL_ATTR_RO(_name) \ +static struct kobj_attribute _name##_attr = __ATTR_RO(_name) + +#define KERNEL_ATTR_RW(_name) \ +static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +#define FREQ_HASH_BITS 4 +DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS); + +static DEFINE_MUTEX(mem_lock); + +#define TAG "memory_state_time" +#define BW_NODE "/soc/memory-state-time" +#define FREQ_TBL "freq-tbl" +#define BW_TBL "bw-buckets" +#define NUM_SOURCES "num-sources" + +#define LOWEST_FREQ 2 + +static int curr_bw; +static int curr_freq; +static u32 *bw_buckets; +static u32 *freq_buckets; +static int num_freqs; +static int num_buckets; +static int registered_bw_sources; +static u64 last_update; +static bool init_success; +static struct workqueue_struct *memory_wq; +static u32 num_sources = 10; +static int *bandwidths; + +struct freq_entry { + int freq; + u64 *buckets; /* Bandwidth buckets. */ + struct hlist_node hash; +}; + +struct queue_container { + struct work_struct update_state; + int value; + u64 time_now; + int id; + struct mutex *lock; +}; + +static int find_bucket(int bw) +{ + int i; + + if (bw_buckets != NULL) { + for (i = 0; i < num_buckets; i++) { + if (bw_buckets[i] > bw) { + pr_debug("Found bucket %d for bandwidth %d\n", + i, bw); + return i; + } + } + return num_buckets - 1; + } + return 0; +} + +static u64 get_time_diff(u64 time_now) +{ + u64 ms; + + ms = time_now - last_update; + last_update = time_now; + return ms; +} + +static ssize_t show_stat_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i, j; + int len = 0; + struct freq_entry *freq_entry; + + for (i = 0; i < num_freqs; i++) { + hash_for_each_possible(freq_hash_table, freq_entry, hash, + freq_buckets[i]) { + if (freq_entry->freq == freq_buckets[i]) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "%d ", freq_buckets[i]); + if (len >= PAGE_SIZE) + break; + for (j = 0; j < num_buckets; j++) { + len += scnprintf(buf + len, + PAGE_SIZE - len, + "%llu ", + freq_entry->buckets[j]); + } + len += scnprintf(buf + len, PAGE_SIZE - len, + "\n"); + } + } + } + pr_debug("Current Time: %llu\n", ktime_get_boot_ns()); + return len; +} +KERNEL_ATTR_RO(show_stat); + +static void update_table(u64 time_now) +{ + struct freq_entry *freq_entry; + + pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq); + hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) { + if (curr_freq == freq_entry->freq) { + freq_entry->buckets[find_bucket(curr_bw)] + += get_time_diff(time_now); + break; + } + } +} + +static bool freq_exists(int freq) +{ + int i; + + for (i = 0; i < num_freqs; i++) { + if (freq == freq_buckets[i]) + return true; + } + return false; +} + +static int calculate_total_bw(int bw, int index) +{ + int i; + int total_bw = 0; + + pr_debug("memory_state_time New bw %d for id %d\n", bw, index); + bandwidths[index] = bw; + for (i = 0; i < registered_bw_sources; i++) + total_bw += bandwidths[i]; + return total_bw; +} + +static void freq_update_do_work(struct work_struct *work) +{ + struct queue_container *freq_state_update + = 
container_of(work, struct queue_container, + update_state); + if (freq_state_update) { + mutex_lock(&mem_lock); + update_table(freq_state_update->time_now); + curr_freq = freq_state_update->value; + mutex_unlock(&mem_lock); + kfree(freq_state_update); + } +} + +static void bw_update_do_work(struct work_struct *work) +{ + struct queue_container *bw_state_update + = container_of(work, struct queue_container, + update_state); + if (bw_state_update) { + mutex_lock(&mem_lock); + update_table(bw_state_update->time_now); + curr_bw = calculate_total_bw(bw_state_update->value, + bw_state_update->id); + mutex_unlock(&mem_lock); + kfree(bw_state_update); + } +} + +static void memory_state_freq_update(struct memory_state_update_block *ub, + int value) +{ + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + if (freq_exists(value) && init_success) { + struct queue_container *freq_container + = kmalloc(sizeof(struct queue_container), + GFP_KERNEL); + if (!freq_container) + return; + INIT_WORK(&freq_container->update_state, + freq_update_do_work); + freq_container->time_now = ktime_get_boot_ns(); + freq_container->value = value; + pr_debug("Scheduling freq update in work queue\n"); + queue_work(memory_wq, &freq_container->update_state); + } else { + pr_debug("Freq does not exist.\n"); + } + } +} + +static void memory_state_bw_update(struct memory_state_update_block *ub, + int value) +{ + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + if (init_success) { + struct queue_container *bw_container + = kmalloc(sizeof(struct queue_container), + GFP_KERNEL); + if (!bw_container) + return; + INIT_WORK(&bw_container->update_state, + bw_update_do_work); + bw_container->time_now = ktime_get_boot_ns(); + bw_container->value = value; + bw_container->id = ub->id; + pr_debug("Scheduling bandwidth update in work queue\n"); + queue_work(memory_wq, &bw_container->update_state); + } + } +} + +struct memory_state_update_block *memory_state_register_frequency_source(void) +{ + struct memory_state_update_block *block; + + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + pr_debug("Allocating frequency source\n"); + block = kmalloc(sizeof(struct memory_state_update_block), + GFP_KERNEL); + if (!block) + return NULL; + block->update_call = memory_state_freq_update; + return block; + } + pr_err("Config option disabled.\n"); + return NULL; +} +EXPORT_SYMBOL_GPL(memory_state_register_frequency_source); + +struct memory_state_update_block *memory_state_register_bandwidth_source(void) +{ + struct memory_state_update_block *block; + + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + pr_debug("Allocating bandwidth source %d\n", + registered_bw_sources); + block = kmalloc(sizeof(struct memory_state_update_block), + GFP_KERNEL); + if (!block) + return NULL; + block->update_call = memory_state_bw_update; + if (registered_bw_sources < num_sources) { + block->id = registered_bw_sources++; + } else { + pr_err("Unable to allocate source; max number reached\n"); + kfree(block); + return NULL; + } + return block; + } + pr_err("Config option disabled.\n"); + return NULL; +} +EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source); + +/* Buckets are designated by their maximum. + * Returns the buckets decided by the capability of the device. 
+ */ +static int get_bw_buckets(struct device *dev) +{ + int ret, lenb; + struct device_node *node = dev->of_node; + + of_property_read_u32(node, NUM_SOURCES, &num_sources); + if (!of_find_property(node, BW_TBL, &lenb)) { + pr_err("Missing %s property\n", BW_TBL); + return -ENODATA; + } + + bandwidths = devm_kzalloc(dev, + sizeof(*bandwidths) * num_sources, GFP_KERNEL); + if (!bandwidths) + return -ENOMEM; + lenb /= sizeof(*bw_buckets); + bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets), + GFP_KERNEL); + if (!bw_buckets) { + devm_kfree(dev, bandwidths); + return -ENOMEM; + } + ret = of_property_read_u32_array(node, BW_TBL, bw_buckets, + lenb); + if (ret < 0) { + devm_kfree(dev, bandwidths); + devm_kfree(dev, bw_buckets); + pr_err("Unable to read bandwidth table from device tree.\n"); + return ret; + } + + curr_bw = 0; + num_buckets = lenb; + return 0; +} + +/* Adds struct freq_entry nodes to the hashtable for each compatible frequency. + * Returns the supported number of frequencies. + */ +static int freq_buckets_init(struct device *dev) +{ + struct freq_entry *freq_entry; + int i; + int ret, lenf; + struct device_node *node = dev->of_node; + + if (!of_find_property(node, FREQ_TBL, &lenf)) { + pr_err("Missing %s property\n", FREQ_TBL); + return -ENODATA; + } + + lenf /= sizeof(*freq_buckets); + freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets), + GFP_KERNEL); + if (!freq_buckets) + return -ENOMEM; + pr_debug("freqs found len %d\n", lenf); + ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets, + lenf); + if (ret < 0) { + devm_kfree(dev, freq_buckets); + pr_err("Unable to read frequency table from device tree.\n"); + return ret; + } + pr_debug("ret freq %d\n", ret); + + num_freqs = lenf; + curr_freq = freq_buckets[LOWEST_FREQ]; + + for (i = 0; i < num_freqs; i++) { + freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry), + GFP_KERNEL); + if (!freq_entry) + return -ENOMEM; + freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets, + GFP_KERNEL); + if (!freq_entry->buckets) { + devm_kfree(dev, freq_entry); + return -ENOMEM; + } + pr_debug("memory_state_time Adding freq to ht %d\n", + freq_buckets[i]); + freq_entry->freq = freq_buckets[i]; + hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]); + } + return 0; +} + +struct kobject *memory_kobj; +EXPORT_SYMBOL_GPL(memory_kobj); + +static struct attribute *memory_attrs[] = { + &show_stat_attr.attr, + NULL +}; + +static struct attribute_group memory_attr_group = { + .attrs = memory_attrs, +}; + +static int memory_state_time_probe(struct platform_device *pdev) +{ + int error; + + error = get_bw_buckets(&pdev->dev); + if (error) + return error; + error = freq_buckets_init(&pdev->dev); + if (error) + return error; + last_update = ktime_get_boot_ns(); + init_success = true; + + pr_debug("memory_state_time initialized with num_freqs %d\n", + num_freqs); + return 0; +} + +static const struct of_device_id match_table[] = { + { .compatible = "memory-state-time" }, + {} +}; + +static struct platform_driver memory_state_time_driver = { + .probe = memory_state_time_probe, + .driver = { + .name = "memory-state-time", + .of_match_table = match_table, + .owner = THIS_MODULE, + }, +}; + +static int __init memory_state_time_init(void) +{ + int error; + + hash_init(freq_hash_table); + memory_wq = create_singlethread_workqueue("memory_wq"); + if (!memory_wq) { + pr_err("Unable to create workqueue.\n"); + return -EINVAL; + } + /* + * Create sys/kernel directory for memory_state_time. 
+ */ + memory_kobj = kobject_create_and_add(TAG, kernel_kobj); + if (!memory_kobj) { + pr_err("Unable to allocate memory_kobj for sysfs directory.\n"); + error = -ENOMEM; + goto wq; + } + error = sysfs_create_group(memory_kobj, &memory_attr_group); + if (error) { + pr_err("Unable to create sysfs folder.\n"); + goto kobj; + } + + error = platform_driver_register(&memory_state_time_driver); + if (error) { + pr_err("Unable to register memory_state_time platform driver.\n"); + goto group; + } + return 0; + +group: sysfs_remove_group(memory_kobj, &memory_attr_group); +kobj: kobject_put(memory_kobj); +wq: destroy_workqueue(memory_wq); + return error; +} +module_init(memory_state_time_init); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index deb203026496..e089bb6dde3a 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -533,6 +533,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test->base = test->bar[test_reg_bar]; if (!test->base) { + err = -ENOMEM; dev_err(dev, "Cannot perform PCI test without BAR%d\n", test_reg_bar); goto err_iounmap; @@ -542,6 +543,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); if (id < 0) { + err = id; dev_err(dev, "unable to get id\n"); goto err_iounmap; } @@ -588,6 +590,8 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1) return; + if (id < 0) + return; misc_deregister(&test->miscdev); ida_simple_remove(&pci_endpoint_test_ida, id); diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index eda38cbe8530..41f2a9f6851d 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c new file mode 100644 index 000000000000..ace629934821 --- /dev/null +++ b/drivers/misc/uid_sys_stats.c @@ -0,0 +1,698 @@ +/* drivers/misc/uid_sys_stats.c + * + * Copyright (C) 2014 - 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define UID_HASH_BITS 10 +DECLARE_HASHTABLE(hash_table, UID_HASH_BITS); + +static DEFINE_RT_MUTEX(uid_lock); +static struct proc_dir_entry *cpu_parent; +static struct proc_dir_entry *io_parent; +static struct proc_dir_entry *proc_parent; + +struct io_stats { + u64 read_bytes; + u64 write_bytes; + u64 rchar; + u64 wchar; + u64 fsync; +}; + +#define UID_STATE_FOREGROUND 0 +#define UID_STATE_BACKGROUND 1 +#define UID_STATE_BUCKET_SIZE 2 + +#define UID_STATE_TOTAL_CURR 2 +#define UID_STATE_TOTAL_LAST 3 +#define UID_STATE_DEAD_TASKS 4 +#define UID_STATE_SIZE 5 + +#define MAX_TASK_COMM_LEN 256 + +struct task_entry { + char comm[MAX_TASK_COMM_LEN]; + pid_t pid; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +}; + +struct uid_entry { + uid_t uid; + u64 utime; + u64 stime; + u64 active_utime; + u64 active_stime; + int state; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + DECLARE_HASHTABLE(task_entries, UID_HASH_BITS); +#endif +}; + +static u64 compute_write_bytes(struct task_struct *task) +{ + if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes) + return 0; + + return task->ioac.write_bytes - task->ioac.cancelled_write_bytes; +} + +static void compute_io_bucket_stats(struct io_stats *io_bucket, + struct io_stats *io_curr, + struct io_stats *io_last, + struct io_stats *io_dead) +{ + /* tasks could switch to another uid group, but its io_last in the + * previous uid group could still be positive. + * therefore before each update, do an overflow check first + */ + int64_t delta; + + delta = io_curr->read_bytes + io_dead->read_bytes - + io_last->read_bytes; + io_bucket->read_bytes += delta > 0 ? delta : 0; + delta = io_curr->write_bytes + io_dead->write_bytes - + io_last->write_bytes; + io_bucket->write_bytes += delta > 0 ? delta : 0; + delta = io_curr->rchar + io_dead->rchar - io_last->rchar; + io_bucket->rchar += delta > 0 ? delta : 0; + delta = io_curr->wchar + io_dead->wchar - io_last->wchar; + io_bucket->wchar += delta > 0 ? delta : 0; + delta = io_curr->fsync + io_dead->fsync - io_last->fsync; + io_bucket->fsync += delta > 0 ? 
delta : 0; + + io_last->read_bytes = io_curr->read_bytes; + io_last->write_bytes = io_curr->write_bytes; + io_last->rchar = io_curr->rchar; + io_last->wchar = io_curr->wchar; + io_last->fsync = io_curr->fsync; + + memset(io_dead, 0, sizeof(struct io_stats)); +} + +#ifdef CONFIG_UID_SYS_STATS_DEBUG +static void get_full_task_comm(struct task_entry *task_entry, + struct task_struct *task) +{ + int i = 0, offset = 0, len = 0; + /* save one byte for terminating null character */ + int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1; + char buf[unused_len]; + struct mm_struct *mm = task->mm; + + /* fill the first TASK_COMM_LEN bytes with thread name */ + __get_task_comm(task_entry->comm, TASK_COMM_LEN, task); + i = strlen(task_entry->comm); + while (i < TASK_COMM_LEN) + task_entry->comm[i++] = ' '; + + /* next the executable file name */ + if (mm) { + down_read(&mm->mmap_sem); + if (mm->exe_file) { + char *pathname = d_path(&mm->exe_file->f_path, buf, + unused_len); + + if (!IS_ERR(pathname)) { + len = strlcpy(task_entry->comm + i, pathname, + unused_len); + i += len; + task_entry->comm[i++] = ' '; + unused_len--; + } + } + up_read(&mm->mmap_sem); + } + unused_len -= len; + + /* fill the rest with command line argument + * replace each null or new line character + * between args in argv with whitespace */ + len = get_cmdline(task, buf, unused_len); + while (offset < len) { + if (buf[offset] != '\0' && buf[offset] != '\n') + task_entry->comm[i++] = buf[offset]; + else + task_entry->comm[i++] = ' '; + offset++; + } + + /* get rid of trailing whitespaces in case when arg is memset to + * zero before being reset in userspace + */ + while (task_entry->comm[i-1] == ' ') + i--; + task_entry->comm[i] = '\0'; +} + +static struct task_entry *find_task_entry(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + + hash_for_each_possible(uid_entry->task_entries, task_entry, hash, + task->pid) { + if (task->pid == task_entry->pid) { + /* if thread name changed, update the entire command */ + int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN) + - task_entry->comm; + + if (strncmp(task_entry->comm, task->comm, len)) + get_full_task_comm(task_entry, task); + return task_entry; + } + } + return NULL; +} + +static struct task_entry *find_or_register_task(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + pid_t pid = task->pid; + + task_entry = find_task_entry(uid_entry, task); + if (task_entry) + return task_entry; + + task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC); + if (!task_entry) + return NULL; + + get_full_task_comm(task_entry, task); + + task_entry->pid = pid; + hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid); + + return task_entry; +} + +static void remove_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + struct hlist_node *tmp_task; + + hash_for_each_safe(uid_entry->task_entries, bkt_task, + tmp_task, task_entry, hash) { + hash_del(&task_entry->hash); + kfree(task_entry); + } +} + +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + } +} + +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct task_entry *task_entry = 
find_or_register_task(uid_entry, task); + struct io_stats *task_io_slot = &task_entry->io[slot]; + + task_io_slot->read_bytes += task->ioac.read_bytes; + task_io_slot->write_bytes += compute_write_bytes(task); + task_io_slot->rchar += task->ioac.rchar; + task_io_slot->wchar += task->ioac.wchar; + task_io_slot->fsync += task->ioac.syscfs; +} + +static void compute_io_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + compute_io_bucket_stats(&task_entry->io[uid_entry->state], + &task_entry->io[UID_STATE_TOTAL_CURR], + &task_entry->io[UID_STATE_TOTAL_LAST], + &task_entry->io[UID_STATE_DEAD_TASKS]); + } +} + +static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + /* Separated by comma because space exists in task comm */ + seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", + task_entry->comm, + (unsigned long)task_entry->pid, + task_entry->io[UID_STATE_FOREGROUND].rchar, + task_entry->io[UID_STATE_FOREGROUND].wchar, + task_entry->io[UID_STATE_FOREGROUND].read_bytes, + task_entry->io[UID_STATE_FOREGROUND].write_bytes, + task_entry->io[UID_STATE_BACKGROUND].rchar, + task_entry->io[UID_STATE_BACKGROUND].wchar, + task_entry->io[UID_STATE_BACKGROUND].read_bytes, + task_entry->io[UID_STATE_BACKGROUND].write_bytes, + task_entry->io[UID_STATE_FOREGROUND].fsync, + task_entry->io[UID_STATE_BACKGROUND].fsync); + } +} +#else +static void remove_uid_tasks(struct uid_entry *uid_entry) {}; +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}; +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) {}; +static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}; +static void show_io_uid_tasks(struct seq_file *m, + struct uid_entry *uid_entry) {} +#endif + +static struct uid_entry *find_uid_entry(uid_t uid) +{ + struct uid_entry *uid_entry; + hash_for_each_possible(hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +static struct uid_entry *find_or_register_uid(uid_t uid) +{ + struct uid_entry *uid_entry; + + uid_entry = find_uid_entry(uid); + if (uid_entry) + return uid_entry; + + uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + hash_init(uid_entry->task_entries); +#endif + hash_add(hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static int uid_cputime_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry = NULL; + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + u64 utime; + u64 stime; + unsigned long bkt; + uid_t uid; + + rt_mutex_lock(&uid_lock); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + uid_entry->active_stime = 0; + uid_entry->active_utime = 0; + } + + read_lock(&tasklist_lock); + do_each_thread(temp, task) { + uid = from_kuid_munged(user_ns, task_uid(task)); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + read_unlock(&tasklist_lock); + rt_mutex_unlock(&uid_lock); + pr_err("%s: failed to find the uid_entry for uid %d\n", + __func__, uid); + return -ENOMEM; + } + task_cputime_adjusted(task, &utime, &stime); + 
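/* utime/stime from task_cputime_adjusted() are in nanoseconds here; the per-uid totals are reported in milliseconds via ktime_to_ms() below. */ +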
uid_entry->active_utime += utime; + uid_entry->active_stime += stime; + } while_each_thread(temp, task); + read_unlock(&tasklist_lock); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + u64 total_utime = uid_entry->utime + + uid_entry->active_utime; + u64 total_stime = uid_entry->stime + + uid_entry->active_stime; + seq_printf(m, "%d: %llu %llu\n", uid_entry->uid, + ktime_to_ms(total_utime), ktime_to_ms(total_stime)); + } + + rt_mutex_unlock(&uid_lock); + return 0; +} + +static int uid_cputime_open(struct inode *inode, struct file *file) +{ + return single_open(file, uid_cputime_show, PDE_DATA(inode)); +} + +static const struct file_operations uid_cputime_fops = { + .open = uid_cputime_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int uid_remove_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, NULL); +} + +static ssize_t uid_remove_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + char uids[128]; + char *start_uid, *end_uid = NULL; + long int uid_start = 0, uid_end = 0; + + if (count >= sizeof(uids)) + count = sizeof(uids) - 1; + + if (copy_from_user(uids, buffer, count)) + return -EFAULT; + + uids[count] = '\0'; + end_uid = uids; + start_uid = strsep(&end_uid, "-"); + + if (!start_uid || !end_uid) + return -EINVAL; + + if (kstrtol(start_uid, 10, &uid_start) != 0 || + kstrtol(end_uid, 10, &uid_end) != 0) { + return -EINVAL; + } + rt_mutex_lock(&uid_lock); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(hash_table, uid_entry, tmp, + hash, (uid_t)uid_start) { + if (uid_start == uid_entry->uid) { + remove_uid_tasks(uid_entry); + hash_del(&uid_entry->hash); + kfree(uid_entry); + } + } + } + + rt_mutex_unlock(&uid_lock); + return count; +} + +static const struct file_operations uid_remove_fops = { + .open = uid_remove_open, + .release = single_release, + .write = uid_remove_write, +}; + + +static void add_uid_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct io_stats *io_slot = &uid_entry->io[slot]; + + io_slot->read_bytes += task->ioac.read_bytes; + io_slot->write_bytes += compute_write_bytes(task); + io_slot->rchar += task->ioac.rchar; + io_slot->wchar += task->ioac.wchar; + io_slot->fsync += task->ioac.syscfs; + + add_uid_tasks_io_stats(uid_entry, task, slot); +} + +static void update_io_stats_all_locked(void) +{ + struct uid_entry *uid_entry = NULL; + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + unsigned long bkt; + uid_t uid; + + hash_for_each(hash_table, bkt, uid_entry, hash) { + memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + } + + rcu_read_lock(); + do_each_thread(temp, task) { + uid = from_kuid_munged(user_ns, task_uid(task)); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); + if (!uid_entry) + continue; + add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); + } while_each_thread(temp, task); + rcu_read_unlock(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], + &uid_entry->io[UID_STATE_TOTAL_CURR], + &uid_entry->io[UID_STATE_TOTAL_LAST], + &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); + } +} + +static void update_io_stats_uid_locked(struct uid_entry *uid_entry) +{ + struct task_struct *task, *temp; + 
struct user_namespace *user_ns = current_user_ns(); + + memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + + rcu_read_lock(); + do_each_thread(temp, task) { + if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid) + continue; + add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); + } while_each_thread(temp, task); + rcu_read_unlock(); + + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], + &uid_entry->io[UID_STATE_TOTAL_CURR], + &uid_entry->io[UID_STATE_TOTAL_LAST], + &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); +} + + +static int uid_io_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + unsigned long bkt; + + rt_mutex_lock(&uid_lock); + + update_io_stats_all_locked(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", + uid_entry->uid, + uid_entry->io[UID_STATE_FOREGROUND].rchar, + uid_entry->io[UID_STATE_FOREGROUND].wchar, + uid_entry->io[UID_STATE_FOREGROUND].read_bytes, + uid_entry->io[UID_STATE_FOREGROUND].write_bytes, + uid_entry->io[UID_STATE_BACKGROUND].rchar, + uid_entry->io[UID_STATE_BACKGROUND].wchar, + uid_entry->io[UID_STATE_BACKGROUND].read_bytes, + uid_entry->io[UID_STATE_BACKGROUND].write_bytes, + uid_entry->io[UID_STATE_FOREGROUND].fsync, + uid_entry->io[UID_STATE_BACKGROUND].fsync); + + show_io_uid_tasks(m, uid_entry); + } + + rt_mutex_unlock(&uid_lock); + return 0; +} + +static int uid_io_open(struct inode *inode, struct file *file) +{ + return single_open(file, uid_io_show, PDE_DATA(inode)); +} + +static const struct file_operations uid_io_fops = { + .open = uid_io_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int uid_procstat_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, NULL); +} + +static ssize_t uid_procstat_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + struct uid_entry *uid_entry; + uid_t uid; + int argc, state; + char input[128]; + + if (count >= sizeof(input)) + return -EINVAL; + + if (copy_from_user(input, buffer, count)) + return -EFAULT; + + input[count] = '\0'; + + argc = sscanf(input, "%u %d", &uid, &state); + if (argc != 2) + return -EINVAL; + + if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND) + return -EINVAL; + + rt_mutex_lock(&uid_lock); + + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + rt_mutex_unlock(&uid_lock); + return -EINVAL; + } + + if (uid_entry->state == state) { + rt_mutex_unlock(&uid_lock); + return count; + } + + update_io_stats_uid_locked(uid_entry); + + uid_entry->state = state; + + rt_mutex_unlock(&uid_lock); + + return count; +} + +static const struct file_operations uid_procstat_fops = { + .open = uid_procstat_open, + .release = single_release, + .write = uid_procstat_write, +}; + +static int process_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + struct task_struct *task = v; + struct uid_entry *uid_entry; + u64 utime, stime; + uid_t uid; + + if (!task) + return NOTIFY_OK; + + rt_mutex_lock(&uid_lock); + uid = from_kuid_munged(current_user_ns(), task_uid(task)); + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + pr_err("%s: failed to find uid %d\n", __func__, uid); + goto exit; + } + + task_cputime_adjusted(task, &utime, &stime); + uid_entry->utime += utime; + uid_entry->stime += stime; + + add_uid_io_stats(uid_entry, task, 
UID_STATE_DEAD_TASKS); + +exit: + rt_mutex_unlock(&uid_lock); + return NOTIFY_OK; +} + +static struct notifier_block process_notifier_block = { + .notifier_call = process_notifier, +}; + +static int __init proc_uid_sys_stats_init(void) +{ + hash_init(hash_table); + + cpu_parent = proc_mkdir("uid_cputime", NULL); + if (!cpu_parent) { + pr_err("%s: failed to create uid_cputime proc entry\n", + __func__); + goto err; + } + + proc_create_data("remove_uid_range", 0222, cpu_parent, + &uid_remove_fops, NULL); + proc_create_data("show_uid_stat", 0444, cpu_parent, + &uid_cputime_fops, NULL); + + io_parent = proc_mkdir("uid_io", NULL); + if (!io_parent) { + pr_err("%s: failed to create uid_io proc entry\n", + __func__); + goto err; + } + + proc_create_data("stats", 0444, io_parent, + &uid_io_fops, NULL); + + proc_parent = proc_mkdir("uid_procstat", NULL); + if (!proc_parent) { + pr_err("%s: failed to create uid_procstat proc entry\n", + __func__); + goto err; + } + + proc_create_data("set", 0222, proc_parent, + &uid_procstat_fops, NULL); + + profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block); + + return 0; + +err: + remove_proc_subtree("uid_cputime", NULL); + remove_proc_subtree("uid_io", NULL); + remove_proc_subtree("uid_procstat", NULL); + return -ENOMEM; +} + +early_initcall(proc_uid_sys_stats_init); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 1e688bfec567..9047c0a529b2 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1271,7 +1271,7 @@ static int __init vmballoon_init(void) * Check if we are running on VMware's hypervisor and bail out * if we are not. */ - if (x86_hyper != &x86_hyper_vmware) + if (x86_hyper_type != X86_HYPER_VMWARE) return -ENODEV; for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES; diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 42e89060cd41..53fc4818f4e0 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -36,6 +36,7 @@ config PWRSEQ_SIMPLE config MMC_BLOCK tristate "MMC block device driver" depends on BLOCK + select RPMB default y help Say Y here to enable the MMC block device driver support. @@ -80,3 +81,17 @@ config MMC_TEST This driver is only of interest to those developing or testing a host driver. Most people should say N here. +config MMC_EMBEDDED_SDIO + boolean "MMC embedded SDIO device support" + help + If you say Y here, support will be added for embedded SDIO + devices which do not contain the necessary enumeration + support in hardware to be properly detected. + +config MMC_PARANOID_SD_INIT + bool "Enable paranoid SD card initialization" + help + If you say Y here, the MMC layer will be extra paranoid + about re-trying SD init requests. This can be a useful + work-around for buggy controllers and hardware. Enable + if you are experiencing issues with SD detection. 
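The uid_sys_stats driver added above is driven entirely through procfs: writing "<uid> <state>" to /proc/uid_procstat/set switches a uid between foreground (0) and background (1) accounting, reading /proc/uid_io/stats dumps the per-uid I/O buckets, and writing "<start>-<end>" to /proc/uid_cputime/remove_uid_range drops the entries for a uid range. The user-space sketch below exercises that interface; it is not part of the patch, and the uid values used are illustrative assumptions.

/* Minimal user-space sketch (not part of the patch) for the uid_sys_stats
 * proc interface above; uid 10001 and the range 10001-10010 are assumed
 * example values.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[512];

	/* Mark uid 10001 as foreground; writing "10001 1" would mark it background. */
	f = fopen("/proc/uid_procstat/set", "w");
	if (f) {
		fprintf(f, "10001 0");
		fclose(f);
	}

	/* Dump the per-uid I/O counters: rchar, wchar, read_bytes, write_bytes
	 * for the foreground and background buckets, then the two fsync counts. */
	f = fopen("/proc/uid_io/stats", "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	/* Drop accounting for a whole uid range, e.g. after the user is removed. */
	f = fopen("/proc/uid_cputime/remove_uid_range", "w");
	if (f) {
		fprintf(f, "10001-10010");
		fclose(f);
	}

	return 0;
}

The open modes above follow the permissions set in proc_uid_sys_stats_init(): set and remove_uid_range are write-only (0222), while stats is read-only (0444).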
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 2ad7b5c69156..8bfbab12f78a 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -43,6 +43,7 @@ #include #include #include +#include #include @@ -65,6 +66,7 @@ MODULE_ALIAS("mmc:block"); #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ #define MMC_SANITIZE_REQ_TIMEOUT 240000 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) +#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ (rq_data_dir(req) == WRITE)) @@ -109,6 +111,7 @@ struct mmc_blk_data { #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) +#define MMC_BLK_RPMB BIT(4) /* * Only set in main mmc_blk_data associated @@ -119,6 +122,10 @@ struct mmc_blk_data { struct device_attribute force_ro; struct device_attribute power_ro_lock; int area_type; + + /* debugfs files (only in main mmc_blk_data) */ + struct dentry *status_dentry; + struct dentry *ext_csd_dentry; }; static DEFINE_MUTEX(open_lock); @@ -204,9 +211,14 @@ static ssize_t power_ro_lock_store(struct device *dev, /* Dispatch locking to the block layer */ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM); + if (IS_ERR(req)) { + count = PTR_ERR(req); + goto out_put; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; + blk_put_request(req); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", @@ -219,7 +231,7 @@ static ssize_t power_ro_lock_store(struct device *dev, set_disk_ro(part_md->disk, 1); } } - +out_put: mmc_blk_put(md); return count; } @@ -369,8 +381,8 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, - u32 retries_max) +static int mmc_blk_rpmb_status_poll(struct mmc_card *card, u32 *status, + u32 retries_max) { int err; u32 retry_count = 0; @@ -529,6 +541,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return data.error; } + /* + * Make sure the cache of the PARTITION_CONFIG register and + * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write + * changed it successfully. + */ + if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && + (cmd.opcode == MMC_SWITCH)) { + struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); + u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); + + /* + * Update cache so the next mmc_blk_part_switch call operates + * on up-to-date data. + */ + card->ext_csd.part_config = value; + main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; + } + /* * According to the SD specs, some commands require a delay after * issuing the command. @@ -543,7 +573,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, * Ensure RPMB command has completed by polling CMD13 * "Send Status". */ - err = ioctl_rpmb_card_status_poll(card, &status, 5); + err = mmc_blk_rpmb_status_poll(card, &status, 5); if (err) dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", @@ -580,6 +610,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata->ic.write_flag ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_done; + } idatas[0] = idata; req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idatas; @@ -643,6 +677,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_err; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; @@ -1226,6 +1264,255 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } +static int mmc_rpmb_send_cmd(struct mmc_card *card, + unsigned int data_type, bool do_rel_wr, + void *buf, u16 blks) +{ + int err; + u32 status; + struct mmc_command sbc = { + .opcode = MMC_SET_BLOCK_COUNT, + .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, + }; + + struct mmc_command cmd = { + .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC, + }; + + struct mmc_data data = { + .blksz = 512, + }; + struct mmc_request mrq = { + .sbc = &sbc, + .cmd = &cmd, + .data = &data, + .stop = NULL, + }; + struct scatterlist sg; + + /* set CMD23 */ + sbc.arg = blks & 0x0000FFFF; + if (do_rel_wr) + sbc.arg |= MMC_CMD23_ARG_REL_WR; + + /* set CMD25/18 */ + cmd.opcode = (data_type == MMC_DATA_WRITE) ? + MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; + + sg_init_one(&sg, buf, 512 * blks); + + data.blocks = blks; + data.sg = &sg; + data.sg_len = 1; + data.flags = data_type; + mmc_set_data_timeout(&data, card); + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) { + dev_err(mmc_dev(card->host), "cmd error (%d)\n", cmd.error); + return cmd.error; + } + + if (data.error) { + dev_err(mmc_dev(card->host), "data error (%d)\n", data.error); + return data.error; + } + + err = mmc_blk_rpmb_status_poll(card, &status, 5); + if (err) + dev_err(mmc_dev(card->host), "Card Status=0x%08X, error %d\n", + status, err); + return err; +} + +static int mmc_blk_rpmb_sequence(struct mmc_card *card, + struct rpmb_cmd *cmds, u32 ncmds) +{ + int err, i; + struct rpmb_cmd *cmd; + unsigned int data_type; + bool do_rel_wr; + + for (err = 0, i = 0; i < ncmds && !err; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) { + data_type = MMC_DATA_WRITE; + do_rel_wr = !!(cmd->flags & RPMB_F_REL_WRITE); + } else { + data_type = MMC_DATA_READ; + do_rel_wr = false; + } + + err = mmc_rpmb_send_cmd(card, data_type, do_rel_wr, + cmd->frames, cmd->nframes); + } + + return err; +} + +static int mmc_blk_rpmb_process(struct mmc_blk_data *md, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct mmc_card *card; + struct mmc_blk_data *main_md; + int ret; + + if (WARN_ON(!md || !cmds || !ncmds)) + return -EINVAL; + + if (!(md->flags & MMC_BLK_CMD23) || + (md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB)) + return -EOPNOTSUPP; + + card = md->queue.card; + if (!card || !mmc_card_mmc(card)) + return -ENODEV; + + mmc_get_card(card); + + /* switch to RPMB partition */ + ret = mmc_blk_part_switch(card, md->part_type); + if (ret) { + dev_err(mmc_dev(card->host), "Invalid RPMB partition switch (%d)!\n", + ret); + /* + * In case partition is not in user data area, make + * a force partition switch. 
+ * we need reset eMMC card at here + */ + ret = mmc_blk_reset(md, card->host, MMC_BLK_RPMB); + if (!ret) + mmc_blk_reset_success(md, MMC_BLK_RPMB); + else + dev_err(mmc_dev(card->host), "eMMC card reset failed (%d)\n", + ret); + goto out; + } + + ret = mmc_blk_rpmb_sequence(card, cmds, ncmds); + if (ret) + dev_err(mmc_dev(card->host), "failed (%d) to handle RPMB request\n", + ret); + + /* Always switch back to main area after RPMB access */ + + main_md = dev_get_drvdata(&card->dev); + mmc_blk_part_switch(card, main_md->part_type); +out: + mmc_put_card(card); + return ret; +} + +static int mmc_blk_rpmb_cmd_seq(struct device *dev, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct mmc_blk_data *md; + int ret; + + md = mmc_blk_get(dev_to_disk(dev)); + if (!md) + return -ENODEV; + + if (!cmds || !ncmds) + return -EINVAL; + + ret = mmc_blk_rpmb_process(md, cmds, ncmds); + + mmc_blk_put(md); + + return ret; +} + +static struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .type = RPMB_TYPE_EMMC, +}; + +static struct mmc_blk_data *mmc_blk_rpmb_part_get(struct mmc_blk_data *md) +{ + struct mmc_blk_data *part_md; + + if (!md) + return NULL; + + list_for_each_entry(part_md, &md->part, part) { + if (part_md->area_type == MMC_BLK_DATA_AREA_RPMB) + return part_md; + } + + return NULL; +} + +static void mmc_blk_rpmb_unset_dev_id(struct rpmb_ops *ops) +{ + kfree(ops->dev_id); + ops->dev_id = NULL; +} + +static int mmc_blk_rpmb_set_dev_id(struct rpmb_ops *ops, struct mmc_card *card) +{ + char *id; + + id = kmalloc(sizeof(card->raw_cid), GFP_KERNEL); + if (!id) + return -ENOMEM; + + memcpy(id, card->raw_cid, sizeof(card->raw_cid)); + ops->dev_id = id; + ops->dev_id_len = sizeof(card->raw_cid); + + return 0; +} + +static void mmc_blk_rpmb_set_rel_wr_cnt(struct rpmb_ops *ops, + struct mmc_card *card) +{ + u16 rel_wr_cnt; + + /* RPMB blocks are written in half sectors hence '* 2' */ + rel_wr_cnt = card->ext_csd.rel_sectors * 2; + /* eMMC 5.1 may support RPMB 8K (32) frames */ + if (card->ext_csd.rev >= 8) { + if (card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) + rel_wr_cnt = 32; + else + rel_wr_cnt = 2; + } + ops->reliable_wr_cnt = rel_wr_cnt; +} + +static void mmc_blk_rpmb_add(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_blk_data *part_md = mmc_blk_rpmb_part_get(md); + struct rpmb_dev *rdev; + + if (!part_md) + return; + + mmc_blk_rpmb_set_dev_id(&mmc_rpmb_dev_ops, card); + mmc_blk_rpmb_set_rel_wr_cnt(&mmc_rpmb_dev_ops, card); + + rdev = rpmb_dev_register(disk_to_dev(part_md->disk), + &mmc_rpmb_dev_ops); + if (IS_ERR(rdev)) { + pr_warn("%s: cannot register to rpmb %ld\n", + part_md->disk->disk_name, PTR_ERR(rdev)); + } +} + +static void mmc_blk_rpmb_remove(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_blk_data *part_md = mmc_blk_rpmb_part_get(md); + + if (part_md) + rpmb_dev_unregister(disk_to_dev(part_md->disk)); + + mmc_blk_rpmb_unset_dev_id(&mmc_rpmb_dev_ops); +} + static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->blkdata; @@ -2314,6 +2601,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) /* Ask the block layer about the card status */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) + return PTR_ERR(req); req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; @@ -2321,6 +2610,7 @@ 
static int mmc_dbg_card_status_get(void *data, u64 *val) *val = ret; ret = 0; } + blk_put_request(req); return ret; } @@ -2347,10 +2637,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) /* Ask the block layer for the EXT CSD */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_free; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; blk_execute_rq(mq->queue, NULL, req, 0); err = req_to_mmc_queue_req(req)->drv_op_result; + blk_put_request(req); if (err) { pr_err("FAILED %d\n", err); goto out_free; @@ -2362,6 +2657,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) if (n != EXT_CSD_STR_LEN) { err = -EINVAL; + kfree(ext_csd); goto out_free; } @@ -2396,7 +2692,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = { .llseek = default_llseek, }; -static int mmc_blk_add_debugfs(struct mmc_card *card) +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) { struct dentry *root; @@ -2406,28 +2702,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card) root = card->debugfs_root; if (mmc_card_mmc(card) || mmc_card_sd(card)) { - if (!debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops)) + md->status_dentry = + debugfs_create_file("status", S_IRUSR, root, card, + &mmc_dbg_card_status_fops); + if (!md->status_dentry) return -EIO; } if (mmc_card_mmc(card)) { - if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, - &mmc_dbg_ext_csd_fops)) + md->ext_csd_dentry = + debugfs_create_file("ext_csd", S_IRUSR, root, card, + &mmc_dbg_ext_csd_fops); + if (!md->ext_csd_dentry) return -EIO; } return 0; } +static void mmc_blk_remove_debugfs(struct mmc_card *card, + struct mmc_blk_data *md) +{ + if (!card->debugfs_root) + return; + + if (!IS_ERR_OR_NULL(md->status_dentry)) { + debugfs_remove(md->status_dentry); + md->status_dentry = NULL; + } + + if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { + debugfs_remove(md->ext_csd_dentry); + md->ext_csd_dentry = NULL; + } +} #else -static int mmc_blk_add_debugfs(struct mmc_card *card) +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) { return 0; } +static void mmc_blk_remove_debugfs(struct mmc_card *card, + struct mmc_blk_data *md) +{ +} + #endif /* CONFIG_DEBUG_FS */ static int mmc_blk_probe(struct mmc_card *card) @@ -2467,7 +2788,9 @@ static int mmc_blk_probe(struct mmc_card *card) } /* Add two debugfs entries */ - mmc_blk_add_debugfs(card); + mmc_blk_add_debugfs(card, md); + /* Add rpmb layer */ + mmc_blk_rpmb_add(card); pm_runtime_set_autosuspend_delay(&card->dev, 3000); pm_runtime_use_autosuspend(&card->dev); @@ -2493,6 +2816,8 @@ static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + mmc_blk_remove_debugfs(card, md); + mmc_blk_rpmb_remove(card); mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); @@ -2521,6 +2846,7 @@ static int _mmc_blk_suspend(struct mmc_card *card) static void mmc_blk_shutdown(struct mmc_card *card) { + mmc_blk_rpmb_remove(card); _mmc_blk_suspend(card); } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 301246513a37..7f428e387de3 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev) return ret; ret = host->bus_ops->suspend(host); + if (ret) + pm_generic_resume(dev); + return ret; } diff --git 
a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index f06cd91964ce..9c821eedd156 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -75,11 +75,14 @@ struct mmc_fixup { #define EXT_CSD_REV_ANY (-1u) #define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 +#define CID_MANFID_APACER 0x27 #define CID_MANFID_KINGSTON 0x70 #define CID_MANFID_HYNIX 0x90 +#define CID_MANFID_NUMONYX 0xFE #define END_FIXUP { NULL } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 66c9cf49ad2f..e71e6070ae75 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2751,6 +2751,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block, if (!err) break; + if (!mmc_card_is_removable(host)) { + dev_warn(mmc_dev(host), + "pre_suspend failed for non-removable host: " + "%d\n", err); + /* Avoid removing non-removable hosts */ + break; + } + /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); @@ -2802,6 +2810,22 @@ void mmc_init_context_info(struct mmc_host *host) init_waitqueue_head(&host->context_info.wait); } +#ifdef CONFIG_MMC_EMBEDDED_SDIO +void mmc_set_embedded_sdio_data(struct mmc_host *host, + struct sdio_cis *cis, + struct sdio_cccr *cccr, + struct sdio_embedded_func *funcs, + int num_funcs) +{ + host->embedded_sdio_data.cis = cis; + host->embedded_sdio_data.cccr = cccr; + host->embedded_sdio_data.funcs = funcs; + host->embedded_sdio_data.num_funcs = num_funcs; +} + +EXPORT_SYMBOL(mmc_set_embedded_sdio_data); +#endif + static int __init mmc_init(void) { int ret; diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 01e459a34f33..0f4a7d7b2626 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -314,4 +314,5 @@ void mmc_add_card_debugfs(struct mmc_card *card) void mmc_remove_card_debugfs(struct mmc_card *card) { debugfs_remove_recursive(card->debugfs_root); + card->debugfs_root = NULL; } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ad88deb2e8f3..f469254bf78a 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -429,7 +429,8 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); - mmc_register_pm_notifier(host); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + mmc_register_pm_notifier(host); return 0; } @@ -446,7 +447,8 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - mmc_unregister_pm_notifier(host); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + mmc_unregister_pm_notifier(host); mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 36217ad5e9b1..13871e2bcb0a 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -780,7 +780,8 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv); -MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info); +MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev); +MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info); MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n", card->ext_csd.device_life_time_est_typ_a, card->ext_csd.device_life_time_est_typ_b); @@ -790,7 +791,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); 
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr); MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en); static ssize_t mmc_fwrev_show(struct device *dev, @@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_name.attr, &dev_attr_oemid.attr, &dev_attr_prv.attr, + &dev_attr_rev.attr, &dev_attr_pre_eol_info.attr, &dev_attr_life_time.attr, &dev_attr_serial.attr, diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 478869805b96..789afef66fce 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -2328,10 +2328,17 @@ static int mmc_test_reset(struct mmc_test_card *test) int err; err = mmc_hw_reset(host); - if (!err) + if (!err) { + /* + * Reset will re-enable the card's command queue, but tests + * expect it to be disabled. + */ + if (card->ext_csd.cmdq_en) + mmc_cmdq_disable(card); return RESULT_OK; - else if (err == -EOPNOTSUPP) + } else if (err == -EOPNOTSUPP) { return RESULT_UNSUP_HOST; + } return RESULT_FAIL; } diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 0a4e77a5ba33..8c4721f1b4bc 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -17,6 +17,8 @@ #include #include +#include +#include #include "queue.h" #include "block.h" @@ -43,6 +45,11 @@ static int mmc_queue_thread(void *d) struct mmc_queue *mq = d; struct request_queue *q = mq->queue; struct mmc_context_info *cntx = &mq->card->host->context_info; + struct sched_param scheduler_params = {0}; + + scheduler_params.sched_priority = 1; + + sched_setscheduler(current, SCHED_FIFO, &scheduler_params); current->flags |= PF_MEMALLOC; diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index f664e9cbc9f8..5153577754f0 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -52,6 +52,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = { MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), + /* + * Some SD cards lockup while using CMD23 multiblock transfers. + */ + MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd, + MMC_QUIRK_BLK_NO_CMD23), + /* * Some MMC cards need longer data read timeout than indicated in CSD. */ @@ -101,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = { */ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), + /* + * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI + * feature is used so disable the HPI feature for such buggy cards. 
+ */ + MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX, + 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6), END_FIXUP }; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 4fd1620b732d..4bbdfa382e84 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr); static ssize_t mmc_dsr_show(struct device *dev, @@ -834,6 +834,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, bool reinit) { int err; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif if (!reinit) { /* @@ -860,7 +863,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, /* * Fetch switch information from card. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + for (retries = 1; retries <= 3; retries++) { + err = mmc_read_switch(card); + if (!err) { + if (retries > 1) { + printk(KERN_WARNING + "%s: recovered\n", + mmc_hostname(host)); + } + break; + } else { + printk(KERN_WARNING + "%s: read switch failed (attempt %d)\n", + mmc_hostname(host), retries); + } + } +#else err = mmc_read_switch(card); +#endif + if (err) return err; } @@ -1054,14 +1076,33 @@ static int mmc_sd_alive(struct mmc_host *host) */ static void mmc_sd_detect(struct mmc_host *host) { - int err; + int err = 0; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries = 5; +#endif mmc_get_card(host->card); /* * Just check if our card has been removed. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + while(retries) { + err = mmc_send_status(host->card, NULL); + if (err) { + retries--; + udelay(5); + continue; + } + break; + } + if (!retries) { + printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n", + __func__, mmc_hostname(host), err); + } +#else err = _mmc_detect_card_removed(host); +#endif mmc_put_card(host->card); @@ -1120,6 +1161,9 @@ static int mmc_sd_suspend(struct mmc_host *host) static int _mmc_sd_resume(struct mmc_host *host) { int err = 0; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif mmc_claim_host(host); @@ -1127,7 +1171,23 @@ static int _mmc_sd_resume(struct mmc_host *host) goto out; mmc_power_up(host, host->card->ocr); +#ifdef CONFIG_MMC_PARANOID_SD_INIT + retries = 5; + while (retries) { + err = mmc_sd_init_card(host, host->card->ocr, host->card); + + if (err) { + printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n", + mmc_hostname(host), err, retries); + mdelay(5); + retries--; + continue; + } + break; + } +#else err = mmc_sd_init_card(host, host->card->ocr, host->card); +#endif mmc_card_clr_suspended(host->card); out: @@ -1202,6 +1262,9 @@ int mmc_attach_sd(struct mmc_host *host) { int err; u32 ocr, rocr; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif WARN_ON(!host->claimed); @@ -1237,9 +1300,27 @@ int mmc_attach_sd(struct mmc_host *host) /* * Detect and init the card. 
*/ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + retries = 5; + while (retries) { + err = mmc_sd_init_card(host, rocr, NULL); + if (err) { + retries--; + continue; + } + break; + } + + if (!retries) { + printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n", + mmc_hostname(host), err); + goto err; + } +#else err = mmc_sd_init_card(host, rocr, NULL); if (err) goto err; +#endif mmc_release_host(host); err = mmc_add_card(host->card); diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index cc43687ca241..c42e3cd537f2 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -31,6 +31,10 @@ #include "sdio_ops.h" #include "sdio_cis.h" +#ifdef CONFIG_MMC_EMBEDDED_SDIO +#include +#endif + static int sdio_read_fbr(struct sdio_func *func) { int ret; @@ -706,28 +710,44 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto finish; } - /* - * Read the common registers. Note that we should try to - * validate whether UHS would work or not. - */ - err = sdio_read_cccr(card, ocr); - if (err) { - mmc_sdio_resend_if_cond(host, card); - if (ocr & R4_18V_PRESENT) { - /* Retry init sequence, but without R4_18V_PRESENT. */ - retries = 0; - goto try_again; - } else { - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.cccr) + memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr)); + else { +#endif + /* + * Read the common registers. Note that we should try to + * validate whether UHS would work or not. + */ + err = sdio_read_cccr(card, ocr); + if (err) { + mmc_sdio_resend_if_cond(host, card); + if (ocr & R4_18V_PRESENT) { + /* Retry init sequence, but without R4_18V_PRESENT. */ + retries = 0; + goto try_again; + } else { + goto remove; + } } +#ifdef CONFIG_MMC_EMBEDDED_SDIO } +#endif - /* - * Read the common CIS tuples. - */ - err = sdio_read_common_cis(card); - if (err) - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.cis) + memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis)); + else { +#endif + /* + * Read the common CIS tuples. + */ + err = sdio_read_common_cis(card); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif if (oldcard) { int same = (card->cis.vendor == oldcard->cis.vendor && @@ -1129,14 +1149,36 @@ int mmc_attach_sdio(struct mmc_host *host) funcs = (ocr & 0x70000000) >> 28; card->sdio_funcs = 0; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.funcs) + card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs; +#endif + /* * Initialize (but don't add) all present functions. 
*/ for (i = 0; i < funcs; i++, card->sdio_funcs++) { - err = sdio_init_func(host->card, i + 1); - if (err) - goto remove; - +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.funcs) { + struct sdio_func *tmp; + + tmp = sdio_alloc_func(host->card); + if (IS_ERR(tmp)) + goto remove; + tmp->num = (i + 1); + card->sdio_func[i] = tmp; + tmp->class = host->embedded_sdio_data.funcs[i].f_class; + tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize; + tmp->vendor = card->cis.vendor; + tmp->device = card->cis.device; + } else { +#endif + err = sdio_init_func(host->card, i + 1); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif /* * Enable Runtime PM for this func (if supported) */ diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2b32b88949ba..997d556fccf1 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -29,6 +29,10 @@ #include "sdio_cis.h" #include "sdio_bus.h" +#ifdef CONFIG_MMC_EMBEDDED_SDIO +#include +#endif + #define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) /* show configuration fields */ @@ -264,7 +268,14 @@ static void sdio_release_func(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - sdio_free_func_cis(func); +#ifdef CONFIG_MMC_EMBEDDED_SDIO + /* + * If this device is embedded then we never allocated + * cis tables for this func + */ + if (!func->card->host->embedded_sdio_data.funcs) +#endif + sdio_free_func_cis(func); kfree(func->info); kfree(func->tmpbuf); diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 229dc18f0581..768972af8b85 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host) char pio_limit_string[20]; int ret; - mmc->f_max = host->max_clk; + if (!mmc->f_max || mmc->f_max > host->max_clk) + mmc->f_max = host->max_clk; mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 35026795be28..a84aa3f1ae85 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) static int dw_mci_exynos_runtime_resume(struct device *dev) { struct dw_mci *host = dev_get_drvdata(dev); + int ret; + + ret = dw_mci_runtime_resume(dev); + if (ret) + return ret; dw_mci_exynos_config_smu(host); - return dw_mci_runtime_resume(dev); + + return ret; } /** @@ -487,6 +493,7 @@ static unsigned long exynos_dwmmc_caps[4] = { static const struct dw_mci_drv_data exynos_drv_data = { .caps = exynos_dwmmc_caps, + .num_caps = ARRAY_SIZE(exynos_dwmmc_caps), .init = dw_mci_exynos_priv_init, .set_ios = dw_mci_exynos_set_ios, .parse_dt = dw_mci_exynos_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 64cda84b2302..864e7fcaffaf 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) if (priv->ctrl_id < 0) priv->ctrl_id = 0; + if (priv->ctrl_id >= TIMING_MODE) + return -EINVAL; + host->priv = priv; return 0; } @@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode) static const struct dw_mci_drv_data hi6220_data = { .caps = dw_mci_hi6220_caps, + .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps), .switch_voltage = dw_mci_hi6220_switch_voltage, .set_ios = 
dw_mci_hi6220_set_ios, .parse_dt = dw_mci_hi6220_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index a3f1c2b30145..339295212935 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = { static const struct dw_mci_drv_data rk3288_drv_data = { .caps = dw_mci_rk3288_dwmmc_caps, + .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps), .set_ios = dw_mci_rk3288_set_ios, .execute_tuning = dw_mci_rk3288_execute_tuning, .parse_dt = dw_mci_rk3288_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c index d38e94ae2b85..c06b5393312f 100644 --- a/drivers/mmc/host/dw_mmc-zx.c +++ b/drivers/mmc/host/dw_mmc-zx.c @@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = { static const struct dw_mci_drv_data zx_drv_data = { .caps = zx_dwmmc_caps, + .num_caps = ARRAY_SIZE(zx_dwmmc_caps), .execute_tuning = dw_mci_zx_execute_tuning, .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, .parse_dt = dw_mci_zx_parse_dt, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 4f2806720c5c..de31e20dc56c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) { struct dw_mci *host = s->private; + pm_runtime_get_sync(host->dev); + seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); @@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); + pm_runtime_put_autosuspend(host->dev); + return 0; } @@ -409,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host) cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; if (cto_div == 0) cto_div = 1; - cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); + + cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, + host->bus_hz); /* add a bit spare time */ cto_ms += 10; @@ -558,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32; /* Initialize reserved and buffer size fields to "0" */ + p->des0 = 0; p->des1 = 0; p->des2 = 0; p->des3 = 0; @@ -580,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) i++, p++) { p->des3 = cpu_to_le32(host->sg_dma + (sizeof(struct idmac_desc) * (i + 1))); + p->des0 = 0; p->des1 = 0; } @@ -1795,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host) } if (host->use_dma == TRANS_MODE_IDMAC) - /* It is also recommended that we reset and reprogram idmac */ - dw_mci_idmac_reset(host); + /* It is also required that we reinit idmac */ + dw_mci_idmac_init(host); ret = true; @@ -1943,8 +1951,9 @@ static void dw_mci_set_drto(struct dw_mci *host) drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; if (drto_div == 0) drto_div = 1; - drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, - host->bus_hz); + + drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, + host->bus_hz); /* add a bit spare time */ drto_ms += 10; @@ -2758,12 +2767,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + struct 
mmc_host *mmc = slot->mmc; + int ctrl_id; + + if (host->pdata->caps) + mmc->caps = host->pdata->caps; + + /* + * Support MMC_CAP_ERASE by default. + * It needs to use trim/discard/erase commands. + */ + mmc->caps |= MMC_CAP_ERASE; + + if (host->pdata->pm_caps) + mmc->pm_caps = host->pdata->pm_caps; + + if (host->dev->of_node) { + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (ctrl_id < 0) + ctrl_id = 0; + } else { + ctrl_id = to_platform_device(host->dev)->id; + } + + if (drv_data && drv_data->caps) { + if (ctrl_id >= drv_data->num_caps) { + dev_err(host->dev, "invalid controller id %d\n", + ctrl_id); + return -EINVAL; + } + mmc->caps |= drv_data->caps[ctrl_id]; + } + + if (host->pdata->caps2) + mmc->caps2 = host->pdata->caps2; + + /* Process SDIO IRQs through the sdio_irq_work. */ + if (mmc->caps & MMC_CAP_SDIO_IRQ) + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + return 0; +} + static int dw_mci_init_slot(struct dw_mci *host) { struct mmc_host *mmc; struct dw_mci_slot *slot; - const struct dw_mci_drv_data *drv_data = host->drv_data; - int ctrl_id, ret; + int ret; u32 freq[2]; mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); @@ -2797,38 +2851,13 @@ static int dw_mci_init_slot(struct dw_mci *host) if (!mmc->ocr_avail) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - if (host->pdata->caps) - mmc->caps = host->pdata->caps; - - /* - * Support MMC_CAP_ERASE by default. - * It needs to use trim/discard/erase commands. - */ - mmc->caps |= MMC_CAP_ERASE; - - if (host->pdata->pm_caps) - mmc->pm_caps = host->pdata->pm_caps; - - if (host->dev->of_node) { - ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); - if (ctrl_id < 0) - ctrl_id = 0; - } else { - ctrl_id = to_platform_device(host->dev)->id; - } - if (drv_data && drv_data->caps) - mmc->caps |= drv_data->caps[ctrl_id]; - - if (host->pdata->caps2) - mmc->caps2 = host->pdata->caps2; - ret = mmc_of_parse(mmc); if (ret) goto err_host_allocated; - /* Process SDIO IRQs through the sdio_irq_work. */ - if (mmc->caps & MMC_CAP_SDIO_IRQ) - mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + ret = dw_mci_init_slot_caps(slot); + if (ret) + goto err_host_allocated; /* Useful defaults if platform data is unset. */ if (host->use_dma == TRANS_MODE_IDMAC) { diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 34474ad731aa..044c87ce6725 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -542,6 +542,7 @@ struct dw_mci_slot { /** * dw_mci driver data - dw-mshc implementation specific driver data. * @caps: mmc subsystem specified capabilities of the controller(s). + * @num_caps: number of capabilities specified by @caps. * @init: early implementation specific initialization. * @set_ios: handle bus specific extensions. * @parse_dt: parse implementation specific device tree properties. 
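/*
 * Editor's note -- illustrative sketch only, NOT part of the patch above or
 * below. The dw_mmc.h hunks here add a num_caps field so that
 * dw_mci_init_slot_caps() (added in the dw_mmc.c hunk earlier in this series)
 * can reject a controller id that would index past drv_data->caps instead of
 * reading out of bounds. Below is a minimal, standalone C version of that
 * bounds-checked lookup; example_caps and lookup_caps are hypothetical names,
 * while in the driver each caps array is paired with
 * num_caps = ARRAY_SIZE(...).
 */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Per-controller capability words, analogous to exynos_dwmmc_caps[] etc. */
static const unsigned long example_caps[] = {
	0x00000001UL,	/* controller 0 */
	0x00000002UL,	/* controller 1 */
};

/* Reject out-of-range ids (the driver returns -EINVAL here). */
static int lookup_caps(int ctrl_id, unsigned long *caps)
{
	if (ctrl_id < 0 || (size_t)ctrl_id >= ARRAY_SIZE(example_caps))
		return -1;
	*caps |= example_caps[ctrl_id];
	return 0;
}

int main(void)
{
	unsigned long caps = 0;

	if (lookup_caps(3, &caps))
		printf("invalid controller id 3\n");
	if (!lookup_caps(1, &caps))
		printf("caps for controller 1: %#lx\n", caps);
	return 0;
}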
@@ -553,6 +554,7 @@ struct dw_mci_slot */ struct dw_mci_drv_data { unsigned long *caps; + u32 num_caps; int (*init)(struct dw_mci *host); void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); int (*parse_dt)(struct dw_mci *host); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 85745ef179e2..08a55c2e96e1 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -716,22 +716,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); - int ret; - - /* - * If this is the initial tuning, try to get a sane Rx starting - * phase before doing the actual tuning. - */ - if (!mmc->doing_retune) { - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); - - if (ret) - return ret; - } - - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); - if (ret) - return ret; return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } @@ -762,9 +746,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); - /* Reset phases */ + /* Reset rx phase */ clk_set_phase(host->rx_clk, 0); - clk_set_phase(host->tx_clk, 270); break; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fcf7235d5742..157e1d9e7725 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(renesas_sdhi_remove); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 85140c9af581..8b941f814472 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, return; } + /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */ + if (is_imx53_esdhc(imx_data)) { + /* + * According to the i.MX53 reference manual, if DLLCTRL[10] can + * be set, then the controller is eSDHCv3, else it is eSDHCv2. + */ + val = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL); + temp = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val, host->ioaddr + ESDHC_DLL_CTRL); + if (temp & BIT(10)) + pre_div = 2; + } + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index fc73e56eb1e2..92c483ec6cb2 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1251,6 +1251,21 @@ static int sdhci_msm_probe(struct platform_device *pdev) CORE_VENDOR_SPEC_CAPABILITIES0); } + /* + * Power on reset state may trigger power irq if previous status of + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq + * interrupt in GIC, any pending power irq interrupt should be + * acknowledged. Otherwise power irq interrupt handler would be + * fired prematurely. + */ + sdhci_msm_voltage_switch(host); + + /* + * Ensure that above writes are propagated before interrupt enablement + * in GIC.
+ */ + mb(); + /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { @@ -1260,6 +1275,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + /* Enable pwr irq interrupts */ + writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, sdhci_msm_pwr_irq, IRQF_ONESHOT, dev_name(&pdev->dev), host); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d96a057a7db8..4ffa6b173a21 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) return clock / 256 / 16; } +static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +{ + u32 val; + ktime_t timeout; + + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + + if (enable) + val |= ESDHC_CLOCK_SDCLKEN; + else + val &= ~ESDHC_CLOCK_SDCLKEN; + + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + val = ESDHC_CLOCK_STABLE; + while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { + if (ktime_after(ktime_get(), timeout)) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + udelay(10); + } +} + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - if (clock == 0) + if (clock == 0) { + esdhc_clock_enable(host, false); return; + } /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ if (esdhc->vendor_ver < VENDOR_V_23) @@ -558,39 +587,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) sdhci_writel(host, ctrl, ESDHC_PROCTL); } -static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +static void esdhc_reset(struct sdhci_host *host, u8 mask) { u32 val; - ktime_t timeout; - - val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - if (enable) - val |= ESDHC_CLOCK_SDCLKEN; - else - val &= ~ESDHC_CLOCK_SDCLKEN; - - sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { - if (ktime_after(ktime_get(), timeout)) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - break; - } - udelay(10); - } -} - -static void esdhc_reset(struct sdhci_host *host, u8 mask) -{ sdhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + if (mask & SDHCI_RESET_ALL) { + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + } } /* The SCFG, Supplemental Configuration Unit, provides SoC specific diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 67d787fa3306..eb84187894a1 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -594,9 +594,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot) slot->chip->rpm_retune = intel_host->d3_retune; } -static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) { + int err = sdhci_execute_tuning(mmc, opcode); + 
struct sdhci_host *host = mmc_priv(mmc); + + if (err) + return err; + + /* + * Tuning can leave the IP in an active state (Buffer Read Enable bit + * set) which prevents the entry to low power states (i.e. S0i3). Data + * reset will clear it. + */ + sdhci_reset(host, SDHCI_RESET_DATA); + + return 0; +} + +static void byt_probe_slot(struct sdhci_pci_slot *slot) +{ + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + byt_read_dsm(slot); + + ops->execute_tuning = intel_execute_tuning; +} + +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + byt_probe_slot(slot); slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | MMC_CAP_CMD_DURING_TFR | @@ -606,6 +633,9 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ slot->host->mmc_host_ops.select_drive_strength = intel_select_drive_strength; + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_EMMC) { + slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; + } return 0; } @@ -651,7 +681,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { int err; - byt_read_dsm(slot); + byt_probe_slot(slot); err = ni_set_max_freq(slot); if (err) @@ -664,7 +694,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { - byt_read_dsm(slot); + byt_probe_slot(slot); slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | MMC_CAP_WAIT_WHILE_BUSY; return 0; @@ -672,7 +702,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) { - byt_read_dsm(slot); + byt_probe_slot(slot); slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; slot->cd_idx = 0; @@ -779,6 +809,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; break; case INTEL_MRFLD_SDIO: + /* Advertise 2.0v for compatibility with the SDIO card's OCR */ + slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD; break; diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 0842bbc2d7ad..4d0791f6ec23 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode, mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); } +static void xenon_voltage_switch(struct sdhci_host *host) +{ + /* Wait for 5ms after set 1.8V signal enable bit */ + usleep_range(5000, 5500); +} + static const struct sdhci_ops sdhci_xenon_ops = { + .voltage_switch = xenon_voltage_switch, .set_clock = sdhci_set_clock, .set_power = xenon_set_power, .set_bus_width = sdhci_set_bus_width, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 0d5fcca18c9e..d35deb79965d 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include #include @@ -501,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host, if (data->host_cookie == COOKIE_PRE_MAPPED) return data->sg_count; - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + /* Bounce write requests to the bounce buffer */ + if (host->bounce_buffer) { + unsigned int length = data->blksz * data->blocks; + + if (length > 
host->bounce_buffer_size) { + pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", + mmc_hostname(host->mmc), length, + host->bounce_buffer_size); + return -EIO; + } + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + /* Copy the data to the bounce buffer */ + sg_copy_to_buffer(data->sg, data->sg_len, + host->bounce_buffer, + length); + } + /* Switch ownership to the DMA */ + dma_sync_single_for_device(host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + /* Just a dummy value */ + sg_count = 1; + } else { + /* Just access the data directly from memory */ + sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } if (sg_count == 0) return -ENOSPC; @@ -672,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host, } } +static u32 sdhci_sdma_address(struct sdhci_host *host) +{ + if (host->bounce_buffer) + return host->bounce_addr; + else + return sg_dma_address(host->data->sg); +} + static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; @@ -857,8 +894,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) SDHCI_ADMA_ADDRESS_HI); } else { WARN_ON(sg_cnt != 1); - sdhci_writel(host, sg_dma_address(data->sg), - SDHCI_DMA_ADDRESS); + sdhci_writel(host, sdhci_sdma_address(host), + SDHCI_DMA_ADDRESS); } } @@ -1433,6 +1470,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, if (mode != MMC_POWER_OFF) { switch (1 << vdd) { case MMC_VDD_165_195: + /* + * Without a regulator, SDHCI does not support 2.0v + * so we only get here if the driver deliberately + * added the 2.0v range to ocr_avail. Map it to 1.8v + * for the purpose of turning on the power. + */ + case MMC_VDD_20_21: pwr = SDHCI_POWER_180; break; case MMC_VDD_29_30: @@ -2247,7 +2291,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) mrq->data->host_cookie = COOKIE_UNMAPPED; - if (host->flags & SDHCI_REQ_USE_DMA) + /* + * No pre-mapping in the pre hook if we're using the bounce buffer, + * for that we would need two bounce buffers since one buffer is + * in flight when this is getting called. 
+ */ + if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); } @@ -2351,8 +2400,45 @@ static bool sdhci_request_done(struct sdhci_host *host) struct mmc_data *data = mrq->data; if (data && data->host_cookie == COOKIE_MAPPED) { - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + if (host->bounce_buffer) { + /* + * On reads, copy the bounced data into the + * sglist + */ + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + unsigned int length = data->bytes_xfered; + + if (length > host->bounce_buffer_size) { + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", + mmc_hostname(host->mmc), + host->bounce_buffer_size, + data->bytes_xfered); + /* Cap it down and continue */ + length = host->bounce_buffer_size; + } + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + DMA_FROM_DEVICE); + sg_copy_from_buffer(data->sg, + data->sg_len, + host->bounce_buffer, + length); + } else { + /* No copying, just switch ownership */ + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + } + } else { + /* Unmap the raw data */ + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + } data->host_cookie = COOKIE_UNMAPPED; } } @@ -2635,7 +2721,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) */ if (intmask & SDHCI_INT_DMA_END) { u32 dmastart, dmanow; - dmastart = sg_dma_address(host->data->sg); + + dmastart = sdhci_sdma_address(host); dmanow = dmastart + host->data->bytes_xfered; /* * Force update to the next DMA block boundary. @@ -3216,6 +3303,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) } EXPORT_SYMBOL_GPL(__sdhci_read_caps); +static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + unsigned int max_blocks; + unsigned int bounce_size; + int ret; + + /* + * Cap the bounce buffer at 64KB. Using a bigger bounce buffer + * has diminishing returns, this is probably because SD/MMC + * cards are usually optimized to handle this size of requests. + */ + bounce_size = SZ_64K; + /* + * Adjust downwards to maximum request size if this is less + * than our segment size, else hammer down the maximum + * request size to the maximum buffer size. + */ + if (mmc->max_req_size < bounce_size) + bounce_size = mmc->max_req_size; + max_blocks = bounce_size / 512; + + /* + * When we just support one segment, we can get significant + * speedups by the help of a bounce buffer to group scattered + * reads/writes together. + */ + host->bounce_buffer = devm_kmalloc(mmc->parent, + bounce_size, + GFP_KERNEL); + if (!host->bounce_buffer) { + pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", + mmc_hostname(mmc), + bounce_size); + /* + * Exiting with zero here makes sure we proceed with + * mmc->max_segs == 1. 
+ */ + return 0; + } + + host->bounce_addr = dma_map_single(mmc->parent, + host->bounce_buffer, + bounce_size, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(mmc->parent, host->bounce_addr); + if (ret) + /* Again fall back to max_segs == 1 */ + return 0; + host->bounce_buffer_size = bounce_size; + + /* Lie about this since we're bouncing */ + mmc->max_segs = max_blocks; + mmc->max_seg_size = bounce_size; + mmc->max_req_size = bounce_size; + + pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", + mmc_hostname(mmc), max_blocks, bounce_size); + + return 0; +} + int sdhci_setup_host(struct sdhci_host *host) { struct mmc_host *mmc; @@ -3650,23 +3799,30 @@ int sdhci_setup_host(struct sdhci_host *host) spin_lock_init(&host->lock); + /* + * Maximum number of sectors in one transfer. Limited by SDMA boundary + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this + * is less anyway. + */ + mmc->max_req_size = 524288; + /* * Maximum number of segments. Depends on if the hardware * can do scatter/gather or not. */ - if (host->flags & SDHCI_USE_ADMA) + if (host->flags & SDHCI_USE_ADMA) { mmc->max_segs = SDHCI_MAX_SEGS; - else if (host->flags & SDHCI_USE_SDMA) + } else if (host->flags & SDHCI_USE_SDMA) { mmc->max_segs = 1; - else /* PIO */ + if (swiotlb_max_segment()) { + unsigned int max_req_size = (1 << IO_TLB_SHIFT) * + IO_TLB_SEGSIZE; + mmc->max_req_size = min(mmc->max_req_size, + max_req_size); + } + } else { /* PIO */ mmc->max_segs = SDHCI_MAX_SEGS; - - /* - * Maximum number of sectors in one transfer. Limited by SDMA boundary - * size (512KiB). Note some tuning modes impose a 4MiB limit, but this - * is less anyway. - */ - mmc->max_req_size = 524288; + } /* * Maximum segment size. Could be one segment with the maximum number @@ -3705,6 +3861,13 @@ int sdhci_setup_host(struct sdhci_host *host) */ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 
1 : 65535; + if (mmc->max_segs == 1) { + /* This may alter mmc->*_blk_* parameters */ + ret = sdhci_allocate_bounce_buffer(host); + if (ret) + return ret; + } + return 0; unreg: diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 54bc444c317f..1d7d61e25dbf 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -440,6 +440,9 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + char *bounce_buffer; /* For packing SDMA reads/writes */ + dma_addr_t bounce_addr; + unsigned int bounce_buffer_size; const struct sdhci_ops *ops; /* Low level hw interface */ diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 9c4e6199b854..3a6d49f07e22 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) { struct tmio_mmc_data *pdata = host->pdata; struct mmc_host *mmc = host->mmc; + int err; - mmc_regulator_get_supply(mmc); + err = mmc_regulator_get_supply(mmc); + if (err) + return err; /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 7c0b27d132b1..b479bd81120b 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, do { uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); mask = (1 << (cfi->device_type * 8)) - 1; + if (ofs >= map->size) + return 0; result = map_read(map, base + ofs); bank++; } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 84b16133554b..0806f72102c0 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor) struct dentry *root = floor->dbg.dfs_dir; struct docg3 *docg3 = floor->priv; - if (IS_ERR_OR_NULL(root)) + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + dev_warn(floor->dev.parent, + "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return; + } debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, &flashcontrol_fops); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 3568294d4854..b25f444c5914 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -487,7 +487,7 @@ static int shrink_ecclayout(struct mtd_info *mtd, for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { u32 eccpos; - ret = mtd_ooblayout_ecc(mtd, section, &oobregion); + ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); if (ret < 0) { if (ret != -ERANGE) return ret; @@ -534,7 +534,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to) for (i = 0; i < ARRAY_SIZE(to->eccpos);) { u32 eccpos; - ret = mtd_ooblayout_ecc(mtd, section, &oobregion); + ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); if (ret < 0) { if (ret != -ERANGE) return ret; diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index f25eca79f4e5..68c9d98a3347 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -2547,6 +2547,7 @@ static struct platform_driver atmel_nand_controller_driver = { .driver = { .name = "atmel-nand-controller", .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), + .pm = 
&atmel_nand_controller_pm_ops, }, .probe = atmel_nand_controller_probe, .remove = atmel_nand_controller_remove, diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 8268636675ef..4124bf91bee6 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -426,7 +426,7 @@ static int get_strength(struct atmel_pmecc_user *user) static int get_sectorsize(struct atmel_pmecc_user *user) { - return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512; + return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512; } static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector) diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index e0eb51d8c012..2a978d9832a7 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -1763,7 +1763,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, err = brcmstb_nand_verify_erased_page(mtd, chip, buf, addr); /* erased page bitflips corrected */ - if (err > 0) + if (err >= 0) return err; } @@ -2193,16 +2193,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) if (ctrl->nand_version >= 0x0702) tmp |= ACC_CONTROL_RD_ERASED; tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; - if (ctrl->features & BRCMNAND_HAS_PREFETCH) { - /* - * FIXME: Flash DMA + prefetch may see spurious erased-page ECC - * errors - */ - if (has_flash_dma(ctrl)) - tmp &= ~ACC_CONTROL_PREFETCH; - else - tmp |= ACC_CONTROL_PREFETCH; - } + if (ctrl->features & BRCMNAND_HAS_PREFETCH) + tmp &= ~ACC_CONTROL_PREFETCH; + nand_writereg(ctrl, offs, tmp); return 0; diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index 81370c79aa48..7ad0db65a6fa 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c @@ -124,3 +124,7 @@ static struct pci_driver denali_pci_driver = { }; module_pci_driver(denali_pci_driver); + +MODULE_DESCRIPTION("PCI driver for Denali NAND controller"); +MODULE_AUTHOR("Intel Corporation and its suppliers"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 9e03bac7f34c..4005b427023c 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) /* returns nonzero if entire page is blank */ static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, - u32 *eccstat, unsigned int bufnum) + u32 eccstat, unsigned int bufnum) { - u32 reg = eccstat[bufnum / 4]; - int errors; - - errors = (reg >> ((3 - bufnum % 4) * 8)) & 15; - - return errors; + return (eccstat >> ((3 - bufnum % 4) * 8)) & 15; } /* @@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 eccstat[4]; + u32 eccstat; int i; /* set the chip select for NAND Transaction */ @@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) if (nctrl->eccread) { int errors; int bufnum = nctrl->page & priv->bufnum_mask; - int sector = bufnum * chip->ecc.steps; - int sector_end = sector + chip->ecc.steps - 1; + int sector_start = bufnum * chip->ecc.steps; + int sector_end = sector_start + chip->ecc.steps - 1; __be32 *eccstat_regs; - if (ctrl->version >= FSL_IFC_VERSION_2_0_0) - eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; - else - eccstat_regs = 
ifc->ifc_nand.v1_nand_eccstat; + eccstat_regs = ifc->ifc_nand.nand_eccstat; + eccstat = ifc_in32(&eccstat_regs[sector_start / 4]); - for (i = sector / 4; i <= sector_end / 4; i++) - eccstat[i] = ifc_in32(&eccstat_regs[i]); + for (i = sector_start; i <= sector_end; i++) { + if (i != sector_start && !(i % 4)) + eccstat = ifc_in32(&eccstat_regs[i / 4]); - for (i = sector; i <= sector_end; i++) { errors = check_read_ecc(mtd, ctrl, eccstat, i); if (errors == 15) { @@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; u32 nand_fsr; + int status; /* Use READ_STATUS command, but wait for the device to be ready */ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | @@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) fsl_ifc_run_command(mtd); nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - + status = nand_fsr >> 24; /* * The chip always seems to report that it is * write-protected, even when it is not. */ - return nand_fsr | NAND_STATUS_WP; + return status | NAND_STATUS_WP; } /* @@ -916,6 +910,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) if (ctrl->version >= FSL_IFC_VERSION_1_1_0) fsl_ifc_sram_init(priv); + /* + * As IFC version 2.0.0 has 16KB of internal SRAM as compared to older + * versions which had 8KB. Hence bufnum mask needs to be updated. + */ + if (ctrl->version >= FSL_IFC_VERSION_2_0_0) + priv->bufnum_mask = (priv->bufnum_mask * 2) + 1; + return 0; } diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 50f8d4a1b983..d4d824ef64e9 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, return ret; } - /* handle the block mark swapping */ - block_mark_swapping(this, payload_virt, auxiliary_virt); - /* Loop over status bytes, accumulating ECC status. */ status = auxiliary_virt + nfc_geo->auxiliary_status_offset; @@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, max_bitflips = max_t(unsigned int, max_bitflips, *status); } + /* handle the block mark swapping */ + block_mark_swapping(this, buf, auxiliary_virt); + if (oob_required) { /* * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 7f3b065b6b8f..c51d214d169e 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c @@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) op = ECC_DECODE; dec = readw(ecc->regs + ECC_DECDONE); if (dec & ecc->sectors) { + /* + * Clear decode IRQ status once again to ensure that + * there will be no extra IRQ. + */ + readw(ecc->regs + ECC_DECIRQ_STA); ecc->sectors = 0; complete(&ecc->done); } else { @@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) } } - writel(0, ecc->regs + ECC_IRQ_REG(op)); - return IRQ_HANDLED; } @@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc) /* disable it */ mtk_ecc_wait_idle(ecc, op); + if (op == ECC_DECODE) + /* + * Clear decode IRQ status in case there is a timeout to wait + * decode IRQ. 
+ */ + readw(ecc->regs + ECC_DECIRQ_STA); writew(0, ecc->regs + ECC_IRQ_REG(op)); writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 12edaae17d81..528e04f96c13 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -710,7 +710,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, chip->cmd_ctrl(mtd, readcmd, ctrl); ctrl &= ~NAND_CTRL_CHANGE; } - chip->cmd_ctrl(mtd, command, ctrl); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, ctrl); /* Address cycle, when necessary */ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; @@ -739,6 +740,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: @@ -832,7 +834,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /* Command latch cycle */ - chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, + NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); if (column != -1 || page_addr != -1) { int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; @@ -868,6 +872,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_CACHEDPROG: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: @@ -1246,6 +1251,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) return 0; } +EXPORT_SYMBOL_GPL(nand_reset); /** * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data @@ -2200,6 +2206,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome); static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { + unsigned int max_bitflips = 0; int page, realpage, chipnr; struct nand_chip *chip = mtd_to_nand(mtd); struct mtd_ecc_stats stats; @@ -2257,6 +2264,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, nand_wait_ready(mtd); } + max_bitflips = max_t(unsigned int, max_bitflips, ret); + readlen -= len; if (!readlen) break; @@ -2282,7 +2291,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; - return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; + return max_bitflips; } /** @@ -2799,15 +2808,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const uint8_t *buf) { struct nand_chip *chip = mtd_to_nand(mtd); + int chipnr = (int)(to >> chip->chip_shift); struct mtd_oob_ops ops; int ret; - /* Wait for the device to get ready */ - panic_nand_wait(mtd, chip, 400); - /* Grab the device */ panic_nand_get_device(chip, mtd, FL_WRITING); + chip->select_chip(mtd, chipnr); + + /* Wait for the device to get ready */ + panic_nand_wait(mtd, chip, 400); + memset(&ops, 0, sizeof(ops)); ops.len = len; ops.datbuf = (uint8_t *)buf; diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 246b4393118e..44322a363ba5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev) struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; - if (!IS_ENABLED(CONFIG_DEBUG_FS)) + /* + * Just skip debugfs initialization when the debugfs directory is + * missing. 
+ */ + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return 0; - - if (IS_ERR_OR_NULL(root)) - return -1; + } dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, root, dev, &dfs_fops); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 54540c8fa1a2..9f98f74ff221 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, 0x97, 0x79, 0xe5, 0x24, 0xb5}; /** - * omap_calculate_ecc_bch - Generate bytes of ECC bytes + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed * @ecc_code: The ecc_code buffer + * @i: The sector number (for a multi sector page) * - * Support calculating of BCH4/8 ecc vectors for the page + * Support calculating of BCH4/8/16 ECC vectors for one sector + * within a page. Sector number is in @i. */ -static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, - const u_char *dat, u_char *ecc_calc) +static int _omap_calculate_ecc_bch(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc, int i) { struct omap_nand_info *info = mtd_to_omap(mtd); int eccbytes = info->nand.ecc.bytes; struct gpmc_nand_regs *gpmc_regs = &info->reg; u8 *ecc_code; - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; + unsigned long bch_val1, bch_val2, bch_val3, bch_val4; u32 val; - int i, j; + int j; + + ecc_code = ecc_calc; + switch (info->ecc_opt) { + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH8_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); + *ecc_code++ = (bch_val4 & 0xFF); + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); + *ecc_code++ = (bch_val3 & 0xFF); + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); + *ecc_code++ = (bch_val2 & 0xFF); + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); + *ecc_code++ = (bch_val1 & 0xFF); + break; + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH4_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); + *ecc_code++ = ((bch_val2 & 0xF) << 4) | + ((bch_val1 >> 28) & 0xF); + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); + *ecc_code++ = ((bch_val1 & 0xF) << 4); + break; + case OMAP_ECC_BCH16_CODE_HW: + val = readl(gpmc_regs->gpmc_bch_result6[i]); + ecc_code[0] = ((val >> 8) & 0xFF); + ecc_code[1] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result5[i]); + ecc_code[2] = ((val >> 24) & 0xFF); + ecc_code[3] = ((val >> 16) & 0xFF); + ecc_code[4] = ((val >> 8) & 0xFF); + ecc_code[5] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result4[i]); + ecc_code[6] = ((val >> 24) & 0xFF); + ecc_code[7] = ((val >> 16) & 0xFF); + ecc_code[8] = ((val >> 8) & 0xFF); + ecc_code[9] = ((val >> 0) & 0xFF); + val = 
readl(gpmc_regs->gpmc_bch_result3[i]); + ecc_code[10] = ((val >> 24) & 0xFF); + ecc_code[11] = ((val >> 16) & 0xFF); + ecc_code[12] = ((val >> 8) & 0xFF); + ecc_code[13] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result2[i]); + ecc_code[14] = ((val >> 24) & 0xFF); + ecc_code[15] = ((val >> 16) & 0xFF); + ecc_code[16] = ((val >> 8) & 0xFF); + ecc_code[17] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result1[i]); + ecc_code[18] = ((val >> 24) & 0xFF); + ecc_code[19] = ((val >> 16) & 0xFF); + ecc_code[20] = ((val >> 8) & 0xFF); + ecc_code[21] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result0[i]); + ecc_code[22] = ((val >> 24) & 0xFF); + ecc_code[23] = ((val >> 16) & 0xFF); + ecc_code[24] = ((val >> 8) & 0xFF); + ecc_code[25] = ((val >> 0) & 0xFF); + break; + default: + return -EINVAL; + } + + /* ECC scheme specific syndrome customizations */ + switch (info->ecc_opt) { + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + ecc_calc[j] ^= bch4_polynomial[j]; + break; + case OMAP_ECC_BCH4_CODE_HW: + /* Set 8th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + ecc_calc[j] ^= bch8_polynomial[j]; + break; + case OMAP_ECC_BCH8_CODE_HW: + /* Set 14th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH16_CODE_HW: + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used + * when SW based correction is required as ECC is required for one sector + * at a time. + */ +static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0); +} + +/** + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. 
+ */ +static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + struct omap_nand_info *info = mtd_to_omap(mtd); + int eccbytes = info->nand.ecc.bytes; + unsigned long nsectors; + int i, ret; nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; for (i = 0; i < nsectors; i++) { - ecc_code = ecc_calc; - switch (info->ecc_opt) { - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH8_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); - *ecc_code++ = (bch_val4 & 0xFF); - *ecc_code++ = ((bch_val3 >> 24) & 0xFF); - *ecc_code++ = ((bch_val3 >> 16) & 0xFF); - *ecc_code++ = ((bch_val3 >> 8) & 0xFF); - *ecc_code++ = (bch_val3 & 0xFF); - *ecc_code++ = ((bch_val2 >> 24) & 0xFF); - *ecc_code++ = ((bch_val2 >> 16) & 0xFF); - *ecc_code++ = ((bch_val2 >> 8) & 0xFF); - *ecc_code++ = (bch_val2 & 0xFF); - *ecc_code++ = ((bch_val1 >> 24) & 0xFF); - *ecc_code++ = ((bch_val1 >> 16) & 0xFF); - *ecc_code++ = ((bch_val1 >> 8) & 0xFF); - *ecc_code++ = (bch_val1 & 0xFF); - break; - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH4_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - *ecc_code++ = ((bch_val2 >> 12) & 0xFF); - *ecc_code++ = ((bch_val2 >> 4) & 0xFF); - *ecc_code++ = ((bch_val2 & 0xF) << 4) | - ((bch_val1 >> 28) & 0xF); - *ecc_code++ = ((bch_val1 >> 20) & 0xFF); - *ecc_code++ = ((bch_val1 >> 12) & 0xFF); - *ecc_code++ = ((bch_val1 >> 4) & 0xFF); - *ecc_code++ = ((bch_val1 & 0xF) << 4); - break; - case OMAP_ECC_BCH16_CODE_HW: - val = readl(gpmc_regs->gpmc_bch_result6[i]); - ecc_code[0] = ((val >> 8) & 0xFF); - ecc_code[1] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result5[i]); - ecc_code[2] = ((val >> 24) & 0xFF); - ecc_code[3] = ((val >> 16) & 0xFF); - ecc_code[4] = ((val >> 8) & 0xFF); - ecc_code[5] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result4[i]); - ecc_code[6] = ((val >> 24) & 0xFF); - ecc_code[7] = ((val >> 16) & 0xFF); - ecc_code[8] = ((val >> 8) & 0xFF); - ecc_code[9] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result3[i]); - ecc_code[10] = ((val >> 24) & 0xFF); - ecc_code[11] = ((val >> 16) & 0xFF); - ecc_code[12] = ((val >> 8) & 0xFF); - ecc_code[13] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result2[i]); - ecc_code[14] = ((val >> 24) & 0xFF); - ecc_code[15] = ((val >> 16) & 0xFF); - ecc_code[16] = ((val >> 8) & 0xFF); - ecc_code[17] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result1[i]); - ecc_code[18] = ((val >> 24) & 0xFF); - ecc_code[19] = ((val >> 16) & 0xFF); - ecc_code[20] = ((val >> 8) & 0xFF); - ecc_code[21] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result0[i]); - ecc_code[22] = ((val >> 24) & 0xFF); - ecc_code[23] = ((val >> 16) & 0xFF); - ecc_code[24] = ((val >> 8) & 0xFF); - ecc_code[25] = ((val >> 0) & 0xFF); - break; - default: - return -EINVAL; - } - - /* ECC scheme specific syndrome customizations */ - switch (info->ecc_opt) { - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch4_polynomial[j]; - break; - case OMAP_ECC_BCH4_CODE_HW: - /* Set 8th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case 
OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch8_polynomial[j]; - break; - case OMAP_ECC_BCH8_CODE_HW: - /* Set 14th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case OMAP_ECC_BCH16_CODE_HW: - break; - default: - return -EINVAL; - } + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i); + if (ret) + return ret; - ecc_calc += eccbytes; + ecc_calc += eccbytes; } return 0; @@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->write_buf(mtd, buf, mtd->writesize); /* Update ecc vector from GPMC result registers */ - chip->ecc.calculate(mtd, buf, &ecc_calc[0]); + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, chip->ecc.total); @@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, return 0; } +/** + * omap_write_subpage_bch - BCH hardware ECC based subpage write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @offset: column address of subpage within the page + * @data_len: data length + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + * @page: page number to write + * + * OMAP optimized subpage write method. + */ +static int omap_write_subpage_bch(struct mtd_info *mtd, + struct nand_chip *chip, u32 offset, + u32 data_len, const u8 *buf, + int oob_required, int page) +{ + u8 *ecc_calc = chip->buffers->ecccalc; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int ecc_steps = chip->ecc.steps; + u32 start_step = offset / ecc_size; + u32 end_step = (offset + data_len - 1) / ecc_size; + int step, ret = 0; + + /* + * Write entire page at one go as it would be optimal + * as ECC is calculated by hardware. + * ECC is calculated for all subpages but we choose + * only what we want. 
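Before the body of the subpage writer continues below, a minimal sketch of the step arithmetic it uses: the first and last affected ECC steps come from integer division of the byte offset and length, and every step outside that window is padded with 0xff. ECC_SIZE, ECC_BYTES and ECC_STEPS are assumed demo values, not the chip's actual configuration.

#include <stdio.h>
#include <string.h>

#define ECC_SIZE   512   /* bytes covered by one ECC step (assumption) */
#define ECC_BYTES  14    /* ECC bytes per step (assumption) */
#define ECC_STEPS  4     /* steps per page (assumption) */

int main(void)
{
        unsigned int offset = 1024, data_len = 512;   /* write only subpage 2 */
        unsigned int start_step = offset / ECC_SIZE;
        unsigned int end_step = (offset + data_len - 1) / ECC_SIZE;
        unsigned char ecc[ECC_STEPS][ECC_BYTES];
        unsigned int step;

        for (step = 0; step < ECC_STEPS; step++) {
                if (step < start_step || step > end_step)
                        memset(ecc[step], 0xff, ECC_BYTES);   /* untouched subpage */
                else
                        memset(ecc[step], 0x00, ECC_BYTES);   /* stands in for real ECC */
                printf("step %u: %s\n", step,
                       ecc[step][0] == 0xff ? "masked (0xff)" : "calculated");
        }
        return 0;
}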
+ */ + + /* Enable GPMC ECC engine */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* Write data */ + chip->write_buf(mtd, buf, mtd->writesize); + + for (step = 0; step < ecc_steps; step++) { + /* mask ECC of un-touched subpages by padding 0xFF */ + if (step < start_step || step > end_step) + memset(ecc_calc, 0xff, ecc_bytes); + else + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); + + if (ret) + return ret; + + buf += ecc_size; + ecc_calc += ecc_bytes; + } + + /* copy calculated ECC for whole page to chip->buffer->oob */ + /* this include masked-value(0xFF) for unwritten subpages */ + ecc_calc = chip->buffers->ecccalc; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + /* write OOB buffer to NAND device */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + /** * omap_read_page_bch - BCH ecc based page read function for entire page * @mtd: mtd info structure @@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->ecc.total); /* Calculate ecc bytes */ - chip->ecc.calculate(mtd, buf, ecc_calc); + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, chip->ecc.total); @@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 16; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + 
nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 85cff68643e0..125b744c9c28 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command) switch (command) { case NAND_CMD_READ0: + case NAND_CMD_READOOB: case NAND_CMD_PAGEPROG: info->use_ecc = 1; break; diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 82244be3e766..958974821582 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -1853,8 +1853,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, /* Add ECC info retrieval from DT */ for (i = 0; i < ARRAY_SIZE(strengths); i++) { - if (ecc->strength <= strengths[i]) + if (ecc->strength <= strengths[i]) { + /* + * Update ecc->strength value with the actual strength + * that will be used by the ECC engine. + */ + ecc->strength = strengths[i]; break; + } } if (i >= ARRAY_SIZE(strengths)) { diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 8037d4b48a05..e2583a539b41 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (mtd->oobsize > 64) mtd->oobsize = 64; - /* - * mtd->ecclayout is not specified here because we're using the - * default large page ECC layout defined in NAND core. - */ + /* Use default large page ECC layout defined in NAND core */ + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); if (chip->ecc.strength == 32) { nfc->ecc_mode = ECC_60_BYTE; chip->ecc.bytes = 60; diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 8a596bfeddff..7802ac3ba934 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -422,7 +422,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, if (ret < 0) return ret; - val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; val |= ret << SSFSTS_CTL_COP_SHIFT; val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; val |= SSFSTS_CTL_SCGO; @@ -432,7 +432,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, if (ret) return ret; - status = readl(ispi->base + SSFSTS_CTL); + status = readl(ispi->sregs + SSFSTS_CTL); if (status & SSFSTS_CTL_FCERR) return -EIO; else if (status & SSFSTS_CTL_AEL) diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c index 86c0931543c5..ad6a3e1844cb 100644 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ b/drivers/mtd/spi-nor/stm32-quadspi.c @@ -240,12 +240,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, STM32_QSPI_FIFO_TIMEOUT_US); if (ret) { dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); - break; + return ret; } tx_fifo(buf++, qspi->io_base + QUADSPI_DR); } - return ret; + return 0; } static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c index 1cb3f7758fb6..766b2c385682 100644 --- a/drivers/mtd/tests/oobtest.c +++ b/drivers/mtd/tests/oobtest.c @@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum) ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err || ops.oobretlen != use_len) { 
pr_err("error: readoob failed at %#llx\n", (long long)addr); @@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum) ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err || ops.oobretlen != mtd->oobavail) { pr_err("error: readoob failed at %#llx\n", (long long)addr); @@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum) /* read entire block's OOB at one go */ err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err || ops.oobretlen != len) { pr_err("error: readoob failed at %#llx\n", (long long)addr); @@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void) pr_info("attempting to start read past end of OOB\n"); pr_info("an error is expected...\n"); err = mtd_read_oob(mtd, addr0, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) { pr_info("error occurred as expected\n"); err = 0; @@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void) pr_info("attempting to read past end of device\n"); pr_info("an error is expected...\n"); err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) { pr_info("error occurred as expected\n"); err = 0; @@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void) pr_info("attempting to read past end of device\n"); pr_info("an error is expected...\n"); err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) { pr_info("error occurred as expected\n"); err = 0; @@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void) ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) goto out; if (memcmpshow(addr, readbuf, writebuf, diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index b210fdb31c98..b1fc28f63882 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -99,6 +99,8 @@ struct ubiblock { /* Linked list of all ubiblock instances */ static LIST_HEAD(ubiblock_devices); +static DEFINE_IDR(ubiblock_minor_idr); +/* Protects ubiblock_devices and ubiblock_minor_idr */ static DEFINE_MUTEX(devices_mutex); static int ubiblock_major; @@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = { .init_request = ubiblock_init_request, }; -static DEFINE_IDR(ubiblock_minor_idr); - int ubiblock_create(struct ubi_volume_info *vi) { struct ubiblock *dev; @@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Check that the volume isn't already handled */ mutex_lock(&devices_mutex); if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { - mutex_unlock(&devices_mutex); - return -EEXIST; + ret = -EEXIST; + goto out_unlock; } - mutex_unlock(&devices_mutex); dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); - if (!dev) - return -ENOMEM; + if (!dev) { + ret = -ENOMEM; + goto out_unlock; + } mutex_init(&dev->dev_mutex); @@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi) goto out_free_queue; } - mutex_lock(&devices_mutex); list_add_tail(&dev->list, &ubiblock_devices); - mutex_unlock(&devices_mutex); /* Must be the last step: anyone can call file ops from now on */ add_disk(dev->gd); dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", dev->ubi_num, dev->vol_id, vi->name); + mutex_unlock(&devices_mutex); return 0; out_free_queue: @@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi) put_disk(dev->gd); out_free_dev: kfree(dev); +out_unlock: + mutex_unlock(&devices_mutex); return ret; } @@ -478,30 
+480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev) int ubiblock_remove(struct ubi_volume_info *vi) { struct ubiblock *dev; + int ret; mutex_lock(&devices_mutex); dev = find_dev_nolock(vi->ubi_num, vi->vol_id); if (!dev) { - mutex_unlock(&devices_mutex); - return -ENODEV; + ret = -ENODEV; + goto out_unlock; } /* Found a device, let's lock it so we can check if it's busy */ mutex_lock(&dev->dev_mutex); if (dev->refcnt > 0) { - mutex_unlock(&dev->dev_mutex); - mutex_unlock(&devices_mutex); - return -EBUSY; + ret = -EBUSY; + goto out_unlock_dev; } /* Remove from device list */ list_del(&dev->list); - mutex_unlock(&devices_mutex); - ubiblock_cleanup(dev); mutex_unlock(&dev->dev_mutex); + mutex_unlock(&devices_mutex); + kfree(dev); return 0; + +out_unlock_dev: + mutex_unlock(&dev->dev_mutex); +out_unlock: + mutex_unlock(&devices_mutex); + return ret; } static int ubiblock_resize(struct ubi_volume_info *vi) @@ -630,6 +638,7 @@ static void ubiblock_remove_all(void) struct ubiblock *next; struct ubiblock *dev; + mutex_lock(&devices_mutex); list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { /* The module is being forcefully removed */ WARN_ON(dev->desc); @@ -638,6 +647,7 @@ static void ubiblock_remove_all(void) ubiblock_cleanup(dev); kfree(dev); } + mutex_unlock(&devices_mutex); } int __init ubiblock_init(void) diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 85237cf661f9..3fd8d7ff7a02 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->last_eb_bytes = vol->usable_leb_size; } + /* Make volume "available" before it becomes accessible via sysfs */ + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; + ubi->vol_count += 1; + spin_unlock(&ubi->volumes_lock); + /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; @@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) if (err) goto out_sysfs; - spin_lock(&ubi->volumes_lock); - ubi->volumes[vol_id] = vol; - ubi->vol_count += 1; - spin_unlock(&ubi->volumes_lock); - ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); self_check_volumes(ubi); return err; @@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) */ cdev_device_del(&vol->cdev, &vol->dev); out_mapping: + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = NULL; + ubi->vol_count -= 1; + spin_unlock(&ubi->volumes_lock); ubi_eba_destroy_table(eba_tbl); out_acc: spin_lock(&ubi->volumes_lock); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index b5b8cd6f481c..668b46202507 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1528,6 +1528,46 @@ static void shutdown_work(struct ubi_device *ubi) } } +/** + * erase_aeb - erase a PEB given in UBI attach info PEB + * @ubi: UBI device description object + * @aeb: UBI attach info PEB + * @sync: If true, erase synchronously. 
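As a side note on the ubiblock_remove() restructuring above: the device-list mutex is taken first, the per-device mutex second, and every exit unwinds through goto labels in the reverse order so no path leaves a lock held. A hedged userspace sketch of that pattern follows, using pthread mutexes and invented names (blockdev, remove_dev) rather than the kernel types.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devices_mutex = PTHREAD_MUTEX_INITIALIZER;

struct blockdev {
        pthread_mutex_t dev_mutex;
        int refcnt;
};

static int remove_dev(struct blockdev *dev)
{
        int ret = 0;

        pthread_mutex_lock(&devices_mutex);
        if (!dev) {                       /* lookup failed */
                ret = -ENODEV;
                goto out_unlock;
        }

        pthread_mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {            /* still opened by someone */
                ret = -EBUSY;
                goto out_unlock_dev;
        }
        /* ... tear the device down here ... */

out_unlock_dev:
        pthread_mutex_unlock(&dev->dev_mutex);
out_unlock:
        pthread_mutex_unlock(&devices_mutex);
        return ret;
}

int main(void)
{
        struct blockdev dev = { PTHREAD_MUTEX_INITIALIZER, 1 };

        printf("remove busy device: %d\n", remove_dev(&dev));   /* -EBUSY */
        dev.refcnt = 0;
        printf("remove idle device: %d\n", remove_dev(&dev));   /* 0 */
        return 0;
}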
Otherwise schedule for erasure + */ +static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) +{ + struct ubi_wl_entry *e; + int err; + + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->pnum = aeb->pnum; + e->ec = aeb->ec; + ubi->lookuptbl[e->pnum] = e; + + if (sync) { + err = sync_erase(ubi, e, false); + if (err) + goto out_free; + + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } else { + err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false); + if (err) + goto out_free; + } + + return 0; + +out_free: + wl_entry_destroy(ubi, e); + + return err; +} + /** * ubi_wl_init - initialize the WL sub-system using attaching information. * @ubi: UBI device description object @@ -1566,18 +1606,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { cond_resched(); - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) + err = erase_aeb(ubi, aeb, false); + if (err) goto out_free; - e->pnum = aeb->pnum; - e->ec = aeb->ec; - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { - wl_entry_destroy(ubi, e); - goto out_free; - } - found_pebs++; } @@ -1635,6 +1667,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) ubi_assert(!ubi->lookuptbl[e->pnum]); ubi->lookuptbl[e->pnum] = e; } else { + bool sync = false; + /* * Usually old Fastmap PEBs are scheduled for erasure * and we don't have to care about them but if we face @@ -1644,18 +1678,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (ubi->lookuptbl[aeb->pnum]) continue; - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) - goto out_free; + /* + * The fastmap update code might not find a free PEB for + * writing the fastmap anchor to and then reuses the + * current fastmap anchor PEB. When this PEB gets erased + * and a power cut happens before it is written again we + * must make sure that the fastmap attach code doesn't + * find any outdated fastmap anchors, hence we erase the + * outdated fastmap anchor PEBs synchronously here. + */ + if (aeb->vol_id == UBI_FM_SB_VOLUME_ID) + sync = true; - e->pnum = aeb->pnum; - e->ec = aeb->ec; - ubi_assert(!ubi->lookuptbl[e->pnum]); - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { - wl_entry_destroy(ubi, e); + err = erase_aeb(ubi, aeb, sync); + if (err) goto out_free; - } } found_pebs++; diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 2260063b0ea8..6e5cf9d9cd99 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data) return dev->of_node == data; } +/* Note this function returns a reference to the mux_chip dev. 
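The comment above is the key to the mux core fix that follows: once the lookup has taken a reference, every early return must drop it again. A small illustrative sketch of that rule with a toy reference counter; get_obj()/put_obj() are stand-ins for get_device()/put_device(), not real kernel APIs.

#include <errno.h>
#include <stdio.h>

struct obj { int refcnt; };

static void get_obj(struct obj *o) { o->refcnt++; }
static void put_obj(struct obj *o) { o->refcnt--; }

/* A lookup-then-use helper whose lookup step already returned a referenced
 * object: every error exit after that point must put the reference back. */
static int use_obj(struct obj *o, int controller, int ncontrollers)
{
        get_obj(o);                       /* reference taken by the lookup */

        if (controller >= ncontrollers) { /* bad argument from the caller */
                put_obj(o);               /* balance before bailing out */
                return -EINVAL;
        }

        /* success: the caller keeps the reference and puts it later */
        return 0;
}

int main(void)
{
        struct obj o = { 0 };

        use_obj(&o, 5, 2);                                      /* error path */
        printf("refcnt after failed get: %d\n", o.refcnt);      /* 0 */

        use_obj(&o, 1, 2);                                      /* success path */
        printf("refcnt after successful get: %d\n", o.refcnt);  /* 1 */
        put_obj(&o);
        return 0;
}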
*/ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) { struct device *dev; @@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) (!args.args_count && (mux_chip->controllers > 1))) { dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", np, args.np); + put_device(&mux_chip->dev); return ERR_PTR(-EINVAL); } @@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) if (controller >= mux_chip->controllers) { dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", np, controller, args.np); + put_device(&mux_chip->dev); return ERR_PTR(-EINVAL); } - get_device(&mux_chip->dev); return &mux_chip->mux[controller]; } EXPORT_SYMBOL_GPL(mux_control_get); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2db581131b2..82f28ffccddf 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1524,39 +1524,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_close; } - /* If the mode uses primary, then the following is handled by - * bond_change_active_slave(). - */ - if (!bond_uses_primary(bond)) { - /* set promiscuity level to new slave */ - if (bond_dev->flags & IFF_PROMISC) { - res = dev_set_promiscuity(slave_dev, 1); - if (res) - goto err_close; - } - - /* set allmulti level to new slave */ - if (bond_dev->flags & IFF_ALLMULTI) { - res = dev_set_allmulti(slave_dev, 1); - if (res) - goto err_close; - } - - netif_addr_lock_bh(bond_dev); - - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - - netif_addr_unlock_bh(bond_dev); - } - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); - } - res = vlan_vids_add_by_dev(slave_dev, bond_dev); if (res) { netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", @@ -1721,6 +1688,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_upper_unlink; } + /* If the mode uses primary, then the following is handled by + * bond_change_active_slave(). 
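The relocated block that follows also gains an undo step: if enabling allmulti fails after promiscuous mode was already bumped, the promiscuity count is decremented again before the error is reported. A minimal sketch of that two-step rollback; set_promisc()/set_allmulti() are simulated setters invented for the demo, and the failure is forced.

#include <errno.h>
#include <stdio.h>

static int promisc, allmulti, fail_allmulti = 1;

static int set_promisc(int inc)  { promisc += inc;  return 0; }
static int set_allmulti(int inc)
{
        if (fail_allmulti && inc > 0)
                return -ENOSPC;
        allmulti += inc;
        return 0;
}

static int enslave(int want_promisc, int want_allmulti)
{
        int res;

        if (want_promisc) {
                res = set_promisc(1);
                if (res)
                        return res;
        }

        if (want_allmulti) {
                res = set_allmulti(1);
                if (res) {
                        /* undo the earlier step before reporting the error */
                        if (want_promisc)
                                set_promisc(-1);
                        return res;
                }
        }
        return 0;
}

int main(void)
{
        printf("enslave: %d, promisc left at %d\n", enslave(1, 1), promisc);
        return 0;
}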
+ */ + if (!bond_uses_primary(bond)) { + /* set promiscuity level to new slave */ + if (bond_dev->flags & IFF_PROMISC) { + res = dev_set_promiscuity(slave_dev, 1); + if (res) + goto err_sysfs_del; + } + + /* set allmulti level to new slave */ + if (bond_dev->flags & IFF_ALLMULTI) { + res = dev_set_allmulti(slave_dev, 1); + if (res) { + if (bond_dev->flags & IFF_PROMISC) + dev_set_promiscuity(slave_dev, -1); + goto err_sysfs_del; + } + } + + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + /* add lacpdu mc addr to mc list */ + u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + + dev_mc_add(slave_dev, lacpdu_multicast); + } + } + bond->slave_cnt++; bond_compute_features(bond); bond_set_carrier(bond); @@ -1744,6 +1745,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ +err_sysfs_del: + bond_sysfs_slave_del(new_slave); + err_upper_unlink: bond_upper_dev_unlink(bond, new_slave); @@ -1751,9 +1755,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) netdev_rx_handler_unregister(slave_dev); err_detach: - if (!bond_uses_primary(bond)) - bond_hw_addr_flush(bond_dev, slave_dev); - vlan_vids_del_by_dev(slave_dev, bond_dev); if (rcu_access_pointer(bond->primary_slave) == new_slave) RCU_INIT_POINTER(bond->primary_slave, NULL); diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 1e37313054f3..6da69af103e6 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev, return 0; } -static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +static void cc770_tx(struct net_device *dev, int mo) { struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - unsigned int mo = obj2msgobj(CC770_OBJ_TX); + struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; u8 dlc, rtr; u32 id; int i; - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - - if ((cc770_read_reg(priv, - msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { - netdev_err(dev, "TX register is still occupied!\n"); - return NETDEV_TX_BUSY; - } - - netif_stop_queue(dev); - dlc = cf->can_dlc; id = cf->can_id; - if (cf->can_id & CAN_RTR_FLAG) - rtr = 0; - else - rtr = MSGCFG_DIR; + rtr = cf->can_id & CAN_RTR_FLAG ? 
0 : MSGCFG_DIR; + + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); + if (id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; cc770_write_reg(priv, msgobj[mo].config, @@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < dlc; i++) cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); - /* Store echo skb before starting the transfer */ - can_put_echo_skb(skb, dev, 0); - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); +} - stats->tx_bytes += dlc; +static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + unsigned int mo = obj2msgobj(CC770_OBJ_TX); + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; - /* - * HM: We had some cases of repeated IRQs so make sure the - * INT is acknowledged I know it's already further up, but - * doing again fixed the issue - */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + netif_stop_queue(dev); + + if ((cc770_read_reg(priv, + msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { + netdev_err(dev, "TX register is still occupied!\n"); + return NETDEV_TX_BUSY; + } + + priv->tx_skb = skb; + cc770_tx(dev, mo); return NETDEV_TX_OK; } @@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); + struct can_frame *cf; + u8 ctrl1; + + ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); - /* Nothing more to send, switch off interrupts */ cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); - /* - * We had some cases of repeated IRQ so make sure the - * INT is acknowledged + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); + + if (unlikely(!priv->tx_skb)) { + netdev_err(dev, "missing tx skb in tx interrupt\n"); + return; + } + + if (unlikely(ctrl1 & MSGLST_SET)) { + stats->rx_over_errors++; + stats->rx_errors++; + } + + /* When the CC770 is sending an RTR message and it receives a regular + * message that matches the id of the RTR message, it will overwrite the + * outgoing message in the TX register. When this happens we must + * process the received message and try to transmit the outgoing skb + * again. 
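A compact way to read the logic described in that comment: the interrupt handler inspects the saved ctrl1 flags, treats MSGLST as an RX overrun, and if NEWDAT is set it delivers the received frame and re-queues the pending transmit instead of completing the echo. The sketch below uses made-up single-bit flag values; the real CC770 ctrl1 fields are two bits wide and live in the driver header.

#include <stdio.h>

#define DEMO_MSGLST  0x01   /* a message was lost (placeholder bit) */
#define DEMO_NEWDAT  0x02   /* new RX data landed in the TX object (placeholder bit) */

static void tx_interrupt(unsigned char ctrl1)
{
        if (ctrl1 & DEMO_MSGLST)
                printf("count an rx overrun\n");

        if (ctrl1 & DEMO_NEWDAT) {
                /* an incoming frame overwrote the pending RTR transmit:
                 * hand the frame up, then queue the outgoing skb again */
                printf("receive frame, then retransmit pending skb\n");
                return;
        }

        printf("transmit done: loop back echo skb, wake the queue\n");
}

int main(void)
{
        tx_interrupt(0);                          /* normal completion */
        tx_interrupt(DEMO_NEWDAT | DEMO_MSGLST);  /* RTR overwritten case */
        return 0;
}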
*/ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + if (unlikely(ctrl1 & NEWDAT_SET)) { + cc770_rx(dev, mo, ctrl1); + cc770_tx(dev, mo); + return; + } + cf = (struct can_frame *)priv->tx_skb->data; + stats->tx_bytes += cf->can_dlc; stats->tx_packets++; + + can_put_echo_skb(priv->tx_skb, dev, 0); can_get_echo_skb(dev, 0); + priv->tx_skb = NULL; + netif_wake_queue(dev); } @@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv) priv->can.do_set_bittiming = cc770_set_bittiming; priv->can.do_set_mode = cc770_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + priv->tx_skb = NULL; memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index a1739db98d91..95752e1d1283 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h @@ -193,6 +193,8 @@ struct cc770_priv { u8 cpu_interface; /* CPU interface register */ u8 clkout; /* Clock out register */ u8 bus_config; /* Bus conffiguration register */ + + struct sk_buff *tx_skb; }; struct net_device *alloc_cc770dev(int sizeof_priv); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a13a4896a8bd..ed8a2a7ce500 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -189,7 +189,7 @@ * MX35 FlexCAN2 03.00.00.00 no no ? no no * MX53 FlexCAN2 03.00.00.00 yes no no no no * MX6s FlexCAN3 10.00.12.00 yes yes no no yes - * VF610 FlexCAN3 ? no yes ? yes yes? + * VF610 FlexCAN3 ? no yes no yes yes? * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. */ @@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | + FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -525,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) data = be32_to_cpup((__be32 *)&cf->data[0]); flexcan_write(data, &priv->tx_mb->data[0]); } - if (cf->can_dlc > 3) { + if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); flexcan_write(data, &priv->tx_mb->data[1]); } diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 2772d05ff11c..fedd927ba6ed 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -30,6 +30,7 @@ #define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) #define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) #define IFI_CANFD_STCMD_BUSOFF BIT(4) +#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5) #define IFI_CANFD_STCMD_BUSMONITOR BIT(16) #define IFI_CANFD_STCMD_LOOPBACK BIT(18) #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) @@ -52,7 +53,10 @@ #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) #define IFI_CANFD_INTERRUPT 0xc +#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0) #define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) +#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2) +#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3) #define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) @@ -61,6 +65,10 @@ #define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) #define IFI_CANFD_IRQMASK 0x10 +#define 
IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0) +#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1) +#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2) +#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3) #define IFI_CANFD_IRQMASK_SET_ERR BIT(7) #define IFI_CANFD_IRQMASK_SET_TS BIT(15) #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) @@ -136,6 +144,8 @@ #define IFI_CANFD_SYSCLOCK 0x50 #define IFI_CANFD_VER 0x54 +#define IFI_CANFD_VER_REV_MASK 0xff +#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15 #define IFI_CANFD_IP_ID 0x58 #define IFI_CANFD_IP_ID_VALUE 0xD073CAFD @@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable) if (enable) { enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | - IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; + IFI_CANFD_IRQMASK_RXFIFO_NEMPTY | + IFI_CANFD_IRQMASK_ERROR_STATE_CHG | + IFI_CANFD_IRQMASK_ERROR_WARNING | + IFI_CANFD_IRQMASK_ERROR_BUSOFF; if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; } @@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev) return 1; } -static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) +static int ifi_canfd_handle_lec_err(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | @@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, switch (new_state) { case CAN_STATE_ERROR_ACTIVE: + /* error active state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_ACTIVE; + break; + case CAN_STATE_ERROR_WARNING: /* error warning state */ priv->can.can_stats.error_warning++; priv->can.state = CAN_STATE_ERROR_WARNING; @@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, ifi_canfd_get_berr_counter(ndev, &bec); switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: /* error warning state */ cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? @@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, return 1; } -static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) +static int ifi_canfd_handle_state_errors(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); + u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); int work_done = 0; - u32 isr; - /* - * The ErrWarn condition is a little special, since the bit is - * located in the INTERRUPT register instead of STCMD register. 
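With the new IFI_CANFD_STCMD_ERROR_WARNING bit the poll loop can derive the CAN state from the status word alone, checking the most severe condition first. A rough sketch of that mapping with placeholder bit values; the real masks are the driver's IFI_CANFD_STCMD_* definitions and may differ.

#include <stdio.h>

#define DEMO_ERROR_ACTIVE   (1u << 2)
#define DEMO_ERROR_PASSIVE  (1u << 3)
#define DEMO_BUSOFF         (1u << 4)
#define DEMO_ERROR_WARNING  (1u << 5)

enum can_state { ACTIVE, WARNING, PASSIVE, BUSOFF };

static enum can_state decode(unsigned int stcmd, enum can_state cur)
{
        /* check the most severe conditions first, fall back to active */
        if (stcmd & DEMO_BUSOFF)
                return BUSOFF;
        if (stcmd & DEMO_ERROR_PASSIVE)
                return PASSIVE;
        if (stcmd & DEMO_ERROR_WARNING)
                return WARNING;
        if (stcmd & DEMO_ERROR_ACTIVE)
                return ACTIVE;
        return cur;   /* no state bit set: keep the current state */
}

int main(void)
{
        printf("%d\n", decode(DEMO_ERROR_WARNING, ACTIVE));   /* 1 = WARNING */
        printf("%d\n", decode(DEMO_ERROR_ACTIVE, WARNING));   /* 0 = back to ACTIVE */
        return 0;
}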
- */ - isr = readl(priv->base + IFI_CANFD_INTERRUPT); - if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && + if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) && + (priv->can.state != CAN_STATE_ERROR_ACTIVE)) { + netdev_dbg(ndev, "Error, entered active state\n"); + work_done += ifi_canfd_handle_state_change(ndev, + CAN_STATE_ERROR_ACTIVE); + } + + if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) && (priv->can.state != CAN_STATE_ERROR_WARNING)) { - /* Clear the interrupt */ - writel(IFI_CANFD_INTERRUPT_ERROR_WARNING, - priv->base + IFI_CANFD_INTERRUPT); netdev_dbg(ndev, "Error, entered warning state\n"); work_done += ifi_canfd_handle_state_change(ndev, CAN_STATE_ERROR_WARNING); @@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) { struct net_device *ndev = napi->dev; struct ifi_canfd_priv *priv = netdev_priv(ndev); - const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE | - IFI_CANFD_STCMD_BUSOFF; - int work_done = 0; - - u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); - u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); + int work_done = 0; /* Handle bus state changes */ - if ((stcmd & stcmd_state_mask) || - ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0)) - work_done += ifi_canfd_handle_state_errors(ndev, stcmd); + work_done += ifi_canfd_handle_state_errors(ndev); /* Handle lost messages on RX */ if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) @@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) /* Handle lec errors on the bus */ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) - work_done += ifi_canfd_handle_lec_err(ndev, errctr); + work_done += ifi_canfd_handle_lec_err(ndev); /* Handle normal messages on RX */ if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) @@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) struct net_device_stats *stats = &ndev->stats; const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | + IFI_CANFD_INTERRUPT_ERROR_COUNTER | + IFI_CANFD_INTERRUPT_ERROR_STATE_CHG | IFI_CANFD_INTERRUPT_ERROR_WARNING | - IFI_CANFD_INTERRUPT_ERROR_COUNTER; + IFI_CANFD_INTERRUPT_ERROR_BUSOFF; const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; - const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | - IFI_CANFD_INTERRUPT_ERROR_WARNING)); + const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ); u32 isr; isr = readl(priv->base + IFI_CANFD_INTERRUPT); @@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) struct resource *res; void __iomem *addr; int irq, ret; - u32 id; + u32 id, rev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(dev, res); @@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) return -EINVAL; } + rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK; + if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) { + dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n", + rev, IFI_CANFD_VER_REV_MIN_SUPPORTED); + return -EINVAL; + } + ndev = alloc_candev(sizeof(*priv), 1); if (!ndev) return -ENOMEM; diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 85268be0c913..ed8561d4a90f 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -258,22 +258,19 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, /* if this frame is an echo, */ if 
((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { - int n; unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); - n = can_get_echo_skb(priv->ndev, msg->client); - spin_unlock_irqrestore(&priv->echo_lock, flags); + can_get_echo_skb(priv->ndev, msg->client); /* count bytes of the echo instead of skb */ stats->tx_bytes += cf_len; stats->tx_packets++; - if (n) { - /* restart tx queue only if a slot is free */ - netif_wake_queue(priv->ndev); - } + /* restart tx queue (a slot is free) */ + netif_wake_queue(priv->ndev); + spin_unlock_irqrestore(&priv->echo_lock, flags); return 0; } @@ -336,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ if (pucan_status_is_rx_barrier(msg)) { - unsigned long flags; if (priv->enable_tx_path) { int err = priv->enable_tx_path(priv); @@ -345,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return err; } - /* restart network queue only if echo skb array is free */ - spin_lock_irqsave(&priv->echo_lock, flags); - - if (!priv->can.echo_skb[priv->echo_idx]) { - spin_unlock_irqrestore(&priv->echo_lock, flags); - - netif_wake_queue(ndev); - } else { - spin_unlock_irqrestore(&priv->echo_lock, flags); - } + /* start network queue (echo_skb array is empty) */ + netif_start_queue(ndev); return 0; } @@ -729,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, */ should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); - spin_unlock_irqrestore(&priv->echo_lock, flags); - - /* write the skb on the interface */ - priv->write_tx_msg(priv, msg); - /* stop network tx queue if not enough room to save one more msg too */ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) should_stop_tx_queue |= (room_left < @@ -745,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, if (should_stop_tx_queue) netif_stop_queue(ndev); + spin_unlock_irqrestore(&priv->echo_lock, flags); + + /* write the skb on the interface */ + priv->write_tx_msg(priv, msg); + return NETDEV_TX_OK; } diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index b4efd711f824..3c51a884db87 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg) priv->tx_pages_free++; spin_unlock_irqrestore(&priv->tx_lock, flags); - /* wake producer up */ - netif_wake_queue(priv->ucan.ndev); + /* wake producer up (only if enough room in echo_skb array) */ + spin_lock_irqsave(&priv->ucan.echo_lock, flags); + if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx]) + netif_wake_queue(priv->ucan.ndev); + + spin_unlock_irqrestore(&priv->ucan.echo_lock, flags); } /* re-enable Rx DMA transfer for this CAN */ @@ -825,7 +829,10 @@ static int peak_pciefd_probe(struct pci_dev *pdev, err_disable_pci: pci_disable_device(pdev); - return err; + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) */ + return pcibios_err_to_errno(err); } /* free the board structure object, as well as its resources: */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 131026fbc2d7..5adc95c922ee 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -717,7 +717,10 @@ static int 
peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) failure_disable_pci: pci_disable_device(pdev); - return err; + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) */ + return pcibios_err_to_errno(err); } static void peak_pci_remove(struct pci_dev *pdev) diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 4d4941469cfc..db6ea936dc3f 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) mbx_mask = hecc_read(priv, HECC_CANMIM); mbx_mask |= HECC_TX_MBOX_MASK; hecc_write(priv, HECC_CANMIM, mbx_mask); + } else { + /* repoll is done only if whole budget is used */ + num_pkts = quota; } return num_pkts; diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b3d02759c226..b00358297424 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 9fdb0f0bfa06..c6dcf93675c0 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 68ac3e88a8ce..8bf80ad9dc44 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", rc); - return rc; + return (rc > 0) ? 
0 : rc; } static void gs_usb_xmit_callback(struct urb *urb) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 9b18d96ef526..63587b8e6825 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, } if (pos + tmp->len > actual_len) { - dev_err(dev->udev->dev.parent, - "Format error\n"); + dev_err_ratelimited(dev->udev->dev.parent, + "Format error\n"); break; } @@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, if (err) { netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); + kfree(buf); usb_free_urb(urb); return err; } @@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) case 0: break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; default: @@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - while (pos <= urb->actual_length - MSG_HEADER_LEN) { + while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) { msg = urb->transfer_buffer + pos; /* The Kvaser firmware can only read and write messages that @@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } if (pos + msg->len > urb->actual_length) { - dev_err(dev->udev->dev.parent, "Format error\n"); + dev_err_ratelimited(dev->udev->dev.parent, + "Format error\n"); break; } @@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); usb_unanchor_urb(urb); + kfree(buf); stats->tx_dropped++; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 7f0272558bef..e0c24abce16c 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 7ccdc3e30c98..53d6bb045e9e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) void *cmd_head = pcan_usb_fd_cmd_buffer(dev); int err = 0; u8 *packet_ptr; - int i, n = 1, packet_len; + int packet_len; ptrdiff_t cmd_len; /* usb device unregistered? 
*/ @@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) } packet_ptr = cmd_head; + packet_len = cmd_len; /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ - if ((dev->udev->speed != USB_SPEED_HIGH) && - (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { - packet_len = PCAN_UFD_LOSPD_PKT_SIZE; - n += cmd_len / packet_len; - } else { - packet_len = cmd_len; - } + if (unlikely(dev->udev->speed != USB_SPEED_HIGH)) + packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE); - for (i = 0; i < n; i++) { + do { err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), @@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) } packet_ptr += packet_len; - } + cmd_len -= packet_len; + + if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE) + packet_len = cmd_len; + + } while (packet_len > 0); return err; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d000cb62d6ae..27861c417c94 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 8404e8852a0f..b4c4a2c76437 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, tbp = peer_tb; } - if (tbp[IFLA_IFNAME]) { + if (ifmp && tbp[IFLA_IFNAME]) { nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d7b53d53c116..72d6ffbfd638 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -167,7 +167,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) reg = reg_readl(priv, REG_SPHY_CNTRL); if (enable) { reg |= PHY_RESET; - reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS); + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS); reg_writel(priv, reg, REG_SPHY_CNTRL); udelay(21); reg = reg_readl(priv, REG_SPHY_CNTRL); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index b471413d3df9..1e5a69b9d90a 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -569,7 +569,7 @@ static int lan9303_disable_processing(struct lan9303 *chip) { int p; - for (p = 0; p < LAN9303_NUM_PORTS; p++) { + for (p = 1; p < LAN9303_NUM_PORTS; p++) { int ret = lan9303_disable_processing_port(chip, p); if (ret) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d74c7335c512..eebda5ec9676 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) u16 mask; mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask); - mask |= GENMASK(chip->g1_irq.nirqs, 0); + mask &= ~GENMASK(chip->g1_irq.nirqs, 0); mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); free_irq(chip->irq, chip); @@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) return 0; out_disable: - mask |= GENMASK(chip->g1_irq.nirqs, 0); + mask &= ~GENMASK(chip->g1_irq.nirqs, 0); mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); out_mapping: @@ -2153,6 +2153,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = { { }, }; +static void mv88e6xxx_mdios_unregister(struct 
mv88e6xxx_chip *chip) + +{ + struct mv88e6xxx_mdio_bus *mdio_bus; + struct mii_bus *bus; + + list_for_each_entry(mdio_bus, &chip->mdios, list) { + bus = mdio_bus->bus; + + mdiobus_unregister(bus); + } +} + static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, struct device_node *np) { @@ -2177,27 +2190,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, match = of_match_node(mv88e6xxx_mdio_external_match, child); if (match) { err = mv88e6xxx_mdio_register(chip, child, true); - if (err) + if (err) { + mv88e6xxx_mdios_unregister(chip); return err; + } } } return 0; } -static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) - -{ - struct mv88e6xxx_mdio_bus *mdio_bus; - struct mii_bus *bus; - - list_for_each_entry(mdio_bus, &chip->mdios, list) { - bus = mdio_bus->bus; - - mdiobus_unregister(bus); - } -} - static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c6bd5e24005d..67df5053dc30 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1565,7 +1565,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) static int ena_up_complete(struct ena_adapter *adapter) { - int rc, i; + int rc; rc = ena_rss_configure(adapter); if (rc) @@ -1584,17 +1584,6 @@ static int ena_up_complete(struct ena_adapter *adapter) ena_napi_enable_all(adapter); - /* Enable completion queues interrupt */ - for (i = 0; i < adapter->num_queues; i++) - ena_unmask_interrupt(&adapter->tx_ring[i], - &adapter->rx_ring[i]); - - /* schedule napi in case we had pending packets - * from the last time we disable napi - */ - for (i = 0; i < adapter->num_queues; i++) - napi_schedule(&adapter->ena_napi[i].napi); - return 0; } @@ -1731,7 +1720,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) static int ena_up(struct ena_adapter *adapter) { - int rc; + int rc, i; netdev_dbg(adapter->netdev, "%s\n", __func__); @@ -1774,6 +1763,17 @@ static int ena_up(struct ena_adapter *adapter) set_bit(ENA_FLAG_DEV_UP, &adapter->flags); + /* Enable completion queues interrupt */ + for (i = 0; i < adapter->num_queues; i++) + ena_unmask_interrupt(&adapter->tx_ring[i], + &adapter->rx_ring[i]); + + /* schedule napi in case we had pending packets + * from the last time we disable napi + */ + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); + return rc; err_up: diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 608693d11bd7..75c4455e2271 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -595,7 +595,7 @@ static void xgbe_isr_task(unsigned long data) reissue_mask = 1 << 0; if (!pdata->per_channel_irq) - reissue_mask |= 0xffff < 4; + reissue_mask |= 0xffff << 4; XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 3e5833cf1fab..eb23f9ba1a9a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev) struct net_device *netdev = pdata->netdev; int ret = 0; + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); + pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); diff --git 
a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 0207927dc8a6..4ebd53b3c7da 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -85,7 +85,9 @@ struct aq_hw_ops { void (*destroy)(struct aq_hw_s *self); int (*get_hw_caps)(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps); + struct aq_hw_caps_s *aq_hw_caps, + unsigned short device, + unsigned short subsystem_device); int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, unsigned int frags); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 483e97691eea..c93e5613d4cc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -222,7 +222,7 @@ static struct net_device *aq_nic_ndev_alloc(void) struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, const struct ethtool_ops *et_ops, - struct device *dev, + struct pci_dev *pdev, struct aq_pci_func_s *aq_pci_func, unsigned int port, const struct aq_hw_ops *aq_hw_ops) @@ -242,7 +242,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, ndev->netdev_ops = ndev_ops; ndev->ethtool_ops = et_ops; - SET_NETDEV_DEV(ndev, dev); + SET_NETDEV_DEV(ndev, &pdev->dev); ndev->if_port = port; self->ndev = ndev; @@ -254,7 +254,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, &self->aq_hw_ops); - err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, + pdev->device, pdev->subsystem_device); if (err < 0) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 4309983acdd6..3c9f8db03d5f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -71,7 +71,7 @@ struct aq_nic_cfg_s { struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, const struct ethtool_ops *et_ops, - struct device *dev, + struct pci_dev *pdev, struct aq_pci_func_s *aq_pci_func, unsigned int port, const struct aq_hw_ops *aq_hw_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index cadaa646c89f..58c29d04b186 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, pci_set_drvdata(pdev, self); self->pdev = pdev; - err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, + pdev->subsystem_device); if (err < 0) goto err_exit; @@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, for (port = 0; port < self->ports; ++port) { struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, - &pdev->dev, self, + pdev, self, port, aq_hw_ops); if (!aq_nic) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 07b3c49a16a4..b0abd187cead 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -18,9 +18,20 @@ #include "hw_atl_a0_internal.h" static 
int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps) + struct aq_hw_caps_s *aq_hw_caps, + unsigned short device, + unsigned short subsystem_device) { memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; + + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; + } + return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index ec68c20efcbd..36fddb199160 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -16,11 +16,23 @@ #include "hw_atl_utils.h" #include "hw_atl_llh.h" #include "hw_atl_b0_internal.h" +#include "hw_atl_llh_internal.h" static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps) + struct aq_hw_caps_s *aq_hw_caps, + unsigned short device, + unsigned short subsystem_device) { memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; + + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; + } + return 0; } @@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, }; int err = 0; + u32 val; self->aq_nic_cfg = aq_nic_cfg; @@ -374,6 +387,16 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + /* Force limit MRRS on RDM/TDM to 2K */ + val = aq_hw_read_reg(self, pci_reg_control6_adr); + aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); + + /* TX DMA total request limit. B0 hardware is not capable to + * handle more than (8K-MRRS) incoming DMA data. 
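Stepping back to the get_hw_caps() changes above: the same capability table is now trimmed per board by clearing link-speed bits for specific device/subsystem ID pairs. A hedged sketch of that masking; the DEMO_* IDs and rate bits are invented stand-ins for the HW_ATL_* constants.

#include <stdio.h>

#define DEMO_DEV_D108   0xd108
#define DEMO_DEV_D109   0xd109
#define DEMO_RATE_10G   (1u << 0)
#define DEMO_RATE_5G    (1u << 1)
#define DEMO_RATE_2G5   (1u << 2)

static unsigned int trim_speeds(unsigned int msk,
                                unsigned short device,
                                unsigned short subsystem)
{
        /* some board variants cannot actually run the top speeds */
        if (device == DEMO_DEV_D108 && subsystem == 0x0001)
                msk &= ~DEMO_RATE_10G;

        if (device == DEMO_DEV_D109 && subsystem == 0x0001)
                msk &= ~(DEMO_RATE_10G | DEMO_RATE_5G);

        return msk;
}

int main(void)
{
        unsigned int all = DEMO_RATE_10G | DEMO_RATE_5G | DEMO_RATE_2G5;

        printf("D109/0001 speeds: 0x%x\n", trim_speeds(all, DEMO_DEV_D109, 0x0001));
        return 0;
}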
+ * Value 24 in 256byte units + */ + aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); + err = aq_hw_err_from_flags(self); if (err < 0) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 5527fc0e5942..93450ec930e8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2343,6 +2343,9 @@ #define tx_dma_desc_base_addrmsw_adr(descriptor) \ (0x00007c04u + (descriptor) * 0x40) +/* tx dma total request limit */ +#define tx_dma_total_req_limit_adr 0x00007b20u + /* tx interrupt moderation control register definitions * Preprocessor definitions for TX Interrupt Moderation Control Register * Base Address: 0x00008980 @@ -2369,6 +2372,9 @@ /* default value of bitfield reg_res_dsbl */ #define pci_reg_res_dsbl_default 0x1 +/* PCI core control register */ +#define pci_reg_control6_adr 0x1014u + /* global microprocessor scratch pad definitions */ #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 3241af1ce718..5b422be56165 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget) continue; } - pktlen = info & LEN_MASK; - stats->rx_packets++; - stats->rx_bytes += pktlen; - skb = rx_buff->skb; - skb_put(skb, pktlen); - skb->dev = ndev; - skb->protocol = eth_type_trans(skb, ndev); - - dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), - dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); - - /* Prepare the BD for next cycle */ - rx_buff->skb = netdev_alloc_skb_ip_align(ndev, - EMAC_BUFFER_SIZE); - if (unlikely(!rx_buff->skb)) { + /* Prepare the BD for next cycle. netif_receive_skb() + * only if new skb was allocated and mapped to avoid holes + * in the RX fifo. 
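The comment above captures the ordering rule of the reworked RX path: allocate and map the replacement buffer first, and only then hand the filled one up; if allocation or mapping fails, the descriptor is returned to the EMAC with its old buffer and the packet is counted as dropped. A userspace sketch of that rule, with malloc standing in for skb allocation and DMA mapping (ring_slot, rx_one and alloc_buf are invented names).

#include <stdio.h>
#include <stdlib.h>

struct ring_slot { void *buf; };

static unsigned long rx_dropped, rx_packets;

static void *alloc_buf(int fail) { return fail ? NULL : malloc(2048); }

static void rx_one(struct ring_slot *slot, int fail_alloc)
{
        void *fresh = alloc_buf(fail_alloc);

        if (!fresh) {
                /* keep the old buffer in the descriptor so the hardware
                 * still owns a valid target; just account the loss */
                rx_dropped++;
                return;
        }

        /* hand the filled buffer up only now, then park the fresh one */
        rx_packets++;
        free(slot->buf);          /* stands in for passing the skb up the stack */
        slot->buf = fresh;
}

int main(void)
{
        struct ring_slot slot = { malloc(2048) };

        rx_one(&slot, 1);   /* allocation failure: packet dropped, slot kept */
        rx_one(&slot, 0);   /* normal path */
        printf("packets=%lu dropped=%lu\n", rx_packets, rx_dropped);
        free(slot.buf);
        return 0;
}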
+ */ + skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_err(ndev, "cannot allocate skb\n"); + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); stats->rx_errors++; - /* Because receive_skb is below, increment rx_dropped */ stats->rx_dropped++; continue; } - /* receive_skb only if new skb was allocated to avoid holes */ - netif_receive_skb(skb); - - addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + addr = dma_map_single(&ndev->dev, (void *)skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, addr)) { if (net_ratelimit()) - netdev_err(ndev, "cannot dma map\n"); - dev_kfree_skb(rx_buff->skb); + netdev_err(ndev, "cannot map dma buffer\n"); + dev_kfree_skb(skb); + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); stats->rx_errors++; + stats->rx_dropped++; continue; } + + /* unmap previosly mapped skb */ + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + + pktlen = info & LEN_MASK; + stats->rx_packets++; + stats->rx_bytes += pktlen; + skb_put(rx_buff->skb, pktlen); + rx_buff->skb->dev = ndev; + rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev); + + netif_receive_skb(rx_buff->skb); + + rx_buff->skb = skb; dma_unmap_addr_set(rx_buff, addr, addr); dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index e278e3d96ee0..c770ca37c9b2 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* Optional regulator for PHY */ priv->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(priv->regulator)) { - if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out_clk_disable; + } dev_err(dev, "no regulator found\n"); priv->regulator = NULL; } @@ -220,9 +222,11 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* RMII TX/RX needs always a rate of 25MHz */ err = clk_set_rate(priv->macclk, 25000000); - if (err) + if (err) { dev_err(dev, "failed to change mac clock rate (%d)\n", err); + goto out_clk_disable_macclk; + } } err = arc_emac_probe(ndev, interface); @@ -232,7 +236,8 @@ static int emac_rockchip_probe(struct platform_device *pdev) } return 0; - +out_clk_disable_macclk: + clk_disable_unprepare(priv->macclk); out_regulator_disable: if (priv->regulator) regulator_disable(priv->regulator); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index eb441e5e2cd8..1e856e8b9a92 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct net_device *ndev = priv->netdev; + unsigned int txbds_processed = 0; struct bcm_sysport_cb *cb; + unsigned int txbds_ready; + unsigned int c_index; u32 hw_ind; /* Clear status before servicing to reduce spurious interrupts */ @@ -871,29 +873,23 @@ static unsigned int 
__bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - - last_c_index = ring->c_index; - num_tx_cbs = ring->size; - - c_index &= (num_tx_cbs - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; - else - last_tx_cn = num_tx_cbs - last_c_index + c_index; + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; netif_dbg(priv, tx_done, ndev, - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", - ring->index, c_index, last_tx_cn, last_c_index); + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + ring->index, ring->c_index, c_index, txbds_ready); - while (last_tx_cn-- > 0) { - cb = ring->cbs + last_c_index; + while (txbds_processed < txbds_ready) { + cb = &ring->cbs[ring->clean_index]; bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); ring->desc_count++; - last_c_index++; - last_c_index &= (num_tx_cbs - 1); + txbds_processed++; + + if (likely(ring->clean_index < ring->size - 1)) + ring->clean_index++; + else + ring->clean_index = 0; } u64_stats_update_begin(&priv->syncp); @@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; + ring->clean_index = 0; ring->alloc_size = ring->size; ring->desc_cpu = p; ring->desc_count = ring->size; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 82e401df199e..a2006f5fc26f 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring { unsigned int desc_count; /* Number of descriptors */ unsigned int curr_desc; /* Current descriptor */ unsigned int c_index; /* Last consumer index */ - unsigned int p_index; /* Current producer index */ + unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* Transmit control blocks */ struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1216c1f1e052..6465414dad74 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) del_timer_sync(&bp->timer); - if (IS_PF(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { /* Set ALWAYS_ALIVE bit in shmem */ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bnx2x_drv_pulse(bp); @@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->cnic_loaded = false; /* Clear driver version indication in shmem */ - if (IS_PF(bp)) + if (IS_PF(bp) && !BP_NOMCP(bp)) bnx2x_update_mng_version(bp); /* Check if there are pending parity attentions. 
If there are - set diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c12b4d3e946e..e855a271db48 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp) do { bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + + /* If we read all 0xFFs, means we are in PCI error state and + * should bail out to avoid crashes on adapter's FW reads. + */ + if (bp->common.shmem_base == 0xFFFFFFFF) { + bp->flags |= NO_MCP_FLAG; + return -ENODEV; + } + if (bp->common.shmem_base) { val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); if (val & SHR_MEM_VALIDITY_MB) @@ -14315,7 +14324,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) BNX2X_ERR("IO slot reset --> driver unload\n"); /* MCP should have been reset; Need to wait for validity */ - bnx2x_init_shmem(bp); + if (bnx2x_init_shmem(bp)) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 v; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dc5de275352a..807cf75f0a98 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1698,12 +1698,16 @@ static int bnxt_async_event_process(struct bnxt *bp, if (BNXT_VF(bp)) goto async_event_process_exit; - if (data1 & 0x20000) { + + /* print unsupported speed warning in forced speed mode only */ + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && + (data1 & 0x20000)) { u16 fw_speed = link_info->force_link_speed; u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); - netdev_warn(bp->dev, "Link speed %d no longer supported\n", - speed); + if (speed != SPEED_UNKNOWN) + netdev_warn(bp->dev, "Link speed %d no longer supported\n", + speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); /* fall thru */ @@ -1875,7 +1879,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) * here forever if we consistently cannot allocate * buffers. 
*/ - else if (rc == -ENOMEM) + else if (rc == -ENOMEM && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; @@ -1961,7 +1965,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); - if (likely(rc == -EIO)) + if (likely(rc == -EIO) && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; @@ -8218,8 +8222,9 @@ static void bnxt_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); + bnxt_ulp_shutdown(bp); + if (system_state == SYSTEM_POWER_OFF) { - bnxt_ulp_shutdown(bp); bnxt_clear_int_mode(bp); pci_wake_from_d3(pdev, bp->wol); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3cbe771b3352..a22336fef66b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2133,8 +2133,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev, /* Read A2 portion of the EEPROM */ if (length) { start -= ETH_MODULE_SFF_8436_LEN; - bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, - length, data); + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, + start, length, data); } return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 5ee18660bc33..c9617675f934 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); return -EINVAL; } - if (vf_id >= bp->pf.max_vfs) { + if (vf_id >= bp->pf.active_vfs) { netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 7dd3d131043a..6a185344b378 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -327,7 +327,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ - if (is_wildcard(&l3_mask, sizeof(l3_mask)) && + if (is_wildcard(l3_mask, sizeof(*l3_mask)) && is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; } else { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 656e6af70f0a..48738eb27806 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -10052,6 +10052,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) tw32(GRC_MODE, tp->grc_mode | val); + /* On one of the AMD platform, MRRS is restricted to 4000 because of + * south bridge limitation. As a workaround, Driver is setting MRRS + * to 2048 instead of default 4096. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { + val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; + tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); + } + /* Setup the timer prescalar register. Clock is always 66Mhz. 
*/ val = tr32(GRC_MISC_CFG); val &= ~0xff; @@ -14227,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) /* Reset PHY, otherwise the read DMA engine will be in a mode that * breaks all requests to 256 bytes. */ - if (tg3_asic_rev(tp) == ASIC_REV_57766) + if (tg3_asic_rev(tp) == ASIC_REV_57766 || + tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) reset_phy = true; err = tg3_restart_hw(tp, reset_phy); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index c2d02d02d1e6..b057f71aed48 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -96,6 +96,7 @@ #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a +#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0 #define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a @@ -281,6 +282,9 @@ #define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ #define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ /* 0xa8 --> 0xb8 unused */ +#define TG3PCI_DEV_STATUS_CTRL 0x000000b4 +#define MAX_READ_REQ_SIZE_2048 0x00004000 +#define MAX_READ_REQ_MASK 0x00007000 #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 #define DUAL_MAC_CTRL_CH_MASK 0x00000003 #define DUAL_MAC_CTRL_ID 0x00000004 diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 2e993ce43b66..4d2db22e011b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1289,6 +1289,9 @@ static int liquidio_stop(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; + /* tell Octeon to stop forwarding packets to host */ + send_rx_ctrl_cmd(lio, 0); + if (oct->props[lio->ifidx].napi_enabled) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); @@ -1306,9 +1309,6 @@ static int liquidio_stop(struct net_device *netdev) netif_carrier_off(netdev); lio->link_changes++; - /* tell Octeon to stop forwarding packets to host */ - send_rx_ctrl_cmd(lio, 0); - ifstate_reset(lio, LIO_IFSTATE_RUNNING); txqs_stop(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 805ab45e9b5a..2237ef8e4344 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1832,6 +1832,11 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->pdev = pdev; nic->pnicvf = nic; nic->max_queues = qcount; + /* If no of CPUs are too low, there won't be any queues left + * for XDP_TX, hence double it. 
+ */ + if (!nic->t88) + nic->max_queues *= 2; /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d4496e9afcdf..a3d12dbde95b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, /* Offload checksum calculation to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - hdr->csum_l3 = 1; /* Enable IP csum calculation */ + if (ip.v4->version == 4) + hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index b65ce26ff72f..1802debbd3c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2632,7 +2632,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_SIZE 0x800 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 @@ -2670,15 +2669,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; - /* We have two VPD data structures stored in the adapter VPD area. - * By default, Linux calculates the size of the VPD area by traversing - * the first VPD area at offset 0x0, so we need to tell the OS what - * our real VPD size is. - */ - ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); - if (ret < 0) - goto out; - /* Card information normally starts at VPD_BASE but early cards had * it at 0. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 05498e7f2840..6246003f9922 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter) int t4vf_sge_init(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; - u32 fl0 = sge_params->sge_fl_buffer_size[0]; - u32 fl1 = sge_params->sge_fl_buffer_size[1]; + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; /* @@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter) * the Physical Function Driver. Ideally we should be able to deal * with _any_ configuration. Practice is different ... */ - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", - fl0, fl1); + fl_small_pg, fl_large_pg); return -EINVAL; } if ((sge_params->sge_control & RXPKTCPLMODE_F) != @@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Now translate the adapter parameters into our internal forms. 
*/ - if (fl1) - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..1b03c32afc1f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter) be_schedule_worker(adapter); + /* + * The IF was destroyed and re-created. We need to clear + * all promiscuous flags valid for the destroyed IF. + * Without this promisc mode is not restored during + * be_open() because the driver thinks that it is + * already enabled in HW. + */ + adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS; + if (netif_running(netdev)) status = be_open(netdev); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index e92859dab7ae..e191c4ebeaf4 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -257,8 +257,8 @@ enum rx_desc_status_bits { RXFSD = 0x00000800, /* first descriptor */ RXLSD = 0x00000400, /* last descriptor */ ErrorSummary = 0x80, /* error summary */ - RUNT = 0x40, /* runt packet received */ - LONG = 0x20, /* long packet received */ + RUNTPKT = 0x40, /* runt packet received */ + LONGPKT = 0x20, /* long packet received */ FAE = 0x10, /* frame align error */ CRC = 0x08, /* crc error */ RXER = 0x04, /* receive error */ @@ -1632,7 +1632,7 @@ static int netdev_rx(struct net_device *dev) dev->name, rx_status); dev->stats.rx_errors++; /* end of a packet. */ - if (rx_status & (LONG | RUNT)) + if (rx_status & (LONGPKT | RUNTPKT)) dev->stats.rx_length_errors++; if (rx_status & RXER) dev->stats.rx_frame_errors++; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 42258060f142..4f6e9d3470d5 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2022,7 +2022,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, } if (unlikely(err < 0)) { - percpu_stats->tx_errors++; percpu_stats->tx_fifo_errors++; return err; } @@ -2292,7 +2291,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, vaddr = phys_to_virt(addr); prefetch(vaddr + qm_fd_get_offset(fd)); - fd_format = qm_fd_get_format(fd); /* The only FD types that we may receive are contig and S/G */ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); @@ -2325,8 +2323,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, skb_len = skb->len; - if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { + percpu_stats->rx_dropped++; return qman_cb_dqrr_consume; + } percpu_stats->rx_packets++; percpu_stats->rx_bytes += skb_len; @@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev) struct device *dev; int err; - dev = &pdev->dev; + dev = pdev->dev.parent; net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3dc2d771a222..eb2ea231c7ca 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < 
txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = cpu_to_fec16(0); + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; @@ -3452,6 +3458,10 @@ fec_probe(struct platform_device *pdev) goto failed_regulator; } } else { + if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto failed_regulator; + } fep->reg_phy = NULL; } @@ -3533,8 +3543,9 @@ fec_probe(struct platform_device *pdev) failed_clk: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); -failed_phy: of_node_put(phy_node); +failed_phy: + dev_id--; failed_ioremap: free_netdev(ndev); @@ -3554,6 +3565,8 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 5be52d89b182..7f837006bb6a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev) gfar_init_addr_hash_table(priv); - /* Insert receive time stamps into padding alignment bytes */ + /* Insert receive time stamps into padding alignment bytes, and + * plus 2 bytes padding to ensure the cpu alignment. + */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - priv->padding = 8; + priv->padding = 8 + DEFAULT_PADDING; if (dev->features & NETIF_F_IP_CSUM || priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) @@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev) GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; struct phy_device *phydev; + struct ethtool_eee edata; priv->oldlink = 0; priv->oldspeed = 0; @@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev) /* Add support for flow control, but don't advertise it by default */ phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + /* disable EEE autoneg, EEE not supported by eTSEC */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phy_ethtool_set_eee(phydev, &edata); + return 0; } diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 544114281ea7..9f8d4f8e57e3 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) now = tmr_cnt_read(etsects); now += delta; tmr_cnt_write(etsects, now); + set_fipers(etsects); spin_unlock_irqrestore(&etsects->lock, flags); - set_fipers(etsects); - return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 86944bc3b273..74bd260ca02a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data) static int hns_gmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_gmac_stats_string); return 0; diff --git 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index b62816c1574e..93e71e27401b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb) int hns_ppe_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return ETH_PPE_STATIC_NUM; return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6f3570cfb501..e2e28532e4dc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) */ int hns_rcb_get_ring_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return HNS_RING_STATIC_REG_NUM; return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7ea7f8a4aa2a..2e14a3ae1d8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) cnt--; return cnt; - } else { + } else if (stringset == ETH_SS_STATS) { return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); + } else { + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c1cdbfd83bdb..ff7a70ffafc6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2092,6 +2092,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (phydev) + return phydev->autoneg; hclge_query_autoneg_result(hdev); @@ -3981,7 +3985,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->roce.client = client; } - if (hdev->roce_client) { + if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) goto err; @@ -4007,13 +4011,19 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport = &hdev->vport[i]; - if (hdev->roce_client) + if (hdev->roce_client) { hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) + if (client->ops->uninit_instance) { client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; + } } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 35369e1c8036..d1e4dcec5db2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -721,7 +721,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) HNS3_TXD_BDTYPE_M, 0); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, 
HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, @@ -1060,6 +1060,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) u64 rx_bytes = 0; u64 tx_pkts = 0; u64 rx_pkts = 0; + u64 tx_drop = 0; + u64 rx_drop = 0; for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ @@ -1068,6 +1070,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; + tx_drop += ring->stats.tx_busy; + tx_drop += ring->stats.sw_err_cnt; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1076,6 +1080,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; + rx_drop += ring->stats.non_vld_descs; + rx_drop += ring->stats.err_pkt_len; + rx_drop += ring->stats.l2_err; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1091,8 +1098,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = netdev->stats.rx_missed_errors; stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = netdev->stats.rx_dropped; - stats->tx_dropped = netdev->stats.tx_dropped; + stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; + stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1306,6 +1313,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } + netdev->mtu = new_mtu; + /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) ret = -EINVAL; @@ -1546,7 +1555,7 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return 0; out_with_buf: - hns3_free_buffers(ring); + hns3_free_buffer(ring, cb); out: return ret; } @@ -1586,7 +1595,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, struct hns3_desc_cb *res_cb) { - hns3_map_buffer(ring, &ring->desc_cb[i]); + hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); } @@ -2460,9 +2469,8 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) (void)irq_set_affinity_hint( priv->tqp_vector[i].vector_irq, NULL); - devm_free_irq(&pdev->dev, - priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); + free_irq(priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -2489,16 +2497,16 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, if (ring_type == HNAE3_RING_TYPE_TX) { ring_data[q->tqp_index].ring = ring; + ring_data[q->tqp_index].queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { ring_data[q->tqp_index + queue_num].ring = ring; + ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; ring->io_base = q->io_base; } hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); - ring_data[q->tqp_index].queue_index = q->tqp_index; - ring->tqp = q; ring->desc = NULL; 
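/*
 * Illustrative userspace sketch, not part of the patch above: the
 * hns3_nic_get_stats64() hunk sums per-ring counters inside a
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop so the
 * reader sees a consistent snapshot while the ring keeps updating its
 * counters.  The same "retry while the sequence count is odd or has
 * changed" idea is shown below with C11 atomics; the names
 * (demo_ring_stats, demo_stats_read) are made up for the example.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring_stats {
	atomic_uint seq;	/* even = stable, odd = writer in progress */
	uint64_t pkts;
	uint64_t bytes;
};

static void demo_stats_read(struct demo_ring_stats *s, uint64_t *pkts,
			    uint64_t *bytes)
{
	unsigned int start;

	do {
		/* wait for an even (stable) sequence number */
		do {
			start = atomic_load_explicit(&s->seq,
						     memory_order_acquire);
		} while (start & 1);

		*pkts = s->pkts;
		*bytes = s->bytes;

		/* retry if a writer bumped the sequence while we copied */
	} while (atomic_load_explicit(&s->seq, memory_order_acquire) != start);
}

int main(void)
{
	struct demo_ring_stats r = { .seq = 0, .pkts = 42, .bytes = 64000 };
	uint64_t p, b;

	demo_stats_read(&r, &p, &b);
	printf("pkts=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}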
ring->desc_cb = NULL; @@ -2688,8 +2696,12 @@ static int hns3_uninit_all_ring(struct hns3_nic_priv *priv) h->ae_algo->ops->reset_queue(h, i); hns3_fini_ring(priv->ring_data[i].ring); + devm_kfree(priv->dev, priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); } + devm_kfree(priv->dev, priv->ring_data); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index d636399232fb..a64a5a413d4d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -22,7 +22,8 @@ struct hns3_stats { #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ - .stats_offset = offsetof(struct hns3_enet_ring, stats), \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ + offsetof(struct ring_stats, _member), \ } \ static const struct hns3_stats hns3_txq_stats[] = { @@ -189,13 +190,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hns3_enet_ring *ring; u8 *stat; - u32 i; + int i, j; /* get stats for Tx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i].ring; - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -203,8 +204,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Rx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -375,6 +376,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev, break; } + if (!cmd->base.autoneg) + advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + /* now, map driver link modes to ethtool link modes */ hns3_driv_to_eth_caps(supported_caps, cmd, false); hns3_driv_to_eth_caps(advertised_caps, cmd, true); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c66abd476023..3ae02b0620bc 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -927,6 +927,7 @@ static int ibmvnic_open(struct net_device *netdev) } rc = __ibmvnic_open(netdev); + netif_carrier_on(netdev); mutex_unlock(&adapter->reset_lock); return rc; @@ -2208,6 +2209,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + /* When booting a kdump kernel we can hit pending interrupts + * prior to completing driver initialization. 
+ */ + if (unlikely(adapter->state != VNIC_OPEN)) + return IRQ_NONE; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { @@ -3899,6 +3906,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (rc) goto ibmvnic_init_fail; + netif_carrier_off(netdev); rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index d7bdea79e9fa..8fd2458060a0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -331,7 +331,8 @@ struct e1000_adapter { enum e1000_state_t { __E1000_TESTING, __E1000_RESETTING, - __E1000_DOWN + __E1000_DOWN, + __E1000_DISABLED }; #undef pr_fmt diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 1982f7917a8d..3dd4aeb2706d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter, static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; - struct e1000_adapter *adapter; + struct e1000_adapter *adapter = NULL; struct e1000_hw *hw; static int cards_found; @@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 tmp = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; int bars, need_ioport; + bool disable_dev = false; /* do not allocate ioport bars when not needed */ need_ioport = e1000_is_need_ioport(pdev); @@ -1259,11 +1260,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iounmap(hw->ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, bars); err_pci_reg: - pci_disable_device(pdev); + if (!adapter || disable_dev) + pci_disable_device(pdev); return err; } @@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + bool disable_dev; e1000_down_and_stop(adapter); e1000_release_manageability(adapter); @@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev) iounmap(hw->flash_address); pci_release_selected_regions(pdev, adapter->bars); + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); free_netdev(netdev); - pci_disable_device(pdev); + if (disable_dev) + pci_disable_device(pdev); } /** @@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) if (netif_running(netdev)) e1000_free_irq(adapter); - pci_disable_device(pdev); + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); return 0; } @@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev) pr_err("Cannot enable PCI device from suspend\n"); return err; } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); @@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) e1000_down(adapter); - pci_disable_device(pdev); + + if 
(!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) pr_err("Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 0641c0098738..afb7ebe20b24 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -398,6 +398,7 @@ #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ /* If this bit asserted, the driver should claim the interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d6d4ed7acf03..31277d3bb7dc 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,6 +1367,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { @@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 67163ca898ba..00a36df02a3f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -113,7 +113,8 @@ #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000 +#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b322011ec282..f457c5703d0c 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 327dfe5bedc0..991c2a0dd67e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 icr; + bool enable = true; + + icr = er32(ICR); + if (icr & E1000_ICR_RXO) { + ew32(ICR, E1000_ICR_RXO); + enable = false; + /* napi poll will re-enable Other, make sure it runs */ + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + } + if (icr & E1000_ICR_LSC) { + ew32(ICR, E1000_ICR_LSC); + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } - hw->mac.get_link_status = true; - - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) { - mod_timer(&adapter->watchdog_timer, jiffies + 1); + if (enable && !test_bit(__E1000_DOWN, &adapter->state)) ew32(IMS, E1000_IMS_OTHER); - } return IRQ_HANDLED; } @@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight) napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val); + ew32(IMS, adapter->rx_ring->ims_val | + E1000_IMS_OTHER); else e1000_irq_enable(adapter); } @@ -3004,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) hw->mac.ops.config_collision_dist(hw); - /* SPT and CNP Si errata workaround to avoid data corruption */ - if (hw->mac.type >= e1000_pch_spt) { + /* SPT and KBL Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); @@ -3013,7 +3030,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(IOSFPC, reg_val); reg_val = er32(TARC(0)); - reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + /* SPT and KBL Si errata workaround to avoid Tx hang. + * Dropping the number of outstanding requests from + * 3 to 2 in order to avoid a buffer overrun. 
+ */ + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; ew32(TARC(0), reg_val); } } @@ -4204,7 +4226,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) - ew32(ICS, E1000_ICS_OTHER); + ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER); else ew32(ICS, E1000_ICS_LSC); } @@ -5081,7 +5103,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; + link_active = ret_val > 0; } else { link_active = true; } @@ -5099,7 +5121,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; } - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index d78d47b41a71..86ff0969efb6 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, s32 ret_val = 0; u16 i, phy_status; + *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. No harm doing @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & BMSR_LSTATUS) + if (phy_status & BMSR_LSTATUS) { + *success = true; break; + } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } - *success = (i < iterations); - return ret_val; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 689c413b7782..d2f9a2dd76a2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -526,8 +526,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, u8 qos, __be16 vlan_proto); -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate, - int unused); +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused min_rate, int max_rate); int fm10k_ndo_get_vf_config(struct net_device *netdev, int vf_idx, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5f4dac0d36ef..e72fd52bacfe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -126,6 +126,9 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) struct fm10k_mbx_info *mbx = &vf_info->mbx; u16 glort = vf_info->glort; + /* process the SM mailbox first to drain outgoing messages */ + hw->mbx.ops.process(hw, &hw->mbx); + /* verify port mapping is valid, if not reset port */ if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) hw->iov.ops.reset_lport(hw, vf_info); @@ -482,7 +485,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, } int fm10k_ndo_set_vf_bw(struct net_device 
*netdev, int vf_idx, - int __always_unused unused, int rate) + int __always_unused min_rate, int max_rate) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; @@ -493,14 +496,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, return -EINVAL; /* rate limit cannot be less than 10Mbs or greater than link speed */ - if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) + if (max_rate && + (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX)) return -EINVAL; /* store values */ - iov_data->vf_info[vf_idx].rate = rate; + iov_data->vf_info[vf_idx].rate = max_rate; /* update hardware configuration */ - hw->iov.ops.configure_tc(hw, vf_idx, rate); + hw->iov.ops.configure_tc(hw, vf_idx, max_rate); return 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 9dffaba85ae6..103c0a742d03 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1229,7 +1229,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..b1cde1b051a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1553,11 +1553,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p) else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + /* Copy the address first, so that we avoid a possible race with + * .set_rx_mode(). If we copy after changing the address in the filter + * list, we might open ourselves to a narrow race window where + * .set_rx_mode could delete our dev_addr filter and prevent traffic + * from passing. + */ + ether_addr_copy(netdev->dev_addr, addr->sa_data); + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); - ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; @@ -1739,6 +1746,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. 
+ */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + i40e_del_mac_filter(vsi, addr); return 0; @@ -2874,14 +2889,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; + int cpu; if (!ring->q_vector || !ring->netdev) return; if ((vsi->tc_config.numtc <= 1) && !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { - netif_set_xps_queue(ring->netdev, - get_cpu_mask(ring->q_vector->v_idx), + cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); + netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); } @@ -3471,6 +3487,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) int tx_int_idx = 0; int vector, err; int irq_num; + int cpu; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; @@ -3506,10 +3523,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread affinity hints out across online CPUs. + * + * get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to pass to + * irq_set_affinity_hint without making a copy. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; @@ -3760,7 +3781,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 120c68f78951..542c00b1c823 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ @@ -3048,10 +3048,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. 
+ */ + if (stale_size > I40E_MAX_DATA_PER_TXD) { + int align_pad = -(stale->page_offset) & + (I40E_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > I40E_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -3059,7 +3079,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4d1e670f490e..e368b0237a1b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1008,8 +1008,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); /* Do not notify the client during VF init */ - if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, - &vf->vf_states)) + if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, + &vf->vf_states)) i40e_notify_client_of_vf_reset(pf, abs_vf_id); vf->num_vlan = 0; } @@ -2779,6 +2779,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct i40e_mac_filter *f; struct i40e_vf *vf; int ret = 0; + struct hlist_node *h; int bkt; /* validate the request */ @@ -2817,7 +2818,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Delete all the filters for this VSI - we're going to kill it * anyway. */ - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index c32c62462c84..7368b0dc3af8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ @@ -2014,10 +2014,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. 
+ */ + if (stale_size > I40E_MAX_DATA_PER_TXD) { + int align_pad = -(stale->page_offset) & + (I40E_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > I40E_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -2025,7 +2045,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1825d956bb00..4eb6ff60e8fc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -546,6 +546,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; + int cpu; i40evf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ @@ -584,10 +585,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector->affinity_notify.release = i40evf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread the IRQ affinity hints across online CPUs. Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; @@ -1772,7 +1775,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - if (netif_running(adapter->netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + if (adapter->state == __I40EVF_RUNNING) { set_bit(__I40E_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); @@ -1830,6 +1837,7 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; + bool running; while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) @@ -1889,7 +1897,13 @@ static void i40evf_reset_task(struct work_struct *work) } continue_reset: - if (netif_running(netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + running = (adapter->state == __I40EVF_RUNNING); + + if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -1933,7 +1947,10 @@ static void i40evf_reset_task(struct work_struct *work) mod_timer(&adapter->watchdog_timer, jiffies + 2); - if (netif_running(adapter->netdev)) { + /* We were running when the reset started, so we need to restore some + * state here. 
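+ * (For instance, the Tx descriptor rings are reallocated just
+ * below before the adapter is returned to the running state.)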
+ */ + if (running) { /* allocate transmit descriptors */ err = i40evf_setup_all_tx_resources(adapter); if (err) diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 1de82f247312..83cabff1e0ab 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -353,7 +353,18 @@ #define E1000_RXPBS_CFG_TS_EN 0x80000000 #define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_32KB 0x00000020 #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_8KB (8 << 0) +#define I210_TXPBSIZE_PB1_8KB (8 << 6) +#define I210_TXPBSIZE_PB2_4KB (4 << 12) +#define I210_TXPBSIZE_PB3_4KB (4 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 @@ -1051,4 +1062,16 @@ #define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define E1000_VLAPQF_QUEUE_MASK 0x03 +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..8eee081d395f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -421,6 +421,14 @@ do { \ #define E1000_I210_FLA 0x1201C +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 06ffb2bc713e..92845692087a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -281,6 +281,11 @@ struct igb_ring { u16 count; /* number of desc. in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ /* everything past this point are written often */ u16 next_to_clean; @@ -621,6 +626,7 @@ struct igb_adapter { #define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_VLAN_PROMISC BIT(15) #define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea69af267d63..8b6eaa872791 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -62,6 +63,17 @@ #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = @@ -1271,6 +1283,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + u64_stats_init(&ring->tx_syncp); u64_stats_init(&ring->tx_syncp2); @@ -1598,6 +1616,284 @@ static void igb_get_hw_control(struct igb_adapter *adapter) ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +/** + * igb_configure_cbs - Configure Credit-Based Shaper (CBS) + * @adapter: pointer to adapter struct + * @queue: queue number + * @enable: true = enable CBS, false = disable CBS + * @idleslope: idleSlope in kbps + * @sendslope: sendSlope in kbps + * @hicredit: hiCredit in bytes + * @locredit: loCredit in bytes + * + * Configure CBS for a given hardware queue. When disabling, idleslope, + * sendslope, hicredit, locredit arguments are ignored. Returns 0 if + * success. Negative otherwise. + **/ +static void igb_configure_cbs(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tqavcc; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + if (enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field from TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. 
+ * + * value = BW * 0x7735 * 2 * link-speed + * -------------- (E3) + * 1000 + * + * 'BW' is the percentage bandwidth out of full link speed + * which can be found with the following equation. Note that + * idleSlope here is the parameter from this function which + * is in kbps. + * + * BW = idleSlope + * ----------------- (E4) + * link-speed * 1000 + * + * That said, we can come up with a generic equation to + * calculate the value we should set it TQAVCC register by + * replacing 'BW' in E3 by E4. The resulting equation is: + * + * value = idleSlope * 0x7735 * 2 * link-speed + * ----------------- -------------- (E5) + * link-speed * 1000 1000 + * + * 'link-speed' is present in both sides of the fraction so + * it is canceled out. The final equation is the following: + * + * value = idleSlope * 61034 + * ----------------- (E6) + * 1000000 + */ + value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + } + + /* XXX: In i210 controller the sendSlope and loCredit parameters from + * CBS are not configurable by software so we don't do any 'controller + * configuration' in respect to these parameters. + */ + + netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + (enable) ? "enabled" : "disabled", queue, + idleslope, sendslope, hicredit, locredit); +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + struct igb_ring *ring; + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin' and set data + * transfer arbitration to 'credit shaper algorithm. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure Tx and Rx packet buffers sizes as described in + * i210 datasheet section 7.2.7.7. 
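+ * (Given the I210_TXPBSIZE_PB*_KB and I210_RXPBSIZE_PB_32KB
+ * values defined above, this assigns 8KB to Tx packet buffers 0
+ * and 1, 4KB to buffers 2 and 3, and 32KB to the Rx packet
+ * buffer.)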
+ */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | + I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_32KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frame from queue 2 and 3 are + * transmitted. It seems the MAX_TPKT_SIZE should not be great + * or _equal_ to the buffer size programmed in TXPBS. For this + * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? + adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + + igb_configure_cbs(adapter, i, ring->cbs_enable, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS so we + * don't set they here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? + "enabled" : "disabled"); +} + /** * igb_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -1609,6 +1905,7 @@ static void igb_configure(struct igb_adapter *adapter) igb_get_hw_control(adapter); igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); igb_restore_vlan(adapter); @@ -2150,6 +2447,55 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. 
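+ * (These are the I210_SR_QUEUES_NUM stream-reservation capable
+ * queues on i210, hence the range check below.)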
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + if (is_fqtss_enabled(adapter)) { + igb_configure_cbs(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + + if (!is_any_cbs_enabled(adapter)) + enable_fqtss(adapter, false); + + } else { + enable_fqtss(adapter, true); + } + + return 0; +} + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_CBS: + return igb_offload_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2175,6 +2521,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, }; /** @@ -3162,6 +3509,8 @@ static int igb_sw_init(struct igb_adapter *adapter) /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), GFP_ATOMIC); + if (!adapter->shadow_vfta) + return -ENOMEM; /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter, true)) { @@ -3329,7 +3678,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - if (netif_device_present(netdev)) + if (netif_device_present(netdev) || netdev->dismantle) return __igb_close(netdev, false); return 0; } @@ -6970,7 +7319,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1ed556911b14..6f5888bd9194 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 6e6ab6f6875e..64429a14c630 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3781,10 +3781,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); fw_cmd.pad = 0; fw_cmd.pad2 = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, &fw_cmd, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d5f31e94358..29f600fd6977 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) @@ -1877,6 +1877,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 19fbb2f28ea4..8a85217845ae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, /* convert offset from words to bytes */ buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); + buffer.pad2 = 0; + buffer.pad3 = 0; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..90ecc4b06462 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index c9798210fa0f..0495487f7b42 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev) dev->regs + MVMDIO_ERR_INT_MASK); } else if (dev->err_interrupt == -EPROBE_DEFER) { - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out_mdio; } if (pdev->dev.of_node) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 64a04975bcf8..a539263cd79c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, { u32 val; - /* Only 255 descriptors can be added at once ; Assume caller - * process TX desriptors in quanta less than 256 - */ - val = pend_desc + txq->pending; - mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc += txq->pending; + + /* Only 255 Tx descriptors can be added at once */ + do { + val = min(pend_desc, 255); + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc -= val; + } while (pend_desc > 0); txq->pending = 0; } @@ -1211,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp) val &= ~MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); + pp->link = 0; + pp->duplex = -1; + pp->speed = 0; + udelay(200); } @@ -1955,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port 
*pp, int rx_todo, if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { + mvneta_rx_error(pp, rx_desc); err_drop_frame: dev->stats.rx_errors++; - mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ continue; } @@ -3008,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp) { int queue; - for (queue = 0; queue < txq_number; queue++) + for (queue = 0; queue < rxq_number; queue++) mvneta_rxq_deinit(pp, &pp->rxqs[queue]); } diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index fcf9ba5eb8d1..529be74f609d 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4552,11 +4552,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val |= MVPP2_GMAC_DISABLE_PADDING; - val &= ~MVPP2_GMAC_FLOW_CTRL_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | @@ -4564,10 +4559,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; val &= ~MVPP22_CTRL4_DP_CLK_SEL; writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val &= ~MVPP2_GMAC_DISABLE_PADDING; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); } /* The port is connected to a copper PHY */ @@ -5408,7 +5399,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, u32 txq_dma; /* Allocate memory for TX descriptors */ - aggr_txq->descs = dma_alloc_coherent(&pdev->dev, + aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, &aggr_txq->descs_dma, GFP_KERNEL); if (!aggr_txq->descs) @@ -5606,7 +5597,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, sizeof(*txq_pcpu->buffs), GFP_KERNEL); if (!txq_pcpu->buffs) - goto cleanup; + return -ENOMEM; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -5619,26 +5610,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port, &txq_pcpu->tso_headers_dma, GFP_KERNEL); if (!txq_pcpu->tso_headers) - goto cleanup; + return -ENOMEM; } return 0; -cleanup: - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - kfree(txq_pcpu->buffs); - - dma_free_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - txq_pcpu->tso_headers, - txq_pcpu->tso_headers_dma); - } - - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_dma); - - return -ENOMEM; } /* Free allocated TXQ resources */ @@ -6913,6 +6888,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) int id = port->id; bool allmulti = dev->flags & IFF_ALLMULTI; +retry: mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); @@ -6920,9 +6896,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) /* Remove all port->id's mcast enries */ mvpp2_prs_mcast_del_all(priv, id); - if (allmulti && !netdev_mc_empty(dev)) { - netdev_for_each_mc_addr(ha, dev) - mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); + if (!allmulti) { + netdev_for_each_mc_addr(ha, dev) { + if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, 
true)) { + allmulti = true; + goto retry; + } + } } } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 1145cde2274a..b12e3a4f9439 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 150; + pdev->d3_delay = 200; return 0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 5e81a7263654..3fd71cf5cd60 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1959,11 +1959,12 @@ static int mtk_hw_init(struct mtk_eth *eth) /* set GE2 TUNE */ regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); - /* GE1, Force 1000M/FD, FC ON */ - mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); - - /* GE2, Force 1000M/FD, FC ON */ - mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); + /* Set linkdown as the default for each GMAC. Its own MCR would be set + * up with the more appropriate value when mtk_phy_link_adjust call is + * being invoked. + */ + for (i = 0; i < MTK_MAC_COUNT; i++) + mtk_w32(eth, 0, MTK_MAC_MCR(i)); /* Indicates CDM to parse the MTK special tag from CPU * which also is working out for untag packets. diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 5f41dc92aa68..752a72499b4f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) { struct mlx4_en_priv *priv = netdev_priv(netdev); + struct mlx4_en_port_profile *prof = priv->prof; struct mlx4_en_dev *mdev = priv->mdev; + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 1; if (priv->cee_config.pfc_state) { int tc; + rx_ppp = prof->rx_ppp; + tx_ppp = prof->tx_ppp; - priv->prof->rx_pause = 0; - priv->prof->tx_pause = 0; for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { u8 tc_mask = 1 << tc; switch (priv->cee_config.dcb_pfc[tc]) { case pfc_disabled: - priv->prof->tx_ppp &= ~tc_mask; - priv->prof->rx_ppp &= ~tc_mask; + tx_ppp &= ~tc_mask; + rx_ppp &= ~tc_mask; break; case pfc_enabled_full: - priv->prof->tx_ppp |= tc_mask; - priv->prof->rx_ppp |= tc_mask; + tx_ppp |= tc_mask; + rx_ppp |= tc_mask; break; case pfc_enabled_tx: - priv->prof->tx_ppp |= tc_mask; - priv->prof->rx_ppp &= ~tc_mask; + tx_ppp |= tc_mask; + rx_ppp &= ~tc_mask; break; case pfc_enabled_rx: - priv->prof->tx_ppp &= ~tc_mask; - priv->prof->rx_ppp |= tc_mask; + tx_ppp &= ~tc_mask; + rx_ppp |= tc_mask; break; default: break; } } - en_dbg(DRV, priv, "Set pfc on\n"); + rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause; + tx_pause = !!(rx_ppp || tx_ppp) ? 
0 : prof->tx_pause; } else { - priv->prof->rx_pause = 1; - priv->prof->tx_pause = 1; - en_dbg(DRV, priv, "Set pfc off\n"); + rx_ppp = 0; + tx_ppp = 0; + rx_pause = prof->rx_pause; + tx_pause = prof->tx_pause; } if (mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - priv->prof->tx_pause, - priv->prof->tx_ppp, - priv->prof->rx_pause, - priv->prof->rx_ppp)) { + tx_pause, tx_ppp, rx_pause, rx_ppp)) { en_err(priv, "Failed setting pause params\n"); return 1; } + prof->tx_ppp = tx_ppp; + prof->rx_ppp = rx_ppp; + prof->tx_pause = tx_pause; + prof->rx_pause = rx_pause; + return 0; } @@ -310,6 +316,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) } switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: case IEEE_8021QAZ_TSA_STRICT: break; case IEEE_8021QAZ_TSA_ETS: @@ -347,6 +354,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, /* higher TC means higher priority => lower pg */ for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + pg[i] = MLX4_EN_TC_VENDOR; + tc_tx_bw[i] = MLX4_EN_BW_MAX; + break; case IEEE_8021QAZ_TSA_STRICT: pg[i] = num_strict++; tc_tx_bw[i] = MLX4_EN_BW_MAX; @@ -403,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_port_profile *prof = priv->prof; struct mlx4_en_dev *mdev = priv->mdev; + u32 tx_pause, tx_ppp, rx_pause, rx_ppp; int err; en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", @@ -411,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, pfc->mbc, pfc->delay); - prof->rx_pause = !pfc->pfc_en; - prof->tx_pause = !pfc->pfc_en; - prof->rx_ppp = pfc->pfc_en; - prof->tx_ppp = pfc->pfc_en; + rx_pause = prof->rx_pause && !pfc->pfc_en; + tx_pause = prof->tx_pause && !pfc->pfc_en; + rx_ppp = pfc->pfc_en; + tx_ppp = pfc->pfc_en; err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - prof->tx_pause, - prof->tx_ppp, - prof->rx_pause, - prof->rx_ppp); - if (err) + tx_pause, tx_ppp, rx_pause, rx_ppp); + if (err) { en_err(priv, "Failed setting pause params\n"); - else - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, - prof->rx_ppp, prof->rx_pause, - prof->tx_ppp, prof->tx_pause); + return err; + } + + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + rx_ppp, rx_pause, tx_ppp, tx_pause); + + prof->tx_ppp = tx_ppp; + prof->rx_ppp = rx_ppp; + prof->rx_pause = rx_pause; + prof->tx_pause = tx_pause; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3d4e4a5d00d1..67f74fcb265e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1046,27 +1046,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; int err; if (pause->autoneg) return -EINVAL; - priv->prof->tx_pause = pause->tx_pause != 0; - priv->prof->rx_pause = pause->rx_pause != 0; + tx_pause = !!(pause->tx_pause); + rx_pause = !!(pause->rx_pause); + rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); + tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); + err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - priv->prof->tx_pause, - priv->prof->tx_ppp, - priv->prof->rx_pause, - priv->prof->rx_ppp); - 
if (err) - en_err(priv, "Failed setting pause params\n"); - else - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, - priv->prof->rx_ppp, - priv->prof->rx_pause, - priv->prof->tx_ppp, - priv->prof->tx_pause); + tx_pause, tx_ppp, rx_pause, rx_ppp); + if (err) { + en_err(priv, "Failed setting pause params, err = %d\n", err); + return err; + } + + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + rx_ppp, rx_pause, tx_ppp, tx_pause); + + priv->prof->tx_pause = tx_pause; + priv->prof->rx_pause = rx_pause; + priv->prof->tx_ppp = tx_ppp; + priv->prof->rx_ppp = rx_ppp; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 686e18de9a97..6b2f7122b3ab 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->udp_rss = 0; } for (i = 1; i <= MLX4_MAX_PORTS; i++) { - params->prof[i].rx_pause = 1; + params->prof[i].rx_pause = !(pfcrx || pfctx); params->prof[i].rx_ppp = pfcrx; - params->prof[i].tx_pause = 1; + params->prof[i].tx_pause = !(pfcrx || pfctx); params->prof[i].tx_ppp = pfctx; params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 9c218f1cfc6c..c097eef41a9c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3335,6 +3335,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + u8 prio; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) { + priv->ets.prio_tc[prio] = prio; + priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR; + } + priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; priv->flags |= MLX4_EN_DCB_ENABLED; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index fdb3ad0cbe54..2c1a5ff6acfa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -476,6 +476,7 @@ struct mlx4_en_frag_info { #define MLX4_EN_BW_MIN 1 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ +#define MLX4_EN_TC_VENDOR 0 #define MLX4_EN_TC_ETS 7 enum dcb_pfc_type { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 728a2fb1f5c0..22a3bfe1ed8f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) u64 in_param = 0; int err; + if (!cnt) + return; + if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, base_qpn); set_param_h(&in_param, cnt); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index fabb53379727..a069fcc823c3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -5089,6 +5089,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_FS_RULE]); list_del(&fs_rule->com.list); spin_unlock_irq(mlx4_tlock(dev)); + kfree(fs_rule->mirr_mbox); kfree(fs_rule); state = 0; break; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 1fffdebbc9e8..e9a1fbcc4adf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: - case MLX5_CMD_OP_SET_RATE_LIMIT: + case MLX5_CMD_OP_SET_PP_RATE_LIMIT: case MLX5_CMD_OP_QUERY_RATE_LIMIT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: @@ -505,7 +505,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); - MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); + MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT); MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 13b5ef9d8703..5fa071620104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -590,6 +590,7 @@ struct mlx5e_channel { struct mlx5_core_dev *mdev; struct mlx5e_tstamp *tstamp; int ix; + int cpu; }; struct mlx5e_channels { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 51c4cc00a186..9d64d0759ee9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -259,6 +259,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, struct ieee_ets *ets) { + bool have_ets_tc = false; int bw_sum = 0; int i; @@ -273,11 +274,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, } /* Validate Bandwidth Sum */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + have_ets_tc = true; bw_sum += ets->tc_tx_bw[i]; + } + } - if (bw_sum != 0 && bw_sum != 100) { + if (have_ets_tc && bw_sum != 100) { netdev_err(netdev, "Failed to validate ETS: BW sum is illegal\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cc11bbbd0309..225b2ad3e15f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -71,11 +71,6 @@ struct mlx5e_channel_param { struct mlx5e_cq_param icosq_cq; }; -static int mlx5e_get_node(struct mlx5e_priv *priv, int ix) -{ - return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix); -} - static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { return MLX5_CAP_GEN(mdev, striding_rq) && @@ -452,17 +447,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int wq_sz = mlx5_wq_ll_get_size(&rq->wq); int mtt_sz = mlx5e_get_wqe_mtt_sz(); int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; - int node = mlx5e_get_node(c->priv, c->ix); int i; rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), - GFP_KERNEL, node); + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) goto err_out; /* We allocate more than mtt_sz as we will align the pointer 
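 * to MLX5_UMR_ALIGN, so mtt_alloc reserves an extra
 * MLX5_UMR_ALIGN - 1 bytes of slack per WQE on top of mtt_sz.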
*/ - rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, - GFP_KERNEL, node); + rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, + cpu_to_node(c->cpu)); if (unlikely(!rq->mpwqe.mtt_no_align)) goto err_free_wqe_info; @@ -570,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, int err; int i; - rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + rqp->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); @@ -636,8 +630,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, default: /* MLX5_WQ_TYPE_LINKED_LIST */ rq->wqe.frag_info = kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), - GFP_KERNEL, - mlx5e_get_node(c->priv, c->ix)); + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->wqe.frag_info) { err = -ENOMEM; goto err_rq_wq_destroy; @@ -1007,13 +1000,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; @@ -1060,13 +1053,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; @@ -1132,13 +1125,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; @@ -1510,8 +1503,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->priv->mdev; int err; - param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix); - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.buf_numa_node = cpu_to_node(c->cpu); + param->wq.db_numa_node = cpu_to_node(c->cpu); param->eq_ix = c->ix; err = mlx5e_alloc_cq_common(mdev, param, cq); @@ -1610,6 +1603,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq) mlx5e_free_cq(cq); } +static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ + return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} + static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -1758,12 +1756,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, { struct mlx5e_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; unsigned int irq; int err; int eqn; - c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, 
ix)); + c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1771,6 +1770,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mdev = priv->mdev; c->tstamp = &priv->tstamp; c->ix = ix; + c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); @@ -1859,8 +1859,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_rq(&c->rq); - netif_set_xps_queue(c->netdev, - mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix); + netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix); } static void mlx5e_deactivate_channel(struct mlx5e_channel *c) @@ -1919,13 +1918,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, param->wq.linear = 1; } -static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) +static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, + struct mlx5e_rq_param *param) { void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); } static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, @@ -2716,6 +2718,9 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); + if (mlx5e_vxlan_allowed(priv->mdev)) + udp_tunnel_get_rx_info(netdev); + return err; } @@ -2779,6 +2784,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); + param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); + return mlx5e_alloc_cq_common(mdev, param, cq); } @@ -2790,7 +2798,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, struct mlx5e_cq *cq = &drop_rq->cq; int err; - mlx5e_build_drop_rq_param(&rq_param); + mlx5e_build_drop_rq_param(mdev, &rq_param); err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); if (err) @@ -3554,6 +3562,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, struct sk_buff *skb, netdev_features_t features) { + unsigned int offset = 0; struct udphdr *udph; u8 proto; u16 port; @@ -3563,7 +3572,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, proto = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): - proto = ipv6_hdr(skb)->nexthdr; + proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL); break; default: goto out; @@ -4270,13 +4279,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) if (netdev->reg_state != NETREG_REGISTERED) return; - /* Device already registered: sync netdev system state */ - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - udp_tunnel_get_rx_info(netdev); - rtnl_unlock(); - } - queue_work(priv->wq, &priv->set_rx_mode_work); rtnl_lock(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 45e03c427faf..5ffd1db4e797 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -43,6 +43,11 @@ #include "en_tc.h" #include "fs_core.h" +#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) +#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \ + max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE) + static const char mlx5e_rep_driver_name[] 
= "mlx5e_rep"; static void mlx5e_rep_get_drvinfo(struct net_device *dev, @@ -230,7 +235,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) { #if IS_ENABLED(CONFIG_IPV6) - unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms, + unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME); #else unsigned long ipv6_interval = ~0UL; @@ -366,7 +371,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, case NETEVENT_NEIGH_UPDATE: n = ptr; #if IS_ENABLED(CONFIG_IPV6) - if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) + if (n->tbl != &nd_tbl && n->tbl != &arp_tbl) #else if (n->tbl != &arp_tbl) #endif @@ -414,7 +419,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, * done per device delay prob time parameter. */ #if IS_ENABLED(CONFIG_IPV6) - if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) + if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl)) #else if (!p->dev || p->tbl != &arp_tbl) #endif @@ -610,7 +615,6 @@ static int mlx5e_rep_open(struct net_device *dev) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int err; mutex_lock(&priv->state_lock); @@ -618,8 +622,9 @@ static int mlx5e_rep_open(struct net_device *dev) if (err) goto unlock; - if (!mlx5_eswitch_set_vport_state(esw, rep->vport, - MLX5_ESW_VPORT_ADMIN_STATE_UP)) + if (!mlx5_modify_vport_admin_state(priv->mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -632,11 +637,12 @@ static int mlx5e_rep_close(struct net_device *dev) struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret; mutex_lock(&priv->state_lock); - (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + mlx5_modify_vport_admin_state(priv->mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; @@ -797,9 +803,9 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; - params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE; params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 91b1b0938931..3476f594c195 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) return true; } +static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) +{ + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); + u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || + (l4_hdr_type == 
CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); + + tcp->check = 0; + tcp->psh = get_cqe_lro_tcppsh(cqe); + + if (tcp_ack) { + tcp->ack = 1; + tcp->ack_seq = cqe->lro_ack_seq_num; + tcp->window = cqe->lro_tcp_win; + } +} + static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 cqe_bcnt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); struct tcphdr *tcp; int network_depth = 0; + __wsum check; __be16 proto; u16 tot_len; void *ip_p; - u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); - u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || - (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); - skb->mac_len = ETH_HLEN; proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); @@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, ipv4->check = 0; ipv4->check = ip_fast_csum((unsigned char *)ipv4, ipv4->ihl); + + mlx5e_lro_update_tcp_hdr(cqe, tcp); + check = csum_partial(tcp, tcp->doff * 4, + csum_unfold((__force __sum16)cqe->check_sum)); + /* Almost done, don't forget the pseudo header */ + tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, + tot_len - sizeof(struct iphdr), + IPPROTO_TCP, check); } else { + u16 payload_len = tot_len - sizeof(struct ipv6hdr); struct ipv6hdr *ipv6 = ip_p; tcp = ip_p + sizeof(struct ipv6hdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; ipv6->hop_limit = cqe->lro_min_ttl; - ipv6->payload_len = cpu_to_be16(tot_len - - sizeof(struct ipv6hdr)); - } - - tcp->psh = get_cqe_lro_tcppsh(cqe); - - if (tcp_ack) { - tcp->ack = 1; - tcp->ack_seq = cqe->lro_ack_seq_num; - tcp->window = cqe->lro_tcp_win; + ipv6->payload_len = cpu_to_be16(payload_len); + + mlx5e_lro_update_tcp_hdr(cqe, tcp); + check = csum_partial(tcp, tcp->doff * 4, + csum_unfold((__force __sum16)cqe->check_sum)); + /* Almost done, don't forget the pseudo header */ + tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, + IPPROTO_TCP, check); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index acf32fe952cd..3d3b1f97dc27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -197,9 +197,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_WORSE; + if (!prev->ppms) + return curr->ppms ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_WORSE; + if (!prev->epms) + return MLX5E_AM_STATS_SAME; if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) return (curr->epms < prev->epms) ? 
MLX5E_AM_STATS_BETTER : diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 1f1f8af87d4d..707976482c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, if (iph->protocol != IPPROTO_UDP) goto out; - udph = udp_hdr(skb); + /* Don't assume skb_transport_header() was set */ + udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl); if (udph->dest != htons(9)) goto out; @@ -238,15 +239,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, int err = 0; /* Temporarily enable local_lb */ - if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { - mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); - if (!lbtp->local_lb) - mlx5_nic_vport_update_local_lb(priv->mdev, true); + err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); + if (err) + return err; + + if (!lbtp->local_lb) { + err = mlx5_nic_vport_update_local_lb(priv->mdev, true); + if (err) + return err; } err = mlx5e_refresh_tirs(priv, true); if (err) - return err; + goto out; lbtp->loopback_ok = false; init_completion(&lbtp->comp); @@ -256,16 +261,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, lbtp->pt.dev = priv->netdev; lbtp->pt.af_packet_priv = lbtp; dev_add_pack(&lbtp->pt); + + return 0; + +out: + if (!lbtp->local_lb) + mlx5_nic_vport_update_local_lb(priv->mdev, false); + return err; } static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, struct mlx5e_lbt_priv *lbtp) { - if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { - if (!lbtp->local_lb) - mlx5_nic_vport_update_local_lb(priv->mdev, false); - } + if (!lbtp->local_lb) + mlx5_nic_vport_update_local_lb(priv->mdev, false); dev_remove_pack(&lbtp->pt); mlx5e_refresh_tirs(priv, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9ba1f72060aa..42bab73a9f40 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -484,7 +484,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) tbl = &arp_tbl; #if IS_ENABLED(CONFIG_IPV6) else if (m_neigh->family == AF_INET6) - tbl = ipv6_stub->nd_tbl; + tbl = &nd_tbl; #endif else return; @@ -2091,19 +2091,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, if (err != -EAGAIN) flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || + !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) + kvfree(parse_attr); + err = rhashtable_insert_fast(&tc->ht, &flow->node, tc->ht_params); - if (err) - goto err_del_rule; + if (err) { + mlx5e_tc_del_flow(priv, flow); + kfree(flow); + } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH && - !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) - kvfree(parse_attr); return err; -err_del_rule: - mlx5e_tc_del_flow(priv, flow); - err_free: kvfree(parse_attr); kfree(flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1d6925d4369a..eea7f931cad3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -155,7 +155,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, default: hlen = mlx5e_skb_l2_header_offset(skb); } - return min_t(u16, hlen, skb->len); + return min_t(u16, hlen, skb_headlen(skb)); } static 
inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index fc606bfd1d6e..eb91de86202b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -776,7 +776,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) return err; } -int mlx5_stop_eqs(struct mlx5_core_dev *dev) +void mlx5_stop_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; int err; @@ -785,22 +785,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, pg)) { err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); if (err) - return err; + mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n", + err); } #endif err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); if (err) - return err; + mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", + err); - mlx5_destroy_unmap_eq(dev, &table->async_eq); + err = mlx5_destroy_unmap_eq(dev, &table->async_eq); + if (err) + mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", + err); mlx5_cmd_use_polling(dev); err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); if (err) - mlx5_cmd_use_events(dev); - - return err; + mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", + err); } int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c index 3c11d6e2160a..14962969c5ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c @@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size, u8 actual_size; int err; + if (!size) + return -EINVAL; + if (!fdev->mdev) return -ENOTCONN; @@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size, u8 actual_size; int err; + if (!size) + return -EINVAL; + if (!fdev->mdev) return -ENOTCONN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 06562c9a6b9c..4ddd632d10f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -316,9 +316,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; - struct irq_affinity irqdesc = { - .pre_vectors = MLX5_EQ_VEC_COMP_BASE, - }; int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; @@ -332,10 +329,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) if (!priv->irq_info) goto err_free_msix; - nvec = pci_alloc_irq_vectors_affinity(dev->pdev, + nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1, nvec, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &irqdesc); + PCI_IRQ_MSIX); if (nvec < 0) return nvec; @@ -581,8 +577,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) int ret = 0; /* Disable local_lb by default */ - if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && - MLX5_CAP_GEN(dev, disable_local_lb)) + if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) ret = mlx5_nic_vport_update_local_lb(dev, false); return ret; @@ -621,6 +616,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) return (u64)timer_l | (u64)timer_h1 << 32; } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; 
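The mlx5_stop_eqs() change above turns the function into a void, best-effort teardown: each EQ destroy failure is logged with mlx5_core_err() and teardown continues, since returning early during removal would leak the remaining EQs. A generic hedged sketch of that log-and-continue style; destroy_one() and the message format are placeholders.

#include <stdio.h>

/* Placeholder destroy routine; pretend resource 1 refuses to go away. */
static int destroy_one(int id)
{
        return id == 1 ? -5 : 0;
}

/* Best-effort teardown: never abort early, just report each failure and
 * keep releasing the remaining resources. */
static void stop_all(int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = destroy_one(i);
                if (err)
                        fprintf(stderr,
                                "failed to destroy resource %d, err(%d)\n",
                                i, err);
        }
}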
+ int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), + priv->irq_info[i].mask); + + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); + + return 0; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn) { @@ -1093,6 +1145,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_stop_eqs; } + err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_affinity_hints; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1150,6 +1208,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_cleanup_fs(dev); err_fs: + mlx5_irq_clear_affinity_hints(dev); + +err_affinity_hints: free_comp_eqs(dev); err_stop_eqs: @@ -1218,6 +1279,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_put_uars_page(dev, priv->uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index db9e665ab104..889130edb715 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); - MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); - MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index e651e4c02867..d3c33e9eea72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, return ret_entry; } -static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, +static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, u32 rate, u16 index) { - u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; + u32 
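mlx5_irq_set_affinity_hints() above walks every completion vector and, on the first failure, unwinds the hints it already installed in reverse order. A generic hedged sketch of that init-N-or-unwind idiom follows; init_one()/fini_one() stand in for mlx5_irq_set_affinity_hint()/mlx5_irq_clear_affinity_hint().

#include <stdlib.h>

static int init_one(void **slot)
{
        *slot = malloc(64);
        return *slot ? 0 : -1;
}

static void fini_one(void **slot)
{
        free(*slot);
        *slot = NULL;
}

/* Initialise n items; on failure, tear down the ones already set up in
 * reverse order so the caller sees all-or-nothing behaviour. */
static int init_all(void **slots, int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = init_one(&slots[i]);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        for (i--; i >= 0; i--)
                fini_one(&slots[i]);
        return err;
}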
in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0}; - MLX5_SET(set_rate_limit_in, in, opcode, - MLX5_CMD_OP_SET_RATE_LIMIT); - MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); - MLX5_SET(set_rate_limit_in, in, rate_limit, rate); + MLX5_SET(set_pp_rate_limit_in, in, opcode, + MLX5_CMD_OP_SET_PP_RATE_LIMIT); + MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index); + MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) entry->refcount++; } else { /* new rate limit */ - err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); + err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index); if (err) { mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", rate, err); @@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) entry->refcount--; if (!entry->refcount) { /* need to remove rate */ - mlx5_set_rate_limit_cmd(dev, 0, entry->index); + mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index); entry->rate = 0; } @@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) /* Clear all configured rates */ for (i = 0; i < table->max_size; i++) if (table->rl_entry[i].rate) - mlx5_set_rate_limit_cmd(dev, 0, - table->rl_entry[i].index); + mlx5_set_pp_rate_limit_cmd(dev, 0, + table->rl_entry[i].index); kfree(dev->priv.rl_table.rl_entry); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index d653b0025b13..71153c0f1605 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -36,6 +36,9 @@ #include #include "mlx5_core.h" +/* Mutex to hold while enabling or disabling RoCE */ +static DEFINE_MUTEX(mlx5_roce_en_lock); + static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u32 *out, int outlen) { @@ -908,23 +911,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable) void *in; int err; - mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); + if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) && + !MLX5_CAP_GEN(mdev, disable_local_lb_uc)) + return 0; + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; - MLX5_SET(modify_nic_vport_context_in, in, - field_select.disable_mc_local_lb, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.disable_mc_local_lb, !enable); - - MLX5_SET(modify_nic_vport_context_in, in, - field_select.disable_uc_local_lb, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.disable_uc_local_lb, !enable); + if (MLX5_CAP_GEN(mdev, disable_local_lb_mc)) + MLX5_SET(modify_nic_vport_context_in, in, + field_select.disable_mc_local_lb, 1); + + if (MLX5_CAP_GEN(mdev, disable_local_lb_uc)) + MLX5_SET(modify_nic_vport_context_in, in, + field_select.disable_uc_local_lb, 1); + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + if (!err) + mlx5_core_dbg(mdev, "%s local_lb\n", + enable ? 
"enable" : "disable"); + kvfree(in); return err; } @@ -988,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev, int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev) { - if (atomic_inc_return(&mdev->roce.roce_en) != 1) - return 0; - return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED); + int err = 0; + + mutex_lock(&mlx5_roce_en_lock); + if (!mdev->roce.roce_en) + err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED); + + if (!err) + mdev->roce.roce_en++; + mutex_unlock(&mlx5_roce_en_lock); + + return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev) { - if (atomic_dec_return(&mdev->roce.roce_en) != 0) - return 0; - return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED); + int err = 0; + + mutex_lock(&mlx5_roce_en_lock); + if (mdev->roce.roce_en) { + mdev->roce.roce_en--; + if (mdev->roce.roce_en == 0) + err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED); + + if (err) + mdev->roce.roce_en++; + } + mutex_unlock(&mlx5_roce_en_lock); + return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 07a9ba6cfc70..2f74953e4561 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; - spin_lock(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); vxlan = radix_tree_lookup(&vxlan_db->tree, port); - spin_unlock(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); return vxlan; } @@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) struct mlx5e_vxlan *vxlan; int err; - if (mlx5e_vxlan_lookup_port(priv, port)) + mutex_lock(&priv->state_lock); + vxlan = mlx5e_vxlan_lookup_port(priv, port); + if (vxlan) { + atomic_inc(&vxlan->refcount); goto free_work; + } if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) goto free_work; @@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) goto err_delete_port; vxlan->udp_port = port; + atomic_set(&vxlan->refcount, 1); - spin_lock_irq(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); - spin_unlock_irq(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); if (err) goto err_free; @@ -113,35 +118,39 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); free_work: + mutex_unlock(&priv->state_lock); kfree(vxlan_work); } -static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) +static void mlx5e_vxlan_del_port(struct work_struct *work) { + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + u16 port = vxlan_work->port; struct mlx5e_vxlan *vxlan; + bool remove = false; - spin_lock_irq(&vxlan_db->lock); - vxlan = radix_tree_delete(&vxlan_db->tree, port); - spin_unlock_irq(&vxlan_db->lock); - + mutex_lock(&priv->state_lock); + spin_lock_bh(&vxlan_db->lock); + vxlan = radix_tree_lookup(&vxlan_db->tree, port); if (!vxlan) - return; - - mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); - - kfree(vxlan); -} + goto 
out_unlock; -static void mlx5e_vxlan_del_port(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; + if (atomic_dec_and_test(&vxlan->refcount)) { + radix_tree_delete(&vxlan_db->tree, port); + remove = true; + } - __mlx5e_vxlan_core_del_port(priv, port); +out_unlock: + spin_unlock_bh(&vxlan_db->lock); + if (remove) { + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + kfree(vxlan); + } + mutex_unlock(&priv->state_lock); kfree(vxlan_work); } @@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) struct mlx5e_vxlan *vxlan; unsigned int port = 0; - spin_lock_irq(&vxlan_db->lock); + /* Lockless since we are the only radix-tree consumers, wq is disabled */ while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { port = vxlan->udp_port; - spin_unlock_irq(&vxlan_db->lock); - __mlx5e_vxlan_core_del_port(priv, (u16)port); - spin_lock_irq(&vxlan_db->lock); + radix_tree_delete(&vxlan_db->tree, port); + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + kfree(vxlan); } - spin_unlock_irq(&vxlan_db->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 5def12c048e3..5ef6ae7d568a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -36,6 +36,7 @@ #include "en.h" struct mlx5e_vxlan { + atomic_t refcount; u16 udp_port; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 23f7d828cf67..6ef20e5cc77d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, return 0; } - wmb(); /* reset needs to be written before we read control register */ + /* Reset needs to be written before we read control register, and + * we must wait for the HW to become responsive once again + */ + wmb(); + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index a6441208e9d9..fb082ad21b00 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -59,6 +59,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF #define MLXSW_PCI_FW_READY_MAGIC 0x5E diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 696b99e65a5a..99bd6e88ebc7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2974,6 +2974,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", mlxsw_sp_port->local_port); + err = PTR_ERR(mlxsw_sp_port_vlan); goto err_port_vlan_get; } @@ -4163,6 +4164,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) { + u16 vid = 1; int err; err = 
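The VXLAN rework above makes each offloaded UDP port refcounted: adding an already-known port only bumps the count, and the hardware delete happens only when the last user goes away, all under one lock. A hedged sketch of that pattern with a small fixed-size table; port_hw_add()/port_hw_del() are placeholders for the firmware commands, not mlx5 calls.

#include <pthread.h>
#include <stdint.h>

#define MAX_PORTS 16

struct port_entry {
        uint16_t port;
        unsigned int refcount;          /* 0 means the slot is free */
};

static struct port_entry table[MAX_PORTS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int port_hw_add(uint16_t port) { (void)port; return 0; }
static void port_hw_del(uint16_t port) { (void)port; }

int port_add(uint16_t port)
{
        struct port_entry *slot = NULL;
        int err = 0;
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAX_PORTS; i++) {
                if (table[i].refcount && table[i].port == port) {
                        table[i].refcount++;    /* already offloaded */
                        goto out;
                }
                if (!table[i].refcount && !slot)
                        slot = &table[i];
        }
        if (!slot) {
                err = -1;
                goto out;
        }
        err = port_hw_add(port);
        if (!err) {
                slot->port = port;
                slot->refcount = 1;
        }
out:
        pthread_mutex_unlock(&table_lock);
        return err;
}

void port_del(uint16_t port)
{
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAX_PORTS; i++) {
                if (table[i].refcount && table[i].port == port) {
                        if (--table[i].refcount == 0)
                                port_hw_del(port);      /* last user gone */
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
}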
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); @@ -4175,8 +4177,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) true, false); if (err) goto err_port_vlan_set; + + for (; vid <= VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, + vid, false); + if (err) + goto err_vid_learning_set; + } + return 0; +err_vid_learning_set: + for (vid--; vid >= 1; vid--) + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); err_port_vlan_set: mlxsw_sp_port_stp_set(mlxsw_sp_port, false); err_port_stp_set: @@ -4186,6 +4199,12 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) { + u16 vid; + + for (vid = VLAN_N_VID - 1; vid >= 1; vid--) + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, + vid, true); + mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, false, false); mlxsw_sp_port_stp_set(mlxsw_sp_port, false); @@ -4216,7 +4235,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, return -EINVAL; if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, @@ -4328,6 +4350,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct netdev_notifier_changeupper_info *info = ptr; struct net_device *upper_dev; int err = 0; @@ -4339,7 +4362,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, return -EINVAL; if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) return -EINVAL; break; case NETDEV_CHANGEUPPER: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 84ce83acdc19..88892d47acae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -326,6 +326,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev); +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev); /* spectrum.c */ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 93728c694e6d..0a9adc5962fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - 
MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(10000, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 5189022a1c8c..516e63244606 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -729,26 +729,29 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { + struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; struct mlxsw_sp_vr *vr; int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) return ERR_PTR(-EBUSY); - vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr->fib4)) - return ERR_CAST(vr->fib4); - vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); - if (IS_ERR(vr->fib6)) { - err = PTR_ERR(vr->fib6); + fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(fib4)) + return ERR_CAST(fib4); + fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib6)) { + err = PTR_ERR(fib6); goto err_fib6_create; } + vr->fib4 = fib4; + vr->fib6 = fib6; vr->tb_id = tb_id; return vr; err_fib6_create: - mlxsw_sp_fib_destroy(vr->fib4); - vr->fib4 = NULL; + mlxsw_sp_fib_destroy(fib4); return ERR_PTR(err); } @@ -1531,11 +1534,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, dipn = htonl(dip); dev = mlxsw_sp->router->rifs[rif]->dev; n = neigh_lookup(&arp_tbl, &dipn, dev); - if (!n) { - netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n", - &dip); + if (!n) return; - } netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); neigh_event_send(n, NULL); @@ -1562,11 +1562,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, dev = mlxsw_sp->router->rifs[rif]->dev; n = neigh_lookup(&nd_tbl, &dip, dev); - if (!n) { - netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n", - &dip); + if (!n) return; - } netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); neigh_event_send(n, NULL); @@ -2536,7 +2533,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, { if (!removing) nh->should_offload = 1; - else if (nh->offloaded) + else nh->should_offload = 0; nh->update = 1; } @@ -3035,6 +3032,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; int i; + if (!list_is_singular(&nh_grp->fib_list)) + return; + for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d39ffbfcc436..42a6afcaae03 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -134,6 +134,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, return NULL; } +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev) @@ -1092,6 +1098,7 @@ static int 
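mlxsw_sp_vr_create() above now builds both FIB tables into local variables and assigns vr->fib4/vr->fib6 only after both creations succeeded, so an error path never leaves a half-initialised VR behind. A small hedged sketch of that "construct locally, commit on success" idiom; make_a()/make_b() and struct obj are placeholders.

#include <stdlib.h>

struct obj {
        void *a;
        void *b;
};

static void *make_a(void) { return malloc(32); }
static void *make_b(void) { return malloc(32); }

/* Fill in both members of @o, or leave it completely untouched on error. */
static int obj_init(struct obj *o)
{
        void *a, *b;

        a = make_a();
        if (!a)
                return -1;
        b = make_b();
        if (!b) {
                free(a);                /* only the local is cleaned up */
                return -1;
        }
        /* Commit only once every step has succeeded. */
        o->a = a;
        o->b = b;
        return 0;
}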
__mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1101,9 +1108,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, action, local_port); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1128,6 +1142,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, bool adding, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1138,9 +1153,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, lag_vid, lag_id); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1185,6 +1207,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, u16 fid, u16 mid, bool adding) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1194,7 +1217,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, MLXSW_REG_SFD_REC_ACTION_NOP, mid); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: kfree(sfd_pl); return err; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c20dd00a1cae..899e7d53e669 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -52,8 +52,7 @@ struct nfp_app; #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 -#define NFP_FL_META_FLAG_NEW_MASK 128 -#define NFP_FL_META_FLAG_LAST_MASK 1 +#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7) #define NFP_FL_MASK_REUSE_TIME_NS 40000 #define NFP_FL_MASK_ID_LOCATION 1 diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 3226ddc55f99..d9582ccc0025 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len, id = nfp_add_mask_table(app, mask_data, mask_len); if (id < 0) return false; - *meta_flags |= NFP_FL_META_FLAG_NEW_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } *mask_id = id; @@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, if (!mask_entry) return false; + if (meta_flags) + *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; + *mask_id = mask_entry->mask_id; mask_entry->ref_cnt--; if (!mask_entry->ref_cnt) { @@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char 
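The SFD hunks above capture the expected record count before issuing the register write and compare it against the count the device reports back, returning -EBUSY when an FDB entry was silently not programmed. A hedged sketch of that verify-after-write check; hw_write() and the payload layout are invented for illustration.

#include <errno.h>
#include <stdint.h>

struct sfd_payload {
        uint8_t num_rec;                /* records the device consumed */
        uint8_t rec[64];
};

/* Stand-in for the register write; the device may update pl->num_rec. */
static int hw_write(struct sfd_payload *pl) { (void)pl; return 0; }

static int fdb_program(struct sfd_payload *pl)
{
        uint8_t expected = pl->num_rec; /* remember what we asked for */
        int err;

        err = hw_write(pl);
        if (err)
                return err;

        /* The write succeeded, but did the device take all the records? */
        if (pl->num_rec != expected)
                return -EBUSY;
        return 0;
}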
*mask_data, u32 mask_len, nfp_release_mask_id(app, *mask_id); kfree(mask_entry); if (meta_flags) - *meta_flags |= NFP_FL_META_FLAG_LAST_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } return true; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e118b5f23996..8d53a593fb27 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, return err; } nn_writeb(nn, ctrl_offset, entry->entry); + nfp_net_irq_unmask(nn, entry->entry); return 0; } @@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset, unsigned int vector_idx) { nn_writeb(nn, ctrl_offset, 0xff); + nn_pci_flush(nn); free_irq(nn->irq_entries[vector_idx].vector, nn); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index dc016dfec64d..8e623d8fa78e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -306,7 +306,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev, ls >= ARRAY_SIZE(ls_to_ethtool)) return 0; - cmd->base.speed = ls_to_ethtool[sts]; + cmd->base.speed = ls_to_ethtool[ls]; cmd->base.duplex = DUPLEX_FULL; return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d540a9dc77b3..9a7655560629 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port, { u8 __iomem *mem = port->eth_stats; - /* TX and RX stats are flipped as we are returning the stats as seen - * at the switch port corresponding to the phys port. 
- */ - stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK); - stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS); - stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS); + stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); + stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); + stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); - stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); - stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); - stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); + stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK); + stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS); + stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS); } static void @@ -297,6 +294,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->netdev_ops = &nfp_repr_netdev_ops; netdev->ethtool_ops = &nfp_port_ethtool_ops; + netdev->max_mtu = pf_netdev->max_mtu; + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); if (nfp_app_has_tc(app)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 37364555c42b..f88ff3f4b661 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -68,10 +68,11 @@ /* CPP address to retrieve the data from */ #define NSP_BUFFER 0x10 #define NSP_BUFFER_CPP GENMASK_ULL(63, 40) -#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) -#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER 0x18 +#define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER_CONFIG 0x20 #define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) @@ -412,8 +413,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (err < 0) return err; - cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; - cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); if (in_buf && in_size) { err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 9d989c96278c..e41f28602535 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1663,6 +1663,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); if (eth_type == ETH_P_IP) { + if (iph->protocol != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + iph->protocol); + return -EINVAL; + } + cm_info->local_ip[0] = ntohl(iph->daddr); cm_info->remote_ip[0] = ntohl(iph->saddr); cm_info->ip_version = TCP_IPV4; @@ -1671,6 +1678,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, *payload_len = ntohs(iph->tot_len) - ip_hlen; } else if (eth_type == ETH_P_IPV6) { ip6h = (struct ipv6hdr *)iph; + + if (ip6h->nexthdr != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + iph->protocol); + return -EINVAL; + } + for (i = 0; i < 4; i++) { cm_info->local_ip[i] = ntohl(ip6h->daddr.in6_u.u6_addr32[i]); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index e5ee9f274a71..6eab2c632c75 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ 
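The NSP fix above reads the default-buffer register with its own layout: bits 63:40 carry the CPP target and bits 39:0 the address, extracted with FIELD_GET over GENMASK_ULL masks. A hedged userspace sketch of that extraction without the kernel macros; the field names mirror the defines above, but the helpers and the example register value are mine.

#include <stdint.h>
#include <stdio.h>

/* GENMASK_ULL(h, l): bits h..l set, everything else clear. */
#define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

#define DFLT_BUFFER_CPP         GENMASK_ULL(63, 40)     /* CPP target id */
#define DFLT_BUFFER_ADDRESS     GENMASK_ULL(39, 0)      /* buffer address */

/* Minimal FIELD_GET: mask the register, then shift the field down to bit 0. */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
        return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
        uint64_t reg = 0x00abcd0123456789ULL;   /* arbitrary example value */

        printf("cpp  = 0x%llx\n",
               (unsigned long long)field_get(DFLT_BUFFER_CPP, reg));
        printf("addr = 0x%llx\n",
               (unsigned long long)field_get(DFLT_BUFFER_ADDRESS, reg));
        return 0;
}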
b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2066,8 +2066,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); - qede_rdma_dev_event_open(edev); - edev->state = QEDE_STATE_OPEN; DP_INFO(edev, "Ending successfully qede load\n"); @@ -2168,12 +2166,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link) DP_NOTICE(edev, "Link is up\n"); netif_tx_start_all_queues(edev->ndev); netif_carrier_on(edev->ndev); + qede_rdma_dev_event_open(edev); } } else { if (netif_carrier_ok(edev->ndev)) { DP_NOTICE(edev, "Link is down\n"); netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); + qede_rdma_dev_event_close(edev); } } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f7080d0ab874..46b0372dd032 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) struct list_head *head = &mbx->cmd_q; struct qlcnic_cmd_args *cmd = NULL; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); while (!list_empty(head)) { cmd = list_entry(head->next, struct qlcnic_cmd_args, list); @@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) qlcnic_83xx_notify_cmd_completion(adapter, cmd); } - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); } static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) @@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_del(&cmd->list); mbx->num_cmds--; - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); qlcnic_83xx_notify_cmd_completion(adapter, cmd); } @@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, init_completion(&cmd->completion); cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_add_tail(&cmd->list, &mbx->cmd_q); mbx->num_cmds++; @@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; queue_work(mbx->work_q, &mbx->work); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return 0; } @@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; spin_unlock_irqrestore(&mbx->aen_lock, flags); - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); if (list_empty(head)) { - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return; } cmd = list_entry(head->next, struct qlcnic_cmd_args, list); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); mbx_ops->encode_cmd(adapter, cmd); mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 540c7622dcb1..929fb8d96ec0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -166,12 +166,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, if (skb_headroom(skb) < required_headroom) { if 
(pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) - return RMNET_MAP_CONSUMED; + goto fail; } map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); if (!map_header) - return RMNET_MAP_CONSUMED; + goto fail; if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { if (ep->mux_id == 0xff) @@ -183,6 +183,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, skb->protocol = htons(ETH_P_MAP); return RMNET_MAP_SUCCESS; + +fail: + kfree_skb(skb); + return RMNET_MAP_CONSUMED; } /* Ingress / Egress Entry Points */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a3c949ea7d1a..db553d4e8d22 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1388,7 +1388,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) { void __iomem *ioaddr = tp->mmio_addr; - return RTL_R8(IBISR0) & 0x02; + return RTL_R8(IBISR0) & 0x20; } static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) @@ -1396,7 +1396,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); } @@ -2025,21 +2025,6 @@ static int rtl8169_set_speed(struct net_device *dev, return ret; } -static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), - cmd->duplex, cmd->advertising); - rtl_unlock_work(tp); - - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -2166,6 +2151,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev, return rc; } +static int rtl8169_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + u32 advertising; + + if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising)) + return -EINVAL; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, advertising); + rtl_unlock_work(tp); + + return rc; +} + static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2233,19 +2239,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) void __iomem *ioaddr = tp->mmio_addr; dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; - bool ret; RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + RTL_R32(CounterAddrHigh); cmd = (u64)paddr & DMA_BIT_MASK(32); RTL_W32(CounterAddrLow, cmd); RTL_W32(CounterAddrLow, cmd | counter_cmd); - ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); - - RTL_W32(CounterAddrLow, 0); - RTL_W32(CounterAddrHigh, 0); - - return ret; + return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } static bool rtl8169_reset_counters(struct net_device *dev) @@ -2367,7 +2368,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, .get_link = ethtool_op_get_link, - .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = rtl8169_set_msglevel, .get_regs = 
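The r8169 fix above tests the correct interrupt-status bit (0x20, bit 5, instead of 0x02) and waits for the condition to go high rather than low. A hedged userspace sketch of the poll-bit-with-timeout idiom behind those rtl_*_loop_wait helpers; read_reg() and the bit name are illustrative, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define IBISR0_TX_DONE  0x20            /* bit 5, not 0x02 (bit 1) */

/* Stand-in for an MMIO read of the status register. */
static uint8_t read_reg(void) { return IBISR0_TX_DONE; }

static void sleep_ms(unsigned int ms)
{
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

        nanosleep(&ts, NULL);
}

/* Poll until the bit is set ("wait high"), giving up after @tries attempts. */
static bool wait_bit_high(uint8_t bit, unsigned int delay_ms, unsigned int tries)
{
        unsigned int i;

        for (i = 0; i < tries; i++) {
                if (read_reg() & bit)
                        return true;
                sleep_ms(delay_ms);
        }
        return false;
}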
rtl8169_get_regs, @@ -2379,6 +2379,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .nway_reset = rtl8169_nway_reset, .get_link_ksettings = rtl8169_get_link_ksettings, + .set_link_ksettings = rtl8169_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -8465,12 +8466,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_msi_5; } + pci_set_drvdata(pdev, dev); + rc = register_netdev(dev); if (rc < 0) goto err_out_cnt_6; - pci_set_drvdata(pdev, dev); - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d2e88a30f57b..db31963c5d9d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3212,18 +3212,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* ioremap the TSU registers */ if (mdp->cd->tsu) { struct resource *rtsu; + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); - if (IS_ERR(mdp->tsu_addr)) { - ret = PTR_ERR(mdp->tsu_addr); + if (!rtsu) { + dev_err(&pdev->dev, "no TSU resource\n"); + ret = -ENODEV; + goto out_release; + } + /* We can only request the TSU region for the first port + * of the two sharing this TSU for the probe to succeed... + */ + if (devno % 2 == 0 && + !devm_request_mem_region(&pdev->dev, rtsu->start, + resource_size(rtsu), + dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "can't request TSU resource.\n"); + ret = -EBUSY; + goto out_release; + } + mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, + resource_size(rtsu)); + if (!mdp->tsu_addr) { + dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); + ret = -ENOMEM; goto out_release; } mdp->port = devno % 2; ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; } - /* initialize first or needed device */ - if (!devno || pd->needs_init) { + /* Need to init only the first port of the two sharing a TSU */ + if (devno % 2 == 0) { if (mdp->cd->chip_reset) mdp->cd->chip_reset(ndev); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index fc8f8bdf6579..056cb6093630 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_alloc_ordered_workqueue; } + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + /* Only FIBs pointing to our own netdevs are programmed into * the device, so no need to pass a callback. 
*/ @@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) rocker->hw.id = rocker_read64(rocker, SWITCH_ID); - err = rocker_probe_ports(rocker); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); - goto err_probe_ports; - } - dev_info(&pdev->dev, "Rocker switch with id %*phN\n", (int)sizeof(rocker->hw.id), &rocker->hw.id); return 0; -err_probe_ports: - unregister_switchdev_notifier(&rocker_switchdev_notifier); err_register_switchdev_notifier: unregister_fib_notifier(&rocker->fib_nb); err_register_fib_notifier: + rocker_remove_ports(rocker); +err_probe_ports: destroy_workqueue(rocker->rocker_owq); err_alloc_ordered_workqueue: free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); @@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev) { struct rocker *rocker = pci_get_drvdata(pdev); - rocker_remove_ports(rocker); unregister_switchdev_notifier(&rocker_switchdev_notifier); unregister_fib_notifier(&rocker->fib_nb); + rocker_remove_ports(rocker); rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); destroy_workqueue(rocker->rocker_owq); free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..09352ee43b55 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5726,7 +5726,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) * MCFW do not support VFs. */ rc = efx_ef10_vport_set_mac_address(efx); - } else { + } else if (rc) { efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, sizeof(inbuf), NULL, 0, rc); } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..9b85cbd5a231 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } if (buffer->flags & EFX_TX_BUF_SKB) { + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; dev_consume_skb_any((struct sk_buff *)buffer->skb); @@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); - efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e82b4b70b7be..627fec210e2f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -409,7 +409,7 @@ struct stmmac_desc_ops { /* get timestamp value */ u64(*get_timestamp) (void *desc, u32 ats); /* get rx timestamp status */ - int (*get_rx_timestamp_status) (void *desc, u32 ats); + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); /* Display ring */ void (*display_ring)(void *head, unsigned int size, bool rx); /* set MSS via context descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 4b286e27c4ca..7e089bf906b4 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc) return ret; } -static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; int ret = -EINVAL; @@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) /* Check if timestamp is OK from context descriptor */ do { - ret = dwmac4_rx_check_timestamp(desc); + ret = dwmac4_rx_check_timestamp(next_desc); if (ret < 0) goto exit; i++; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7546b3664113..2a828a312814 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats) return ns; } -static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) +static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) { if (ats) { struct dma_extended_desc *p = (struct dma_extended_desc *)desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index f817f8f36569..db4cee57bb24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats) return ns; } -static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) +static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 721b61655261..08c19ebd5306 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; + u32 reg_value; /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second * formula = (1/ptp_clock) * 1000000000 @@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, data &= PTP_SSIR_SSINC_MASK; + reg_value = data; if (gmac4) - data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; + reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT; - writel(data, ioaddr + PTP_SSIR); + writel(reg_value, ioaddr + PTP_SSIR); return data; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 16bd50929084..d0cc73795056 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) bool stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; + int interface = priv->plat->interface; unsigned long flags; bool ret = false; + if ((interface != PHY_INTERFACE_MODE_MII) && + (interface != PHY_INTERFACE_MODE_GMII) && + !phy_interface_mode_is_rgmii(interface)) + goto out; + /* Using PCS we cannot dial with the phy registers at this stage * so we do not support extra feature like EEE. 
*/ @@ -483,7 +489,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, desc = np; /* Check if timestamp is available */ - if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { + if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) { ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); @@ -2564,6 +2570,7 @@ static int stmmac_open(struct net_device *dev) priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); priv->rx_copybreak = STMMAC_RX_COPYBREAK; + priv->mss = 0; ret = alloc_dma_desc_resources(priv); if (ret < 0) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index db8a4bcfc6c7..a5bb7b19040e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -996,7 +996,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); - else if (phy->speed == 10) + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) mac_control |= BIT(18); /* In Band mode */ if (priv->rx_pause) @@ -1618,6 +1619,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, q_idx = q_idx % cpsw->tx_ch_num; txch = cpsw->txv[q_idx].ch; + txq = netdev_get_tx_queue(ndev, q_idx); ret = cpsw_tx_packet_submit(priv, skb, txch); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); @@ -1628,15 +1630,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, * tell the kernel to stop sending us tx frames. */ if (unlikely(!cpdma_check_free_tx_desc(txch))) { - txq = netdev_get_tx_queue(ndev, q_idx); netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); } return NETDEV_TX_OK; fail: ndev->stats.tx_dropped++; - txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 6d68c8a8f4f2..da4ec575ccf9 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -34,6 +34,7 @@ config XILINX_AXI_EMAC config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" depends on (PPC || MICROBLAZE) + depends on !64BIT || BROKEN select PHYLIB ---help--- This driver supports the Xilinx 10/100/1000 LocalLink TEMAC diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ed51018a813e..b9d8d71a6ecc 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1503,6 +1503,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->info; + bool metadata = geneve->collect_md; __u8 tmp_vni[3]; __u32 vni; @@ -1511,32 +1512,24 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; - if (rtnl_dereference(geneve->sock4)) { + if (!metadata && ip_tunnel_info_af(info) == AF_INET) { if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, info->key.u.ipv4.dst)) goto nla_put_failure; - if 
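The cpsw transmit path above now stops the queue, issues a memory barrier, and re-checks for free descriptors, waking the queue again if the completion path freed some in the meantime; this closes the race where a queue could stay stopped forever. A hedged C11-atomics sketch of that stop/barrier/recheck/wake ordering; the names are placeholders, not cpsw code.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int free_descs;           /* producer consumes, completion refills */
static atomic_bool queue_stopped;

static void queue_stop(void)  { atomic_store(&queue_stopped, true); }
static void queue_wake(void)  { atomic_store(&queue_stopped, false); }

/* Producer side: called after submitting a packet leaves no free descriptors. */
static void tx_maybe_stop(void)
{
        if (atomic_load(&free_descs) > 0)
                return;

        queue_stop();
        /* Barrier: make the stop visible before re-reading the counter, so a
         * concurrent completion either sees the stop or we see its refill. */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&free_descs) > 0)
                queue_wake();
}

/* Completion side: @n descriptors were just freed. */
static void tx_complete(int n)
{
        atomic_fetch_add(&free_descs, n);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&queue_stopped) && atomic_load(&free_descs) > 0)
                queue_wake();
}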
(nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, !!(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; - } - #if IS_ENABLED(CONFIG_IPV6) - if (rtnl_dereference(geneve->sock6)) { + } else if (!metadata) { if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, &info->key.u.ipv6.dst)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, !(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; - - if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, - !geneve->use_udp6_rx_checksums)) - goto nla_put_failure; - } #endif + } if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || @@ -1546,10 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; - if (geneve->collect_md) { - if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) + if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) goto nla_put_failure; - } + + if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + !geneve->use_udp6_rx_checksums)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 71ddadbf2368..d7ba2b813eff 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1381,8 +1381,8 @@ static int rr_close(struct net_device *dev) rrpriv->info_dma); rrpriv->info = NULL; - free_irq(pdev->irq, dev); spin_unlock_irqrestore(&rrpriv->lock, flags); + free_irq(pdev->irq, dev); return 0; } diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5176be76ca7d..4f3afcf92a7c 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -635,14 +635,27 @@ struct nvsp_message { #define NETVSC_MTU 65535 #define NETVSC_MTU_MIN ETH_MIN_MTU -#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ -#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ -#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ +/* Max buffer sizes allowed by a host */ +#define NETVSC_RECEIVE_BUFFER_SIZE (1024 * 1024 * 31) /* 31MB */ +#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024 * 1024 * 15) /* 15MB */ +#define NETVSC_RECEIVE_BUFFER_DEFAULT (1024 * 1024 * 16) + +#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ +#define NETVSC_SEND_BUFFER_DEFAULT (1024 * 1024) + #define NETVSC_INVALID_INDEX -1 #define NETVSC_SEND_SECTION_SIZE 6144 #define NETVSC_RECV_SECTION_SIZE 1728 +/* Default size of TX buf: 1MB, RX buf: 16MB */ +#define NETVSC_MIN_TX_SECTIONS 10 +#define NETVSC_DEFAULT_TX (NETVSC_SEND_BUFFER_DEFAULT \ + / NETVSC_SEND_SECTION_SIZE) +#define NETVSC_MIN_RX_SECTIONS 10 +#define NETVSC_DEFAULT_RX (NETVSC_RECEIVE_BUFFER_DEFAULT \ + / NETVSC_RECV_SECTION_SIZE) + #define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_SEND_BUFFER_ID 0 diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 8d5077fb0492..a6bafcf55776 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -262,6 +262,11 @@ static int netvsc_init_buf(struct hv_device *device, buf_size = device_info->recv_sections * device_info->recv_section_size; buf_size = roundup(buf_size, PAGE_SIZE); + /* Legacy hosts only allow smaller receive buffer */ + if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) + buf_size = min_t(unsigned int, buf_size, + NETVSC_RECEIVE_BUFFER_SIZE_LEGACY); + net_device->recv_buf = vzalloc(buf_size); if (!net_device->recv_buf) { netdev_err(ndev, diff --git 
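The hyperv change above stops hard-coding the default section counts and derives them from the default buffer sizes divided by the section sizes (1 MB / 6144 for TX, 16 MB / 1728 for RX). A tiny hedged C check of that arithmetic; it only restates the defines above and prints the resulting defaults.

#include <stdio.h>

#define SEND_BUFFER_DEFAULT     (1024 * 1024)           /* 1 MB */
#define RECV_BUFFER_DEFAULT     (1024 * 1024 * 16)      /* 16 MB */
#define SEND_SECTION_SIZE       6144
#define RECV_SECTION_SIZE       1728

int main(void)
{
        /* Integer division, as in the kernel defines. */
        printf("default TX sections: %d\n",
               SEND_BUFFER_DEFAULT / SEND_SECTION_SIZE);        /* 170 */
        printf("default RX sections: %d\n",
               RECV_BUFFER_DEFAULT / RECV_SECTION_SIZE);        /* 9709 */
        return 0;
}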
a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a32ae02e1b6c..c849de3cb046 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -46,10 +46,6 @@ #include "hyperv_net.h" #define RING_SIZE_MIN 64 -#define NETVSC_MIN_TX_SECTIONS 10 -#define NETVSC_DEFAULT_TX 192 /* ~1M */ -#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ -#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 3e4c8b21403c..46b42de13d76 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -888,7 +888,7 @@ static const struct ieee802154_ops adf7242_ops = { .set_cca_ed_level = adf7242_set_cca_ed_level, }; -static void adf7242_debug(u8 irq1) +static void adf7242_debug(struct adf7242_local *lp, u8 irq1) { #ifdef DEBUG u8 stat; @@ -932,7 +932,7 @@ static irqreturn_t adf7242_isr(int irq, void *data) dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n", __func__, irq1); - adf7242_debug(irq1); + adf7242_debug(lp, irq1); xmit = test_bit(FLAG_XMIT, &lp->flags); diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1f3295e274d0..71ff6bd4be9f 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -304,6 +304,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { + if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + ipvlan->phy_dev->dev_addr)) + skb->pkt_type = PACKET_OTHERHOST; + ret = RX_HANDLER_ANOTHER; success = true; } @@ -375,6 +379,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) .flowi4_oif = dev->ifindex, .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC, + .flowi4_mark = skb->mark, .daddr = ip4h->daddr, .saddr = ip4h->saddr, }; @@ -409,7 +414,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct dst_entry *dst; int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { - .flowi6_iif = dev->ifindex, + .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowi6_flags = FLOWI_FLAG_ANYSRC, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d2aea961e0f4..0f35597553f4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -480,7 +480,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); - if (vlan == NULL) + if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) return RX_HANDLER_PASS; dev = vlan->dev; @@ -1037,7 +1037,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, lowerdev_features &= (features | ~NETIF_F_LRO); features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; - features &= ~NETIF_F_NETNS_LOCAL; + features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); return features; } @@ -1441,9 +1441,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, return 0; unregister_netdev: + /* macvlan_uninit would free the macvlan port */ unregister_netdevice(dev); + return err; destroy_macvlan_port: - if (create) + /* the macvlan port may be freed by macvlan_uninit when fail to register. + * so we destroy the macvlan port only when it's valid. 
+ */ + if (create && macvlan_port_get_rtnl(dev)) macvlan_port_destroy(port->dev); return err; } diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index c1e52b9dc58d..e911e4990b20 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -167,7 +167,7 @@ static int at803x_set_wol(struct phy_device *phydev, mac = (const u8 *) ndev->dev_addr; if (!is_valid_ether_addr(mac)) - return -EFAULT; + return -EINVAL; for (i = 0; i < 3; i++) { phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, @@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev) { int value; - mutex_lock(&phydev->lock); - value = phy_read(phydev, MII_BMCR); value &= ~(BMCR_PDOWN | BMCR_ISOLATE); phy_write(phydev, MII_BMCR, value); - mutex_unlock(&phydev->lock); - return 0; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4d02b27df044..a3f456b91c99 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2069,7 +2069,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1145_config_init, - .config_aneg = &marvell_config_aneg, + .config_aneg = &m88e1101_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index aebc08beceba..21b3f36e023a 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -16,6 +16,7 @@ * link takes priority and the other port is completely locked out. */ #include +#include enum { MV_PCS_BASE_T = 0x0000, @@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = 0x002b09aa, - .phy_id_mask = 0xffffffff, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", .features = SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | @@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { 0x002b09aa, 0xffffffff }, + { 0x002b09aa, MARVELL_PHY_ID_MASK }, { }, }; MODULE_DEVICE_TABLE(mdio, mv3310_tbl); diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 135296508a7e..6425ce04d3f9 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev) data->regulator = devm_regulator_get(&pdev->dev, "phy"); if (IS_ERR(data->regulator)) { - if (PTR_ERR(data->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(data->regulator) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_out_free_mdiobus; + } dev_info(&pdev->dev, "no regulator found\n"); data->regulator = NULL; diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index bfd3090fb055..07c6048200c6 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata) } ret = xgene_enet_ecc_init(pdata); - if (ret) + if (ret) { + if (pdata->dev->of_node) + clk_disable_unprepare(pdata->clk); return ret; + } xgene_gmac_reset(pdata); return 0; @@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev) return ret; mdio_bus = mdiobus_alloc(); - if (!mdio_bus) - return -ENOMEM; + if (!mdio_bus) { + ret = -ENOMEM; + goto out_clk; + } mdio_bus->name = "APM X-Gene MDIO bus"; @@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct 
platform_device *pdev) mdio_bus->phy_mask = ~0; ret = mdiobus_register(mdio_bus); if (ret) - goto out; + goto out_mdiobus; acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, acpi_register_phy, NULL, mdio_bus, NULL); @@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev) } if (ret) - goto out; + goto out_mdiobus; pdata->mdio_bus = mdio_bus; xgene_mdio_status = true; return 0; -out: +out_mdiobus: mdiobus_free(mdio_bus); +out_clk: + if (dev->of_node) + clk_disable_unprepare(pdata->clk); + return ret; } diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 1ea69b7585d9..7ddb709f69fc 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -25,27 +25,53 @@ static int meson_gxl_config_init(struct phy_device *phydev) { + int ret; + /* Enable Analog and DSP register Bank access by */ - phy_write(phydev, 0x14, 0x0000); - phy_write(phydev, 0x14, 0x0400); - phy_write(phydev, 0x14, 0x0000); - phy_write(phydev, 0x14, 0x0400); + ret = phy_write(phydev, 0x14, 0x0000); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x0400); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x0000); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x0400); + if (ret) + return ret; /* Write Analog register 23 */ - phy_write(phydev, 0x17, 0x8E0D); - phy_write(phydev, 0x14, 0x4417); + ret = phy_write(phydev, 0x17, 0x8E0D); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x4417); + if (ret) + return ret; /* Enable fractional PLL */ - phy_write(phydev, 0x17, 0x0005); - phy_write(phydev, 0x14, 0x5C1B); + ret = phy_write(phydev, 0x17, 0x0005); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x5C1B); + if (ret) + return ret; /* Program fraction FR_PLL_DIV1 */ - phy_write(phydev, 0x17, 0x029A); - phy_write(phydev, 0x14, 0x5C1D); + ret = phy_write(phydev, 0x17, 0x029A); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x5C1D); + if (ret) + return ret; /* Program fraction FR_PLL_DIV1 */ - phy_write(phydev, 0x17, 0xAAAA); - phy_write(phydev, 0x14, 0x5C1C); + ret = phy_write(phydev, 0x17, 0xAAAA); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x5C1C); + if (ret) + return ret; return 0; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fdb43dd9b5cd..6c45ff650ec7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -622,6 +622,7 @@ static int ksz9031_read_status(struct phy_device *phydev) phydev->link = 0; if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) phydev->drv->config_intr(phydev); + return genphy_config_aneg(phydev); } return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 2b1e67bc1e73..dba6d17ad885 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -614,6 +614,91 @@ static void phy_error(struct phy_device *phydev) phy_trigger_machine(phydev, false); } +/** + * phy_disable_interrupts - Disable the PHY interrupts from the PHY side + * @phydev: target phy_device struct + */ +static int phy_disable_interrupts(struct phy_device *phydev) +{ + int err; + + /* Disable PHY interrupts */ + err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); + if (err) + goto phy_err; + + /* Clear the interrupt */ + err = phy_clear_interrupt(phydev); + if (err) + goto phy_err; + + return 0; + +phy_err: + phy_error(phydev); + + return err; +} + +/** + * phy_change - Called by the phy_interrupt to handle PHY changes + * @phydev: phy_device struct that interrupted + */ +static irqreturn_t phy_change(struct phy_device *phydev) 
+{ + if (phy_interrupt_is_valid(phydev)) { + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; + + if (phy_disable_interrupts(phydev)) + goto phy_err; + } + + mutex_lock(&phydev->lock); + if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) + phydev->state = PHY_CHANGELINK; + mutex_unlock(&phydev->lock); + + if (phy_interrupt_is_valid(phydev)) { + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); + + /* Reenable interrupts */ + if (PHY_HALTED != phydev->state && + phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) + goto irq_enable_err; + } + + /* reschedule state queue work to run as soon as possible */ + phy_trigger_machine(phydev, true); + return IRQ_HANDLED; + +ignore: + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); + return IRQ_NONE; + +irq_enable_err: + disable_irq(phydev->irq); + atomic_inc(&phydev->irq_disable); +phy_err: + phy_error(phydev); + return IRQ_NONE; +} + +/** + * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes + * @work: work_struct that describes the work to be done + */ +void phy_change_work(struct work_struct *work) +{ + struct phy_device *phydev = + container_of(work, struct phy_device, phy_queue); + + phy_change(phydev); +} + /** * phy_interrupt - PHY interrupt handler * @irq: interrupt line @@ -632,9 +717,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) disable_irq_nosync(irq); atomic_inc(&phydev->irq_disable); - phy_change(phydev); - - return IRQ_HANDLED; + return phy_change(phydev); } /** @@ -651,32 +734,6 @@ static int phy_enable_interrupts(struct phy_device *phydev) return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); } -/** - * phy_disable_interrupts - Disable the PHY interrupts from the PHY side - * @phydev: target phy_device struct - */ -static int phy_disable_interrupts(struct phy_device *phydev) -{ - int err; - - /* Disable PHY interrupts */ - err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); - if (err) - goto phy_err; - - /* Clear the interrupt */ - err = phy_clear_interrupt(phydev); - if (err) - goto phy_err; - - return 0; - -phy_err: - phy_error(phydev); - - return err; -} - /** * phy_start_interrupts - request and enable interrupts for a PHY device * @phydev: target phy_device struct @@ -727,64 +784,6 @@ int phy_stop_interrupts(struct phy_device *phydev) } EXPORT_SYMBOL(phy_stop_interrupts); -/** - * phy_change - Called by the phy_interrupt to handle PHY changes - * @phydev: phy_device struct that interrupted - */ -void phy_change(struct phy_device *phydev) -{ - if (phy_interrupt_is_valid(phydev)) { - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - goto ignore; - - if (phy_disable_interrupts(phydev)) - goto phy_err; - } - - mutex_lock(&phydev->lock); - if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) - phydev->state = PHY_CHANGELINK; - mutex_unlock(&phydev->lock); - - if (phy_interrupt_is_valid(phydev)) { - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); - - /* Reenable interrupts */ - if (PHY_HALTED != phydev->state && - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) - goto irq_enable_err; - } - - /* reschedule state queue work to run as soon as possible */ - phy_trigger_machine(phydev, true); - return; - -ignore: - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); - return; - -irq_enable_err: - disable_irq(phydev->irq); - atomic_inc(&phydev->irq_disable); -phy_err: - phy_error(phydev); -} - -/** - * phy_change_work - Scheduled by the 
phy_mac_interrupt to handle PHY changes - * @work: work_struct that describes the work to be done - */ -void phy_change_work(struct work_struct *work) -{ - struct phy_device *phydev = - container_of(work, struct phy_device, phy_queue); - - phy_change(phydev); -} - /** * phy_stop - Bring down the PHY link, and stop checking the status * @phydev: target phy_device struct @@ -828,7 +827,6 @@ EXPORT_SYMBOL(phy_stop); */ void phy_start(struct phy_device *phydev) { - bool do_resume = false; int err = 0; mutex_lock(&phydev->lock); @@ -841,25 +839,23 @@ void phy_start(struct phy_device *phydev) phydev->state = PHY_UP; break; case PHY_HALTED: + /* if phy was suspended, bring the physical link up again */ + __phy_resume(phydev); + /* make sure interrupts are re-enabled for the PHY */ - if (phydev->irq != PHY_POLL) { + if (phy_interrupt_is_valid(phydev)) { err = phy_enable_interrupts(phydev); if (err < 0) break; } phydev->state = PHY_RESUMING; - do_resume = true; break; default: break; } mutex_unlock(&phydev->lock); - /* if phy was suspended, bring the physical link up again */ - if (do_resume) - phy_resume(phydev); - phy_trigger_machine(phydev, true); } EXPORT_SYMBOL(phy_start); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 67f25ac29025..a1e7ea4d4b16 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -999,10 +999,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, "attached_dev"); if (!err) { - err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, - "phydev"); - if (err) - goto error; + err = sysfs_create_link_nowarn(&dev->dev.kobj, + &phydev->mdio.dev.kobj, + "phydev"); + if (err) { + dev_err(&dev->dev, "could not add device link to %s err %d\n", + kobject_name(&phydev->mdio.dev.kobj), + err); + /* non-fatal - some net drivers can use one netdevice + * with more then one phy + */ + } phydev->sysfs_links = true; } @@ -1152,11 +1159,13 @@ int phy_suspend(struct phy_device *phydev) } EXPORT_SYMBOL(phy_suspend); -int phy_resume(struct phy_device *phydev) +int __phy_resume(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; + WARN_ON(!mutex_is_locked(&phydev->lock)); + if (phydev->drv && phydrv->resume) ret = phydrv->resume(phydev); @@ -1167,6 +1176,18 @@ int phy_resume(struct phy_device *phydev) return ret; } +EXPORT_SYMBOL(__phy_resume); + +int phy_resume(struct phy_device *phydev) +{ + int ret; + + mutex_lock(&phydev->lock); + ret = __phy_resume(phydev); + mutex_unlock(&phydev->lock); + + return ret; +} EXPORT_SYMBOL(phy_resume); int phy_loopback(struct phy_device *phydev, bool enable) @@ -1639,13 +1660,9 @@ int genphy_resume(struct phy_device *phydev) { int value; - mutex_lock(&phydev->lock); - value = phy_read(phydev, MII_BMCR); phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); - mutex_unlock(&phydev->lock); - return 0; } EXPORT_SYMBOL(genphy_resume); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index bcb4755bcd95..1b2fe74a44ea 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -525,6 +525,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, pl->link_config.pause = MLO_PAUSE_AN; pl->link_config.speed = SPEED_UNKNOWN; pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->link_config.an_enabled = true; pl->ops = ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); @@ -771,6 +772,7 @@ void 
phylink_stop(struct phylink *pl) sfp_upstream_stop(pl->sfp_bus); set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + queue_work(system_power_efficient_wq, &pl->resolve); flush_work(&pl->resolve); } EXPORT_SYMBOL_GPL(phylink_stop); @@ -948,6 +950,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, mutex_lock(&pl->state_mutex); /* Configure the MAC to match the new settings */ linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.interface = config.interface; pl->link_config.speed = our_kset.base.speed; pl->link_config.duplex = our_kset.base.duplex; pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; @@ -1426,9 +1429,8 @@ static void phylink_sfp_link_down(void *upstream) WARN_ON(!lockdep_rtnl_is_held()); set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + queue_work(system_power_efficient_wq, &pl->resolve); flush_work(&pl->resolve); - - netif_carrier_off(pl->netdev); } static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 5cb5384697ea..7ae815bee52d 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -359,7 +359,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream); void sfp_unregister_upstream(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->sfp) + sfp_unregister_bus(bus); bus->upstream = NULL; bus->netdev = NULL; rtnl_unlock(); @@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket); void sfp_unregister_socket(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->netdev) + sfp_unregister_bus(bus); bus->sfp_dev = NULL; bus->sfp = NULL; bus->socket_ops = NULL; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index baee371bf767..3165bc7b8e1e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -318,12 +318,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp) msleep(T_PHY_RESET_MS); phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); - if (IS_ERR(phy)) { - dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + if (phy == ERR_PTR(-ENODEV)) { + dev_info(sfp->dev, "no PHY detected\n"); return; } - if (!phy) { - dev_info(sfp->dev, "no PHY detected\n"); + if (IS_ERR(phy)) { + dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); return; } @@ -358,7 +358,7 @@ static void sfp_sm_link_check_los(struct sfp *sfp) * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume * the same as SFP_OPTIONS_LOS_NORMAL set. */ - if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) + if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED)) los ^= SFP_F_LOS; if (los) @@ -583,7 +583,8 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) if (event == SFP_E_TX_FAULT) sfp_sm_fault(sfp, true); else if (event == - (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + (sfp->id.ext.options & + cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) ? SFP_E_LOS_HIGH : SFP_E_LOS_LOW)) sfp_sm_link_up(sfp); break; @@ -593,7 +594,8 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) sfp_sm_link_down(sfp); sfp_sm_fault(sfp, true); } else if (event == - (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + (sfp->id.ext.options & + cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) ? 
SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) { sfp_sm_link_down(sfp); sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); @@ -665,20 +667,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); len -= first; - ret = sfp->read(sfp, false, first, data, len); + ret = sfp_read(sfp, false, first, data, len); if (ret < 0) return ret; first += len; data += len; } - if (first >= ETH_MODULE_SFF_8079_LEN && - first < ETH_MODULE_SFF_8472_LEN) { + if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) { len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); len -= first; first -= ETH_MODULE_SFF_8079_LEN; - ret = sfp->read(sfp, true, first, data, len); + ret = sfp_read(sfp, true, first, data, len); if (ret < 0) return ret; } diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig index 1373c6d7278d..282aec4860eb 100644 --- a/drivers/net/ppp/Kconfig +++ b/drivers/net/ppp/Kconfig @@ -149,6 +149,23 @@ config PPPOL2TP tunnels. L2TP is replacing PPTP for VPN uses. if TTY +config PPPOLAC + tristate "PPP on L2TP Access Concentrator" + depends on PPP && INET + help + L2TP (RFC 2661) is a tunneling protocol widely used in virtual private + networks. This driver handles L2TP data packets between a UDP socket + and a PPP channel, but only permits one session per socket. Thus it is + fairly simple and suited for clients. + +config PPPOPNS + tristate "PPP on PPTP Network Server" + depends on PPP && INET + help + PPTP (RFC 2637) is a tunneling protocol widely used in virtual private + networks. This driver handles PPTP data packets between a RAW socket + and a PPP channel. It is fairly simple and easy to use. + config PPP_ASYNC tristate "PPP support for async serial ports" depends on PPP diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile index 16c457d6b324..879e1663dd48 100644 --- a/drivers/net/ppp/Makefile +++ b/drivers/net/ppp/Makefile @@ -12,3 +12,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o obj-$(CONFIG_PPPOE) += pppox.o pppoe.o obj-$(CONFIG_PPPOL2TP) += pppox.o obj-$(CONFIG_PPTP) += pppox.o pptp.o +obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o +obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e365866600ba..34b24d7e1e2f 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -256,7 +256,7 @@ struct ppp_net { /* Prototypes. 
*/ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsigned int cmd, unsigned long arg); -static void ppp_xmit_process(struct ppp *ppp); +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb); static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); static void ppp_push(struct ppp *ppp); static void ppp_channel_push(struct channel *pch); @@ -512,13 +512,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, goto out; } - skb_queue_tail(&pf->xq, skb); - switch (pf->kind) { case INTERFACE: - ppp_xmit_process(PF_TO_PPP(pf)); + ppp_xmit_process(PF_TO_PPP(pf), skb); break; case CHANNEL: + skb_queue_tail(&pf->xq, skb); ppp_channel_push(PF_TO_CHANNEL(pf)); break; } @@ -959,6 +958,7 @@ static __net_exit void ppp_exit_net(struct net *net) unregister_netdevice_many(&list); rtnl_unlock(); + mutex_destroy(&pn->all_ppp_mutex); idr_destroy(&pn->units_idr); } @@ -1002,17 +1002,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) if (!ifname_is_set) snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); + mutex_unlock(&pn->all_ppp_mutex); + ret = register_netdevice(ppp->dev); if (ret < 0) goto err_unit; atomic_inc(&ppp_unit_count); - mutex_unlock(&pn->all_ppp_mutex); - return 0; err_unit: + mutex_lock(&pn->all_ppp_mutex); unit_put(&pn->units_idr, ppp->file.index); err: mutex_unlock(&pn->all_ppp_mutex); @@ -1262,8 +1263,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) put_unaligned_be16(proto, pp); skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); - skb_queue_tail(&ppp->file.xq, skb); - ppp_xmit_process(ppp); + ppp_xmit_process(ppp, skb); + return NETDEV_TX_OK; outf: @@ -1415,13 +1416,14 @@ static void ppp_setup(struct net_device *dev) */ /* Called to do any work queued up on the transmit side that can now be done */ -static void __ppp_xmit_process(struct ppp *ppp) +static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) { - struct sk_buff *skb; - ppp_xmit_lock(ppp); if (!ppp->closing) { ppp_push(ppp); + + if (skb) + skb_queue_tail(&ppp->file.xq, skb); while (!ppp->xmit_pending && (skb = skb_dequeue(&ppp->file.xq))) ppp_send_frame(ppp, skb); @@ -1435,7 +1437,7 @@ static void __ppp_xmit_process(struct ppp *ppp) ppp_xmit_unlock(ppp); } -static void ppp_xmit_process(struct ppp *ppp) +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) { local_bh_disable(); @@ -1443,7 +1445,7 @@ static void ppp_xmit_process(struct ppp *ppp) goto err; (*this_cpu_ptr(ppp->xmit_recursion))++; - __ppp_xmit_process(ppp); + __ppp_xmit_process(ppp, skb); (*this_cpu_ptr(ppp->xmit_recursion))--; local_bh_enable(); @@ -1453,6 +1455,8 @@ static void ppp_xmit_process(struct ppp *ppp) err: local_bh_enable(); + kfree_skb(skb); + if (net_ratelimit()) netdev_err(ppp->dev, "recursion detected\n"); } @@ -1937,7 +1941,7 @@ static void __ppp_channel_push(struct channel *pch) if (skb_queue_empty(&pch->file.xq)) { ppp = pch->ppp; if (ppp) - __ppp_xmit_process(ppp); + __ppp_xmit_process(ppp, NULL); } } @@ -3156,6 +3160,15 @@ ppp_connect_channel(struct channel *pch, int unit) goto outl; ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --git a/drivers/net/ppp/pppoe.c 
b/drivers/net/ppp/pppoe.c index 4e1da1645b15..5aa59f41bf8c 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, struct pppoe_hdr *ph; struct net_device *dev; char *start; + int hlen; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { @@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, if (total_len > (dev->mtu + dev->hard_header_len)) goto end; - - skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, - 0, GFP_KERNEL); + hlen = LL_RESERVED_SPACE(dev); + skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len + + dev->needed_tailroom, 0, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto end; } /* Reserve space for headers. */ - skb_reserve(skb, dev->hard_header_len); + skb_reserve(skb, hlen); skb_reset_network_header(skb); skb->dev = dev; @@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) /* Copy the data if there is no space for the header or if it's * read-only. */ - if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) + if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph))) goto abort; __skb_push(skb, sizeof(*ph)); diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c new file mode 100644 index 000000000000..95cc6be54ad5 --- /dev/null +++ b/drivers/net/ppp/pppolac.c @@ -0,0 +1,450 @@ +/* drivers/net/pppolac.c + * + * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661) + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This driver handles L2TP data packets between a UDP socket and a PPP channel. + * The socket must keep connected, and only one session per socket is permitted. + * Sequencing of outgoing packets is controlled by LNS. Incoming packets with + * sequences are reordered within a sliding window of one second. Currently + * reordering only happens when a packet is received. It is done for simplicity + * since no additional locks or threads are required. This driver only works on + * IPv4 due to the lack of UDP encapsulation support in IPv6. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define L2TP_CONTROL_BIT 0x80 +#define L2TP_LENGTH_BIT 0x40 +#define L2TP_SEQUENCE_BIT 0x08 +#define L2TP_OFFSET_BIT 0x02 +#define L2TP_VERSION 0x02 +#define L2TP_VERSION_MASK 0x0F + +#define PPP_ADDR 0xFF +#define PPP_CTRL 0x03 + +union unaligned { + __u32 u32; +} __attribute__((packed)); + +static inline union unaligned *unaligned(void *ptr) +{ + return (union unaligned *)ptr; +} + +struct meta { + __u32 sequence; + __u32 timestamp; +}; + +static inline struct meta *skb_meta(struct sk_buff *skb) +{ + return (struct meta *)skb->cb; +} + +/******************************************************************************/ + +static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *)sk_udp->sk_user_data; + struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac; + struct meta *meta = skb_meta(skb); + __u32 now = jiffies; + __u8 bits; + __u8 *ptr; + + /* Drop the packet if L2TP header is missing. */ + if (skb->len < sizeof(struct udphdr) + 6) + goto drop; + + /* Put it back if it is a control packet. */ + if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT) + return opt->backlog_rcv(sk_udp, skb); + + /* Skip UDP header. */ + skb_pull(skb, sizeof(struct udphdr)); + + /* Check the version. */ + if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION) + goto drop; + bits = skb->data[0]; + ptr = &skb->data[2]; + + /* Check the length if it is present. */ + if (bits & L2TP_LENGTH_BIT) { + if ((ptr[0] << 8 | ptr[1]) != skb->len) + goto drop; + ptr += 2; + } + + /* Skip all fields including optional ones. */ + if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) + + (bits & L2TP_LENGTH_BIT ? 2 : 0) + + (bits & L2TP_OFFSET_BIT ? 2 : 0))) + goto drop; + + /* Skip the offset padding if it is present. */ + if (bits & L2TP_OFFSET_BIT && + !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1])) + goto drop; + + /* Check the tunnel and the session. */ + if (unaligned(ptr)->u32 != opt->local) + goto drop; + + /* Check the sequence if it is present. */ + if (bits & L2TP_SEQUENCE_BIT) { + meta->sequence = ptr[4] << 8 | ptr[5]; + if ((__s16)(meta->sequence - opt->recv_sequence) < 0) + goto drop; + } + + /* Skip PPP address and control if they are present. */ + if (skb->len >= 2 && skb->data[0] == PPP_ADDR && + skb->data[1] == PPP_CTRL) + skb_pull(skb, 2); + + /* Fix PPP protocol if it is compressed. */ + if (skb->len >= 1 && skb->data[0] & 1) + *(u8 *)skb_push(skb, 1) = 0; + + /* Drop the packet if PPP protocol is missing. */ + if (skb->len < 2) + goto drop; + + /* Perform reordering if sequencing is enabled. */ + atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT); + if (bits & L2TP_SEQUENCE_BIT) { + struct sk_buff *skb1; + + /* Insert the packet into receive queue in order. */ + skb_set_owner_r(skb, sk); + skb_queue_walk(&sk->sk_receive_queue, skb1) { + struct meta *meta1 = skb_meta(skb1); + __s16 order = meta->sequence - meta1->sequence; + if (order == 0) + goto drop; + if (order < 0) { + meta->timestamp = meta1->timestamp; + skb_insert(skb1, skb, &sk->sk_receive_queue); + skb = NULL; + break; + } + } + if (skb) { + meta->timestamp = now; + skb_queue_tail(&sk->sk_receive_queue, skb); + } + + /* Remove packets from receive queue as long as + * 1. the receive buffer is full, + * 2. they are queued longer than one second, or + * 3. there are no missing packets before them. 
*/ + skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) { + meta = skb_meta(skb); + if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && + now - meta->timestamp < HZ && + meta->sequence != opt->recv_sequence) + break; + skb_unlink(skb, &sk->sk_receive_queue); + opt->recv_sequence = (__u16)(meta->sequence + 1); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + } + return NET_RX_SUCCESS; + } + + /* Flush receive queue if sequencing is disabled. */ + skb_queue_purge(&sk->sk_receive_queue); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + return NET_RX_SUCCESS; +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb) +{ + sock_hold(sk_udp); + sk_receive_skb(sk_udp, skb, 0); + return 0; +} + +static struct sk_buff_head delivery_queue; + +static void pppolac_xmit_core(struct work_struct *delivery_work) +{ + mm_segment_t old_fs = get_fs(); + struct sk_buff *skb; + + set_fs(KERNEL_DS); + while ((skb = skb_dequeue(&delivery_queue))) { + struct sock *sk_udp = skb->sk; + struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len}; + struct msghdr msg = { + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, + skb->len); + sk_udp->sk_prot->sendmsg(sk_udp, &msg, skb->len); + kfree_skb(skb); + } + set_fs(old_fs); +} + +static DECLARE_WORK(delivery_work, pppolac_xmit_core); + +static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk_udp = (struct sock *)chan->private; + struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac; + + /* Install PPP address and control. */ + skb_push(skb, 2); + skb->data[0] = PPP_ADDR; + skb->data[1] = PPP_CTRL; + + /* Install L2TP header. */ + if (atomic_read(&opt->sequencing)) { + skb_push(skb, 10); + skb->data[0] = L2TP_SEQUENCE_BIT; + skb->data[6] = opt->xmit_sequence >> 8; + skb->data[7] = opt->xmit_sequence; + skb->data[8] = 0; + skb->data[9] = 0; + opt->xmit_sequence++; + } else { + skb_push(skb, 6); + skb->data[0] = 0; + } + skb->data[1] = L2TP_VERSION; + unaligned(&skb->data[2])->u32 = opt->remote; + + /* Now send the packet via the delivery queue. */ + skb_set_owner_w(skb, sk_udp); + skb_queue_tail(&delivery_queue, skb); + schedule_work(&delivery_work); + return 1; +} + +/******************************************************************************/ + +static struct ppp_channel_ops pppolac_channel_ops = { + .start_xmit = pppolac_xmit, +}; + +static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr, + int addrlen, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr; + struct socket *sock_udp = NULL; + struct sock *sk_udp; + int error; + + if (addrlen != sizeof(struct sockaddr_pppolac) || + !addr->local.tunnel || !addr->local.session || + !addr->remote.tunnel || !addr->remote.session) { + return -EINVAL; + } + + lock_sock(sk); + error = -EALREADY; + if (sk->sk_state != PPPOX_NONE) + goto out; + + sock_udp = sockfd_lookup(addr->udp_socket, &error); + if (!sock_udp) + goto out; + sk_udp = sock_udp->sk; + lock_sock(sk_udp); + + /* Remove this check when IPv6 supports UDP encapsulation. 
*/ + error = -EAFNOSUPPORT; + if (sk_udp->sk_family != AF_INET) + goto out; + error = -EPROTONOSUPPORT; + if (sk_udp->sk_protocol != IPPROTO_UDP) + goto out; + error = -EDESTADDRREQ; + if (sk_udp->sk_state != TCP_ESTABLISHED) + goto out; + error = -EBUSY; + if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data) + goto out; + if (!sk_udp->sk_bound_dev_if) { + struct dst_entry *dst = sk_dst_get(sk_udp); + error = -ENODEV; + if (!dst) + goto out; + sk_udp->sk_bound_dev_if = dst->dev->ifindex; + dst_release(dst); + } + + po->chan.hdrlen = 12; + po->chan.private = sk_udp; + po->chan.ops = &pppolac_channel_ops; + po->chan.mtu = PPP_MRU - 80; + po->proto.lac.local = unaligned(&addr->local)->u32; + po->proto.lac.remote = unaligned(&addr->remote)->u32; + atomic_set(&po->proto.lac.sequencing, 1); + po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv; + + error = ppp_register_channel(&po->chan); + if (error) + goto out; + + sk->sk_state = PPPOX_CONNECTED; + udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP; + udp_sk(sk_udp)->encap_rcv = pppolac_recv; + sk_udp->sk_backlog_rcv = pppolac_recv_core; + sk_udp->sk_user_data = sk; +out: + if (sock_udp) { + release_sock(sk_udp); + if (error) + sockfd_put(sock_udp); + } + release_sock(sk); + return error; +} + +static int pppolac_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + if (sk->sk_state != PPPOX_NONE) { + struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private; + lock_sock(sk_udp); + skb_queue_purge(&sk->sk_receive_queue); + pppox_unbind_sock(sk); + udp_sk(sk_udp)->encap_type = 0; + udp_sk(sk_udp)->encap_rcv = NULL; + sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv; + sk_udp->sk_user_data = NULL; + release_sock(sk_udp); + sockfd_put(sk_udp->sk_socket); + } + + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + sock_put(sk); + return 0; +} + +/******************************************************************************/ + +static struct proto pppolac_proto = { + .name = "PPPOLAC", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static struct proto_ops pppolac_proto_ops = { + .family = PF_PPPOX, + .owner = THIS_MODULE, + .release = pppolac_release, + .bind = sock_no_bind, + .connect = pppolac_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = sock_no_getname, + .poll = sock_no_poll, + .ioctl = pppox_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .mmap = sock_no_mmap, +}; + +static int pppolac_create(struct net *net, struct socket *sock, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sock->state = SS_UNCONNECTED; + sock->ops = &pppolac_proto_ops; + sk->sk_protocol = PX_PROTO_OLAC; + sk->sk_state = PPPOX_NONE; + return 0; +} + +/******************************************************************************/ + +static struct pppox_proto pppolac_pppox_proto = { + .create = pppolac_create, + .owner = THIS_MODULE, +}; + +static int __init pppolac_init(void) +{ + int error; + + error = proto_register(&pppolac_proto, 0); + if (error) + return error; + + error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto); + if (error) + proto_unregister(&pppolac_proto); + else + 
skb_queue_head_init(&delivery_queue); + return error; +} + +static void __exit pppolac_exit(void) +{ + unregister_pppox_proto(PX_PROTO_OLAC); + proto_unregister(&pppolac_proto); +} + +module_init(pppolac_init); +module_exit(pppolac_exit); + +MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)"); +MODULE_AUTHOR("Chia-chi Yeh "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c new file mode 100644 index 000000000000..a01a07152c86 --- /dev/null +++ b/drivers/net/ppp/pppopns.c @@ -0,0 +1,429 @@ +/* drivers/net/pppopns.c + * + * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637) + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This driver handles PPTP data packets between a RAW socket and a PPP channel. + * The socket is created in the kernel space and connected to the same address + * of the control socket. Outgoing packets are always sent with sequences but + * without acknowledgements. Incoming packets with sequences are reordered + * within a sliding window of one second. Currently reordering only happens when + * a packet is received. It is done for simplicity since no additional locks or + * threads are required. This driver should work on both IPv4 and IPv6. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GRE_HEADER_SIZE 8 + +#define PPTP_GRE_BITS htons(0x2001) +#define PPTP_GRE_BITS_MASK htons(0xEF7F) +#define PPTP_GRE_SEQ_BIT htons(0x1000) +#define PPTP_GRE_ACK_BIT htons(0x0080) +#define PPTP_GRE_TYPE htons(0x880B) + +#define PPP_ADDR 0xFF +#define PPP_CTRL 0x03 + +struct header { + __u16 bits; + __u16 type; + __u16 length; + __u16 call; + __u32 sequence; +} __attribute__((packed)); + +struct meta { + __u32 sequence; + __u32 timestamp; +}; + +static inline struct meta *skb_meta(struct sk_buff *skb) +{ + return (struct meta *)skb->cb; +} + +/******************************************************************************/ + +static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *)sk_raw->sk_user_data; + struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns; + struct meta *meta = skb_meta(skb); + __u32 now = jiffies; + struct header *hdr; + + /* Skip transport header */ + skb_pull(skb, skb_transport_header(skb) - skb->data); + + /* Drop the packet if GRE header is missing. */ + if (skb->len < GRE_HEADER_SIZE) + goto drop; + hdr = (struct header *)skb->data; + + /* Check the header. */ + if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local || + (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS) + goto drop; + + /* Skip all fields including optional ones. */ + if (!skb_pull(skb, GRE_HEADER_SIZE + + (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) + + (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0))) + goto drop; + + /* Check the length. */ + if (skb->len != ntohs(hdr->length)) + goto drop; + + /* Check the sequence if it is present. 
*/ + if (hdr->bits & PPTP_GRE_SEQ_BIT) { + meta->sequence = ntohl(hdr->sequence); + if ((__s32)(meta->sequence - opt->recv_sequence) < 0) + goto drop; + } + + /* Skip PPP address and control if they are present. */ + if (skb->len >= 2 && skb->data[0] == PPP_ADDR && + skb->data[1] == PPP_CTRL) + skb_pull(skb, 2); + + /* Fix PPP protocol if it is compressed. */ + if (skb->len >= 1 && skb->data[0] & 1) + *(u8 *)skb_push(skb, 1) = 0; + + /* Drop the packet if PPP protocol is missing. */ + if (skb->len < 2) + goto drop; + + /* Perform reordering if sequencing is enabled. */ + if (hdr->bits & PPTP_GRE_SEQ_BIT) { + struct sk_buff *skb1; + + /* Insert the packet into receive queue in order. */ + skb_set_owner_r(skb, sk); + skb_queue_walk(&sk->sk_receive_queue, skb1) { + struct meta *meta1 = skb_meta(skb1); + __s32 order = meta->sequence - meta1->sequence; + if (order == 0) + goto drop; + if (order < 0) { + meta->timestamp = meta1->timestamp; + skb_insert(skb1, skb, &sk->sk_receive_queue); + skb = NULL; + break; + } + } + if (skb) { + meta->timestamp = now; + skb_queue_tail(&sk->sk_receive_queue, skb); + } + + /* Remove packets from receive queue as long as + * 1. the receive buffer is full, + * 2. they are queued longer than one second, or + * 3. there are no missing packets before them. */ + skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) { + meta = skb_meta(skb); + if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && + now - meta->timestamp < HZ && + meta->sequence != opt->recv_sequence) + break; + skb_unlink(skb, &sk->sk_receive_queue); + opt->recv_sequence = meta->sequence + 1; + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + } + return NET_RX_SUCCESS; + } + + /* Flush receive queue if sequencing is disabled. */ + skb_queue_purge(&sk->sk_receive_queue); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + return NET_RX_SUCCESS; +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static void pppopns_recv(struct sock *sk_raw) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) { + sock_hold(sk_raw); + sk_receive_skb(sk_raw, skb, 0); + } +} + +static struct sk_buff_head delivery_queue; + +static void pppopns_xmit_core(struct work_struct *delivery_work) +{ + mm_segment_t old_fs = get_fs(); + struct sk_buff *skb; + + set_fs(KERNEL_DS); + while ((skb = skb_dequeue(&delivery_queue))) { + struct sock *sk_raw = skb->sk; + struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len}; + struct msghdr msg = { + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, + skb->len); + sk_raw->sk_prot->sendmsg(sk_raw, &msg, skb->len); + kfree_skb(skb); + } + set_fs(old_fs); +} + +static DECLARE_WORK(delivery_work, pppopns_xmit_core); + +static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk_raw = (struct sock *)chan->private; + struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns; + struct header *hdr; + __u16 length; + + /* Install PPP address and control. */ + skb_push(skb, 2); + skb->data[0] = PPP_ADDR; + skb->data[1] = PPP_CTRL; + length = skb->len; + + /* Install PPTP GRE header. */ + hdr = (struct header *)skb_push(skb, 12); + hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT; + hdr->type = PPTP_GRE_TYPE; + hdr->length = htons(length); + hdr->call = opt->remote; + hdr->sequence = htonl(opt->xmit_sequence); + opt->xmit_sequence++; + + /* Now send the packet via the delivery queue. 
*/ + skb_set_owner_w(skb, sk_raw); + skb_queue_tail(&delivery_queue, skb); + schedule_work(&delivery_work); + return 1; +} + +/******************************************************************************/ + +static struct ppp_channel_ops pppopns_channel_ops = { + .start_xmit = pppopns_xmit, +}; + +static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr, + int addrlen, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr; + struct sockaddr_storage ss; + struct socket *sock_tcp = NULL; + struct socket *sock_raw = NULL; + struct sock *sk_tcp; + struct sock *sk_raw; + int error; + + if (addrlen != sizeof(struct sockaddr_pppopns)) + return -EINVAL; + + lock_sock(sk); + error = -EALREADY; + if (sk->sk_state != PPPOX_NONE) + goto out; + + sock_tcp = sockfd_lookup(addr->tcp_socket, &error); + if (!sock_tcp) + goto out; + sk_tcp = sock_tcp->sk; + error = -EPROTONOSUPPORT; + if (sk_tcp->sk_protocol != IPPROTO_TCP) + goto out; + addrlen = sizeof(struct sockaddr_storage); + error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen); + if (error) + goto out; + if (!sk_tcp->sk_bound_dev_if) { + struct dst_entry *dst = sk_dst_get(sk_tcp); + error = -ENODEV; + if (!dst) + goto out; + sk_tcp->sk_bound_dev_if = dst->dev->ifindex; + dst_release(dst); + } + + error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw); + if (error) + goto out; + sk_raw = sock_raw->sk; + sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if; + error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0); + if (error) + goto out; + + po->chan.hdrlen = 14; + po->chan.private = sk_raw; + po->chan.ops = &pppopns_channel_ops; + po->chan.mtu = PPP_MRU - 80; + po->proto.pns.local = addr->local; + po->proto.pns.remote = addr->remote; + po->proto.pns.data_ready = sk_raw->sk_data_ready; + po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv; + + error = ppp_register_channel(&po->chan); + if (error) + goto out; + + sk->sk_state = PPPOX_CONNECTED; + lock_sock(sk_raw); + sk_raw->sk_data_ready = pppopns_recv; + sk_raw->sk_backlog_rcv = pppopns_recv_core; + sk_raw->sk_user_data = sk; + release_sock(sk_raw); +out: + if (sock_tcp) + sockfd_put(sock_tcp); + if (error && sock_raw) + sock_release(sock_raw); + release_sock(sk); + return error; +} + +static int pppopns_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + if (sk->sk_state != PPPOX_NONE) { + struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private; + lock_sock(sk_raw); + skb_queue_purge(&sk->sk_receive_queue); + pppox_unbind_sock(sk); + sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready; + sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv; + sk_raw->sk_user_data = NULL; + release_sock(sk_raw); + sock_release(sk_raw->sk_socket); + } + + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + sock_put(sk); + return 0; +} + +/******************************************************************************/ + +static struct proto pppopns_proto = { + .name = "PPPOPNS", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static struct proto_ops pppopns_proto_ops = { + .family = PF_PPPOX, + .owner = THIS_MODULE, + .release = pppopns_release, + .bind = sock_no_bind, + .connect = pppopns_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = 
sock_no_getname, + .poll = sock_no_poll, + .ioctl = pppox_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .mmap = sock_no_mmap, +}; + +static int pppopns_create(struct net *net, struct socket *sock, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sock->state = SS_UNCONNECTED; + sock->ops = &pppopns_proto_ops; + sk->sk_protocol = PX_PROTO_OPNS; + sk->sk_state = PPPOX_NONE; + return 0; +} + +/******************************************************************************/ + +static struct pppox_proto pppopns_pppox_proto = { + .create = pppopns_create, + .owner = THIS_MODULE, +}; + +static int __init pppopns_init(void) +{ + int error; + + error = proto_register(&pppopns_proto, 0); + if (error) + return error; + + error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto); + if (error) + proto_unregister(&pppopns_proto); + else + skb_queue_head_init(&delivery_queue); + return error; +} + +static void __exit pppopns_exit(void) +{ + unregister_pppox_proto(PX_PROTO_OPNS); + proto_unregister(&pppopns_proto); +} + +module_init(pppopns_init); +module_exit(pppopns_exit); + +MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)"); +MODULE_AUTHOR("Chia-chi Yeh "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 6dde9a0cfe76..9b70a3af678e 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -464,7 +464,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; - ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 5782733959f0..f4e93f5fc204 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) if(x < 0 || x > comp->rslot_limit) goto bad; + /* Check if the cstate is initialized */ + if (!comp->rstate[x].initialized) + goto bad; + comp->flags &=~ SLF_TOSS; comp->recv_current = x; } else { @@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) if (cs->cs_tcp.doff > 5) memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4); cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2; + cs->initialized = true; /* Put headers back on packet * Neither header checksum is recalculated */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 6c0c84c33e1f..bfd4ded0a53f 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q, DEFINE_WAIT(wait); ssize_t ret = 0; - if (!iov_iter_count(to)) + if (!iov_iter_count(to)) { + if (skb) + kfree_skb(skb); return 0; + } if (skb) goto put; @@ -1077,7 +1080,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN)) + TUN_F_TSO_ECN | TUN_F_UFO)) return -EINVAL; rtnl_lock(); @@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); + struct sk_buff *skb = 
m->msg_control; int ret; - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { + if (skb) + kfree_skb(skb); return -EINVAL; - ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, - m->msg_control); + } + ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ae53e899259f..2a366554c503 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1197,11 +1197,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_dev_open; } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); - err = vlan_vids_add_by_dev(port_dev, dev); if (err) { netdev_err(dev, "Failed to add vlan ids to device %s\n", @@ -1241,6 +1236,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_option_port_add; } + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); @@ -1265,8 +1265,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) vlan_vids_del_by_dev(port_dev, dev); err_vids_add: - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); dev_close(port_dev); err_dev_open: @@ -2394,7 +2392,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } @@ -2680,7 +2678,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 42bb820a56c9..0fd45ea34312 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -534,6 +534,14 @@ static void tun_queue_purge(struct tun_file *tfile) skb_queue_purge(&tfile->sk.sk_error_queue); } +static void tun_cleanup_tx_array(struct tun_file *tfile) +{ + if (tfile->tx_array.ring.queue) { + skb_array_cleanup(&tfile->tx_array); + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + } +} + static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; @@ -575,8 +583,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - if (tun) - skb_array_cleanup(&tfile->tx_array); + tun_cleanup_tx_array(tfile); sock_put(&tfile->sk); } } @@ -616,11 +623,13 @@ static void tun_detach_all(struct net_device *dev) /* Drop read queue */ tun_queue_purge(tfile); sock_put(&tfile->sk); + tun_cleanup_tx_array(tfile); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); sock_put(&tfile->sk); + tun_cleanup_tx_array(tfile); } BUG_ON(tun->numdisabled != 0); @@ -1306,6 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, else *skb_xdp = 0; + preempt_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog && !*skb_xdp) { @@ -1324,8 +1334,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, get_page(alloc_frag->page); alloc_frag->offset += buflen; err = xdp_do_redirect(tun->dev, &xdp, 
xdp_prog); + xdp_do_flush_map(); if (err) goto err_redirect; + rcu_read_unlock(); + preempt_enable(); return NULL; case XDP_TX: xdp_xmit = true; @@ -1347,6 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb = build_skb(buf, buflen); if (!skb) { rcu_read_unlock(); + preempt_enable(); return ERR_PTR(-ENOMEM); } @@ -1358,11 +1372,13 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, if (xdp_xmit) { skb->dev = tun->dev; generic_xdp_tx(skb, xdp_prog); - rcu_read_lock(); + rcu_read_unlock(); + preempt_enable(); return NULL; } rcu_read_unlock(); + preempt_enable(); return skb; @@ -1370,6 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, put_page(alloc_frag->page); err_xdp: rcu_read_unlock(); + preempt_enable(); this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -1734,8 +1751,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, tun_debug(KERN_INFO, tun, "tun_do_read\n"); - if (!iov_iter_count(to)) + if (!iov_iter_count(to)) { + if (skb) + kfree_skb(skb); return 0; + } if (!skb) { /* Read frames from ring */ @@ -1851,22 +1871,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = __tun_get(tfile); + struct sk_buff *skb = m->msg_control; int ret; - if (!tun) - return -EBADFD; + if (!tun) { + ret = -EBADFD; + goto out_free_skb; + } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; - goto out; + goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, - m->msg_control); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
ret : total_len; @@ -1874,6 +1896,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, out: tun_put(tun); return ret; + +out_put_tun: + tun_put(tun); +out_free_skb: + if (skb) + kfree_skb(skb); + return ret; } static int tun_peek_len(struct socket *sock) @@ -2144,6 +2173,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } + + arg &= ~TUN_F_UFO; } /* This gives the user a way to test for new features in future by @@ -2252,6 +2283,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, int le; int ret; +#ifdef CONFIG_ANDROID_PARANOID_NETWORK + if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) { + return -EPERM; + } +#endif + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; @@ -2609,6 +2646,8 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + return 0; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..178b956501a7 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -895,6 +895,12 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, +}, { + /* Cinterion AHS3 modem by GEMALTO */ + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 47cab1bde065..9e1b74590682 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ int err; u8 iface_no; struct usb_cdc_parsed_header hdr; - u16 curr_ntb_format; + __le16 curr_ntb_format; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } - if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) { + if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) { dev_info(&intf->dev, "resetting NTB format to 16-bit"); err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS | USB_DIR_OUT diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0161f77641fa..1fb464837b3e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -928,7 +928,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, offset += 0x100; else ret = -EINVAL; - ret = lan78xx_read_raw_otp(dev, offset, length, data); + if (!ret) + ret = lan78xx_read_raw_otp(dev, offset, length, data); } return ret; @@ -2396,6 +2397,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; dev->rx_qlen = 4; + dev->tx_qlen = 4; } ret = lan78xx_write_reg(dev, BURST_CAP, buf); @@ -2862,8 +2864,7 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (ret < 0) { netdev_warn(dev->net, "lan78xx_setup_irq_domain() failed : %d", ret); - kfree(pdata); - return ret; + goto out1; } dev->net->hard_header_len += TX_OVERHEAD; @@ -2871,14 +2872,32 @@ static int lan78xx_bind(struct lan78xx_net 
*dev, struct usb_interface *intf) /* Init all registers */ ret = lan78xx_reset(dev); + if (ret) { + netdev_warn(dev->net, "Registers INIT FAILED...."); + goto out2; + } ret = lan78xx_mdio_init(dev); + if (ret) { + netdev_warn(dev->net, "MDIO INIT FAILED....."); + goto out2; + } dev->net->flags |= IFF_MULTICAST; pdata->wol = WAKE_MAGIC; return ret; + +out2: + lan78xx_remove_irq_domain(dev); + +out1: + netdev_warn(dev->net, "Bind routine FAILED"); + cancel_work_sync(&pdata->set_multicast); + cancel_work_sync(&pdata->set_vlan); + kfree(pdata); + return ret; } static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) @@ -2890,6 +2909,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) lan78xx_remove_mdio(dev); if (pdata) { + cancel_work_sync(&pdata->set_multicast); + cancel_work_sync(&pdata->set_vlan); netif_dbg(dev, ifdown, dev->net, "free pdata"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8d4a6f7cba61..5d3d31f5933b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net) net->hard_header_len = 0; net->addr_len = 0; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + set_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: raw IP\n"); } else if (!net->header_ops) { /* don't bother if already set */ ether_setup(net); + clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: Ethernet\n"); } @@ -824,7 +826,7 @@ static int qmi_wwan_resume(struct usb_interface *intf) static const struct driver_info qmi_wwan_info = { .description = "WWAN/QMI device", - .flags = FLAG_WWAN, + .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, @@ -833,7 +835,7 @@ static const struct driver_info qmi_wwan_info = { static const struct driver_info qmi_wwan_info_quirk_dtr = { .description = "WWAN/QMI device", - .flags = FLAG_WWAN, + .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, @@ -1098,6 +1100,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, + {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ @@ -1202,12 +1205,14 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ @@ 
-1239,6 +1244,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 6510e5cc1817..42baad125a7d 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) return -ENOLINK; } - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); + if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) + skb = __netdev_alloc_skb(dev->net, size, flags); + else + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); if (!skb) { netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); usbnet_defer_kevent (dev, EVENT_RX_MEMORY); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f5438d0978ca..a69ad39ee57e 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; + peer->gso_max_size = dev->gso_max_size; + peer->gso_max_segs = dev->gso_max_segs; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 511f8339fa96..b0a038e6fda0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -260,9 +260,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi, int opaque; opaque = virtqueue_enable_cb_prepare(vq); - if (napi_complete_done(napi, processed) && - unlikely(virtqueue_poll(vq, opaque))) - virtqueue_napi_schedule(napi, vq); + if (napi_complete_done(napi, processed)) { + if (unlikely(virtqueue_poll(vq, opaque))) + virtqueue_napi_schedule(napi, vq); + } else { + virtqueue_disable_cb(vq); + } } static void skb_xmit_done(struct virtqueue *vq) @@ -714,7 +717,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int num_skb_frags; buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); - if (unlikely(!ctx)) { + if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, num_buf, virtio16_to_cpu(vi->vdev, @@ -1995,8 +1998,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ - for (i = 0; i < vi->max_queue_pairs; i++) - napi_disable(&vi->rq[i].napi); + if (netif_running(dev)) + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); @@ -2015,7 +2019,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (old_prog) bpf_prog_put(old_prog); - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (netif_running(dev)) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d1c7029ded7c..cf95290b160c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, rq->rx_ring[i].basePA); rq->rx_ring[i].base = NULL; } - rq->buf_info[i] = NULL; } if (rq->data_ring.base) { @@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, (rq->rx_ring[0].size + rq->rx_ring[1].size); dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], rq->buf_info_pa); + rq->buf_info[0] = rq->buf_info[1] = NULL; } } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7dc3bcac3506..5c6a8ef54aec 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -579,12 +579,13 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); ret = neigh_output(neigh, skb); + rcu_read_unlock_bh(); + return ret; } rcu_read_unlock_bh(); err: - if (unlikely(ret < 0)) - vrf_tx_error(skb->dev, skb); + vrf_tx_error(skb->dev, skb); return ret; } @@ -674,8 +675,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { - /* don't divert multicast */ - if (ipv4_is_multicast(ip_hdr(skb)->daddr)) + /* don't divert multicast or local broadcast */ + if (ipv4_is_multicast(ip_hdr(skb)->daddr) || + ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; if (qdisc_tx_is_default(vrf_dev)) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d7c49cf1d5e9..bb44f0c6891f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1623,26 +1623,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct nd_msg *msg; - const struct ipv6hdr *iphdr; const struct in6_addr *daddr; - struct neighbour *n; + const struct ipv6hdr *iphdr; struct inet6_dev *in6_dev; + struct neighbour *n; + struct nd_msg *msg; in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; - if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) - goto out; - iphdr = ipv6_hdr(skb); daddr = &iphdr->daddr; - msg = (struct nd_msg *)(iphdr + 1); - if (msg->icmph.icmp6_code != 0 || - msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) - goto out; if (ipv6_addr_loopback(daddr) || ipv6_addr_is_multicast(&msg->target)) @@ -2162,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ndst = &rt->dst; + if (skb_dst(skb)) { + int mtu = dst_mtu(ndst) - VXLAN_HEADROOM; + + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, + skb, mtu); + } + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), @@ -2197,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } + if (skb_dst(skb)) { + int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM; + + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, + skb, mtu); + } + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); @@ -2240,11 +2247,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *rdst, *fdst = NULL; const struct ip_tunnel_info *info; - struct ethhdr *eth; bool did_rsc = false; - struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + struct ethhdr *eth; __be32 vni = 0; info = skb_tunnel_info(skb); @@ -2269,12 +2276,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (ntohs(eth->h_proto) == ETH_P_ARP) return arp_reduce(dev, skb, vni); #if IS_ENABLED(CONFIG_IPV6) - else if (ntohs(eth->h_proto) == ETH_P_IPV6) { - struct ipv6hdr *hdr, _hdr; - if ((hdr = skb_header_pointer(skb, - skb_network_offset(skb), - sizeof(_hdr), &_hdr)) && - hdr->nexthdr == IPPROTO_ICMPV6) + else if (ntohs(eth->h_proto) == ETH_P_IPV6 && + pskb_may_pull(skb, sizeof(struct ipv6hdr) + + sizeof(struct nd_msg)) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1); + + if (m->icmph.icmp6_code == 0 && + m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb, vni); } #endif @@ -3110,6 +3119,11 @@ static void vxlan_config_apply(struct net_device *dev, max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); + if (max_mtu < ETH_MIN_MTU) + max_mtu = ETH_MIN_MTU; + + if (!changelink && !conf->mtu) + dev->mtu = max_mtu; } if (dev->mtu > max_mtu) diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0d2e00ece804..f3c1d5245978 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; - } else + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 166920ae23f8..a19fb7b6dc07 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -114,4 +114,6 @@ config USB_NET_RNDIS_WLAN If you choose to build a module, it'll be called rndis_wlan. 
+source "drivers/net/wireless/bcmdhd/Kconfig" + endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 7fc96306712a..c6c15878aecc 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -27,3 +27,5 @@ obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o + +obj-$(CONFIG_BCMDHD) += bcmdhd/ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 5683f1a5330e..252c2206cbb5 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2553,7 +2553,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } break; case WMI_VDEV_TYPE_STA: - if (vif->bss_conf.qos) + if (sta->wme) arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; case WMI_VDEV_TYPE_IBSS: @@ -6183,6 +6183,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); + if (sta->tdls) { + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, + sta, + WMI_TDLS_PEER_STATE_TEARDOWN); + if (ret) + ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", + sta->addr, + WMI_TDLS_PEER_STATE_TEARDOWN, ret); + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 195dafb98131..d790ea20b95d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2577,9 +2577,13 @@ void ath10k_pci_hif_power_down(struct ath10k *ar) */ } -#ifdef CONFIG_PM - static int ath10k_pci_hif_suspend(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver suspend. */ + return 0; +} + +static int ath10k_pci_suspend(struct ath10k *ar) { /* The grace timer can still be counting down and ar->ps_awake be true. * It is known that the device may be asleep after resuming regardless @@ -2592,6 +2596,12 @@ static int ath10k_pci_hif_suspend(struct ath10k *ar) } static int ath10k_pci_hif_resume(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver resume. 
*/ + return 0; +} + +static int ath10k_pci_resume(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; @@ -2615,7 +2625,6 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) return ret; } -#endif static bool ath10k_pci_validate_cal(void *data, size_t size) { @@ -2770,10 +2779,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .power_down = ath10k_pci_hif_power_down, .read32 = ath10k_pci_read32, .write32 = ath10k_pci_write32, -#ifdef CONFIG_PM .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, -#endif .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, }; @@ -3401,11 +3408,7 @@ static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) struct ath10k *ar = dev_get_drvdata(dev); int ret; - if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->running_fw->fw_file.fw_features)) - return 0; - - ret = ath10k_hif_suspend(ar); + ret = ath10k_pci_suspend(ar); if (ret) ath10k_warn(ar, "failed to suspend hif: %d\n", ret); @@ -3417,11 +3420,7 @@ static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) struct ath10k *ar = dev_get_drvdata(dev); int ret; - if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->running_fw->fw_file.fw_features)) - return 0; - - ret = ath10k_hif_resume(ar); + ret = ath10k_pci_resume(ar); if (ret) ath10k_warn(ar, "failed to resume hif: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 7a3606dde227..bab876cf25fe 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -5235,7 +5235,8 @@ enum wmi_10_4_vdev_param { #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) #define WMI_TXBF_STS_CAP_OFFSET_LSB 4 -#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) #define WMI_BF_SOUND_DIM_OFFSET_LSB 8 #define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index f0439f2d566b..173891b11b2d 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -1112,7 +1112,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, if (!avp->assoc) return false; - skb = ieee80211_nullfunc_get(sc->hw, vif); + skb = ieee80211_nullfunc_get(sc->hw, vif, false); if (!skb) return false; diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 49ed1afb913c..fe3a8263b224 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, ssize_t len; int r; + if (count < 1) + return -EINVAL; + if (sc->cur_chan->nvifs > 1) return -EOPNOTSUPP; @@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, if (copy_from_user(buf, user_buf, len)) return -EFAULT; + buf[len] = '\0'; + if (strtobool(buf, &start)) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 396bf05c6bf6..d8b041f48ca8 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2892,6 +2892,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) struct ath_txq *txq; int tidno; + rcu_read_lock(); + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); txq = tid->txq; @@ -2909,6 +2911,8 
@@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) if (!an->sta) break; /* just one multicast ath_atx_tid */ } + + rcu_read_unlock(); } #ifdef CONFIG_ATH9K_TX99 diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index b83f01d6e3dd..af37c19dbfd7 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) } } + if (changed & IEEE80211_CONF_CHANGE_PS) { + list_for_each_entry(tmp, &wcn->vif_list, list) { + vif = wcn36xx_priv_to_vif(tmp); + if (hw->conf.flags & IEEE80211_CONF_PS) { + if (vif->bss_conf.ps) /* ps allowed ? */ + wcn36xx_pmc_enter_bmps_state(wcn, vif); + } else { + wcn36xx_pmc_exit_bmps_state(wcn, vif); + } + } + } + mutex_unlock(&wcn->conf_mutex); return 0; @@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, vif_priv->dtim_period = bss_conf->dtim_period; } - if (changed & BSS_CHANGED_PS) { - wcn36xx_dbg(WCN36XX_DBG_MAC, - "mac bss PS set %d\n", - bss_conf->ps); - if (bss_conf->ps) { - wcn36xx_pmc_enter_bmps_state(wcn, vif); - } else { - wcn36xx_pmc_exit_bmps_state(wcn, vif); - } - } - if (changed & BSS_CHANGED_BSSID) { wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", bss_conf->bssid); diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c index 589fe5f70971..1976b80c235f 100644 --- a/drivers/net/wireless/ath/wcn36xx/pmc.c +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c @@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn, struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); if (WCN36XX_BMPS != vif_priv->pw_state) { - wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); - return -EINVAL; + /* Unbalanced call or last BMPS enter failed */ + wcn36xx_dbg(WCN36XX_DBG_PMC, + "Not in BMPS mode, no need to exit\n"); + return -EALREADY; } wcn36xx_smd_exit_bmps(wcn, vif); vif_priv->pw_state = WCN36XX_FULL_POWER; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index ffdd2fa401b1..d63d7c326801 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1380,8 +1380,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) }; int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; - struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); + struct wmi_set_appie_cmd *cmd; + if (len < ie_len) { + rc = -EINVAL; + goto out; + } + + cmd = kzalloc(len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig new file mode 100755 index 000000000000..8ecbfb248037 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/Kconfig @@ -0,0 +1,44 @@ +config BCMDHD + tristate "Broadcom FullMAC wireless cards support" + ---help--- + This module adds support for wireless adapters based on + Broadcom FullMAC chipset. + + If you choose to build a module, it'll be called dhd. Say M if + unsure. + +config BCMDHD_SDIO + bool "SDIO bus interface support" + depends on BCMDHD && MMC + +config BCMDHD_PCIE + bool "PCIe bus interface support" + depends on BCMDHD && PCI && !BCMDHD_SDIO + +config BCMDHD_FW_PATH + depends on BCMDHD + string "Firmware path" + default "/system/vendor/firmware/fw_bcmdhd.bin" + ---help--- + Path to the firmware file. 
+ +config BCMDHD_NVRAM_PATH + depends on BCMDHD + string "NVRAM path" + default "/system/etc/wifi/bcmdhd.cal" + ---help--- + Path to the calibration file. + +config DHD_USE_STATIC_BUF + bool "Enable memory preallocation" + depends on BCMDHD + default n + ---help--- + Use memory preallocated in platform + +config DHD_USE_SCHED_SCAN + bool "Use CFG80211 sched scan" + depends on BCMDHD && CFG80211 + default n + ---help--- + Use CFG80211 sched scan diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile new file mode 100755 index 000000000000..02baabf6e503 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/Makefile @@ -0,0 +1,89 @@ +# bcmdhd +# +# +# +# +# + +DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \ + -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ + -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DCUSTOMER_HW2 -DWLP2P \ + -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \ + -DKEEP_ALIVE -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \ + -DEMBEDDED_PLATFORM -DPNO_SUPPORT \ + -DDHD_USE_IDLECOUNT -DSET_RANDOM_MAC_SOFTAP -DROAM_ENABLE -DVSDB \ + -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST \ + -DESCAN_RESULT_PATCH -DSUPPORT_PM2_ONLY -DWLTDLS \ + -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \ + -DMIRACAST_AMPDU_SIZE=8 -DROAM_ENABLE -DWL_IFACE_COMB_NUM_CHANNELS \ + -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include + +DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \ + dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \ + dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \ + bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \ + hnd_pktq.o hnd_pktpool.o + +obj-$(CONFIG_BCMDHD) += bcmdhd.o +bcmdhd-objs += $(DHDOFILES) + +ifneq ($(CONFIG_CFG80211),) +bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o wl_cfg_btcoex.o +DHDCFLAGS += -DWL_CFG80211 -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF +DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65 +DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15 +DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000 +DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7 +endif +ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),) +DHDCFLAGS += -DWL_SCHED_SCAN +endif +EXTRA_CFLAGS = $(DHDCFLAGS) +ifeq ($(CONFIG_BCMDHD),m) +EXTRA_LDFLAGS += --strip-debug +else +DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD +endif + +DHDCFLAGS += -DCONFIG_DTS + +######################### +# Chip dependent feature +######################### +ifneq ($(CONFIG_BCM4339),) +DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1 +DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128 +DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED +DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512 +DHDCFLAGS += -DDHDTCPACK_SUPPRESS +DHDCFLAGS += -DUSE_WL_TXBF +DHDCFLAGS += -DUSE_WL_FRAMEBURST +DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=32 +DHDCFLAGS += -DPROP_TXSTATUS_VSDB +DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=32 +DHDCFLAGS += -DREPEAT_READFRAME +DHDCFLAGS += -DROAM_AP_ENV_DETECTION +endif + +bcmdhd-$(CONFIG_BCMDHD_SDIO) += \ + bcmsdh.o \ + bcmsdh_linux.o \ + bcmsdh_sdmmc.o \ + bcmsdh_sdmmc_linux.o \ + dhd_sdio.o \ + dhd_cdc.o \ + dhd_wlfc.o +bcmdhd-$(CONFIG_BCMDHD_PCIE) += \ + dhd_pcie.o \ + dhd_pcie_linux.o \ + dhd_msgbuf.o \ + dhd_log.o \ + circularbuf.o \ + pcie_core.o \ + dhd_flowring.o +ccflags-$(CONFIG_BCMDHD_SDIO) += \ + -DSDTEST -DBDC -DDHD_BCMEVENTS -DPROP_TXSTATUS -DOOB_INTR_ONLY \ + -DHW_OOB -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DSDIO_CRC_ERROR_FIX \ + 
-DCUSTOM_SDIO_F2_BLKSIZE=128 -DUSE_SDIOFIFO_IOVAR +ccflags-$(CONFIG_BCMDHD_PCIE) += \ + -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c new file mode 100644 index 000000000000..70b2b6301ecb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/aiutils.c @@ -0,0 +1,1284 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: aiutils.c 607900 2015-12-22 13:38:53Z $ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + +#define BCM47162_DMP() (0) +#define BCM5357_DMP() (0) +#define BCM53573_DMP() (0) +#define BCM4707_DMP() (0) +#define PMU_DMP() (0) +#define GCI_DMP() (0) +#define remap_coreid(sih, coreid) (coreid) +#define remap_corerev(sih, corerev) (corerev) + +/* EROM parsing */ + +static uint32 +get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) +{ + uint32 ent; + uint inv = 0, nom = 0; + uint32 size = 0; + + while (TRUE) { + ent = R_REG(si_osh(sih), *eromptr); + (*eromptr)++; + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + /* escape condition related EROM size if it has invalid values */ + size += sizeof(*eromptr); + if (size >= ER_SZ_MAX) { + SI_ERROR(("Failed to find end of EROM marker\n")); + break; + } + + nom++; + } + + SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent)); + if (inv + nom) { + SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom)); + } + return ent; +} + +static uint32 +get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, + uint32 *sizel, uint32 *sizeh) +{ + uint32 asd, sz, szd; + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + /* This is not what we want, "push" it back */ + (*eromptr)--; + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if 
(szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", + sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); + + return asd; +} + +static void +ai_hwfixup(si_info_t *sii) +{ +} + + +/* parse the enumeration rom to identify all cores */ +void +ai_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = (chipcregs_t *)regs; + uint32 erombase, *eromptr, *eromlim; + + erombase = R_REG(sii->osh, &cc->eromptr); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + break; + + case PCI_BUS: + /* Set wrappers address */ + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* Now point the window at the erom */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); + eromptr = regs; + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + eromptr = (uint32 *)(uintptr)erombase; + break; +#endif /* BCMSDIO */ + + case PCMCIA_BUS: + default: + SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); + ASSERT(0); + return; + } + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", + regs, erombase, eromptr, eromlim)); + while (eromptr < eromlim) { + uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; + uint32 mpd, asd, addrl, addrh, sizel, sizeh; + uint i, j, idx; + bool br; + + br = FALSE; + + /* Grok a component */ + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); + ai_hwfixup(sii); + return; + } + + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + SI_ERROR(("CIA not followed by CIB\n")); + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; + crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + +#ifdef BCMDBG_SI + SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " + "nsw = %d, nmp = %d & nsp = %d\n", + mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp)); +#else + BCM_REFERENCE(crev); +#endif + + if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) + continue; + if ((nmw + nsw == 0)) { + /* A component which is not a core */ + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) { + sii->oob_router = addrl; + } + } + if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID && + cid != PMU_CORE_ID && cid != GCI_CORE_ID) + continue; + } + + idx = sii->numcores; + + cores_info->cia[idx] = cia; + cores_info->cib[idx] = cib; + cores_info->coreid[idx] = remap_coreid(sih, cid); + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); + goto error; + } + SI_VMSG((" Master port %d, mp: %d id: %d\n", i, + (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, + (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); + } + + /* First Slave Address Descriptor should be port 0: + * the main register space for the 
core + */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + do { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + if (asd != 0) + br = TRUE; + else { + if (br == TRUE) { + break; + } + else if ((addrh != 0) || (sizeh != 0) || + (sizel != SI_CORE_SIZE)) { + SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 =" + "0x%x\n", addrh, sizeh, sizel)); + SI_ERROR(("First Slave ASD for" + "core 0x%04x malformed " + "(0x%08x)\n", cid, asd)); + goto error; + } + } + } while (1); + } + cores_info->coresba[idx] = addrl; + cores_info->coresba_size[idx] = sizel; + /* Get any more ASDs in port 0 */ + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { + cores_info->coresba2[idx] = addrl; + cores_info->coresba2_size[idx] = sizel; + } + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + + if (asd == 0) + break; + j++; + } while (1); + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + goto error; + } + } + + /* Now get master wrappers */ + for (i = 0; i < nmw; i++) { + asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for MW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Master wrapper %d is not 4KB\n", i)); + goto error; + } + if (i == 0) + cores_info->wrapba[idx] = addrl; + else if (i == 1) + cores_info->wrapba2[idx] = addrl; + } + + /* And finally slave wrappers */ + for (i = 0; i < nsw; i++) { + uint fwp = (nsp == 1) ? 0 : 1; + asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, + &sizel, &sizeh); + + /* cache APB bridge wrapper address for set/clear timeout */ + if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) { + ASSERT(sii->num_br < SI_MAXBR); + sii->br_wrapba[sii->num_br++] = addrl; + } + if (asd == 0) { + SI_ERROR(("Missing descriptor for SW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); + goto error; + } + if ((nmw == 0) && (i == 0)) + cores_info->wrapba[idx] = addrl; + else if ((nmw == 0) && (i == 1)) + cores_info->wrapba2[idx] = addrl; + } + + + /* Don't record bridges */ + if (br) + continue; + + /* Done with core */ + sii->numcores++; + } + + SI_ERROR(("Reached end of erom without finding END")); + +error: + sii->numcores = 0; + return; +} + +#define AI_SETCOREIDX_MAPSIZE(coreid) \ + (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE) + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. + */ +static void * +_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 addr, wrap, wrap2; + void *regs; + + if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) + return (NULL); + + addr = cores_info->coresba[coreidx]; + wrap = cores_info->wrapba[coreidx]; + wrap2 = cores_info->wrapba2[coreidx]; + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. 
+ */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(addr, + AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx])); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + sii->curmap = regs = cores_info->regs[coreidx]; + if (!cores_info->wrappers[coreidx] && (wrap != 0)) { + cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers[coreidx])); + } + if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) { + cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers2[coreidx])); + } + if (use_wrap2) + sii->curwrap = cores_info->wrappers2[coreidx]; + else + sii->curwrap = cores_info->wrappers[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); + regs = sii->curmap; + /* point bar0 2nd 4KB window to the primary wrapper */ + if (use_wrap2) + wrap = wrap2; + if (PCIE_GEN2(sii)) + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap); + else + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap); + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sii->curmap = regs = (void *)((uintptr)addr); + if (use_wrap2) + sii->curwrap = (void *)((uintptr)wrap2); + else + sii->curwrap = (void *)((uintptr)wrap); + break; +#endif /* BCMSDIO */ + + case PCMCIA_BUS: + default: + ASSERT(0); + regs = NULL; + break; + } + + sii->curmap = regs; + sii->curidx = coreidx; + + return regs; +} + +void * +ai_setcoreidx(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 0); +} + +void * +ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 1); +} + +void +ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = NULL; + uint32 erombase, *eromptr, *eromlim; + uint i, j, cidx; + uint32 cia, cib, nmp, nsp; + uint32 asd, addrl, addrh, sizel, sizeh; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == CC_CORE_ID) { + cc = (chipcregs_t *)cores_info->regs[i]; + break; + } + } + if (cc == NULL) + goto error; + + erombase = R_REG(sii->osh, &cc->eromptr); + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + cidx = sii->curidx; + cia = cores_info->cia[cidx]; + cib = cores_info->cib[cidx]; + + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + /* scan for cores */ + while (eromptr < eromlim) { + if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) && + (get_erom_ent(sih, &eromptr, 0, 0) == cib)) { + break; + } + } + + /* skip master ports */ + for (i = 0; i < nmp; i++) + get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + + /* Skip ASDs in port 0 */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + } + + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, 
&sizeh); + if (asd == 0) + break; + + if (!asidx--) { + *addr = addrl; + *size = sizel; + return; + } + j++; + } while (1); + + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + break; + } + } + +error: + *size = 0; + return; +} + +/* Return the number of address spaces in current core */ +int +ai_numaddrspaces(si_t *sih) +{ + return 2; +} + +/* Return the address of the nth address space in the current core */ +uint32 +ai_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + + if (asidx == 0) + return cores_info->coresba[cidx]; + else if (asidx == 1) + return cores_info->coresba2[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +/* Return the size of the nth address space in the current core */ +uint32 +ai_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + + if (asidx == 0) + return cores_info->coresba_size[cidx]; + else if (asidx == 1) + return cores_info->coresba2_size[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +uint +ai_flag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__)); + return sii->curidx; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__)); + return sii->curidx; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n", + __FUNCTION__)); + return sii->curidx; + } + if (BCM53573_DMP()) { + SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__)); + return sii->curidx; + } +#ifdef REROUTE_OOBINT + if (PMU_DMP()) { + SI_ERROR(("%s: Attempting to read PMU DMP registers\n", + __FUNCTION__)); + return PMU_OOB_BIT; + } +#else + if (PMU_DMP()) { + uint idx, flag; + idx = sii->curidx; + ai_setcoreidx(sih, SI_CC_IDX); + flag = ai_flag_alt(sih); + ai_setcoreidx(sih, idx); + return flag; + } +#endif /* REROUTE_OOBINT */ + + ai = sii->curwrap; + ASSERT(ai != NULL); + + return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); +} + +uint +ai_flag_alt(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__)); + return sii->curidx; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__)); + return sii->curidx; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n", + __FUNCTION__)); + return sii->curidx; + } +#ifdef REROUTE_OOBINT + if (PMU_DMP()) { + SI_ERROR(("%s: Attempting to read PMU DMP registers\n", + __FUNCTION__)); + return PMU_OOB_BIT; + } +#endif /* REROUTE_OOBINT */ + + ai = sii->curwrap; + + return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK); +} + +void +ai_setint(si_t *sih, int siflag) +{ +} + +uint +ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + uint32 *map = (uint32 *) sii->curwrap; + + if (mask || val) { + uint32 w = R_REG(sii->osh, map+(offset/4)); + w &= ~mask; + w |= val; + W_REG(sii->osh, map+(offset/4), 
w); + } + + return (R_REG(sii->osh, map+(offset/4))); +} + +uint +ai_corevendor(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cia; + + cia = cores_info->cia[sii->curidx]; + return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); +} + +uint +ai_corerev(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cib; + + + cib = cores_info->cib[sii->curidx]; + return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT); +} + +bool +ai_iscoreup(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + ai = sii->curwrap; + + return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && + ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. + */ +uint +ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + /* readback */ + w = R_REG(sii->osh, r); + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. + */ +uint32 * +ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + ASSERT(sii->curidx == coreidx); + r = (uint32*) ((uchar*)sii->curmap + regoff); + } + + return (r); +} + +void +ai_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii = SI_INFO(sih); + volatile uint32 dummy; + uint32 status; + aidmp_t *ai; + + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* if core is already in reset, just return */ + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) + return; + + /* ensure there are no pending backplane operations */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + /* if pending backplane ops still, try waiting longer */ + if (status != 0) { + /* 300usecs was sufficient to allow backplane ops to clear for big hammer */ + /* during driver load we may need more time */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000); + /* if still pending ops, continue on and try disable anyway */ + /* this is in big hammer path, so don't call wl_reinit in this case... 
*/ + } + + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + dummy = R_REG(sii->osh, &ai->resetctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + W_REG(sii->osh, &ai->ioctrl, bits); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(10); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +static void +_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + volatile uint32 dummy; + uint loop_counter = 10; +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: bits: 0x%x, resetbits: 0x%x\n", __FUNCTION__, bits, resetbits); +#endif + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: resetstatus: %p dummy: %x\n", __FUNCTION__, &ai->resetstatus, dummy); +#endif + + + /* put core into reset state */ +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: resetctrl: %p\n", __FUNCTION__, &ai->resetctrl); +#endif + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + OSL_DELAY(10); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + + W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: ioctrl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); +#endif + BCM_REFERENCE(dummy); + + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + + while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) { + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + + /* take core out of reset */ + W_REG(sii->osh, &ai->resetctrl, 0); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: loop_counter: %d resetstatus: %p resetctrl: %p\n", + __FUNCTION__, loop_counter, &ai->resetstatus, &ai->resetctrl); +#endif + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + } + + + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: ioctl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); +#endif + BCM_REFERENCE(dummy); + OSL_DELAY(1); +} + +void +ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx = sii->curidx; + + if (cores_info->wrapba2[idx] != 0) { + ai_setcoreidx_2ndwrap(sih, idx); + _ai_core_reset(sih, bits, resetbits); + ai_setcoreidx(sih, idx); + } + + _ai_core_reset(sih, bits, resetbits); +} + +void +ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0", + __FUNCTION__)); + return; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n", + __FUNCTION__)); + return; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return; + } + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); 
+ return; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } +} + +uint32 +ai_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0", + __FUNCTION__)); + return 0; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n", + __FUNCTION__)); + return 0; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return 0; + } + + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); + return 0; + } + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } + + return R_REG(sii->osh, &ai->ioctrl); +} + +uint32 +ai_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", + __FUNCTION__)); + return 0; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n", + __FUNCTION__)); + return 0; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return 0; + } + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); + return 0; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); + W_REG(sii->osh, &ai->iostatus, w); + } + + return R_REG(sii->osh, &ai->iostatus); +} + +#if defined(BCMDBG_PHYDUMP) +/* print interesting aidmp registers */ +void +ai_dumpregs(si_t *sih, struct bcmstrbuf *b) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + osl_t *osh; + aidmp_t *ai; + uint i; + + osh = sii->osh; + + for (i = 0; i < sii->numcores; i++) { + si_setcoreidx(&sii->pub, i); + ai = sii->curwrap; + + bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]); + if (BCM47162_DMP()) { + bcm_bprintf(b, "Skipping mips74k in 47162a0\n"); + continue; + } + if (BCM5357_DMP()) { + bcm_bprintf(b, "Skipping usb20h in 5357\n"); + continue; + } + if (BCM4707_DMP()) { + bcm_bprintf(b, "Skipping chipcommonb in 4707\n"); + continue; + } + + if (PMU_DMP()) { + bcm_bprintf(b, "Skipping pmu core\n"); + continue; + } + + bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x" + "ioctrlwidth 0x%x iostatuswidth 0x%x\n" + "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n" + "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x" + "errlogaddrlo 0x%x errlogaddrhi 0x%x\n" + "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n" + "intstatus 0x%x config 0x%x itcr 0x%x\n", + R_REG(osh, &ai->ioctrlset), + R_REG(osh, &ai->ioctrlclear), + R_REG(osh, &ai->ioctrl), + R_REG(osh, &ai->iostatus), + R_REG(osh, &ai->ioctrlwidth), + R_REG(osh, &ai->iostatuswidth), + R_REG(osh, &ai->resetctrl), + R_REG(osh, &ai->resetstatus), + R_REG(osh, &ai->resetreadid), + R_REG(osh, &ai->resetwriteid), + R_REG(osh, &ai->errlogctrl), + R_REG(osh, &ai->errlogdone), + 
R_REG(osh, &ai->errlogstatus), + R_REG(osh, &ai->errlogaddrlo), + R_REG(osh, &ai->errlogaddrhi), + R_REG(osh, &ai->errlogid), + R_REG(osh, &ai->errloguser), + R_REG(osh, &ai->errlogflags), + R_REG(osh, &ai->intstatus), + R_REG(osh, &ai->config), + R_REG(osh, &ai->itcr)); + } +} +#endif + + +void +ai_enable_backplane_timeouts(si_t *sih) +{ +#ifdef AXI_TIMEOUTS + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + int i; + + for (i = 0; i < sii->num_br; ++i) { + ai = (aidmp_t *) sii->br_wrapba[i]; + W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) | + ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK)); + } +#endif /* AXI_TIMEOUTS */ +} + +void +ai_clear_backplane_to(si_t *sih) +{ +#ifdef AXI_TIMEOUTS + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + int i; + uint32 errlogstatus; + + for (i = 0; i < sii->num_br; ++i) { + ai = (aidmp_t *) sii->br_wrapba[i]; + /* check for backplane timeout & clear backplane hang */ + errlogstatus = R_REG(sii->osh, &ai->errlogstatus); + + if ((errlogstatus & AIELS_TIMEOUT_MASK) != 0) { + /* set ErrDone to clear the condition */ + W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); + + /* SPINWAIT on errlogstatus timeout status bits */ + while (R_REG(sii->osh, &ai->errlogstatus) & AIELS_TIMEOUT_MASK) + ; + + /* only reset APB Bridge on timeout (not slave error, or dec error) */ + switch (errlogstatus & AIELS_TIMEOUT_MASK) { + case 0x1: + printf("AXI slave error"); + break; + case 0x2: + /* reset APB Bridge */ + OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + /* clear Reset bit */ + AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET)); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + printf("AXI timeout"); + break; + case 0x3: + printf("AXI decode error"); + break; + default: + ; /* should be impossible */ + } + printf("; APB Bridge %d\n", i); + printf("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x", + R_REG(sii->osh, &ai->errlogaddrlo), + R_REG(sii->osh, &ai->errlogaddrhi), + R_REG(sii->osh, &ai->errlogid), + R_REG(sii->osh, &ai->errlogflags)); + printf(", status 0x%08x\n", errlogstatus); + } + } +#endif /* AXI_TIMEOUTS */ +} diff --git a/drivers/net/wireless/bcmdhd/bcm_app_utils.c b/drivers/net/wireless/bcmdhd/bcm_app_utils.c new file mode 100644 index 000000000000..d138849d65c6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcm_app_utils.c @@ -0,0 +1,1012 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
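Stepping back to the two AXI helpers that close out aiutils.c above: ai_enable_backplane_timeouts() arms the per-bridge errlogctrl timeout counter, and ai_clear_backplane_to() decodes errlogstatus, resets the APB bridge on a timeout and dumps the errlog registers. A minimal usage sketch of how a bus driver might wire them up follows; the example_* function names are hypothetical and only the two ai_* calls come from this patch (both compile to no-ops unless AXI_TIMEOUTS is defined):

/* Hypothetical glue, for illustration only; not part of this patch. */
static void example_bus_attach_axi(si_t *sih)
{
	/* arm the timeout on every bridge wrapper once, right after silicon attach */
	ai_enable_backplane_timeouts(sih);
}

static void example_bus_error_handler(si_t *sih)
{
	/* decode and clear a backplane hang; on a timeout this also resets the APB bridge */
	ai_clear_backplane_to(sih);
}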
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcm_app_utils.c 547371 2015-04-08 12:51:39Z $
+ */
+
+#include
+
+#ifdef BCMDRIVER
+#include
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include
+#include
+#include
+#include
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+#include
+#include
+#include
+
+#ifndef BCMDRIVER
+/* Take an array of measurements representing a single channel over time and return
+   a summary. Currently implemented as a simple average but could easily evolve
+   into more complex algorithms.
+*/
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+	int sec;
+	cca_congest_t totals;
+
+	totals.duration = 0;
+	totals.congest_ibss = 0;
+	totals.congest_obss = 0;
+	totals.interference = 0;
+	avg->num_secs = 0;
+
+	for (sec = 0; sec < input->num_secs; sec++) {
+		if (input->secs[sec].duration) {
+			totals.duration += input->secs[sec].duration;
+			totals.congest_ibss += input->secs[sec].congest_ibss;
+			totals.congest_obss += input->secs[sec].congest_obss;
+			totals.interference += input->secs[sec].interference;
+			avg->num_secs++;
+		}
+	}
+	avg->chanspec = input->chanspec;
+
+	if (!avg->num_secs || !totals.duration)
+		return (avg);
+
+	if (percent) {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration;
+		avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration;
+		avg->secs[0].interference = totals.interference * 100/totals.duration;
+	} else {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs;
+		avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs;
+		avg->secs[0].interference = totals.interference / avg->num_secs;
+	}
+
+	return (avg);
+}
+
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+	int i;
+	for (*left = 0, i = 0; i < num_bits; i++) {
+		if (isset(bitmap, i)) {
+			(*left)++;
+			*bit_pos = i;
+		}
+	}
+}
+
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+	uint8 center_ch, edge, primary, sb;
+
+	center_ch = CHSPEC_CHANNEL(chspec);
+
+	if (CHSPEC_IS20(chspec)) {
+		return center_ch;
+	} else {
+		/* the lower edge of the wide channel is half the bw from
+		 * the center channel.
+		 */
+		if (CHSPEC_IS40(chspec)) {
+			edge = center_ch - CH_20MHZ_APART;
+		} else {
+			/* must be 80MHz (until we support more) */
+			ASSERT(CHSPEC_IS80(chspec));
+			edge = center_ch - CH_40MHZ_APART;
+		}
+
+		/* find the channel number of the lowest 20MHz primary channel */
+		primary = edge + CH_10MHZ_APART;
+
+		/* select the actual subband */
+		sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+		primary = primary + sb * CH_20MHZ_APART;
+
+		return primary;
+	}
+}
+
+/*
+ Take an array of measurements representing summaries of different channels.
+ Return a recommended channel.
+ Interference is evil, get rid of that first.
+ Then hunt for lowest Other bss traffic.
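The two helpers in this app-side block are meant to be used as a pair: cca_per_chan_summary() above collapses one channel's time series into a single averaged sample, and cca_analyze() below filters the per-channel summaries and picks a channel. A minimal, hypothetical usage sketch (NUM_CHANS, raw[], summaries[] and pick_channel() are illustrative names, and the raw per-channel measurements are assumed to have been filled in by a prior "cca_get_stats"-style query; none of this is part of the patch itself):

#define NUM_CHANS 14	/* illustrative: 2.4 GHz channels only */

static int pick_channel(cca_congest_channel_req_t *raw[NUM_CHANS], chanspec_t *chosen)
{
	static cca_congest_channel_req_t summaries[NUM_CHANS];
	cca_congest_channel_req_t *summary_ptrs[NUM_CHANS];
	int i;

	for (i = 0; i < NUM_CHANS; i++) {
		/* average each channel's samples, expressed as percentages of airtime */
		summary_ptrs[i] = cca_per_chan_summary(raw[i], &summaries[i], TRUE);
	}

	/* restrict to 2.4 GHz and prefer channels 1/6/11 */
	return cca_analyze(summary_ptrs, NUM_CHANS,
	                   CCA_FLAG_2G_ONLY | CCA_FLAGS_PREFER_1_6_11, chosen);
}

On success cca_analyze() writes the recommended chanspec through the output pointer and returns 0; otherwise it returns a CCA_ERRNO_*/BCME_* style error code, as in the implementation that follows.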
+ Don't forget that channels with low duration times may not have accurate readings. + For the moment, do not overwrite input array. +*/ +int +cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer) +{ + uint8 *bitmap = NULL; /* 38 Max channels needs 5 bytes = 40 */ + int i, left, winner, ret_val = 0; + uint32 min_obss = 1 << 30; + uint bitmap_sz; + + bitmap_sz = CEIL(num_chans, NBBY); + bitmap = (uint8 *)malloc(bitmap_sz); + if (bitmap == NULL) { + printf("unable to allocate memory\n"); + return BCME_NOMEM; + } + + memset(bitmap, 0, bitmap_sz); + /* Initially, all channels are up for consideration */ + for (i = 0; i < num_chans; i++) { + if (input[i]->chanspec) + setbit(bitmap, i); + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_TOO_FEW; + goto f_exit; + } + + /* Filter for 2.4 GHz Band */ + if (flags & CCA_FLAG_2G_ONLY) { + for (i = 0; i < num_chans; i++) { + if (!CHSPEC_IS2G(input[i]->chanspec)) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_BAND; + goto f_exit; + } + + /* Filter for 5 GHz Band */ + if (flags & CCA_FLAG_5G_ONLY) { + for (i = 0; i < num_chans; i++) { + if (!CHSPEC_IS5G(input[i]->chanspec)) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_BAND; + goto f_exit; + } + + /* Filter for Duration */ + if (!(flags & CCA_FLAG_IGNORE_DURATION)) { + for (i = 0; i < num_chans; i++) { + if (input[i]->secs[0].duration < CCA_THRESH_MILLI) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_DURATION; + goto f_exit; + } + + /* Filter for 1 6 11 on 2.4 Band */ + if (flags & CCA_FLAGS_PREFER_1_6_11) { + int tmp_channel = spec_to_chan(input[i]->chanspec); + int is2g = CHSPEC_IS2G(input[i]->chanspec); + for (i = 0; i < num_chans; i++) { + if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_PREF_CHAN; + goto f_exit; + } + + /* Toss high interference interference */ + if (!(flags & CCA_FLAG_IGNORE_INTERFER)) { + for (i = 0; i < num_chans; i++) { + if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE) + clrbit(bitmap, i); + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_INTERFER; + goto f_exit; + } + } + + /* Now find lowest obss */ + winner = 0; + for (i = 0; i < num_chans; i++) { + if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) { + winner = i; + min_obss = input[i]->secs[0].congest_obss; + } + } + *answer = input[winner]->chanspec; + f_exit: + free(bitmap); /* free the allocated memory for bitmap */ + return ret_val; +} +#endif /* !BCMDRIVER */ + +/* offset of cntmember by sizeof(uint32) from the first cnt variable, txframe. */ +#define IDX_IN_WL_CNT_VER_6_T(cntmember) \ + ((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32)) + +#define IDX_IN_WL_CNT_VER_11_T(cntmember) \ + ((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \ + / sizeof(uint32)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \ + ((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. 
*/ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude 64 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_11_T - WL_CNT_MCST_VAR_NUM) + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */ +static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = { + IDX_IN_WL_CNT_VER_6_T(txframe), + IDX_IN_WL_CNT_VER_6_T(txbyte), + IDX_IN_WL_CNT_VER_6_T(txretrans), + IDX_IN_WL_CNT_VER_6_T(txerror), + IDX_IN_WL_CNT_VER_6_T(txctl), + IDX_IN_WL_CNT_VER_6_T(txprshort), + IDX_IN_WL_CNT_VER_6_T(txserr), + IDX_IN_WL_CNT_VER_6_T(txnobuf), + IDX_IN_WL_CNT_VER_6_T(txnoassoc), + IDX_IN_WL_CNT_VER_6_T(txrunt), + IDX_IN_WL_CNT_VER_6_T(txchit), + IDX_IN_WL_CNT_VER_6_T(txcmiss), + IDX_IN_WL_CNT_VER_6_T(txuflo), + IDX_IN_WL_CNT_VER_6_T(txphyerr), + IDX_IN_WL_CNT_VER_6_T(txphycrs), + IDX_IN_WL_CNT_VER_6_T(rxframe), + IDX_IN_WL_CNT_VER_6_T(rxbyte), + IDX_IN_WL_CNT_VER_6_T(rxerror), + IDX_IN_WL_CNT_VER_6_T(rxctl), + IDX_IN_WL_CNT_VER_6_T(rxnobuf), + IDX_IN_WL_CNT_VER_6_T(rxnondata), + IDX_IN_WL_CNT_VER_6_T(rxbadds), + IDX_IN_WL_CNT_VER_6_T(rxbadcm), + IDX_IN_WL_CNT_VER_6_T(rxfragerr), + IDX_IN_WL_CNT_VER_6_T(rxrunt), + IDX_IN_WL_CNT_VER_6_T(rxgiant), + IDX_IN_WL_CNT_VER_6_T(rxnoscb), + IDX_IN_WL_CNT_VER_6_T(rxbadproto), + IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_6_T(rxbadda), + IDX_IN_WL_CNT_VER_6_T(rxfilter), + IDX_IN_WL_CNT_VER_6_T(rxoflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_6_T(dmade), + IDX_IN_WL_CNT_VER_6_T(dmada), + IDX_IN_WL_CNT_VER_6_T(dmape), + IDX_IN_WL_CNT_VER_6_T(reset), + IDX_IN_WL_CNT_VER_6_T(tbtt), + IDX_IN_WL_CNT_VER_6_T(txdmawar), + IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_6_T(txfrag), + IDX_IN_WL_CNT_VER_6_T(txmulti), + IDX_IN_WL_CNT_VER_6_T(txfail), + IDX_IN_WL_CNT_VER_6_T(txretry), + IDX_IN_WL_CNT_VER_6_T(txretrie), + IDX_IN_WL_CNT_VER_6_T(rxdup), + IDX_IN_WL_CNT_VER_6_T(txrts), + IDX_IN_WL_CNT_VER_6_T(txnocts), + IDX_IN_WL_CNT_VER_6_T(txnoack), + IDX_IN_WL_CNT_VER_6_T(rxfrag), + IDX_IN_WL_CNT_VER_6_T(rxmulti), + IDX_IN_WL_CNT_VER_6_T(rxcrc), + IDX_IN_WL_CNT_VER_6_T(txfrmsnt), + IDX_IN_WL_CNT_VER_6_T(rxundec), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_6_T(tkipreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpundec), + IDX_IN_WL_CNT_VER_6_T(fourwayfail), + IDX_IN_WL_CNT_VER_6_T(wepundec), + IDX_IN_WL_CNT_VER_6_T(wepicverr), + IDX_IN_WL_CNT_VER_6_T(decsuccess), + IDX_IN_WL_CNT_VER_6_T(tkipicverr), + IDX_IN_WL_CNT_VER_6_T(wepexcluded), + IDX_IN_WL_CNT_VER_6_T(txchanrej), + IDX_IN_WL_CNT_VER_6_T(psmwds), + IDX_IN_WL_CNT_VER_6_T(phywatchdog), + IDX_IN_WL_CNT_VER_6_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_6_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_6_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done), + 
IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_6_T(rx1mbps), + IDX_IN_WL_CNT_VER_6_T(rx2mbps), + IDX_IN_WL_CNT_VER_6_T(rx5mbps5), + IDX_IN_WL_CNT_VER_6_T(rx6mbps), + IDX_IN_WL_CNT_VER_6_T(rx9mbps), + IDX_IN_WL_CNT_VER_6_T(rx11mbps), + IDX_IN_WL_CNT_VER_6_T(rx12mbps), + IDX_IN_WL_CNT_VER_6_T(rx18mbps), + IDX_IN_WL_CNT_VER_6_T(rx24mbps), + IDX_IN_WL_CNT_VER_6_T(rx36mbps), + IDX_IN_WL_CNT_VER_6_T(rx48mbps), + IDX_IN_WL_CNT_VER_6_T(rx54mbps), + IDX_IN_WL_CNT_VER_6_T(rx108mbps), + IDX_IN_WL_CNT_VER_6_T(rx162mbps), + IDX_IN_WL_CNT_VER_6_T(rx216mbps), + IDX_IN_WL_CNT_VER_6_T(rx270mbps), + IDX_IN_WL_CNT_VER_6_T(rx324mbps), + IDX_IN_WL_CNT_VER_6_T(rx378mbps), + IDX_IN_WL_CNT_VER_6_T(rx432mbps), + IDX_IN_WL_CNT_VER_6_T(rx486mbps), + IDX_IN_WL_CNT_VER_6_T(rx540mbps), + IDX_IN_WL_CNT_VER_6_T(rfdisable), + IDX_IN_WL_CNT_VER_6_T(txexptime), + IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_6_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst) +}; + +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */ +static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = { + IDX_IN_WL_CNT_VER_11_T(txframe), + IDX_IN_WL_CNT_VER_11_T(txbyte), + IDX_IN_WL_CNT_VER_11_T(txretrans), + IDX_IN_WL_CNT_VER_11_T(txerror), + IDX_IN_WL_CNT_VER_11_T(txctl), + IDX_IN_WL_CNT_VER_11_T(txprshort), + IDX_IN_WL_CNT_VER_11_T(txserr), + IDX_IN_WL_CNT_VER_11_T(txnobuf), + IDX_IN_WL_CNT_VER_11_T(txnoassoc), + IDX_IN_WL_CNT_VER_11_T(txrunt), + IDX_IN_WL_CNT_VER_11_T(txchit), + IDX_IN_WL_CNT_VER_11_T(txcmiss), + IDX_IN_WL_CNT_VER_11_T(txuflo), + IDX_IN_WL_CNT_VER_11_T(txphyerr), + IDX_IN_WL_CNT_VER_11_T(txphycrs), + IDX_IN_WL_CNT_VER_11_T(rxframe), + IDX_IN_WL_CNT_VER_11_T(rxbyte), + IDX_IN_WL_CNT_VER_11_T(rxerror), + IDX_IN_WL_CNT_VER_11_T(rxctl), + IDX_IN_WL_CNT_VER_11_T(rxnobuf), + IDX_IN_WL_CNT_VER_11_T(rxnondata), + IDX_IN_WL_CNT_VER_11_T(rxbadds), + IDX_IN_WL_CNT_VER_11_T(rxbadcm), + IDX_IN_WL_CNT_VER_11_T(rxfragerr), + IDX_IN_WL_CNT_VER_11_T(rxrunt), + IDX_IN_WL_CNT_VER_11_T(rxgiant), + IDX_IN_WL_CNT_VER_11_T(rxnoscb), + IDX_IN_WL_CNT_VER_11_T(rxbadproto), + IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_11_T(rxbadda), + IDX_IN_WL_CNT_VER_11_T(rxfilter), + IDX_IN_WL_CNT_VER_11_T(rxoflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_11_T(dmade), + IDX_IN_WL_CNT_VER_11_T(dmada), + IDX_IN_WL_CNT_VER_11_T(dmape), + IDX_IN_WL_CNT_VER_11_T(reset), + IDX_IN_WL_CNT_VER_11_T(tbtt), + IDX_IN_WL_CNT_VER_11_T(txdmawar), + IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_11_T(txfrag), + IDX_IN_WL_CNT_VER_11_T(txmulti), + IDX_IN_WL_CNT_VER_11_T(txfail), + 
IDX_IN_WL_CNT_VER_11_T(txretry), + IDX_IN_WL_CNT_VER_11_T(txretrie), + IDX_IN_WL_CNT_VER_11_T(rxdup), + IDX_IN_WL_CNT_VER_11_T(txrts), + IDX_IN_WL_CNT_VER_11_T(txnocts), + IDX_IN_WL_CNT_VER_11_T(txnoack), + IDX_IN_WL_CNT_VER_11_T(rxfrag), + IDX_IN_WL_CNT_VER_11_T(rxmulti), + IDX_IN_WL_CNT_VER_11_T(rxcrc), + IDX_IN_WL_CNT_VER_11_T(txfrmsnt), + IDX_IN_WL_CNT_VER_11_T(rxundec), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_11_T(tkipreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpundec), + IDX_IN_WL_CNT_VER_11_T(fourwayfail), + IDX_IN_WL_CNT_VER_11_T(wepundec), + IDX_IN_WL_CNT_VER_11_T(wepicverr), + IDX_IN_WL_CNT_VER_11_T(decsuccess), + IDX_IN_WL_CNT_VER_11_T(tkipicverr), + IDX_IN_WL_CNT_VER_11_T(wepexcluded), + IDX_IN_WL_CNT_VER_11_T(txchanrej), + IDX_IN_WL_CNT_VER_11_T(psmwds), + IDX_IN_WL_CNT_VER_11_T(phywatchdog), + IDX_IN_WL_CNT_VER_11_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_11_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_11_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_11_T(rx1mbps), + IDX_IN_WL_CNT_VER_11_T(rx2mbps), + IDX_IN_WL_CNT_VER_11_T(rx5mbps5), + IDX_IN_WL_CNT_VER_11_T(rx6mbps), + IDX_IN_WL_CNT_VER_11_T(rx9mbps), + IDX_IN_WL_CNT_VER_11_T(rx11mbps), + IDX_IN_WL_CNT_VER_11_T(rx12mbps), + IDX_IN_WL_CNT_VER_11_T(rx18mbps), + IDX_IN_WL_CNT_VER_11_T(rx24mbps), + IDX_IN_WL_CNT_VER_11_T(rx36mbps), + IDX_IN_WL_CNT_VER_11_T(rx48mbps), + IDX_IN_WL_CNT_VER_11_T(rx54mbps), + IDX_IN_WL_CNT_VER_11_T(rx108mbps), + IDX_IN_WL_CNT_VER_11_T(rx162mbps), + IDX_IN_WL_CNT_VER_11_T(rx216mbps), + IDX_IN_WL_CNT_VER_11_T(rx270mbps), + IDX_IN_WL_CNT_VER_11_T(rx324mbps), + IDX_IN_WL_CNT_VER_11_T(rx378mbps), + IDX_IN_WL_CNT_VER_11_T(rx432mbps), + IDX_IN_WL_CNT_VER_11_T(rx486mbps), + IDX_IN_WL_CNT_VER_11_T(rx540mbps), + IDX_IN_WL_CNT_VER_11_T(rfdisable), + IDX_IN_WL_CNT_VER_11_T(txexptime), + IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_11_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst), + IDX_IN_WL_CNT_VER_11_T(dma_hang), + IDX_IN_WL_CNT_VER_11_T(reinit), + IDX_IN_WL_CNT_VER_11_T(pstatxucast), + IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc), + IDX_IN_WL_CNT_VER_11_T(pstarxucast), + IDX_IN_WL_CNT_VER_11_T(pstarxbcmc), + IDX_IN_WL_CNT_VER_11_T(pstatxbcmc), + IDX_IN_WL_CNT_VER_11_T(cso_passthrough), + IDX_IN_WL_CNT_VER_11_T(cso_normal), + IDX_IN_WL_CNT_VER_11_T(chained), + IDX_IN_WL_CNT_VER_11_T(chainedsz1), + IDX_IN_WL_CNT_VER_11_T(unchained), + IDX_IN_WL_CNT_VER_11_T(maxchainsz), + IDX_IN_WL_CNT_VER_11_T(currchainsz), + IDX_IN_WL_CNT_VER_11_T(pciereset), + IDX_IN_WL_CNT_VER_11_T(cfgrestore), + IDX_IN_WL_CNT_VER_11_T(reinitreason), + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1, + IDX_IN_WL_CNT_VER_11_T(reinitreason) 
+ 2, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7, + IDX_IN_WL_CNT_VER_11_T(rxrtry), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu), + IDX_IN_WL_CNT_VER_11_T(txbar), + IDX_IN_WL_CNT_VER_11_T(rxbar), + IDX_IN_WL_CNT_VER_11_T(txpspoll), + IDX_IN_WL_CNT_VER_11_T(rxpspoll), + IDX_IN_WL_CNT_VER_11_T(txnull), + IDX_IN_WL_CNT_VER_11_T(rxnull), + IDX_IN_WL_CNT_VER_11_T(txqosnull), + IDX_IN_WL_CNT_VER_11_T(rxqosnull), + IDX_IN_WL_CNT_VER_11_T(txassocreq), + IDX_IN_WL_CNT_VER_11_T(rxassocreq), + IDX_IN_WL_CNT_VER_11_T(txreassocreq), + IDX_IN_WL_CNT_VER_11_T(rxreassocreq), + IDX_IN_WL_CNT_VER_11_T(txdisassoc), + IDX_IN_WL_CNT_VER_11_T(rxdisassoc), + IDX_IN_WL_CNT_VER_11_T(txassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxassocrsp), + IDX_IN_WL_CNT_VER_11_T(txreassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxreassocrsp), + IDX_IN_WL_CNT_VER_11_T(txauth), + IDX_IN_WL_CNT_VER_11_T(rxauth), + IDX_IN_WL_CNT_VER_11_T(txdeauth), + IDX_IN_WL_CNT_VER_11_T(rxdeauth), + IDX_IN_WL_CNT_VER_11_T(txprobereq), + IDX_IN_WL_CNT_VER_11_T(rxprobereq), + IDX_IN_WL_CNT_VER_11_T(txprobersp), + IDX_IN_WL_CNT_VER_11_T(rxprobersp), + IDX_IN_WL_CNT_VER_11_T(txaction), + IDX_IN_WL_CNT_VER_11_T(rxaction) +}; + +/* Index conversion table from wl_cnt_ver_11_t to + * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t + */ +static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + IDX_IN_WL_CNT_VER_11_T(txmpdu), + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + IDX_IN_WL_CNT_VER_11_T(rxnodelim), + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + 
IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* For mcst offsets that were not used. (2 Pads) */ +#define INVALID_MCST_IDX ((uint8)(-1)) +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_6_T(txallfrm), + IDX_IN_WL_CNT_VER_6_T(txrtsfrm), + IDX_IN_WL_CNT_VER_6_T(txctsfrm), + IDX_IN_WL_CNT_VER_6_T(txackfrm), + IDX_IN_WL_CNT_VER_6_T(txdnlfrm), + IDX_IN_WL_CNT_VER_6_T(txbcnfrm), + IDX_IN_WL_CNT_VER_6_T(txfunfl), + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2, + 
IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_6_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(txtplunfl), + IDX_IN_WL_CNT_VER_6_T(txphyerror), + IDX_IN_WL_CNT_VER_6_T(pktengrxducast), + IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_6_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_6_T(rxbadfcs), + IDX_IN_WL_CNT_VER_6_T(rxbadplcp), + IDX_IN_WL_CNT_VER_6_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxstrt), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_6_T(rxrtsucast), + IDX_IN_WL_CNT_VER_6_T(rxctsucast), + IDX_IN_WL_CNT_VER_6_T(rxackucast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxrtsocast), + IDX_IN_WL_CNT_VER_6_T(rxctsocast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_6_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_6_T(rxrsptmout), + IDX_IN_WL_CNT_VER_6_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_6_T(txsfovfl), + IDX_IN_WL_CNT_VER_6_T(pmqovfl), + IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_6_T(txcgprsfail), + IDX_IN_WL_CNT_VER_6_T(txcgprssuc), + IDX_IN_WL_CNT_VER_6_T(prs_timeout), + IDX_IN_WL_CNT_VER_6_T(rxnack), + IDX_IN_WL_CNT_VER_6_T(frmscons), + IDX_IN_WL_CNT_VER_6_T(txnack), + IDX_IN_WL_CNT_VER_6_T(rxback), + IDX_IN_WL_CNT_VER_6_T(txback), + IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxdrop20s), + IDX_IN_WL_CNT_VER_6_T(rxtoolate), + IDX_IN_WL_CNT_VER_6_T(bphy_badplcp) +}; + +/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */ +static int +wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) +{ + uint i; + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + /* Init wlccnt with invalid value. Unchanged value will not be printed out */ + for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) { + dst[i] = INVALID_CNT_VAL; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) { + if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver6t_to_wlcntwlct[i]]; + } + } else { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) { + if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver11t_to_wlcntwlct[i]]; + } + } + return BCME_OK; +} + +/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. 
*/ +static int +wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_6_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]]; + } + } + } else { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_11_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]]; + } + } + } + return BCME_OK; +} + +static int +wl_copy_macstat_ver11(uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]]; + } + return BCME_OK; +} + +/** + * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format. + * Parameters: + * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW. + * Newly translated xtlv format is written to this pointer. + * buflen: length of the "cntbuf" without any padding. + * corerev: chip core revision of the driver/FW. + */ +int +wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) +{ + wl_cnt_wlc_t *wlccnt = NULL; + uint32 *macstat = NULL; + xtlv_desc_t xtlv_desc[3]; + uint16 mcst_xtlv_id; + int res = BCME_OK; + wl_cnt_info_t *cntinfo = cntbuf; + void *xtlvbuf_p = cntinfo->data; + uint16 ver = cntinfo->version; + uint16 xtlvbuflen = (uint16)buflen; + uint16 src_max_idx; +#ifdef BCMDRIVER + osl_t *osh = ctx; +#else + BCM_REFERENCE(ctx); +#endif + + if (ver == WL_CNT_T_VERSION) { + /* Already in xtlv format. 
*/ + goto exit; + } + +#ifdef BCMDRIVER + wlccnt = MALLOC(osh, sizeof(*wlccnt)); + macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ); +#else + wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt)); + macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ); +#endif + if (!wlccnt) { + printf("wl_cntbuf_to_xtlv_format malloc fail!\n"); + res = BCME_NOMEM; + goto exit; + } + + /* Check if the max idx in the struct exceeds the boundary of uint8 */ + if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) || + NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n"); + res = BCME_ERROR; + goto exit; + } + + /* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */ + src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32); + + if (src_max_idx > (uint8)(-1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n" + "Try updating wl utility to the latest.\n"); + res = BCME_ERROR; + } + + /* Copy wlc layer counters to wl_cnt_wlc_t */ + res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx); + if (res != BCME_OK) { + printf("wl_copy_wlccnt fail!\n"); + goto exit; + } + + /* Copy macstat counters to wl_cnt_wlc_t */ + if (ver == WL_CNT_VERSION_11) { + res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_ver11 fail!\n"); + goto exit; + } + if (corerev >= 40) { + mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1; + } else { + mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1; + } + } else { + res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_upto_ver10 fail!\n"); + goto exit; + } + mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE; + } + + xtlv_desc[0].type = WL_CNT_XTLV_WLC; + xtlv_desc[0].len = sizeof(*wlccnt); + xtlv_desc[0].ptr = wlccnt; + + xtlv_desc[1].type = mcst_xtlv_id; + xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ; + xtlv_desc[1].ptr = macstat; + + xtlv_desc[2].type = 0; + xtlv_desc[2].len = 0; + xtlv_desc[2].ptr = NULL; + + memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE); + + res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen, + xtlv_desc, BCM_XTLV_OPTION_ALIGN32); + cntinfo->datalen = (buflen - xtlvbuflen); +exit: +#ifdef BCMDRIVER + if (wlccnt) { + MFREE(osh, wlccnt, sizeof(*wlccnt)); + } + if (macstat) { + MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ); + } +#else + if (wlccnt) { + free(wlccnt); + } + if (macstat) { + free(macstat); + } +#endif + return res; +} diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c new file mode 100644 index 000000000000..1746f47fd613 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmevent.c @@ -0,0 +1,230 @@ +/* + * bcmevent read-only data shared by kernel or app layers + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each 
linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmevent.c 530174 2015-01-29 09:47:55Z $ + */ + +#include +#include +#include +#include +#include +#include + + +/* Table of event name strings for UIs and debugging dumps */ +typedef struct { + uint event; + const char *name; +} bcmevent_name_str_t; + +/* Use the actual name for event tracing */ +#define BCMEVENT_NAME(_event) {(_event), #_event} + +static const bcmevent_name_str_t bcmevent_names[] = { + BCMEVENT_NAME(WLC_E_SET_SSID), + BCMEVENT_NAME(WLC_E_JOIN), + BCMEVENT_NAME(WLC_E_START), + BCMEVENT_NAME(WLC_E_AUTH), + BCMEVENT_NAME(WLC_E_AUTH_IND), + BCMEVENT_NAME(WLC_E_DEAUTH), + BCMEVENT_NAME(WLC_E_DEAUTH_IND), + BCMEVENT_NAME(WLC_E_ASSOC), + BCMEVENT_NAME(WLC_E_ASSOC_IND), + BCMEVENT_NAME(WLC_E_REASSOC), + BCMEVENT_NAME(WLC_E_REASSOC_IND), + BCMEVENT_NAME(WLC_E_DISASSOC), + BCMEVENT_NAME(WLC_E_DISASSOC_IND), + BCMEVENT_NAME(WLC_E_QUIET_START), + BCMEVENT_NAME(WLC_E_QUIET_END), + BCMEVENT_NAME(WLC_E_BEACON_RX), + BCMEVENT_NAME(WLC_E_LINK), + BCMEVENT_NAME(WLC_E_MIC_ERROR), + BCMEVENT_NAME(WLC_E_NDIS_LINK), + BCMEVENT_NAME(WLC_E_ROAM), + BCMEVENT_NAME(WLC_E_TXFAIL), + BCMEVENT_NAME(WLC_E_PMKID_CACHE), + BCMEVENT_NAME(WLC_E_RETROGRADE_TSF), + BCMEVENT_NAME(WLC_E_PRUNE), + BCMEVENT_NAME(WLC_E_AUTOAUTH), + BCMEVENT_NAME(WLC_E_EAPOL_MSG), + BCMEVENT_NAME(WLC_E_SCAN_COMPLETE), + BCMEVENT_NAME(WLC_E_ADDTS_IND), + BCMEVENT_NAME(WLC_E_DELTS_IND), + BCMEVENT_NAME(WLC_E_BCNSENT_IND), + BCMEVENT_NAME(WLC_E_BCNRX_MSG), + BCMEVENT_NAME(WLC_E_BCNLOST_MSG), + BCMEVENT_NAME(WLC_E_ROAM_PREP), + BCMEVENT_NAME(WLC_E_PFN_NET_FOUND), + BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), + BCMEVENT_NAME(WLC_E_PFN_NET_LOST), +#if defined(IBSS_PEER_DISCOVERY_EVENT) + BCMEVENT_NAME(WLC_E_IBSS_ASSOC), +#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ + BCMEVENT_NAME(WLC_E_RADIO), + BCMEVENT_NAME(WLC_E_PSM_WATCHDOG), + BCMEVENT_NAME(WLC_E_PROBREQ_MSG), + BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND), + BCMEVENT_NAME(WLC_E_PSK_SUP), + BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED), + BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME), + BCMEVENT_NAME(WLC_E_ICV_ERROR), + BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR), + BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR), + BCMEVENT_NAME(WLC_E_TRACE), + BCMEVENT_NAME(WLC_E_IF), +#ifdef WLP2P + BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE), +#endif + BCMEVENT_NAME(WLC_E_RSSI), + BCMEVENT_NAME(WLC_E_EXTLOG_MSG), + BCMEVENT_NAME(WLC_E_ACTION_FRAME), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE), + BCMEVENT_NAME(WLC_E_ESCAN_RESULT), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE), +#ifdef WLP2P + BCMEVENT_NAME(WLC_E_PROBRESP_MSG), + BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG), +#endif +#ifdef PROP_TXSTATUS + BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP), +#endif + BCMEVENT_NAME(WLC_E_WAKE_EVENT), + BCMEVENT_NAME(WLC_E_DCS_REQUEST), + BCMEVENT_NAME(WLC_E_RM_COMPLETE), +#ifdef WLMEDIA_HTSF + BCMEVENT_NAME(WLC_E_HTSFSYNC), +#endif + BCMEVENT_NAME(WLC_E_OVERLAY_REQ), + BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND), + BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT), + BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE), + 
BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), +#ifdef SOFTAP + BCMEVENT_NAME(WLC_E_GTK_PLUMBED), +#endif + BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE), + BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE), + BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX), +#ifdef WLTDLS + BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT), +#endif /* WLTDLS */ + BCMEVENT_NAME(WLC_E_NATIVE), +#ifdef WLPKTDLYSTAT + BCMEVENT_NAME(WLC_E_PKTDELAY_IND), +#endif /* WLPKTDLYSTAT */ + BCMEVENT_NAME(WLC_E_SERVICE_FOUND), + BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX), + BCMEVENT_NAME(WLC_E_GAS_COMPLETE), + BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE), + BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE), +#ifdef WLWNM + BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP), +#endif /* WLWNM */ +#if defined(WL_PROXDETECT) + BCMEVENT_NAME(WLC_E_PROXD), +#endif + BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL), + BCMEVENT_NAME(WLC_E_BSSID), +#ifdef PROP_TXSTATUS + BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT), +#endif + BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND), + BCMEVENT_NAME(WLC_E_TXFAIL_THRESH), +#ifdef GSCAN_SUPPORT + BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT), + BCMEVENT_NAME(WLC_E_PFN_SWC), +#endif /* GSCAN_SUPPORT */ +#ifdef WLBSSLOAD_REPORT + BCMEVENT_NAME(WLC_E_BSS_LOAD), +#endif +#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW) + BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ), +#endif + BCMEVENT_NAME(WLC_E_AUTHORIZED), + BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX), + BCMEVENT_NAME(WLC_E_CSA_START_IND), + BCMEVENT_NAME(WLC_E_CSA_DONE_IND), + BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND), + BCMEVENT_NAME(WLC_E_RMC_EVENT), + BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND), +}; + + +const char *bcmevent_get_name(uint event_type) +{ + /* note: first coded this as a static const but some + * ROMs already have something called event_name so + * changed it so we don't have a variable for the + * 'unknown string + */ + const char *event_name = NULL; + + uint idx; + for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) { + + if (bcmevent_names[idx].event == event_type) { + event_name = bcmevent_names[idx].name; + break; + } + } + + /* if we find an event name in the array, return it. + * otherwise return unknown string. + */ + return ((event_name) ? event_name : "Unknown Event"); +} + +void +wl_event_to_host_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. + */ + evt->event_type = ntoh32(evt->event_type); + evt->flags = ntoh16(evt->flags); + evt->status = ntoh32(evt->status); + evt->reason = ntoh32(evt->reason); + evt->auth_type = ntoh32(evt->auth_type); + evt->datalen = ntoh32(evt->datalen); + evt->version = ntoh16(evt->version); +} + +void +wl_event_to_network_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. 
+ */ + evt->event_type = hton32(evt->event_type); + evt->flags = hton16(evt->flags); + evt->status = hton32(evt->status); + evt->reason = hton32(evt->reason); + evt->auth_type = hton32(evt->auth_type); + evt->datalen = hton32(evt->datalen); + evt->version = hton16(evt->version); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c new file mode 100644 index 000000000000..42b29bd8574e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh.c @@ -0,0 +1,708 @@ +/* + * BCMSDH interface glue + * implement bcmsdh API for SDIOH driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh.c 514727 2014-11-12 03:02:48Z $ + */ + +/** + * @file bcmsdh.c + */ + +/* ****************** BCMSDH Interface Functions *************************** */ + +#include +#include +#include +#include +#include +#include +#include + +#include /* BRCM API for SDIO clients (such as wl, dhd) */ +#include /* common SDIO/controller interface */ +#include /* SDIO device core hardware definitions. */ +#include /* SDIO Device and Protocol Specs */ + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 +const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; + +/* local copy of bcm sd handler */ +bcmsdh_info_t * l_bcmsdh = NULL; + + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern int +sdioh_enable_hw_oob_intr(void *sdioh, bool enable); + +void +bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) +{ + sdioh_enable_hw_oob_intr(sdh->sdioh, enable); +} +#endif + +/* Attach BCMSDH layer to SDIO Host Controller Driver + * + * @param osh OSL Handle. + * @param cfghdl Configuration Handle. + * @param regsva Virtual address of controller registers. + * @param irq Interrupt number of SDIO controller. + * + * @return bcmsdh_info_t Handle to BCMSDH context. 
+ */ +bcmsdh_info_t * +bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva) +{ + bcmsdh_info_t *bcmsdh; + + if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) { + BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)bcmsdh, sizeof(bcmsdh_info_t)); + bcmsdh->sdioh = sdioh; + bcmsdh->osh = osh; + bcmsdh->init_success = TRUE; + *regsva = SI_ENUM_BASE; + + /* Report the BAR, to fix if needed */ + bcmsdh->sbwad = SI_ENUM_BASE; + + /* save the handler locally */ + l_bcmsdh = bcmsdh; + + return bcmsdh; +} + +int +bcmsdh_detach(osl_t *osh, void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bcmsdh != NULL) { + MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); + } + + l_bcmsdh = NULL; + + return 0; +} + +int +bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set); +} + +bool +bcmsdh_intr_query(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + bool on; + + ASSERT(bcmsdh); + status = sdioh_interrupt_query(bcmsdh->sdioh, &on); + if (SDIOH_API_SUCCESS(status)) + return FALSE; + else + return on; +} + +int +bcmsdh_intr_enable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_disable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_dereg(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_deregister(bcmsdh->sdioh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +#if defined(DHD_DEBUG) +bool +bcmsdh_intr_pending(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + ASSERT(sdh); + return sdioh_interrupt_pending(bcmsdh->sdioh); +} +#endif + + +int +bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + ASSERT(sdh); + + /* don't support yet */ + return BCME_UNSUPPORTED; +} + +/** + * Read from SDIO Configuration Space + * @param sdh SDIO Host context. + * @param func_num Function number to read from. + * @param addr Address to read from. + * @param err Error return. + * @return value read from SDIO configuration space. 
+ */ +uint8 +bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + uint8 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR; + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); +} + +uint32 +bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, + addr, data)); +} + + +int +bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + uint8 *tmp_buf, *tmp_ptr; + uint8 *ptr; + bool ascii = func & ~0xf; + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cis); + ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT); + + status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length); + + if (ascii) { + /* Move binary bits to tmp and format them into the provided buffer. 
*/ + if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { + BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); + return BCME_NOMEM; + } + bcopy(cis, tmp_buf, length); + for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { + ptr += snprintf((char*)ptr, (cis + length - ptr - 4), + "%.2x ", *tmp_ptr & 0xff); + if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) + ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n"); + } + MFREE(bcmsdh->osh, tmp_buf, length); + } + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + + +int +bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) +{ + int err = 0; + uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK; + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bar0 != bcmsdh->sbwad || force_set) { + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + + if (!err) + bcmsdh->sbwad = bar0; + else + /* invalidate cached window var */ + bcmsdh->sbwad = 0; + + } + + return err; +} + +uint32 +bcmsdh_reg_read(void *sdh, uint32 addr, uint size) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 word = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) + return 0xFFFFFFFF; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, + SDIOH_READ, SDIO_FUNC_1, addr, &word, size); + + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + BCMSDH_INFO(("uint32data = 0x%x\n", word)); + + /* if ok, return appropriately masked word */ + if (SDIOH_API_SUCCESS(status)) { + switch (size) { + case sizeof(uint8): + return (word & 0xff); + case sizeof(uint16): + return (word & 0xffff); + case sizeof(uint32): + return word; + default: + bcmsdh->regfail = TRUE; + + } + } + + /* otherwise, bad sdio access or invalid size */ + BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size)); + return 0xFFFFFFFF; +} + +uint32 +bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + int err = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + __FUNCTION__, addr, size*8, data)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + if (SDIOH_API_SUCCESS(status)) + return 0; + + BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", + __FUNCTION__, data, addr, size)); + return 0xFFFFFFFF; +} + +bool +bcmsdh_regfail(void *sdh) +{ + return ((bcmsdh_info_t *)sdh)->regfail; +} + +int +bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = 
(bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_READ, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); +} + +int +bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, + (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); + + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +int +bcmsdh_abort(void *sdh, uint fn) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_abort(bcmsdh->sdioh, fn); +} + +int +bcmsdh_start(void *sdh, int stage) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_start(bcmsdh->sdioh, stage); +} + +int +bcmsdh_stop(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_stop(bcmsdh->sdioh); +} + +int +bcmsdh_waitlockfree(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_waitlockfree(bcmsdh->sdioh); +} + + +int +bcmsdh_query_device(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; + return (bcmsdh->vendevid); +} + +uint +bcmsdh_query_iofnum(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (sdioh_query_iofnum(bcmsdh->sdioh)); +} + +int +bcmsdh_reset(bcmsdh_info_t *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_sdio_reset(bcmsdh->sdioh); +} + +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) +{ + ASSERT(sdh); + return sdh->sdioh; +} + +/* Function to pass device-status bits to DHD. */ +uint32 +bcmsdh_get_dstatus(void *sdh) +{ + return 0; +} +uint32 +bcmsdh_cur_sbwad(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (bcmsdh->sbwad); +} + +void +bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) +{ + return; +} + + +int +bcmsdh_sleep(void *sdh, bool enab) +{ +#ifdef SDIOH_SLEEP_ENABLED + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_sleep(sd, enab); +#else + return BCME_UNSUPPORTED; +#endif +} + +int +bcmsdh_gpio_init(void *sdh) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpio_init(sd); +} + +bool +bcmsdh_gpioin(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioin(sd, gpio); +} + +int +bcmsdh_gpioouten(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioouten(sd, gpio); +} + +int +bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioout(sd, gpio, enab); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c new file mode 100644 index 000000000000..bf882610f263 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c @@ -0,0 +1,468 @@ +/* + * SDIO access interface for drivers - linux specific (pci only) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_linux.c 514727 2014-11-12 03:02:48Z $ + */ + +/** + * @file bcmsdh_linux.c + */ + +#define __UNDEF_NO_VERSION__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +extern void dhdsdio_isr(void * args); +#include +#include +#include +#if defined(CONFIG_ARCH_ODIN) +#include +#endif /* defined(CONFIG_ARCH_ODIN) */ +#include + +/* driver info, initialized when bcmsdh_register is called */ +static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL}; + +typedef enum { + DHD_INTR_INVALID = 0, + DHD_INTR_INBAND, + DHD_INTR_HWOOB, + DHD_INTR_SWOOB +} DHD_HOST_INTR_TYPE; + +/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g. + * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather + * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this + * structure. + */ +typedef struct bcmsdh_os_info { + DHD_HOST_INTR_TYPE intr_type; + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + bcmsdh_cb_fn_t oob_irq_handler; + void *oob_irq_handler_context; + void *context; /* context returned from upper layer */ + void *sdioh; /* handle to lower layer (sdioh) */ + void *dev; /* handle to the underlying device */ + bool dev_wake_enabled; +} bcmsdh_os_info_t; + +/* debugging macros */ +#define SDLX_MSG(x) + +/** + * Checks to see if vendor and device IDs match a supported SDIO Host Controller. + */ +bool +bcmsdh_chipmatch(uint16 vendor, uint16 device) +{ + /* Add other vendors and devices as required */ + +#ifdef BCMSDIOH_STD + /* Check for Arasan host controller */ + if (vendor == VENDOR_SI_IMAGE) { + return (TRUE); + } + /* Check for BRCM 27XX Standard host controller */ + if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for BRCM Standard host controller */ + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for TI PCIxx21 Standard host controller */ + if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) { + return (TRUE); + } + if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) { + return (TRUE); + } + /* Ricoh R5C822 Standard SDIO Host */ + if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) { + return (TRUE); + } + /* JMicron Standard SDIO Host */ + if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) { + return (TRUE); + } + +#endif /* BCMSDIOH_STD */ +#ifdef BCMSDIOH_SPI + /* This is the PciSpiHost. 
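+	 * When the driver is built with BCMSDIOH_SPI, the Broadcom PCI-attached
+	 * SPI host FPGA (SPIH_FPGA_ID) is accepted here in addition to the
+	 * standard SDIO host controllers matched above.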
*/ + if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) { + printf("Found PCI SPI Host Controller\n"); + return (TRUE); + } + +#endif /* BCMSDIOH_SPI */ + + return (FALSE); +} + +void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num) +{ + ulong regs; + bcmsdh_info_t *bcmsdh; + uint32 vendevid; + bcmsdh_os_info_t *bcmsdh_osinfo = NULL; + + bcmsdh = bcmsdh_attach(osh, sdioh, ®s); + if (bcmsdh == NULL) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } + bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t)); + if (bcmsdh_osinfo == NULL) { + SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__)); + goto err; + } + bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t)); + bcmsdh->os_cxt = bcmsdh_osinfo; + bcmsdh_osinfo->sdioh = sdioh; + bcmsdh_osinfo->dev = dev; + osl_set_bus_handle(osh, bcmsdh); + +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dev && device_init_wakeup(dev, true) == 0) + bcmsdh_osinfo->dev_wake_enabled = TRUE; +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ + +#if defined(OOB_INTR_ONLY) + spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock); + /* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */ + bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info, + &bcmsdh_osinfo->oob_irq_flags); + if (bcmsdh_osinfo->oob_irq_num < 0) { + SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__)); + goto err; + } +#endif /* defined(BCMLXSDMMC) */ + + /* Read the vendor/device ID from the CIS */ + vendevid = bcmsdh_query_device(bcmsdh); + /* try to attach to the target device */ + bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num, + slot_num, 0, bus_type, (void *)regs, osh, bcmsdh); + if (bcmsdh_osinfo->context == NULL) { + SDLX_MSG(("%s: device attach failed\n", __FUNCTION__)); + goto err; + } + + return bcmsdh; + + /* error handling */ +err: + if (bcmsdh != NULL) + bcmsdh_detach(osh, bcmsdh); + if (bcmsdh_osinfo != NULL) + MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t)); + return NULL; +} + +int bcmsdh_remove(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (bcmsdh_osinfo->dev) + device_init_wakeup(bcmsdh_osinfo->dev, false); + bcmsdh_osinfo->dev_wake_enabled = FALSE; +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ + + drvinfo.remove(bcmsdh_osinfo->context); + MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t)); + bcmsdh_detach(bcmsdh->osh, bcmsdh); + + return 0; +} + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context)) + return -EBUSY; + return 0; +} + +int bcmsdh_resume(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (drvinfo.resume) + return drvinfo.resume(bcmsdh_osinfo->context); + return 0; +} + +extern int bcmsdh_register_client_driver(void); +extern void bcmsdh_unregister_client_driver(void); +extern int sdio_func_reg_notify(void* semaphore); +extern void sdio_func_unreg_notify(void); + +#if defined(BCMLXSDMMC) +int bcmsdh_reg_sdio_notify(void* semaphore) +{ + return sdio_func_reg_notify(semaphore); +} + +void bcmsdh_unreg_sdio_notify(void) +{ + sdio_func_unreg_notify(); +} +#endif /* 
defined(BCMLXSDMMC) */ + +int +bcmsdh_register(bcmsdh_driver_t *driver) +{ + int error = 0; + + drvinfo = *driver; + SDLX_MSG(("%s: register client driver\n", __FUNCTION__)); + error = bcmsdh_register_client_driver(); + if (error) + SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error)); + + return error; +} + +void +bcmsdh_unregister(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (bcmsdh_pci_driver.node.next == NULL) + return; +#endif + + bcmsdh_unregister_client_driver(); +} + +void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_stay_awake(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_relax(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + return bcmsdh_osinfo->dev_wake_enabled; +} + +#if defined(OOB_INTR_ONLY) +void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable) +{ + unsigned long flags; + bcmsdh_os_info_t *bcmsdh_osinfo; + + if (!bcmsdh) + return; + + bcmsdh_osinfo = bcmsdh->os_cxt; + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + if (bcmsdh_osinfo->oob_irq_enabled != enable) { + if (enable) + enable_irq(bcmsdh_osinfo->oob_irq_num); + else + disable_irq_nosync(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = enable; + } + spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *dev_id) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + bcmsdh_oob_intr_set(bcmsdh, FALSE); + bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context); + + return IRQ_HANDLED; +} + +int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + if (bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } + SDLX_MSG(("%s OOB irq=%d flags=%X \n", __FUNCTION__, + (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags)); + bcmsdh_osinfo->oob_irq_handler = oob_irq_handler; + bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context; +#if defined(CONFIG_ARCH_ODIN) + err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); +#else + err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); +#endif /* defined(CONFIG_ARCH_ODIN) */ + if (err) { + SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err)); + return err; + } + + err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; + bcmsdh_osinfo->oob_irq_enabled = TRUE; + bcmsdh_osinfo->oob_irq_registered = TRUE; + return err; +} + +void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + 
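+	/*
+	 * Teardown mirrors bcmsdh_oob_intr_register(): the wake capability is
+	 * dropped first, then the line is masked, and only then is free_irq()
+	 * called, so a late OOB interrupt cannot run the handler against a
+	 * half-unregistered state.
+	 */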
SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + if (!bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (bcmsdh_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; + } + if (bcmsdh_osinfo->oob_irq_enabled) { + disable_irq(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = FALSE; + } + free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh); + bcmsdh_osinfo->oob_irq_registered = FALSE; +} +#endif + +/* Module parameters specific to each host-controller driver */ + +extern uint sd_msglevel; /* Debug message level */ +module_param(sd_msglevel, uint, 0); + +extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ +module_param(sd_power, uint, 0); + +extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ +module_param(sd_clock, uint, 0); + +extern uint sd_divisor; /* Divisor (-1 means external clock) */ +module_param(sd_divisor, uint, 0); + +extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ +module_param(sd_sdmode, uint, 0); + +extern uint sd_hiok; /* Ok to use hi-speed mode */ +module_param(sd_hiok, uint, 0); + +extern uint sd_f2_blocksize; +module_param(sd_f2_blocksize, int, 0); + +#ifdef BCMSDIOH_STD +extern int sd_uhsimode; +module_param(sd_uhsimode, int, 0); +extern uint sd_tuning_period; +module_param(sd_tuning_period, uint, 0); +extern int sd_delay_value; +module_param(sd_delay_value, uint, 0); + +/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */ +extern char dhd_sdiod_uhsi_ds_override[2]; +module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0); + +#endif + +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_attach); +EXPORT_SYMBOL(bcmsdh_detach); +EXPORT_SYMBOL(bcmsdh_intr_query); +EXPORT_SYMBOL(bcmsdh_intr_enable); +EXPORT_SYMBOL(bcmsdh_intr_disable); +EXPORT_SYMBOL(bcmsdh_intr_reg); +EXPORT_SYMBOL(bcmsdh_intr_dereg); + +#if defined(DHD_DEBUG) +EXPORT_SYMBOL(bcmsdh_intr_pending); +#endif + +EXPORT_SYMBOL(bcmsdh_devremove_reg); +EXPORT_SYMBOL(bcmsdh_cfg_read); +EXPORT_SYMBOL(bcmsdh_cfg_write); +EXPORT_SYMBOL(bcmsdh_cis_read); +EXPORT_SYMBOL(bcmsdh_reg_read); +EXPORT_SYMBOL(bcmsdh_reg_write); +EXPORT_SYMBOL(bcmsdh_regfail); +EXPORT_SYMBOL(bcmsdh_send_buf); +EXPORT_SYMBOL(bcmsdh_recv_buf); + +EXPORT_SYMBOL(bcmsdh_rwdata); +EXPORT_SYMBOL(bcmsdh_abort); +EXPORT_SYMBOL(bcmsdh_query_device); +EXPORT_SYMBOL(bcmsdh_query_iofnum); +EXPORT_SYMBOL(bcmsdh_iovar_op); +EXPORT_SYMBOL(bcmsdh_register); +EXPORT_SYMBOL(bcmsdh_unregister); +EXPORT_SYMBOL(bcmsdh_chipmatch); +EXPORT_SYMBOL(bcmsdh_reset); +EXPORT_SYMBOL(bcmsdh_waitlockfree); + +EXPORT_SYMBOL(bcmsdh_get_dstatus); +EXPORT_SYMBOL(bcmsdh_cfg_read_word); +EXPORT_SYMBOL(bcmsdh_cfg_write_word); +EXPORT_SYMBOL(bcmsdh_cur_sbwad); +EXPORT_SYMBOL(bcmsdh_chipinfo); + +#endif /* BCMSDH_MODULE */ diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c new file mode 100644 index 000000000000..e1c0fb721176 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c @@ -0,0 +1,1475 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_sdmmc.c 591104 2015-10-07 04:45:18Z $ + */ +#include + +#include +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* Standard SDIO Host Controller Specification */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ + +#include +#include +#include +#include +#include + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +extern volatile bool dhd_mmc_suspend; +#endif +#include "bcmsdh_sdmmc.h" + +#ifndef BCMSDH_MODULE +extern int sdio_function_init(void); +extern void sdio_function_cleanup(void); +#endif /* BCMSDH_MODULE */ + +#if !defined(OOB_INTR_ONLY) +static void IRQHandler(struct sdio_func *func); +static void IRQHandlerF2(struct sdio_func *func); +#endif /* !defined(OOB_INTR_ONLY) */ +static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); +int __attribute__((weak)) sdio_reset_comm(struct mmc_card *card) +{ + return 0; +} + +#define DEFAULT_SDIO_F2_BLKSIZE 512 +#ifndef CUSTOM_SDIO_F2_BLKSIZE +#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE +#endif + +#define MAX_IO_RW_EXTENDED_BLK 511 + +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ +uint sd_msglevel = 0x01; +uint sd_use_dma = TRUE; + +#ifndef CUSTOM_RXCHAIN +#define CUSTOM_RXCHAIN 0 +#endif + +DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); + +#define DMA_ALIGN_MASK 0x03 +#define MMC_SDIO_ABORT_RETRY_LIMIT 5 + +int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); + +static int +sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) +{ + int err_ret; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 
0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Enable Function 1 */ + sdio_claim_host(sd->func[1]); + err_ret = sdio_enable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret)); + } + + return FALSE; +} + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, struct sdio_func *func) +{ + sdioh_info_t *sd = NULL; + int err_ret; + + sd_trace(("%s\n", __FUNCTION__)); + + if (func == NULL) { + sd_err(("%s: sdio function device is NULL\n", __FUNCTION__)); + return NULL; + } + + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + sd->fake_func0.num = 0; + sd->fake_func0.card = func->card; + sd->func[0] = &sd->fake_func0; + sd->func[1] = func->card->sdio_func[0]; + sd->func[2] = func->card->sdio_func[1]; + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + sd->use_rxchain = CUSTOM_RXCHAIN; + if (sd->func[1] == NULL || sd->func[2] == NULL) { + sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__)); + goto fail; + } + sdio_set_drvdata(sd->func[1], sd); + + sdio_claim_host(sd->func[1]); + sd->client_block_size[1] = 64; + err_ret = sdio_set_block_size(sd->func[1], 64); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret)); + goto fail; + } + + sdio_claim_host(sd->func[2]); + sd->client_block_size[2] = sd_f2_blocksize; + err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize); + sdio_release_host(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n", + sd_f2_blocksize, err_ret)); + goto fail; + } + + sdioh_sdmmc_card_enablefuncs(sd); + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; + +fail: + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; +} + + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if (sd) { + + /* Disable Function 2 */ + if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + sdio_disable_func(sd->func[2]); + sdio_release_host(sd->func[2]); + } + + /* Disable Function 1 */ + if (sd->func[1]) { + sdio_claim_host(sd->func[1]); + sdio_disable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + } + + sd->func[1] = NULL; + sd->func[2] = NULL; + + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +extern SDIOH_API_RC +sdioh_enable_func_intr(sdioh_info_t *sd) +{ + uint8 reg; + int err; + + if (sd->func[0] == NULL) { + sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sdio_claim_host(sd->func[0]); + reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(sd->func[0]); + return SDIOH_API_RC_FAIL; + } + /* Enable F1 and F2 interrupts, clear master enable */ + reg &= ~INTR_CTL_MASTER_EN; + reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); + sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(sd->func[0]); + + if (err) { + sd_err(("%s: error for write 
SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_disable_func_intr(sdioh_info_t *sd) +{ + uint8 reg; + int err; + + if (sd->func[0] == NULL) { + sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sdio_claim_host(sd->func[0]); + reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(sd->func[0]); + return SDIOH_API_RC_FAIL; + } + reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); + /* Disable master interrupt with the last function interrupt */ + if (!(reg & 0xFE)) + reg = 0; + sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(sd->func[0]); + + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + + return SDIOH_API_RC_SUCCESS; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + if (fn == NULL) { + sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + + /* register and unmask irq */ + if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + sdio_claim_irq(sd->func[2], IRQHandlerF2); + sdio_release_host(sd->func[2]); + } + + if (sd->func[1]) { + sdio_claim_host(sd->func[1]); + sdio_claim_irq(sd->func[1], IRQHandler); + sdio_release_host(sd->func[1]); + } +#elif defined(HW_OOB) + sdioh_enable_func_intr(sd); +#endif /* !defined(OOB_INTR_ONLY) */ + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + +#if !defined(OOB_INTR_ONLY) + if (sd->func[1]) { + /* register and unmask irq */ + sdio_claim_host(sd->func[1]); + sdio_release_irq(sd->func[1]); + sdio_release_host(sd->func[1]); + } + + if (sd->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(sd->func[2]); + sdio_release_irq(sd->func[2]); + /* Release host controller F2 */ + sdio_release_host(sd->func[2]); + } + + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#elif defined(HW_OOB) + sdioh_disable_func_intr(sd); +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return (0); +} +#endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_RXCHAIN +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", 
IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + BCM_REFERENCE(bool_val); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + si->client_block_size[func] = blksize; + +#ifdef USE_DYNAMIC_F2_BLKSIZE + if (si->func[func] == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + bcmerror = BCME_NORESOURCE; + break; + } + sdio_claim_host(si->func[func]); + bcmerror = sdio_set_block_size(si->func[func], blksize); + if (bcmerror) + sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n", + __FUNCTION__, func, blksize, bcmerror)); + sdio_release_host(si->func[func]); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + break; + } + + case IOV_GVAL(IOV_RXCHAIN): + int_val = (int32)si->use_rxchain; + bcopy(&int_val, 
arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ + else if (sd_ptr->offset & 2) + int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ + else + int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
16 : 32), + sd_ptr->offset)); + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = 0; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +SDIOH_API_RC +sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable) +{ + SDIOH_API_RC status; + uint8 data; + + if (enable) + data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI; + else + data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */ + + status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data); + return status; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +static int +sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) +{ + /* read 24 bits and return valid 17 bit addr */ + int i; + uint32 scratch, regdata; + uint8 *ptr = (uint8 *)&scratch; + for (i = 0; i < 3; i++) { + if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS) + sd_err(("%s: Can't read!\n", __FUNCTION__)); + + *ptr++ = (uint8) regdata; + regaddr++; + } + + /* Only the lower 17-bits are valid */ + scratch = ltoh32(scratch); + scratch &= 0x0001FFFF; + return (scratch); +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func)); + return SDIOH_API_RC_FAIL; + } + + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + *cis = (uint8)(foo & 0xff); + cis++; + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int err_ret = 0; +#if defined(MMC_SDIO_ABORT) + int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT; +#endif + + sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); + + DHD_PM_RESUME_WAIT(sdioh_request_byte_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + if(rw) { /* CMD52 Write */ + if (func == 0) { + /* Can only directly write to some F0 registers. Handle F2 enable + * as a special case. 
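+	 * A write of SDIO_FUNC_ENABLE_2 to SDIOD_CCCR_IOEN is translated into
+	 * sdio_enable_func()/sdio_disable_func() so the MMC core's view of F2
+	 * stays in sync, SDIOD_CCCR_IOABORT is passed through (when
+	 * MMC_SDIO_ABORT is defined) to support aborts, and any other address
+	 * below 0xF0 is rejected. For example, the bus layer can bring up F2
+	 * with a plain byte write (illustrative only):
+	 *
+	 *   uint8 en = SDIO_FUNC_ENABLE_2;
+	 *   sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_IOEN, &en);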
+ */ + if (regaddr == SDIOD_CCCR_IOEN) { + if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + if (*byte & SDIO_FUNC_ENABLE_2) { + /* Enable Function 2 */ + err_ret = sdio_enable_func(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: enable F2 failed:%d", + err_ret)); + } + } else { + /* Disable Function 2 */ + err_ret = sdio_disable_func(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d", + err_ret)); + } + } + sdio_release_host(sd->func[2]); + } + } +#if defined(MMC_SDIO_ABORT) + /* to allow abort command through F1 */ + else if (regaddr == SDIOD_CCCR_IOABORT) { + while (sdio_abort_retry--) { + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + /* + * this sdio_f0_writeb() can be replaced with + * another api depending upon MMC driver change. + * As of this time, this is temporaray one + */ + sdio_writeb(sd->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + if (!err_ret) + break; + } + } +#endif /* MMC_SDIO_ABORT */ + else if (regaddr < 0xF0) { + sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); + } else { + /* Claim host controller, perform F0 write, and release */ + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + sdio_f0_writeb(sd->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + } + } else { + /* Claim host controller, perform Fn write, and release */ + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + sdio_writeb(sd->func[func], *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + } + } else { /* CMD52 Read */ + /* Claim host controller, perform Fn read, and release */ + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + if (func == 0) { + *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret); + } else { + *byte = sdio_readb(sd->func[func], regaddr, &err_ret); + } + sdio_release_host(sd->func[func]); + } + } + + if (err_ret) { + if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) { + } else { + sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n", + rw ? "Write" : "Read", func, regaddr, *byte, err_ret)); + } + } + + return ((err_ret == 0) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int err_ret = SDIOH_API_RC_FAIL; +#if defined(MMC_SDIO_ABORT) + int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT; +#endif + + if (func == 0) { + sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", + __FUNCTION__, cmd_type, rw, func, addr, nbytes)); + + DHD_PM_RESUME_WAIT(sdioh_request_word_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + /* Claim host controller */ + sdio_claim_host(sd->func[func]); + + if(rw) { /* CMD52 Write */ + if (nbytes == 4) { + sdio_writel(sd->func[func], *word, addr, &err_ret); + } else if (nbytes == 2) { + sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret); + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } else { /* CMD52 Read */ + if (nbytes == 4) { + *word = sdio_readl(sd->func[func], addr, &err_ret); + } else if (nbytes == 2) { + *word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF; + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } + + /* Release host controller */ + sdio_release_host(sd->func[func]); + + if (err_ret) { +#if defined(MMC_SDIO_ABORT) + /* Any error on CMD53 transaction should abort that function using function 0. */ + while (sdio_abort_retry--) { + if (sd->func[0]) { + sdio_claim_host(sd->func[0]); + /* + * this sdio_f0_writeb() can be replaced with another api + * depending upon MMC driver change. + * As of this time, this is temporaray one + */ + sdio_writeb(sd->func[0], + func, SDIOD_CCCR_IOABORT, &err_ret); + sdio_release_host(sd->func[0]); + } + if (!err_ret) + break; + } + if (err_ret) +#endif /* MMC_SDIO_ABORT */ + { + sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x", + rw ? "Write" : "Read", err_ret)); + } + } + + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +static SDIOH_API_RC +sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, void *pkt) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + int err_ret = 0; + void *pnext; + uint ttl_len, pkt_offset; + uint blk_num; + uint blk_size; + uint max_blk_count; + uint max_req_size; + struct mmc_request mmc_req; + struct mmc_command mmc_cmd; + struct mmc_data mmc_dat; + uint32 sg_count; + struct sdio_func *sdio_func = sd->func[func]; + struct mmc_host *host = sdio_func->card->host; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + ASSERT(pkt); + DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + blk_size = sd->client_block_size[func]; + max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK); + max_req_size = min(max_blk_count * blk_size, host->max_req_size); + + pkt_offset = 0; + pnext = pkt; + + while (pnext != NULL) { + ttl_len = 0; + sg_count = 0; + memset(&mmc_req, 0, sizeof(struct mmc_request)); + memset(&mmc_cmd, 0, sizeof(struct mmc_command)); + memset(&mmc_dat, 0, sizeof(struct mmc_data)); + sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list)); + + /* Set up scatter-gather DMA descriptors. this loop is to find out the max + * data we can transfer with one command 53. blocks per command is limited by + * host max_req_size and 9-bit max block number. 
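+		 * (MAX_IO_RW_EXTENDED_BLK caps a single CMD53 at 511 blocks, the
+		 * largest count that fits the 9-bit block-count field of the
+		 * command argument.)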
when the total length of this + * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED + * commands (each transfer is still block aligned) + */ + while (pnext != NULL && ttl_len < max_req_size) { + int pkt_len; + int sg_data_size; + uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext); + + ASSERT(pdata != NULL); + pkt_len = PKTLEN(sd->osh, pnext); + sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len)); + /* sg_count is unlikely larger than the array size, and this is + * NOT something we can handle here, but in case it happens, PLEASE put + * a restriction on max tx/glom count (based on host->max_segs). + */ + if (sg_count >= ARRAYSIZE(sd->sg_list)) { + sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__)); + return (SDIOH_API_RC_FAIL); + } + pdata += pkt_offset; + + sg_data_size = pkt_len - pkt_offset; + if (sg_data_size > max_req_size - ttl_len) + sg_data_size = max_req_size - ttl_len; + /* some platforms put a restriction on the data size of each scatter-gather + * DMA descriptor, use multiple sg buffers when xfer_size is bigger than + * max_seg_size + */ + if (sg_data_size > host->max_seg_size) + sg_data_size = host->max_seg_size; + sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size); + + ttl_len += sg_data_size; + pkt_offset += sg_data_size; + if (pkt_offset == pkt_len) { + pnext = PKTNEXT(sd->osh, pnext); + pkt_offset = 0; + } + } + + if (ttl_len % blk_size != 0) { + sd_err(("%s, data length %d not aligned to block size %d\n", + __FUNCTION__, ttl_len, blk_size)); + return SDIOH_API_RC_FAIL; + } + blk_num = ttl_len / blk_size; + mmc_dat.sg = sd->sg_list; + mmc_dat.sg_len = sg_count; + mmc_dat.blksz = blk_size; + mmc_dat.blocks = blk_num; + mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; + mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */ + mmc_cmd.arg = write ? 1<<31 : 0; + mmc_cmd.arg |= (func & 0x7) << 28; + mmc_cmd.arg |= 1<<27; + mmc_cmd.arg |= fifo ? 0 : 1<<26; + mmc_cmd.arg |= (addr & 0x1FFFF) << 9; + mmc_cmd.arg |= blk_num & 0x1FF; + mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + mmc_req.cmd = &mmc_cmd; + mmc_req.data = &mmc_dat; + if (!fifo) + addr += ttl_len; + + sdio_claim_host(sdio_func); + mmc_set_data_timeout(&mmc_dat, sdio_func->card); + mmc_wait_for_req(host, &mmc_req); + sdio_release_host(sdio_func); + + err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error; + if (0 != err_ret) { + sd_err(("%s:CMD53 %s failed with code %d\n", + __FUNCTION__, write ? "write" : "read", err_ret)); + return SDIOH_API_RC_FAIL; + } + } + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +static SDIOH_API_RC +sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, uint8 *buf, uint len) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + int err_ret = 0; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + ASSERT(buf); + + /* NOTE: + * For all writes, each packet length is aligned to 32 (or 4) + * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length + * is aligned to block boundary. If you want to align each packet to + * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here + * + * For reads, the alignment is doen in sdioh_request_buffer. 
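+	 * The transfer itself is delegated to the kernel SDIO helpers below:
+	 * incrementing-address reads use sdio_memcpy_fromio(), fixed-address
+	 * (FIFO) reads use sdio_readsb(), and both write branches currently
+	 * fall through to sdio_memcpy_toio() regardless of the FIFO flag.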
+ * + */ + sdio_claim_host(sd->func[func]); + + if ((write) && (!fifo)) + err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len); + else if (write) + err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len); + else if (fifo) + err_ret = sdio_readsb(sd->func[func], buf, addr, len); + else + err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len); + + sdio_release_host(sd->func[func]); + + if (err_ret) + sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__, + (write) ? "TX" : "RX", buf, addr, len, err_ret)); + else + sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__, + (write) ? "TX" : "RX", buf, addr, len)); + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + + +/* + * This function takes a buffer or packet, and fixes everything up so that in the + * end, a DMA-able packet is created. + * + * A buffer does not have an associated packet pointer, and may or may not be aligned. + * A packet may consist of a single packet, or a packet chain. If it is a packet chain, + * then all the packets in the chain must be properly aligned. If the packet data is not + * aligned, then there may only be one packet, and in this case, it is copied to a new + * aligned packet. + * + */ +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, + uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt) +{ + SDIOH_API_RC status; + void *tmppkt; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + if (pkt) { + /* packet chain, only used for tx/rx glom, all packets length + * are aligned, total length is a block multiple + */ + if (PKTNEXT(sd->osh, pkt)) + return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt); + + /* non-glom mode, ignore the buffer parameter and use the packet pointer + * (this shouldn't happen) + */ + buffer = PKTDATA(sd->osh, pkt); + buf_len = PKTLEN(sd->osh, pkt); + } + + ASSERT(buffer); + + /* buffer and length are aligned, use it directly so we can avoid memory copy */ + if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0) + return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len); + + sd_err(("%s: [%d] doing memory copy buf=%p, len=%d\n", + __FUNCTION__, write, buffer, buf_len)); + + /* otherwise, a memory copy is needed as the input buffer is not aligned */ + tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE); + if (tmppkt == NULL) { + sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len)); + return SDIOH_API_RC_FAIL; + } + + if (write) + bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len); + + status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, + PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1))); + + if (!write) + bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len); + + PKTFREE_STATIC(sd->osh, tmppkt, write ? 
TRUE : FALSE); + + return status; +} + +/* this function performs "abort" for both of host & device */ +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ +#if defined(MMC_SDIO_ABORT) + char t_func = (char) func; +#endif /* defined(MMC_SDIO_ABORT) */ + sd_trace(("%s: Enter\n", __FUNCTION__)); + +#if defined(MMC_SDIO_ABORT) + /* issue abort cmd52 command through F1 */ + sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func); +#endif /* defined(MMC_SDIO_ABORT) */ + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Reset and re-initialize the device */ +int sdioh_sdio_reset(sdioh_info_t *si) +{ + sd_trace(("%s: Enter\n", __FUNCTION__)); + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Disable device interrupt */ +void +sdioh_sdmmc_devintr_off(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask &= ~CLIENT_INTR; +} + +/* Enable device interrupt */ +void +sdioh_sdmmc_devintr_on(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask |= CLIENT_INTR; +} + +/* Read client card reg */ +int +sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp = 0; + + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + *data = temp; + *data &= 0xff; + sd_data(("%s: byte read data=0x%02x\n", + __FUNCTION__, *data)); + } else { + sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize); + if (regsize == 2) + *data &= 0xffff; + + sd_data(("%s: word read data=0x%08x\n", + __FUNCTION__, *data)); + } + + return SUCCESS; +} + +#if !defined(OOB_INTR_ONLY) +/* bcmsdh_sdmmc interrupt handler */ +static void IRQHandler(struct sdio_func *func) +{ + sdioh_info_t *sd; + + sd = sdio_get_drvdata(func); + + ASSERT(sd != NULL); + sdio_release_host(sd->func[0]); + + if (sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + } else { + sd_err(("bcmsdh_sdmmc: ***IRQHandler\n")); + + sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + + sdio_claim_host(sd->func[0]); +} + +/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */ +static void IRQHandlerF2(struct sdio_func *func) +{ + sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n")); +} +#endif /* !defined(OOB_INTR_ONLY) */ + +#ifdef NOTUSED +/* Write client card reg */ +static int +sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp; + + temp = data & 0xff; + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + sd_data(("%s: byte write data=0x%02x\n", + __FUNCTION__, data)); + } else { + if (regsize == 2) + data &= 0xffff; + + sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize); + + sd_data(("%s: word write data=0x%08x\n", + __FUNCTION__, data)); + } + + return SUCCESS; +} +#endif /* NOTUSED */ + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + int ret; + + if (!sd) { + sd_err(("%s Failed, sd is NULL\n", __FUNCTION__)); + return (0); + } + + /* Need to do this stages as we can't enable the interrupt till + downloading of the firmware is complete, other wise polling + sdio access will come in way + */ + if (sd->func[0]) { + if (stage == 0) { + /* Since the power to the chip is killed, we will have + re 
enumerate the device again. Set the block size + and enable the fucntion 1 for in preparation for + downloading the code + */ + /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux + 2.6.27. The implementation prior to that is buggy, and needs broadcom's + patch for it + */ + if ((ret = sdio_reset_comm(sd->func[0]->card))) { + sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret)); + return ret; + } + else { + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + + if (sd->func[1]) { + /* Claim host controller */ + sdio_claim_host(sd->func[1]); + + sd->client_block_size[1] = 64; + ret = sdio_set_block_size(sd->func[1], 64); + if (ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 " + "blocksize(%d)\n", ret)); + } + + /* Release host controller F1 */ + sdio_release_host(sd->func[1]); + } + + if (sd->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(sd->func[2]); + + sd->client_block_size[2] = sd_f2_blocksize; + ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize); + if (ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 " + "blocksize to %d(%d)\n", sd_f2_blocksize, ret)); + } + + /* Release host controller F2 */ + sdio_release_host(sd->func[2]); + } + + sdioh_sdmmc_card_enablefuncs(sd); + } + } else { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(sd->func[0]); + if (sd->func[2]) + sdio_claim_irq(sd->func[2], IRQHandlerF2); + if (sd->func[1]) + sdio_claim_irq(sd->func[1], IRQHandler); + sdio_release_host(sd->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_enable_func_intr(sd); +#endif + bcmsdh_oob_intr_set(sd->bcmsdh, TRUE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + + return (0); +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + /* MSM7201A Android sdio stack has bug with interrupt + So internaly within SDIO stack they are polling + which cause issue when device is turned off. 
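+	   (The IRQ handlers claimed via sdio_claim_irq() in
+	   sdioh_interrupt_register and in sdioh_start are released again here,
+	   so no handler remains for the SDIO core to call after power-off.)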
So + unregister interrupt with SDIO stack to stop the + polling + */ + if (sd->func[0]) { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(sd->func[0]); + if (sd->func[1]) + sdio_release_irq(sd->func[1]); + if (sd->func[2]) + sdio_release_irq(sd->func[2]); + sdio_release_host(sd->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_disable_func_intr(sd); +#endif + bcmsdh_oob_intr_set(sd->bcmsdh, FALSE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + return (0); +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return (1); +} + + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c new file mode 100644 index 000000000000..19fa8b5c8b17 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c @@ -0,0 +1,399 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
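Illustration (not part of the patch): the two-stage bring-up implemented by sdioh_start()/sdioh_stop() above is easiest to read from the caller's side. Stage 0 runs before firmware download with no interrupt claimed, so polled SDIO access cannot race the host's SDIO IRQ thread; a later call with a non-zero stage claims the F1/F2 interrupts (or arms the out-of-band interrupt when OOB_INTR_ONLY is set). The sketch below is a hypothetical caller only; hypothetical_download_firmware() is a placeholder, not a symbol from this patch.

	/* Hypothetical bus-layer sequence around firmware download (sketch only). */
	extern int hypothetical_download_firmware(sdioh_info_t *sd);	/* placeholder */

	static int example_bus_bringup(sdioh_info_t *sd)
	{
		int err;

		/* Stage 0: re-enumerate the card and restore block sizes;
		 * interrupts stay unclaimed so register polling is safe.
		 */
		if ((err = sdioh_start(sd, 0)) != 0)
			return err;

		if ((err = hypothetical_download_firmware(sd)) != 0)
			return err;

		/* Stage 1: firmware is running, claim F1/F2 (or OOB) interrupts. */
		err = sdioh_start(sd, 1);

		/* On teardown, sdioh_stop() releases the IRQs again so a polling
		 * host stack (e.g. the MSM7201A case noted above) does not keep
		 * touching a powered-down card.
		 */
		return err;
	}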
+ * + * + * <> + * + * $Id: bcmsdh_sdmmc_linux.c 591173 2015-10-07 06:24:22Z $ + */ + +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include /* request_irq() */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(SDIO_VENDOR_ID_BROADCOM) +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */ + +#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 + +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) +#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */ +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325) +#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4329) +#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4319) +#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4330) +#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4334) +#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4324) +#define SDIO_DEVICE_ID_BROADCOM_4324 0x4324 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_43239) +#define SDIO_DEVICE_ID_BROADCOM_43239 43239 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */ + +extern void wl_cfg80211_set_parent_dev(void *dev); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num); +extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh); + +int sdio_function_init(void); +void sdio_function_cleanup(void); + +#define DESCRIPTION "bcmsdh_sdmmc Driver" +#define AUTHOR "Broadcom Corporation" + +/* module param defaults */ +static int clockoverride = 0; + +module_param(clockoverride, int, 0644); +MODULE_PARM_DESC(clockoverride, "SDIO card clock override"); + +/* Maximum number of bcmsdh_sdmmc devices supported by driver */ +#define BCMSDH_SDMMC_MAX_DEVICES 1 + +extern volatile bool dhd_mmc_suspend; + +static int sdioh_probe(struct sdio_func *func) +{ + int host_idx = func->card->host->index; + uint32 rca = func->card->rca; + wifi_adapter_info_t *adapter; + osl_t *osh = NULL; + sdioh_info_t *sdioh = NULL; + + sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca)); + adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca); + if (adapter != NULL) + sd_err(("found adapter info '%s'\n", adapter->name)); + else + sd_err(("can't find adapter info for this chip\n")); + +#ifdef WL_CFG80211 + wl_cfg80211_set_parent_dev(&func->dev); +#endif + + /* allocate SDIO Host Controller state info */ + osh = osl_attach(&func->dev, SDIO_BUS, TRUE); + if (osh == NULL) { + sd_err(("%s: osl_attach failed\n", __FUNCTION__)); + goto fail; + } + osl_static_mem_init(osh, adapter); + sdioh = sdioh_attach(osh, func); + if (sdioh == NULL) { + sd_err(("%s: sdioh_attach failed\n", __FUNCTION__)); + goto fail; + } + sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca); + if 
(sdioh->bcmsdh == NULL) { + sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__)); + goto fail; + } + + sdio_set_drvdata(func, sdioh); + return 0; + +fail: + if (sdioh != NULL) + sdioh_detach(osh, sdioh); + if (osh != NULL) + osl_detach(osh); + return -ENOMEM; +} + +static void sdioh_remove(struct sdio_func *func) +{ + sdioh_info_t *sdioh; + osl_t *osh; + + sdioh = sdio_get_drvdata(func); + if (sdioh == NULL) { + sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__)); + return; + } + + osh = sdioh->osh; + bcmsdh_remove(sdioh->bcmsdh); + sdioh_detach(osh, sdioh); + osl_detach(osh); +} + +static int bcmsdh_sdmmc_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + + if (func == NULL) + return -EINVAL; + + sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + /* 4318 doesn't have function 2 */ + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + ret = sdioh_probe(func); + + return ret; +} + +static void bcmsdh_sdmmc_remove(struct sdio_func *func) +{ + if (func == NULL) { + sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__)); + return; + } + + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + sdioh_remove(func); +} + +/* devices we support, null terminated */ +static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) }, + { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) +static int bcmsdh_sdmmc_suspend(struct device *pdev) +{ + int err; + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + mmc_pm_flag_t sdio_flags; + + sd_err(("%s Enter\n", __FUNCTION__)); + if (func->num != 2) + return 0; + +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = TRUE; +#endif + + sdioh = sdio_get_drvdata(func); + err = bcmsdh_suspend(sdioh->bcmsdh); + if (err) { +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + return err; + } + + sdio_flags = sdio_get_host_pm_caps(func); + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__)); +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + return -EINVAL; + } + + /* keep power while host suspended */ + err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (err) { + sd_err(("%s: error while trying to keep 
power\n", __FUNCTION__)); +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + return err; + } + smp_mb(); + + return 0; +} + +static int bcmsdh_sdmmc_resume(struct device *pdev) +{ + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + + sd_err(("%s Enter\n", __FUNCTION__)); + if (func->num != 2) + return 0; + + sdioh = sdio_get_drvdata(func); +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + + smp_mb(); + return 0; +} + +static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = { + .suspend = bcmsdh_sdmmc_suspend, + .resume = bcmsdh_sdmmc_resume, +}; +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + +#if defined(BCMLXSDMMC) +static struct semaphore *notify_semaphore = NULL; + +static int dummy_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + if (func && (func->num != 2)) { + return 0; + } + + if (notify_semaphore) + up(notify_semaphore); + return 0; +} + +static void dummy_remove(struct sdio_func *func) +{ +} + +static struct sdio_driver dummy_sdmmc_driver = { + .probe = dummy_probe, + .remove = dummy_remove, + .name = "dummy_sdmmc", + .id_table = bcmsdh_sdmmc_ids, + }; + +int sdio_func_reg_notify(void* semaphore) +{ + notify_semaphore = semaphore; + return sdio_register_driver(&dummy_sdmmc_driver); +} + +void sdio_func_unreg_notify(void) +{ + OSL_SLEEP(15); + sdio_unregister_driver(&dummy_sdmmc_driver); +} + +#endif /* defined(BCMLXSDMMC) */ + +static struct sdio_driver bcmsdh_sdmmc_driver = { + .probe = bcmsdh_sdmmc_probe, + .remove = bcmsdh_sdmmc_remove, + .name = "bcmsdh_sdmmc", + .id_table = bcmsdh_sdmmc_ids, +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) + .drv = { + .pm = &bcmsdh_sdmmc_pm_ops, + }, +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + }; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +}; + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + if (!sd) + return BCME_BADARG; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + return SDIOH_API_RC_SUCCESS; +} + +#ifdef BCMSDH_MODULE +static int __init +bcmsdh_module_init(void) +{ + int error = 0; + error = sdio_function_init(); + return error; +} + +static void __exit +bcmsdh_module_cleanup(void) +{ + sdio_function_cleanup(); +} + +module_init(bcmsdh_module_init); +module_exit(bcmsdh_module_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DESCRIPTION); +MODULE_AUTHOR(AUTHOR); + +#endif /* BCMSDH_MODULE */ +/* + * module init +*/ +int bcmsdh_register_client_driver(void) +{ + return sdio_register_driver(&bcmsdh_sdmmc_driver); +} + +/* + * module cleanup +*/ +void bcmsdh_unregister_client_driver(void) +{ + sdio_unregister_driver(&bcmsdh_sdmmc_driver); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c new file mode 100644 index 000000000000..139288e73ad8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c @@ -0,0 +1,252 @@ +/* + * Broadcom SPI Host Controller Driver - Linux Per-port + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdspi_linux.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include +#include /* SDIO Device and Protocol Specs */ +#include /* request_irq(), free_irq() */ +#include +#include + +extern uint sd_crc; +module_param(sd_crc, uint, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +/* Interrupt handler */ +static irqreturn_t +sdspi_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + sd->local_intrcount++; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = spi_check_client_intr(sd, NULL); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + + return IRQ_RETVAL(ours); + } +} + + +/* Register with Linux for interrupts */ +int +spi_register_irq(sdioh_info_t *sd, uint irq) +{ + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } + return SUCCESS; +} + +/* Free Linux irq */ +void +spi_free_irq(uint irq, sdioh_info_t *sd) +{ + free_irq(irq, sd); +} + +/* Map Host controller registers */ +uint32 * +spi_reg_map(osl_t *osh, uintptr addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +spi_reg_unmap(osl_t *osh, uintptr addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} + +int +spi_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + init_waitqueue_head(&sdos->intr_wait_queue); + return BCME_OK; +} + +void +spi_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable && !sd->lockcount) + spi_devintr_on(sd); + else + spi_devintr_off(sd); + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +spi_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked!\n", __FUNCTION__)); + ASSERT(sd->lockcount == 0); + } + spi_devintr_off(sd); + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +spi_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { + spi_devintr_on(sd); + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +void spi_waitbits(sdioh_info_t *sd, bool yield) +{ +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif + sd_trace(("%s: yield %d canblock %d\n", + __FUNCTION__, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + +#ifdef BCMSDYIELD + if (yield && BLOCKABLE()) { + struct sdos_info *sdos; + sdos = (struct sdos_info *)sd->sdos_info; + /* Wait for the indication, the interrupt will be masked when the ISR fires. */ + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + } else +#endif /* BCMSDYIELD */ + { + spi_spinbits(sd); + } + +} diff --git a/drivers/net/wireless/bcmdhd/bcmspibrcm.c b/drivers/net/wireless/bcmdhd/bcmspibrcm.c new file mode 100644 index 000000000000..10d982e0f8d8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmspibrcm.c @@ -0,0 +1,1819 @@ +/* + * Broadcom BCMSDH to gSPI Protocol Conversion Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmspibrcm.c 591086 2015-10-07 02:51:01Z $ + */ + +#define HSMODE + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* SDIO device core hardware definitions. */ +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ +#include /* SDIO Device and Protocol Specs */ + +#include + + +#include +#include + +/* these are for the older cores... for newer cores we have control for each of them */ +#define F0_RESPONSE_DELAY 16 +#define F1_RESPONSE_DELAY 16 +#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY + + +#define GSPI_F0_RESP_DELAY 0 +#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY +#define GSPI_F2_RESP_DELAY 0 +#define GSPI_F3_RESP_DELAY 0 + +#define CMDLEN 4 + +#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE) + +/* Globals */ +#if defined(DHD_DEBUG) +uint sd_msglevel = SDH_ERROR_VAL; +#else +uint sd_msglevel = 0; +#endif + +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + + +uint sd_divisor = 2; +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ + +uint8 spi_outbuf[SPI_MAX_PKT_LEN]; +uint8 spi_inbuf[SPI_MAX_PKT_LEN]; + +/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits + * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. 
+ */ +#define BUF2_PKT_LEN 128 +uint8 spi_outbuf2[BUF2_PKT_LEN]; +uint8 spi_inbuf2[BUF2_PKT_LEN]; + +#define SPISWAP_WD4(x) bcmswap32(x); +#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \ + (bcmswap16((x & 0xffff0000) >> 16) << 16); + +/* Prototypes */ +static bool bcmspi_test_card(sdioh_info_t *sd); +static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd); +static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode); +static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen); +static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, + uint8 *data); +static int bcmspi_driver_init(sdioh_info_t *sd); +static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, + uint32 *data); +static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer); +static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg); + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (spi_osinit(sd) != 0) { + sd_err(("%s: spi_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + sd->bar0 = bar0; + sd->irq = irq; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; + + /* Set defaults */ + sd->use_client_ints = TRUE; + sd->sd_use_dma = FALSE; /* DMA Not supported */ + + /* Spi device default is 16bit mode, change to 4 when device is changed to 32bit + * mode + */ + sd->wordlen = 2; + + + if (!spi_hw_attach(sd)) { + sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (bcmspi_driver_init(sd) != SUCCESS) { + sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (spi_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if (sd) { + sd_err(("%s: detaching from hardware\n", __FUNCTION__)); + spi_free_irq(sd->irq, sd); + spi_hw_detach(sd); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; +#endif /* 
!defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return 0; +} +#endif + +extern SDIOH_API_RC +sdioh_query_device(sdioh_info_t *sd) +{ + /* Return a BRCM ID appropriate to the dongle class */ + return (sd->num_funcs > 1) ? BCM4329_D11N_ID : BCM4318_D11G_ID; +} + +/* Provide dstatus bits of spi-transaction for dhd layers. */ +extern uint32 +sdioh_get_dstatus(sdioh_info_t *sd) +{ + return sd->card_dstatus; +} + +extern void +sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev) +{ + sd->chip = chip; + sd->chiprev = chiprev; +} + +extern void +sdioh_dwordmode(sdioh_info_t *sd, bool set) +{ + uint8 reg = 0; + int status; + + if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } + + if (set) { + reg |= DWORD_PKT_LEN_EN; + sd->dwordmode = TRUE; + sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */ + } else { + reg &= ~DWORD_PKT_LEN_EN; + sd->dwordmode = FALSE; + sd->client_block_size[SPI_FUNC_2] = 2048; + } + + if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } +} + + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_SPIERRSTATS, + IOV_RESP_DELAY_ALL +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, + {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) }, + {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; +/* + sdioh_regs_t *regs; +*/ + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + 
ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("%s: set clock failed\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("%s: Failed changing highspeed mode to %d.\n", + __FUNCTION__, sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + + case IOV_GVAL(IOV_SPIERRSTATS): + { + bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); + break; + } + + case 
IOV_SVAL(IOV_SPIERRSTATS): + { + bzero(&si->spierrstats, sizeof(struct spierrstats_t)); + break; + } + + case IOV_GVAL(IOV_RESP_DELAY_ALL): + int_val = (int32)si->resp_delay_all; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RESP_DELAY_ALL): + si->resp_delay_all = (bool)int_val; + int_val = STATUS_ENABLE|INTR_WITH_STATUS; + if (si->resp_delay_all) + int_val |= RESP_DELAY_ALL; + else { + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, + F1_RESPONSE_DELAY) != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + } + + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) + != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + + if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { + uint8 dummy_data; + status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); + if (status) { + sd_err(("sdioh_cfg_read() failed.\n")); + return status; + } + } + + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 cis_byte; + uint16 *cis = (uint16 *)cisd; + uint bar0 = SI_ENUM_BASE; + int status; + uint8 data; + + sd_trace(("%s: Func %d\n", __FUNCTION__, func)); + + spi_lock(sd); + + /* Set sb window address to 0x18000000 */ + data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); + if (status == SUCCESS) { + data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + if (status == SUCCESS) { + data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + offset = CC_SROM_OTP; /* OTP offset in chipcommon. 
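Editorial note (not part of the patch): the three byte writes in sdioh_cis_read() above program the F1 backplane address window. Only the shift expressions come from the code; the numeric results are worked out here for illustration, for the window base the code sets (0x18000000).

	/*
	 *   SBADDRLOW  = (0x18000000 >>  8) & SBSDIO_SBADDRLOW_MASK   -> 0x00
	 *   SBADDRMID  = (0x18000000 >> 16) & SBSDIO_SBADDRMID_MASK   -> 0x00
	 *   SBADDRHIGH = (0x18000000 >> 24) & SBSDIO_SBADDRHIGH_MASK  -> 0x18
	 *
	 * With the window pointed at 0x18000000 (chipcommon), the subsequent F1
	 * reads at CC_SROM_OTP fetch the CIS/OTP contents two bytes at a time.
	 */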
*/ + for (count = 0; count < length/2; count++) { + if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + *cis = (uint16)cis_byte; + cis++; + offset += 2; + } + + spi_unlock(sd); + + return (BCME_OK); +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + if (rw == SDIOH_READ) { + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr)); + } else { + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + } + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + if (rw == SDIOH_READ) { + *byte = (uint8)data; + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte)); + } + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = bcmspi_card_regread(sd, func, addr, nbytes, word); + else + status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks. */ + while (buflen > 0) { + len = MIN(sd->client_block_size[func], buflen); + if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sd_err(("%s: bcmspi_card_buf %s failed\n", + __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +/* This function allows write to gspi bus when another rd/wr function is deep down the call stack. + * Its main aim is to have simpler spi writes rather than recursive writes. + * e.g. When there is a need to program response delay on the fly after detecting the SPI-func + * this call will allow to program the response delay. 
+ */ +static int +bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte) +{ + uint32 cmd_arg; + uint32 datalen = 1; + uint32 hostlen; + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + } else { + sd_err(("%s: Host is %d bit spid, could not create SPI command.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (datalen != 0) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte); + } + } + + /* +4 for cmd, +4 for dstatus */ + hostlen = datalen + 8; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen); + + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +/* Program the response delay corresponding to the spi function */ +static int +bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay) +{ + if (sd->resp_delay_all == FALSE) + return (BCME_OK); + + if (sd->prev_fun == func) + return (BCME_OK); + + if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY) + return (BCME_OK); + + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay); + + /* Remember function for which to avoid reprogramming resp-delay in next iteration */ + sd->prev_fun = func; + + return (BCME_OK); + +} + +#define GSPI_RESYNC_PATTERN 0x0 + +/* A resync pattern is a 32bit MOSI line with all zeros. Its a special command in gSPI. + * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is + * synchronised and all queued resuests are cancelled. + */ +static int +bcmspi_resync_f1(sdioh_info_t *sd) +{ + uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0; + + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
+ */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + *(uint32 *)spi_outbuf2 = cmd_arg; + + /* for Write, put the data into the output buffer */ + *(uint32 *)&spi_outbuf2[CMDLEN] = data; + + /* +4 for cmd, +4 for dstatus */ + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); + + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +uint32 dstatus_count = 0; + +static int +bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) +{ + uint32 dstatus = sd->card_dstatus; + struct spierrstats_t *spierrstats = &sd->spierrstats; + int err = SUCCESS; + + sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); + + /* Store dstatus of last few gSPI transactions */ + spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; + spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; + dstatus_count++; + + if (sd->card_init_done == FALSE) + return err; + + if (dstatus & STATUS_DATA_NOT_AVAILABLE) { + spierrstats->dna++; + sd_trace(("Read data not available on F1 addr = 0x%x\n", + GFIELD(cmd_arg, SPI_REG_ADDR))); + /* Clear dna bit */ + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); + } + + if (dstatus & STATUS_UNDERFLOW) { + spierrstats->rdunderflow++; + sd_err(("FIFO underflow happened due to current F2 read command.\n")); + } + + if (dstatus & STATUS_OVERFLOW) { + spierrstats->wroverflow++; + sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); + bcmspi_resync_f1(sd); + sd_err(("Recovering from F1 FIFO overflow.\n")); + } + + if (dstatus & STATUS_F2_INTR) { + spierrstats->f2interrupt++; + sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_F3_INTR) { + spierrstats->f3interrupt++; + sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_HOST_CMD_DATA_ERR) { + spierrstats->hostcmddataerr++; + sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); + } + + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + spierrstats->f2pktavailable++; + sd_trace(("Packet is available/ready in F2 TX FIFO\n")); + sd_trace(("Packet length = %d\n", sd->dwordmode ? 
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) : + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT))); + } + + if (dstatus & STATUS_F3_PKT_AVAILABLE) { + spierrstats->f3pktavailable++; + sd_err(("Packet is available/ready in F3 TX FIFO\n")); + sd_err(("Packet length = %d\n", + (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT)); + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ + return 0; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return SUCCESS; +} + + +/* + * Private/Static work routines + */ +static int +bcmspi_host_init(sdioh_info_t *sd) +{ + + /* Default power on mode */ + sd->sd_mode = SDIOH_MODE_SPI; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = 1; + + return (SUCCESS); +} + +static int +get_client_blocksize(sdioh_info_t *sd) +{ + uint32 regdata[2]; + int status; + + /* Find F1/F2/F3 max packet size */ + if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG, + 8, regdata)) != SUCCESS) { + return status; + } + + sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n", + regdata[0], regdata[1])); + + sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2; + sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1])); + ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1); + + sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2; + sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2])); + ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2); + + sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2; + sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3])); + ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3); + + return 0; +} + +static int +bcmspi_client_init(sdioh_info_t *sd) +{ + uint32 status_en_reg = 0; + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + +#ifdef HSMODE + if (!spi_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#else + /* Start at ~400KHz clock rate for initialization */ + if (!spi_start_clock(sd, 128)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ + + if (!bcmspi_host_device_init_adapt(sd)) { + sd_err(("bcmspi_host_device_init_adapt failed\n")); + return ERROR; + } + + if (!bcmspi_test_card(sd)) { + sd_err(("bcmspi_test_card failed\n")); + return ERROR; + } + + sd->num_funcs = SPI_MAX_IOFUNCS; + + get_client_blocksize(sd); + + /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */ + bcmspi_resync_f1(sd); + + sd->dwordmode = FALSE; + + bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg); + + sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__)); + status_en_reg |= INTR_WITH_STATUS; + + if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, + status_en_reg & 0xff) != SUCCESS) { + sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__)); + return ERROR; + } + +#ifndef HSMODE + /* After configuring for High-Speed mode, set the desired clock rate. 
 */
+	if (!spi_start_clock(sd, 4)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	/* check to see if the response delay needs to be programmed properly */
+	{
+		uint32 f1_respdelay = 0;
+		bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay);
+		if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) {
+			/* older sdiodevice core: no separate resp delay for each of the funcs */
+			sd_err(("older corerev < 4 so use the same resp delay for all funcs\n"));
+			sd->resp_delay_new = FALSE;
+		}
+		else {
+			/* newer sdiodevice core: has a separate resp delay for each of the funcs */
+			int ret_val;
+			sd->resp_delay_new = TRUE;
+			sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n"));
+			sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n",
+				GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY,
+				GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY));
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1,
+				GSPI_F0_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1,
+				GSPI_F1_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1,
+				GSPI_F2_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1,
+				GSPI_F3_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F3\n", __FUNCTION__));
+				return ERROR;
+			}
+		}
+	}
+
+
+	sd->card_init_done = TRUE;
+
+	/* get the device rev to program the proper respdelays */
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+		4, &regdata)) != SUCCESS)
+		return status;
+
+	sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
+
+	if (hsmode == TRUE) {
+		sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			sd_trace(("Device is already in High-Speed mode.\n"));
+			return status;
+		} else {
+			regdata |= HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+				4, regdata)) != SUCCESS) {
+				return status;
+			}
+		}
+	} else {
+		sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			regdata &= ~HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+				4, regdata)) != SUCCESS)
+				return status;
+		}
+		else {
+			sd_trace(("Device is already in Low-Speed mode.\n"));
+			return status;
+		}
+	}
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
+
+#define bcmspi_find_curr_mode(sd) { \
+	sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd_trace(("Silicon testability
issue: regdata = 0x%x." \ + " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \ + OSL_DELAY(100000); \ +} + +#define INIT_ADAPT_LOOP 100 + +/* Adapt clock-phase-speed-bitwidth between host and device */ +static bool +bcmspi_host_device_init_adapt(sdioh_info_t *sd) +{ + uint32 wrregdata, regdata = 0; + int status; + int i; + + /* Due to a silicon testability issue, the first command from the Host + * to the device will get corrupted (first bit will be lost). So the + * Host should poll the device with a safe read request. ie: The Host + * should try to read F0 addr 0x14 using the Fixed address mode + * (This will prevent a unintended write command to be detected by device) + */ + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + /* If device was not power-cycled it will stay in 32bit mode with + * response-delay-all bit set. Alternate the iteration so that + * read either with or without response-delay for F0 to succeed. + */ + bcmspi_find_curr_mode(sd); + sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = TRUE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = FALSE; + } + + /* Bail out, device not detected */ + if (i == INIT_ADAPT_LOOP) + return FALSE; + + /* Softreset the spid logic */ + if ((sd->dwordmode) || (sd->wordlen == 4)) { + bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI); + bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, ®data); + sd_trace(("reset reg read = 0x%x\n", regdata)); + sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode, + sd->wordlen, sd->resp_delay_all)); + /* Restore default state after softreset */ + sd->wordlen = 2; + sd->dwordmode = FALSE; + } + + if (sd->wordlen == 4) { + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != + SUCCESS) + return FALSE; + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n", + regdata)); + sd_trace(("Spid power was left on.\n")); + } else { + sd_err(("Spid power was left on but signature read failed." 
+ " Value read = 0x%x\n", regdata)); + return FALSE; + } + } else { + sd->wordlen = 2; + +#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ + + wrregdata = (CTRL_REG_DEFAULT); + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); + +#ifndef HSMODE + wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); + wrregdata &= ~HIGH_SPEED_MODE; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); +#endif /* HSMODE */ + + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { + sd_trace(("0xfeedbead was leftshifted by 1-bit.\n")); + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, + ®data)) != SUCCESS) + return FALSE; + } + OSL_DELAY(1000); + } + +#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH) + /* Change to host controller intr-polarity of active-high */ + wrregdata |= INTR_POLARITY; +#else + /* Change to host controller intr-polarity of active-low */ + wrregdata &= ~INTR_POLARITY; +#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */ + + sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", + wrregdata)); + /* Change to 32bit mode */ + wrregdata |= WORD_LENGTH_32; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + + /* Change command/data packaging in 32bit LE mode */ + sd->wordlen = 4; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); + sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n")); + } else { + sd_err(("Stale spid reg values read as it was kept powered. Value read =" + "0x%x\n", regdata)); + return FALSE; + } + } + + + return TRUE; +} + +static bool +bcmspi_test_card(sdioh_info_t *sd) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == (TEST_RO_DATA_32BIT_LE)) + sd_trace(("32bit LE regdata = 0x%x\n", regdata)); + else { + sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); + return FALSE; + } + + +#define RW_PATTERN1 0xA0A1A2A3 +#define RW_PATTERN2 0x4B5B6B7B + + regdata = RW_PATTERN1; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN1) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN1, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + regdata = RW_PATTERN2; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN2) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN2, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. 
Value read = 0x%x\n", regdata)); + + return TRUE; +} + +static int +bcmspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((bcmspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (bcmspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +/* Read device reg */ +static int +bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +static int +bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + int status; + uint32 cmd_arg; + uint32 dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data)); + + bcmspi_cmd_getdstatus(sd, &dstatus); + sd_trace(("dstatus =0x%x\n", dstatus)); + return SUCCESS; +} + +/* write a device register */ +static int +bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + return SUCCESS; +} + +/* write a device register - 1 byte */ +static int +bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +void +bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer) +{ + *dstatus_buffer = sd->card_dstatus; +} + +/* 'data' is of type uint32 whereas other buffers are of type uint8 */ +static int +bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen) +{ + uint32 i, j; + uint8 resp_delay = 0; + int err = SUCCESS; + uint32 hostlen; + uint32 spilen = 0; + uint32 dstatus_idx = 0; + uint16 templen, buslen, len, *ptr = NULL; + + sd_trace(("spi cmd = 0x%x\n", cmd_arg)); + + if (DWORDMODE_ON) { + spilen = GFIELD(cmd_arg, SPI_LEN); + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) || + (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1)) + dstatus_idx = spilen * 3; + + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + spilen = spilen << 2; + dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0; + /* convert len to mod16 size */ + spilen = ROUNDUP(spilen, 16); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } + } + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
+ */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + if (datalen < 4) + datalen = ROUNDUP(datalen, 4); + } else { + sd_err(("Host is %d bit spid, could not create SPI command.\n", + 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { + /* We send len field of hw-header always a mod16 size, both from host and dongle */ + if (DWORDMODE_ON) { + if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + /* ASSERT(*ptr == ~*(ptr + 1)); */ + templen = ROUNDUP(templen, 16); + *ptr = templen; + sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1)))); + } + } + + if (datalen != 0) { + for (i = 0; i < datalen/4; i++) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD4(data[i]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD2(data[i]); + } + } + } + } + + /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ + if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) { + int func = GFIELD(cmd_arg, SPI_FUNCTION); + switch (func) { + case 0: + if (sd->resp_delay_new) + resp_delay = GSPI_F0_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0; + break; + case 1: + if (sd->resp_delay_new) + resp_delay = GSPI_F1_RESP_DELAY; + else + resp_delay = F1_RESPONSE_DELAY; + break; + case 2: + if (sd->resp_delay_new) + resp_delay = GSPI_F2_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0; + break; + default: + ASSERT(0); + break; + } + /* Program response delay */ + if (sd->resp_delay_new == FALSE) + bcmspi_prog_resp_delay(sd, func, resp_delay); + } + + /* +4 for cmd and +4 for dstatus */ + hostlen = datalen + 8 + resp_delay; + hostlen += dstatus_idx; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); + + /* for Read, get the data into the input buffer */ + if (datalen != 0) { + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ + for (j = 0; j < datalen/4; j++) { + if (sd->wordlen == 4) { /* 32bit spid */ + data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } + } + + if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + buslen = len = ~(*(ptr + 1)); + buslen = ROUNDUP(buslen, 16); + /* populate actual len in hw-header */ + if (templen == buslen) + *ptr = len; + } + } + } + + /* Restore back the len field of the hw header */ + if (DWORDMODE_ON) { + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + ptr = (uint16 *)&data[0]; + *ptr = (uint16)(~*(ptr+1)); + } + } + + dstatus_idx += (datalen + CMDLEN + resp_delay); + /* Last 4bytes are dstatus. Device is configured to return status bits. 
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else { + sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", + 8 * sd->wordlen)); + return ERROR; + } + if (sd->card_dstatus == 0xffffffff) { + sd_err(("looks like not a GSPI device or device is not powered.\n")); + } + + err = bcmspi_update_stats(sd, cmd_arg); + + return err; + +} + +static int +bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + bool write = rw == SDIOH_READ ? 0 : 1; + uint retries = 0; + + bool enable; + uint32 spilen; + + cmd_arg = 0; + + ASSERT(nbytes); + ASSERT(nbytes <= sd->client_block_size[func]); + + if (write) sd->t_cnt++; else sd->r_cnt++; + + if (func == 2) { + /* Frame len check limited by gSPI. */ + if ((nbytes > 2000) && write) { + sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); + } + /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */ + /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ + if (write) { + uint32 dstatus; + /* check F2 ready with cached one */ + bcmspi_cmd_getdstatus(sd, &dstatus); + if ((dstatus & STATUS_F2_RX_READY) == 0) { + retries = WAIT_F2RXFIFORDY; + enable = 0; + while (retries-- && !enable) { + OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); + bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, + &dstatus); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + if (!enable) { + struct spierrstats_t *spierrstats = &sd->spierrstats; + spierrstats->f2rxnotready++; + sd_err(("F2 FIFO is not ready to receive data.\n")); + return ERROR; + } + sd_trace(("No of retries on F2 ready %d\n", + (WAIT_F2RXFIFORDY - retries))); + } + } + } + + /* F2 transfers happen on 0 addr */ + addr = (func == 2) ? 0 : addr; + + /* In pio mode buffer is read using fixed address fifo in func 1 */ + if ((func == 1) && (fifo)) + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); + else + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); + + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); + spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); + if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + /* convert len to mod4 size */ + spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } else + cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); + + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wd" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, + (write ? 
"write" : "read"))); + return status; + } + + /* gSPI expects that hw-header-len is equal to spi-command-len */ + if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { + ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); + ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); + } + + if ((nbytes > 2000) && !write) { + sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); + } + + return SUCCESS; +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return bcmspi_client_init(si); +} + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c new file mode 100644 index 000000000000..a4f1f25b0a20 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmutils.c @@ -0,0 +1,3545 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmutils.c 591286 2015-10-07 11:59:26Z $ + */ + +#include +#include +#include +#include +#ifdef BCMDRIVER + +#include +#include + +#else /* !BCMDRIVER */ + +#include +#include +#include + +#if defined(BCMEXTSUP) +#include +#endif + +#ifndef ASSERT +#define ASSERT(exp) +#endif + +#endif /* !BCMDRIVER */ + +#include +#include +#include +#include +#include +#include +#include + + +void *_bcmutils_dummy_fn = NULL; + + + + +#ifdef BCMDRIVER + + + +/* copy a pkt buffer chain into a buffer */ +uint +pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + if (len < 0) + len = 4096; /* "infinite" */ + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(PKTDATA(osh, p) + offset, buf, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +/* copy a buffer into a pkt buffer chain */ +uint +pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(buf, PKTDATA(osh, p) + offset, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + + + +/* return total length of buffer chain */ +uint BCMFASTPATH +pkttotlen(osl_t *osh, void *p) +{ + uint total; + int len; + + total = 0; + for (; p; p = PKTNEXT(osh, p)) { + len = PKTLEN(osh, p); + total += len; +#ifdef BCMLFRAG + if (BCMLFRAG_ENAB()) { + if (PKTISFRAG(osh, p)) { + total += PKTFRAGTOTLEN(osh, p); + } + } +#endif + } + + return (total); +} + +/* return the last buffer of chained pkt */ +void * +pktlast(osl_t *osh, void *p) +{ + for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) + ; + + return (p); +} + +/* count segments of a chained packet */ +uint BCMFASTPATH +pktsegcnt(osl_t *osh, void *p) +{ + uint cnt; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) { + cnt++; +#ifdef BCMLFRAG + if (BCMLFRAG_ENAB()) { + if (PKTISFRAG(osh, p)) { + cnt += PKTFRAGTOTNUM(osh, p); + } + } +#endif + } + + return cnt; +} + + +/* count segments of a chained packet */ +uint BCMFASTPATH +pktsegcnt_war(osl_t *osh, void *p) +{ + uint cnt; + uint8 *pktdata; + uint len, remain, align64; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) { + cnt++; + len = PKTLEN(osh, p); + if (len > 128) { + pktdata = (uint8 *)PKTDATA(osh, p); /* starting address of data */ + /* Check for page boundary straddle (2048B) */ + if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff)) + cnt++; + + align64 = (uint)((uintptr)pktdata & 0x3f); /* aligned to 64B */ + align64 = (64 - align64) & 0x3f; + len -= align64; /* bytes from aligned 64B to end */ + /* if aligned to 128B, check for MOD 128 between 1 to 4B */ + remain = len % 128; + if (remain > 0 && remain <= 4) + cnt++; /* add extra seg */ + } + } + + return cnt; +} + +uint8 * BCMFASTPATH +pktdataoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint pkt_off = 0, len = 0; + uint8 *pdata = (uint8 *) PKTDATA(osh, p); + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + pdata = (uint8 *) PKTDATA(osh, p); + pkt_off = offset - 
len; + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return (uint8*) (pdata+pkt_off); +} + + +/* given a offset in pdata, find the pkt seg hdr */ +void * +pktoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint len = 0; + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return p; +} + +#endif /* BCMDRIVER */ + +#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) +const unsigned char bcm_ctype[] = { + + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ + _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, + _BCM_C, /* 8-15 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ + _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ + _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ + _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ + _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ + _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, + _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ + _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, + _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ + _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ +}; + +ulong +bcm_strtoul(const char *cp, char **endp, uint base) +{ + ulong result, last_result = 0, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || (cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? 
*cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { + result = result*base + value; + /* Detected overflow */ + if (result < last_result && !minus) + return (ulong)-1; + last_result = result; + cp++; + } + + if (minus) + result = (ulong)(-(long)result); + + if (endp) + *endp = DISCARD_QUAL(cp, char); + + return (result); +} + +int +bcm_atoi(const char *s) +{ + return (int)bcm_strtoul(s, NULL, 10); +} + +/* return pointer to location of substring 'needle' in 'haystack' */ +char * +bcmstrstr(const char *haystack, const char *needle) +{ + int len, nlen; + int i; + + if ((haystack == NULL) || (needle == NULL)) + return DISCARD_QUAL(haystack, char); + + nlen = (int)strlen(needle); + len = (int)strlen(haystack) - nlen + 1; + + for (i = 0; i < len; i++) + if (memcmp(needle, &haystack[i], nlen) == 0) + return DISCARD_QUAL(&haystack[i], char); + return (NULL); +} + +char * +bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len) +{ + for (; s_len >= substr_len; s++, s_len--) + if (strncmp(s, substr, substr_len) == 0) + return DISCARD_QUAL(s, char); + + return NULL; +} + +char * +bcmstrcat(char *dest, const char *src) +{ + char *p; + + p = dest + strlen(dest); + + while ((*p++ = *src++) != '\0') + ; + + return (dest); +} + +char * +bcmstrncat(char *dest, const char *src, uint size) +{ + char *endp; + char *p; + + p = dest + strlen(dest); + endp = p + size; + + while (p != endp && (*p++ = *src++) != '\0') + ; + + return (dest); +} + + +/**************************************************************************** +* Function: bcmstrtok +* +* Purpose: +* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(), +* but allows strToken() to be used by different strings or callers at the same +* time. Each call modifies '*string' by substituting a NULL character for the +* first delimiter that is encountered, and updates 'string' to point to the char +* after the delimiter. Leading delimiters are skipped. +* +* Parameters: +* string (mod) Ptr to string ptr, updated by token. +* delimiters (in) Set of delimiter characters. +* tokdelim (out) Character that delimits the returned token. (May +* be set to NULL if token delimiter is not required). +* +* Returns: Pointer to the next token found. NULL when no more tokens are found. +***************************************************************************** +*/ +char * +bcmstrtok(char **string, const char *delimiters, char *tokdelim) +{ + unsigned char *str; + unsigned long map[8]; + int count; + char *nextoken; + + if (tokdelim != NULL) { + /* Prime the token delimiter */ + *tokdelim = '\0'; + } + + /* Clear control map */ + for (count = 0; count < 8; count++) { + map[count] = 0; + } + + /* Set bits in delimiter table */ + do { + map[*delimiters >> 5] |= (1 << (*delimiters & 31)); + } + while (*delimiters++); + + str = (unsigned char*)*string; + + /* Find beginning of token (skip over leading delimiters). Note that + * there is no token iff this loop sets str to point to the terminal + * null (*str == '\0') + */ + while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { + str++; + } + + nextoken = (char*)str; + + /* Find the end of the token. If it is not the end of the string, + * put a null there. + */ + for (; *str; str++) { + if (map[*str >> 5] & (1 << (*str & 31))) { + if (tokdelim != NULL) { + *tokdelim = *str; + } + + *str++ = '\0'; + break; + } + } + + *string = (char*)str; + + /* Determine if a token has been found. 
*/ + if (nextoken == (char *) str) { + return NULL; + } + else { + return nextoken; + } +} + + +#define xToLower(C) \ + ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C) + + +/**************************************************************************** +* Function: bcmstricmp +* +* Purpose: Compare to strings case insensitively. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstricmp(const char *s1, const char *s2) +{ + char dc, sc; + + while (*s2 && *s1) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + } + + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + + +/**************************************************************************** +* Function: bcmstrnicmp +* +* Purpose: Compare to strings case insensitively, upto a max of 'cnt' +* characters. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* cnt (in) Max characters to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstrnicmp(const char* s1, const char* s2, int cnt) +{ + char dc, sc; + + while (*s2 && *s1 && cnt) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + cnt--; + } + + if (!cnt) return 0; + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + +/* parse a xx:xx:xx:xx:xx:xx format ethernet address */ +int +bcm_ether_atoe(const char *p, struct ether_addr *ea) +{ + int i = 0; + char *ep; + + for (;;) { + ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16); + p = ep; + if (!*p++ || i == 6) + break; + } + + return (i == 6); +} + +int +bcm_atoipv4(const char *p, struct ipv4_addr *ip) +{ + + int i = 0; + char *c; + for (;;) { + ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0); + if (*c++ != '.' || i == IPV4_ADDR_LEN) + break; + p = c; + } + return (i == IPV4_ADDR_LEN); +} +#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */ + + +#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) +/* registry routine buffer preparation utility functions: + * parameter order is like strncpy, but returns count + * of bytes copied. 
Minimum bytes copied is null char(1)/wchar(2) + */ +ulong +wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) +{ + ulong copyct = 1; + ushort i; + + if (abuflen == 0) + return 0; + + /* wbuflen is in bytes */ + wbuflen /= sizeof(ushort); + + for (i = 0; i < wbuflen; ++i) { + if (--abuflen == 0) + break; + *abuf++ = (char) *wbuf++; + ++copyct; + } + *abuf = '\0'; + + return copyct; +} +#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ + +char * +bcm_ether_ntoa(const struct ether_addr *ea, char *buf) +{ + static const char hex[] = + { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' + }; + const uint8 *octet = ea->octet; + char *p = buf; + int i; + + for (i = 0; i < 6; i++, octet++) { + *p++ = hex[(*octet >> 4) & 0xf]; + *p++ = hex[*octet & 0xf]; + *p++ = ':'; + } + + *(p-1) = '\0'; + + return (buf); +} + +char * +bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) +{ + snprintf(buf, 16, "%d.%d.%d.%d", + ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); + return (buf); +} + +char * +bcm_ipv6_ntoa(void *ipv6, char *buf) +{ + /* Implementing RFC 5952 Sections 4 + 5 */ + /* Not thoroughly tested */ + uint16 tmp[8]; + uint16 *a = &tmp[0]; + char *p = buf; + int i, i_max = -1, cnt = 0, cnt_max = 1; + uint8 *a4 = NULL; + memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN); + + for (i = 0; i < IPV6_ADDR_LEN/2; i++) { + if (a[i]) { + if (cnt > cnt_max) { + cnt_max = cnt; + i_max = i - cnt; + } + cnt = 0; + } else + cnt++; + } + if (cnt > cnt_max) { + cnt_max = cnt; + i_max = i - cnt; + } + if (i_max == 0 && + /* IPv4-translated: ::ffff:0:a.b.c.d */ + ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) || + /* IPv4-mapped: ::ffff:a.b.c.d */ + (cnt_max == 5 && a[5] == 0xffff))) + a4 = (uint8*) (a + 6); + + for (i = 0; i < IPV6_ADDR_LEN/2; i++) { + if ((uint8*) (a + i) == a4) { + snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]); + break; + } else if (i == i_max) { + *p++ = ':'; + i += cnt_max - 1; + p[0] = ':'; + p[1] = '\0'; + } else { + if (i) + *p++ = ':'; + p += snprintf(p, 8, "%x", ntoh16(a[i])); + } + } + + return buf; +} +#ifdef BCMDRIVER + +void +bcm_mdelay(uint ms) +{ + uint i; + + for (i = 0; i < ms; i++) { + OSL_DELAY(1000); + } +} + + + + + +#if defined(DHD_DEBUG) +/* pretty hex print a pkt buffer chain */ +void +prpkt(const char *msg, osl_t *osh, void *p0) +{ + void *p; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + for (p = p0; p; p = PKTNEXT(osh, p)) + prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); +} +#endif + +/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. + * Also updates the inplace vlan tag if requested. + * For debugging, it returns an indication of what it did. 
+ */ +uint BCMFASTPATH +pktsetprio(void *pkt, bool update_vtag) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *pktdata; + int priority = 0; + int rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); + + eh = (struct ether_header *) pktdata; + + if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { + uint16 vlan_tag; + int vlan_prio, dscp_prio = 0; + + evh = (struct ethervlan_header *)eh; + + vlan_tag = ntoh16(evh->vlan_tag); + vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; + + if ((evh->ether_type == hton16(ETHER_TYPE_IP)) || + (evh->ether_type == hton16(ETHER_TYPE_IPV6))) { + uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); + uint8 tos_tc = IP_TOS46(ip_body); + dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + } + + /* DSCP priority gets precedence over 802.1P (vlan tag) */ + if (dscp_prio != 0) { + priority = dscp_prio; + rc |= PKTPRIO_VDSCP; + } else { + priority = vlan_prio; + rc |= PKTPRIO_VLAN; + } + /* + * If the DSCP priority is not the same as the VLAN priority, + * then overwrite the priority field in the vlan tag, with the + * DSCP priority value. This is required for Linux APs because + * the VLAN driver on Linux, overwrites the skb->priority field + * with the priority value in the vlan tag + */ + if (update_vtag && (priority != vlan_prio)) { + vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); + vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; + evh->vlan_tag = hton16(vlan_tag); + rc |= PKTPRIO_UPD; + } +#ifdef DHD_LOSSLESS_ROAMING + } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + priority = PRIO_8021D_NC; + rc = PKTPRIO_DSCP; +#endif /* DHD_LOSSLESS_ROAMING */ + } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || + (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { + uint8 *ip_body = pktdata + sizeof(struct ether_header); + uint8 tos_tc = IP_TOS46(ip_body); + uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; + switch (dscp) { + case DSCP_EF: + priority = PRIO_8021D_VO; + break; + case DSCP_AF31: + case DSCP_AF32: + case DSCP_AF33: + priority = PRIO_8021D_CL; + break; + case DSCP_AF21: + case DSCP_AF22: + case DSCP_AF23: + case DSCP_AF11: + case DSCP_AF12: + case DSCP_AF13: + priority = PRIO_8021D_EE; + break; + default: + priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + break; + } + + rc |= PKTPRIO_DSCP; + } + + ASSERT(priority >= 0 && priority <= MAXPRIO); + PKTSETPRIO(pkt, priority); + return (rc | priority); +} + +/* lookup user priority for specified DSCP */ +static uint8 +dscp2up(uint8 *up_table, uint8 dscp) +{ + uint8 user_priority = 255; + + /* lookup up from table if parameters valid */ + if (up_table != NULL && dscp < UP_TABLE_MAX) { + user_priority = up_table[dscp]; + } + + /* 255 is unused value so return up from dscp */ + if (user_priority == 255) { + user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT); + } + + return user_priority; +} + +/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */ +uint BCMFASTPATH +pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag) +{ + if (up_table) { + uint8 *pktdata; + uint pktlen; + uint8 dscp; + uint user_priority = 0; + uint rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + pktlen = PKTLEN(OSH_NULL, pkt); + + if (pktgetdscp(pktdata, pktlen, &dscp)) { + rc = PKTPRIO_DSCP; + user_priority = dscp2up(up_table, dscp); + PKTSETPRIO(pkt, user_priority); + } + + return (rc | user_priority); + } else { + return pktsetprio(pkt, update_vtag); + } +} + +/* Returns TRUE 
and DSCP if IP header found, FALSE otherwise. + */ +bool BCMFASTPATH +pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *ip_body; + bool rc = FALSE; + + /* minimum length is ether header and IP header */ + if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN) + return FALSE; + + eh = (struct ether_header *) pktdata; + + if (eh->ether_type == HTON16(ETHER_TYPE_IP)) { + ip_body = pktdata + sizeof(struct ether_header); + *dscp = IP_DSCP46(ip_body); + rc = TRUE; + } + else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) { + evh = (struct ethervlan_header *)eh; + + /* minimum length is ethervlan header and IP header */ + if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN && + evh->ether_type == HTON16(ETHER_TYPE_IP)) { + ip_body = pktdata + sizeof(struct ethervlan_header); + *dscp = IP_DSCP46(ip_body); + rc = TRUE; + } + } + + return rc; +} + +/* The 0.5KB string table is not removed by compiler even though it's unused */ + +static char bcm_undeferrstr[32]; +static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; + +/* Convert the error codes into related error strings */ +const char * +bcmerrorstr(int bcmerror) +{ + /* check if someone added a bcmerror code but forgot to add errorstring */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); + + if (bcmerror > 0 || bcmerror < BCME_LAST) { + snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); + return bcm_undeferrstr; + } + + ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); + + return bcmerrorstrtable[-bcmerror]; +} + + + +/* iovar table lookup */ +/* could mandate sorted tables and do a binary search */ +const bcm_iovar_t* +bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) +{ + const bcm_iovar_t *vi; + const char *lookup_name; + + /* skip any ':' delimited option prefixes */ + lookup_name = strrchr(name, ':'); + if (lookup_name != NULL) + lookup_name++; + else + lookup_name = name; + + ASSERT(table != NULL); + + for (vi = table; vi->name; vi++) { + if (!strcmp(vi->name, lookup_name)) + return vi; + } + /* ran to end of table */ + + return NULL; /* var name not found */ +} + +int +bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) +{ + int bcmerror = 0; + + /* length check on io buf */ + switch (vi->type) { + case IOVT_BOOL: + case IOVT_INT8: + case IOVT_INT16: + case IOVT_INT32: + case IOVT_UINT8: + case IOVT_UINT16: + case IOVT_UINT32: + /* all integers are int32 sized args at the ioctl interface */ + if (len < (int)sizeof(int)) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_BUFFER: + /* buffer must meet minimum length requirement */ + if (len < vi->minlen) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_VOID: + if (!set) { + /* Cannot return nil... 
*/ + bcmerror = BCME_UNSUPPORTED; + } else if (len) { + /* Set is an action w/o parameters */ + bcmerror = BCME_BUFTOOLONG; + } + break; + + default: + /* unknown type for length check in iovar info */ + ASSERT(0); + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + +#endif /* BCMDRIVER */ + +#ifdef BCM_OBJECT_TRACE + +#define BCM_OBJECT_MERGE_SAME_OBJ 0 + +/* some place may add / remove the object to trace list for Linux: */ +/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */ +/* remove: osl_pktfree dev_kfree_skb netif_rx */ + +#define BCM_OBJDBG_COUNT (1024 * 100) +static spinlock_t dbgobj_lock; +#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock) +#define BCM_OBJDBG_LOCK_DESTROY() +#define BCM_OBJDBG_LOCK spin_lock_irqsave +#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore + +#define BCM_OBJDBG_ADDTOHEAD 0 +#define BCM_OBJDBG_ADDTOTAIL 1 + +#define BCM_OBJDBG_CALLER_LEN 32 +struct bcm_dbgobj { + struct bcm_dbgobj *prior; + struct bcm_dbgobj *next; + uint32 flag; + void *obj; + uint32 obj_sn; + uint32 obj_state; + uint32 line; + char caller[BCM_OBJDBG_CALLER_LEN]; +}; + +static struct bcm_dbgobj *dbgobj_freehead = NULL; +static struct bcm_dbgobj *dbgobj_freetail = NULL; +static struct bcm_dbgobj *dbgobj_objhead = NULL; +static struct bcm_dbgobj *dbgobj_objtail = NULL; + +static uint32 dbgobj_sn = 0; +static int dbgobj_count = 0; +static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT]; + +void +bcm_object_trace_init(void) +{ + int i = 0; + BCM_OBJDBG_LOCK_INIT(); + memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT); + dbgobj_freehead = &bcm_dbg_objs[0]; + dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1]; + + for (i = 0; i < BCM_OBJDBG_COUNT; ++i) { + bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ? + dbgobj_freehead : &bcm_dbg_objs[i + 1]; + bcm_dbg_objs[i].prior = (i == 0) ? 
+ dbgobj_freetail : &bcm_dbg_objs[i - 1]; + } +} + +void +bcm_object_trace_deinit(void) +{ + if (dbgobj_objhead || dbgobj_objtail) { + printf("%s: not all objects are released\n", __FUNCTION__); + ASSERT(0); + } + BCM_OBJDBG_LOCK_DESTROY(); +} + +static void +bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj) +{ + if ((dbgobj == *head) && (dbgobj == *tail)) { + *head = NULL; + *tail = NULL; + } else if (dbgobj == *head) { + *head = (*head)->next; + } else if (dbgobj == *tail) { + *tail = (*tail)->prior; + } + dbgobj->next->prior = dbgobj->prior; + dbgobj->prior->next = dbgobj->next; +} + +static void +bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int addtotail) +{ + if (!(*head) && !(*tail)) { + *head = dbgobj; + *tail = dbgobj; + dbgobj->next = dbgobj; + dbgobj->prior = dbgobj; + } else if ((*head) && (*tail)) { + (*tail)->next = dbgobj; + (*head)->prior = dbgobj; + dbgobj->next = *head; + dbgobj->prior = *tail; + if (addtotail == BCM_OBJDBG_ADDTOTAIL) + *tail = dbgobj; + else + *head = dbgobj; + } else { + ASSERT(0); /* can't be this case */ + } +} + +static INLINE void +bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int movetotail) +{ + if ((*head) && (*tail)) { + if (movetotail == BCM_OBJDBG_ADDTOTAIL) { + if (dbgobj != (*tail)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } else { + if (dbgobj != (*head)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } + } else { + ASSERT(0); /* can't be this case */ + } +} + +void +bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + if (opt == BCM_OBJDBG_ADD_PKT || + opt == BCM_OBJDBG_ADD) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p allocated from %s(%d)," + " allocate again from %s(%d)\n", + __FUNCTION__, dbgobj->obj, + dbgobj->caller, dbgobj->line, + caller, line); + ASSERT(0); + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +#if BCM_OBJECT_MERGE_SAME_OBJ + dbgobj = dbgobj_freetail; + while (dbgobj) { + if (dbgobj->obj == obj) { + goto FREED_ENTRY_FOUND; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + + dbgobj = dbgobj_freehead; +#if BCM_OBJECT_MERGE_SAME_OBJ +FREED_ENTRY_FOUND: +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + if (!dbgobj) { + printf("%s: already got %d objects ?????????????????????\n", + __FUNCTION__, BCM_OBJDBG_COUNT); + ASSERT(0); + goto EXIT; + } + + bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj); + dbgobj->obj = obj; + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + dbgobj->flag = 0; + if (opt == BCM_OBJDBG_ADD_PKT) { + dbgobj->obj_sn = dbgobj_sn++; + dbgobj->obj_state = 0; + /* first 4 bytes is pkt sn */ + if (((unsigned long)PKTTAG(obj)) & 0x3) + printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj)); + *(uint32*)PKTTAG(obj) = dbgobj->obj_sn; + } + bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + + dbgobj_count++; + + } else if (opt == BCM_OBJDBG_REMOVE) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if 
(dbgobj->obj == obj) { + if (dbgobj->flag) { + printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n", + __FUNCTION__, obj, dbgobj->flag, caller, line); + } + bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj); + memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN); + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + dbgobj_count--; + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + dbgobj = dbgobj_freetail; + while (dbgobj && dbgobj->obj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p already freed from from %s(%d)," + " try free again from %s(%d)\n", + __FUNCTION__, obj, + dbgobj->caller, dbgobj->line, + caller, line); + //ASSERT(0); /* release same obj more than one time? */ + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } + + printf("%s: ################### release none-existing obj %p from %s(%d)\n", + __FUNCTION__, obj, caller, line); + //ASSERT(0); /* release same obj more than one time? */ + + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_trace_upd(void *obj, void *obj_new) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + dbgobj->obj = obj_new; + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if ((dbgobj->obj == obj) && + ((!chksn) || (dbgobj->obj_sn == sn))) { + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + dbgobj = dbgobj_freetail; + while (dbgobj) { + if ((dbgobj->obj == obj) && + ((!chksn) || (dbgobj->obj_sn == sn))) { + printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n", + __FUNCTION__, caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state, + dbgobj->caller, dbgobj->line); + goto EXIT; + } + else if (dbgobj->obj == NULL) { + break; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } + + printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n", + __FUNCTION__, obj, caller, line, chksn ? 
"yes" : "no", sn); + dbgobj = dbgobj_objtail; + while (dbgobj) { + printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n", + __FUNCTION__, caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line); + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_feature_set(void *obj, uint32 type, uint32 value) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + if (value & BCM_OBJECT_FEATURE_CLEAR) + dbgobj->flag &= ~(value); + else + dbgobj->flag |= (value); + } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) { + dbgobj->obj_state = value; + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("%s: obj %p not found in active list\n", __FUNCTION__, obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +int +bcm_object_feature_get(void *obj, uint32 type, uint32 value) +{ + int rtn = 0; + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR); + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("%s: obj %p not found in active list\n", __FUNCTION__, obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return rtn; +} + +#endif /* BCM_OBJECT_TRACE */ + +uint8 * +bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst) +{ + uint8 *new_dst = dst; + bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst; + + /* dst buffer should always be valid */ + ASSERT(dst); + + /* data len must be within valid range */ + ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)); + + /* source data buffer pointer should be valid, unless datalen is 0 + * meaning no data with this TLV + */ + ASSERT((data != NULL) || (datalen == 0)); + + /* only do work if the inputs are valid + * - must have a dst to write to AND + * - datalen must be within range AND + * - the source data pointer must be non-NULL if datalen is non-zero + * (this last condition detects datalen > 0 with a NULL data pointer) + */ + if ((dst != NULL) && + ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) && + ((data != NULL) || (datalen == 0))) { + + /* write type, len fields */ + dst_tlv->id = (uint8)type; + dst_tlv->len = (uint8)datalen; + + /* if data is present, copy to the output buffer and update + * pointer to output buffer + */ + if (datalen > 0) { + + memcpy(dst_tlv->data, data, datalen); + } + + /* update the output destination poitner to point past + * the TLV written + */ + new_dst = dst + BCM_TLV_HDR_SIZE + datalen; + } + + return (new_dst); +} + +uint8 * +bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen) +{ + uint8 *new_dst = dst; + + if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) { + + /* if len + tlv hdr len is more than destlen, don't do anything + * just 
return the buffer untouched + */ + if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) { + + new_dst = bcm_write_tlv(type, data, datalen, dst); + } + } + + return (new_dst); +} + +uint8 * +bcm_copy_tlv(const void *src, uint8 *dst) +{ + uint8 *new_dst = dst; + const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src; + uint totlen; + + ASSERT(dst && src); + if (dst && src) { + + totlen = BCM_TLV_HDR_SIZE + src_tlv->len; + memcpy(dst, src_tlv, totlen); + new_dst = dst + totlen; + } + + return (new_dst); +} + + +uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen) +{ + uint8 *new_dst = dst; + const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src; + + ASSERT(src); + if (src) { + if (bcm_valid_tlv(src_tlv, dst_maxlen)) { + new_dst = bcm_copy_tlv(src, dst); + } + } + + return (new_dst); +} + + +#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) +/******************************************************************************* + * crc8 + * + * Computes a crc8 over the input data using the polynomial: + * + * x^8 + x^7 +x^6 + x^4 + x^2 + 1 + * + * The caller provides the initial value (either CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC8_GOOD_VALUE indicates a valid CRC. + * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint8 crc8_table[256] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F +}; + +#define CRC_INNER_LOOP(n, c, x) \ + (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] + +uint8 +hndcrc8( 
+ uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint8 crc /* either CRC8_INIT_VALUE or previous return value */ +) +{ + /* hard code the crc loop instead of using CRC_INNER_LOOP macro + * to avoid the undefined and unnecessary (uint8 >> 8) operation. + */ + while (nbytes-- > 0) + crc = crc8_table[(crc ^ *pdata++) & 0xff]; + + return crc; +} + +/******************************************************************************* + * crc16 + * + * Computes a crc16 over the input data using the polynomial: + * + * x^16 + x^12 +x^5 + 1 + * + * The caller provides the initial value (either CRC16_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC16_GOOD_VALUE indicates a valid CRC. + * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint16 crc16_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, + 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, + 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, + 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, + 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, + 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, + 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, + 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, + 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, + 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, + 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, + 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, + 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, + 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, + 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, + 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, + 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, + 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, + 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, + 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, + 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, + 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, + 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, + 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, + 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, + 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, + 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, + 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, + 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, + 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, + 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, + 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 +}; + +uint16 +hndcrc16( + uint8 *pdata, /* 
pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint16 crc /* either CRC16_INIT_VALUE or previous return value */ +) +{ + while (nbytes-- > 0) + CRC_INNER_LOOP(16, crc, *pdata++); + return crc; +} + +static const uint32 crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +/* + * crc input is CRC32_INIT_VALUE for a fresh start, 
or previous return value if + * accumulating over multiple pieces. + */ +uint32 +hndcrc32(uint8 *pdata, uint nbytes, uint32 crc) +{ + uint8 *pend; + pend = pdata + nbytes; + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); + + return crc; +} + +#ifdef notdef +#define CLEN 1499 /* CRC Length */ +#define CBUFSIZ (CLEN+4) +#define CNBUFS 5 /* # of bufs */ + +void +testcrc32(void) +{ + uint j, k, l; + uint8 *buf; + uint len[CNBUFS]; + uint32 crcr; + uint32 crc32tv[CNBUFS] = + {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; + + ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); + + /* step through all possible alignments */ + for (l = 0; l <= 4; l++) { + for (j = 0; j < CNBUFS; j++) { + len[j] = CLEN; + for (k = 0; k < len[j]; k++) + *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; + } + + for (j = 0; j < CNBUFS; j++) { + crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); + ASSERT(crcr == crc32tv[j]); + } + } + + MFREE(buf, CBUFSIZ*CNBUFS); + return; +} +#endif /* notdef */ + +/* + * Advance from the current 1-byte tag/1-byte length/variable-length value + * triple, to the next, returning a pointer to the next. + * If the current or next TLV is invalid (does not fit in given buffer length), + * NULL is returned. + * *buflen is not modified if the TLV elt parameter is invalid, or is decremented + * by the TLV parameter's length if it is valid. + */ +bcm_tlv_t * +bcm_next_tlv(bcm_tlv_t *elt, int *buflen) +{ + int len; + + /* validate current elt */ + if (!bcm_valid_tlv(elt, *buflen)) { + return NULL; + } + + /* advance to next elt */ + len = elt->len; + elt = (bcm_tlv_t*)(elt->data + len); + *buflen -= (TLV_HDR_LEN + len); + + /* validate next elt */ + if (!bcm_valid_tlv(elt, *buflen)) { + return NULL; + } + + return elt; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + */ +bcm_tlv_t * +bcm_parse_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + if ((elt = (bcm_tlv_t*)buf) == NULL) { + return NULL; + } + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + int len = elt->len; + + /* validate remaining totlen */ + if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { + + return (elt); + } + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + * return NULL if not found or length field < min_varlen + */ +bcm_tlv_t * +bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen) +{ + bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key); + if (ret == NULL || ret->len < min_bodylen) { + return NULL; + } + return ret; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag. Stop parsing when we see an element whose ID is greater + * than the target key. 
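+ * e.g. scanning an ordered element list with IDs {1, 3, 7} for key 5 stops at
+ * the ID 7 element and returns NULL without walking the rest of the buffer.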
+ */ +bcm_tlv_t * +bcm_parse_ordered_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + uint id = elt->id; + int len = elt->len; + + /* Punt if we start seeing IDs > than target key */ + if (id > key) { + return (NULL); + } + + /* validate remaining totlen */ + if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { + return (elt); + } + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + return NULL; +} +#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */ + +#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ + defined(DHD_DEBUG) +int +bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len) +{ + int i, slen = 0; + uint32 bit, mask; + const char *name; + mask = bd->mask; + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) { + bit = bd->bitfield[i].bit; + if ((flags & mask) == bit) { + if (len > (int)strlen(name)) { + slen = strlen(name); + strncpy(buf, name, slen+1); + } + break; + } + } + return slen; +} + +int +bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) +{ + int i; + char* p = buf; + char hexstr[16]; + int slen = 0, nlen = 0; + uint32 bit; + const char* name; + + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; flags != 0; i++) { + bit = bd[i].bit; + name = bd[i].name; + if (bit == 0 && flags != 0) { + /* print any unnamed bits */ + snprintf(hexstr, 16, "0x%X", flags); + name = hexstr; + flags = 0; /* exit loop */ + } else if ((flags & bit) == 0) + continue; + flags &= ~bit; + nlen = strlen(name); + slen += nlen; + /* count btwn flag space */ + if (flags != 0) + slen += 1; + /* need NULL char as well */ + if (len <= slen) + break; + /* copy NULL char but don't count it */ + strncpy(p, name, nlen + 1); + p += nlen; + /* copy btwn flag space and NULL char */ + if (flags != 0) + p += snprintf(p, 2, " "); + } + + /* indicate the str was too short */ + if (flags != 0) { + p += snprintf(p, 2, ">"); + } + + return (int)(p - buf); +} +#endif + +/* print bytes formatted as hex to a string. 
return the resulting string length */ +int +bcm_format_hex(char *str, const void *bytes, int len) +{ + int i; + char *p = str; + const uint8 *src = (const uint8*)bytes; + + for (i = 0; i < len; i++) { + p += snprintf(p, 3, "%02X", *src); + src++; + } + return (int)(p - str); +} + +/* pretty hex print a contiguous buffer */ +void +prhex(const char *msg, uchar *buf, uint nbytes) +{ + char line[128], *p; + int len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + printf("%s\n", line); /* flush line */ + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) + printf("%s\n", line); +} + +static const char *crypto_algo_names[] = { + "NONE", + "WEP1", + "TKIP", + "WEP128", + "AES_CCM", + "AES_OCB_MSDU", + "AES_OCB_MPDU", + "NALG", + "UNDEF", + "UNDEF", + "UNDEF", + "UNDEF" + "PMK", + "BIP", + "AES_GCM", + "AES_CCM256", + "AES_GCM256", + "BIP_CMAC256", + "BIP_GMAC", + "BIP_GMAC256", + "UNDEF" +}; + +const char * +bcm_crypto_algo_name(uint algo) +{ + return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; +} + + +char * +bcm_chipname(uint chipid, char *buf, uint len) +{ + const char *fmt; + + fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x"; + snprintf(buf, len, fmt, chipid); + return buf; +} + +/* Produce a human-readable string for boardrev */ +char * +bcm_brev_str(uint32 brev, char *buf) +{ + if (brev < 0x100) + snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); + else + snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff); + + return (buf); +} + +#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ + +/* dump large strings to console */ +void +printbig(char *buf) +{ + uint len, max_len; + char c; + + len = (uint)strlen(buf); + + max_len = BUFSIZE_TODUMP_ATONCE; + + while (len > max_len) { + c = buf[max_len]; + buf[max_len] = '\0'; + printf("%s", buf); + buf[max_len] = c; + + buf += max_len; + len -= max_len; + } + /* print the remaining string */ + printf("%s\n", buf); + return; +} + +/* routine to dump fields in a fileddesc structure */ +uint +bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, + char *buf, uint32 bufsize) +{ + uint filled_len; + int len; + struct fielddesc *cur_ptr; + + filled_len = 0; + cur_ptr = fielddesc_array; + + while (bufsize > 1) { + if (cur_ptr->nameandfmt == NULL) + break; + len = snprintf(buf, bufsize, cur_ptr->nameandfmt, + read_rtn(arg0, arg1, cur_ptr->offset)); + /* check for snprintf overflow or error */ + if (len < 0 || (uint32)len >= bufsize) + len = bufsize - 1; + buf += len; + bufsize -= len; + filled_len += len; + cur_ptr++; + } + return filled_len; +} + +uint +bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint buflen) +{ + uint len; + + len = (uint)strlen(name) + 1; + + if ((len + datalen) > buflen) + return 0; + + strncpy(buf, name, buflen); + + /* append data onto the end of the name string */ + memcpy(&buf[len], data, datalen); + len += datalen; + + return len; +} + +/* Quarter dBm units to mW + * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 + * Table is offset so the last entry is largest mW value that fits in + * a uint16. 
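+ * e.g. the first entry corresponds to 153 qdBm = 38.25 dBm, and
+ * 10^(38.25/10) mW is roughly 6683 mW, matching nqdBm_to_mW_map[0]; the last
+ * entry, 192 qdBm = 48 dBm, is roughly 63096 mW and still fits in a uint16.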
+ */ + +#define QDBM_OFFSET 153 /* Offset for first entry */ +#define QDBM_TABLE_LEN 40 /* Table size */ + +/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. + * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 + */ +#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ + +/* Largest mW value that will round down to the last table entry, + * QDBM_OFFSET + QDBM_TABLE_LEN-1. + * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. + */ +#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ + +static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { +/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ +/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, +/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, +/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, +/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, +/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 +}; + +uint16 +bcm_qdbm_to_mw(uint8 qdbm) +{ + uint factor = 1; + int idx = qdbm - QDBM_OFFSET; + + if (idx >= QDBM_TABLE_LEN) { + /* clamp to max uint16 mW value */ + return 0xFFFF; + } + + /* scale the qdBm index up to the range of the table 0-40 + * where an offset of 40 qdBm equals a factor of 10 mW. + */ + while (idx < 0) { + idx += 40; + factor *= 10; + } + + /* return the mW value scaled down to the correct factor of 10, + * adding in factor/2 to get proper rounding. + */ + return ((nqdBm_to_mW_map[idx] + factor/2) / factor); +} + +uint8 +bcm_mw_to_qdbm(uint16 mw) +{ + uint8 qdbm; + int offset; + uint mw_uint = mw; + uint boundary; + + /* handle boundary case */ + if (mw_uint <= 1) + return 0; + + offset = QDBM_OFFSET; + + /* move mw into the range of the table */ + while (mw_uint < QDBM_TABLE_LOW_BOUND) { + mw_uint *= 10; + offset -= 40; + } + + for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { + boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - + nqdBm_to_mW_map[qdbm])/2; + if (mw_uint < boundary) break; + } + + qdbm += (uint8)offset; + + return (qdbm); +} + + +uint +bcm_bitcount(uint8 *bitmap, uint length) +{ + uint bitcount = 0, i; + uint8 tmp; + for (i = 0; i < length; i++) { + tmp = bitmap[i]; + while (tmp) { + bitcount++; + tmp &= (tmp - 1); + } + } + return bitcount; +} + +#if defined(BCMDRIVER) || defined(WL_UNITTEST) + +/* triggers bcm_bprintf to print to kernel log */ +bool bcm_bprintf_bypass = FALSE; + +/* Initialization of bcmstrbuf structure */ +void +bcm_binit(struct bcmstrbuf *b, char *buf, uint size) +{ + b->origsize = b->size = size; + b->origbuf = b->buf = buf; +} + +/* Buffer sprintf wrapper to guard against buffer overflow */ +int +bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) +{ + va_list ap; + int r; + + va_start(ap, fmt); + + r = vsnprintf(b->buf, b->size, fmt, ap); + if (bcm_bprintf_bypass == TRUE) { + printf(b->buf); + goto exit; + } + + /* Non Ansi C99 compliant returns -1, + * Ansi compliant return r >= b->size, + * bcmstdlib returns 0, handle all + */ + /* r == 0 is also the case when strlen(fmt) is zero. + * typically the case when "" is passed as argument. 
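+ * On truncation or error the code below zeroes b->size, so further
+ * bcm_bprintf() calls on the same bcmstrbuf cannot run past the buffer.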
+ */ + if ((r == -1) || (r >= (int)b->size)) { + b->size = 0; + } else { + b->size -= r; + b->buf += r; + } + +exit: + va_end(ap); + + return r; +} + +void +bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len) +{ + int i; + + if (msg != NULL && msg[0] != '\0') + bcm_bprintf(b, "%s", msg); + for (i = 0; i < len; i ++) + bcm_bprintf(b, "%02X", buf[i]); + if (newline) + bcm_bprintf(b, "\n"); +} + +void +bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) +{ + int i; + + for (i = 0; i < num_bytes; i++) { + num[i] += amount; + if (num[i] >= amount) + break; + amount = 1; + } +} + +int +bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes) +{ + int i; + + for (i = nbytes - 1; i >= 0; i--) { + if (arg1[i] != arg2[i]) + return (arg1[i] - arg2[i]); + } + return 0; +} + +void +bcm_print_bytes(const char *name, const uchar *data, int len) +{ + int i; + int per_line = 0; + + printf("%s: %d \n", name ? name : "", len); + for (i = 0; i < len; i++) { + printf("%02x ", *data++); + per_line++; + if (per_line == 16) { + per_line = 0; + printf("\n"); + } + } + printf("\n"); +} + +/* Look for vendor-specific IE with specified OUI and optional type */ +bcm_tlv_t * +bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len) +{ + bcm_tlv_t *ie; + uint8 ie_len; + + ie = (bcm_tlv_t*)tlvs; + + /* make sure we are looking at a valid IE */ + if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) { + return NULL; + } + + /* Walk through the IEs looking for an OUI match */ + do { + ie_len = ie->len; + if ((ie->id == DOT11_MNG_PROPR_ID) && + (ie_len >= (DOT11_OUI_LEN + type_len)) && + !bcmp(ie->data, voui, DOT11_OUI_LEN)) + { + /* compare optional type */ + if (type_len == 0 || + !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) { + return (ie); /* a match */ + } + } + } while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL); + + return NULL; +} + +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) + +int +bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) +{ + uint i, c; + char *p = buf; + char *endp = buf + SSID_FMT_BUF_LEN; + + if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; + + for (i = 0; i < ssid_len; i++) { + c = (uint)ssid[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = '\\'; + } else if (bcm_isprint((uchar)c)) { + *p++ = (char)c; + } else { + p += snprintf(p, (endp - p), "\\x%02X", c); + } + } + *p = '\0'; + ASSERT(p < endp); + + return (int)(p - buf); +} +#endif + +#endif /* BCMDRIVER || WL_UNITTEST */ + +/* + * ProcessVars:Takes a buffer of "=\n" lines read from a file and ending in a NUL. + * also accepts nvram files which are already in the format of =\0\=\0 + * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. + * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. 
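+ * e.g. "name=value\r\n# comment\n\nssid=test\n" is rewritten in place as
+ * "name=value\0ssid=test\0"; the tail is NUL-padded and the returned length
+ * is rounded up to a multiple of 4 when the buffer allows it.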
+*/ + +unsigned int +process_nvram_vars(char *varbuf, unsigned int len) +{ + char *dp; + bool findNewline; + int column; + unsigned int buf_len, n; + unsigned int pad = 0; + + dp = varbuf; + + findNewline = FALSE; + column = 0; + + for (n = 0; n < len; n++) { + if (varbuf[n] == '\r') + continue; + if (findNewline && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + findNewline = TRUE; + continue; + } + if (varbuf[n] == '\n') { + if (column == 0) + continue; + *dp++ = 0; + column = 0; + continue; + } + *dp++ = varbuf[n]; + column++; + } + buf_len = (unsigned int)(dp - varbuf); + if (buf_len % 4) { + pad = 4 - buf_len % 4; + if (pad && (buf_len + pad <= len)) { + buf_len += pad; + } + } + + while (dp < varbuf + n) + *dp++ = 0; + + return buf_len; +} + +/* calculate a * b + c */ +void +bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c) +{ +#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;} + uint32 r1, r0; + uint32 a1, a0, b1, b0, t, cc = 0; + + a1 = a >> 16; + a0 = a & 0xffff; + b1 = b >> 16; + b0 = b & 0xffff; + + r0 = a0 * b0; + FORMALIZE(r0); + + t = (a1 * b0) << 16; + FORMALIZE(t); + + r0 += t; + FORMALIZE(r0); + + t = (a0 * b1) << 16; + FORMALIZE(t); + + r0 += t; + FORMALIZE(r0); + + FORMALIZE(c); + + r0 += c; + FORMALIZE(r0); + + r0 |= (cc % 2) ? 0x80000000 : 0; + r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2); + + *r_high = r1; + *r_low = r0; +} + +/* calculate a / b */ +void +bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b) +{ + uint32 a1 = a_high, a0 = a_low, r0 = 0; + + if (b < 2) + return; + + while (a1 != 0) { + r0 += (0xffffffff / b) * a1; + bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0); + } + + r0 += a0 / b; + *r = r0; +} + +#ifndef setbit /* As in the header file */ +#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS +/* Set bit in byte array. */ +void +setbit(void *array, uint bit) +{ + ((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY); +} + +/* Clear bit in byte array. */ +void +clrbit(void *array, uint bit) +{ + ((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY)); +} + +/* Test if bit is set in byte array. */ +bool +isset(const void *array, uint bit) +{ + return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))); +} + +/* Test if bit is clear in byte array. */ +bool +isclr(const void *array, uint bit) +{ + return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0); +} +#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */ +#endif /* setbit */ + +void +set_bitrange(void *array, uint start, uint end, uint maxbit) +{ + uint startbyte = start/NBBY; + uint endbyte = end/NBBY; + uint i, startbytelastbit, endbytestartbit; + + if (end >= start) { + if (endbyte - startbyte > 1) + { + startbytelastbit = (startbyte+1)*NBBY - 1; + endbytestartbit = endbyte*NBBY; + for (i = startbyte+1; i < endbyte; i++) + ((uint8 *)array)[i] = 0xFF; + for (i = start; i <= startbytelastbit; i++) + setbit(array, i); + for (i = endbytestartbit; i <= end; i++) + setbit(array, i); + } else { + for (i = start; i <= end; i++) + setbit(array, i); + } + } + else { + set_bitrange(array, start, maxbit, maxbit); + set_bitrange(array, 0, end, maxbit); + } +} + +void +bcm_bitprint32(const uint32 u32arg) +{ + int i; + for (i = NBITS(uint32) - 1; i >= 0; i--) { + isbitset(u32arg, i) ? 
printf("1") : printf("0"); + if ((i % NBBY) == 0) printf(" "); + } + printf("\n"); +} + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 +bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) +{ + while (len > 1) { + sum += (buf[0] << 8) | buf[1]; + buf += 2; + len -= 2; + } + + if (len > 0) { + sum += (*buf) << 8; + } + + while (sum >> 16) { + sum = (sum & 0xffff) + (sum >> 16); + } + + return ((uint16)~sum); +} +#if defined(BCMDRIVER) && !defined(_CFEZ_) +/* + * Hierarchical Multiword bitmap based small id allocator. + * + * Multilevel hierarchy bitmap. (maximum 2 levels) + * First hierarchy uses a multiword bitmap to identify 32bit words in the + * second hierarchy that have at least a single bit set. Each bit in a word of + * the second hierarchy represents a unique ID that may be allocated. + * + * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed. + * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word word + * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs. + * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap wordss identifying first non + * non-zero bitmap word carrying at least one free ID. + * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations. + * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID + * + * Design Notes: + * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. A runtime count of how many + * bits are computed each time on allocation and deallocation, requiring 4 + * array indexed access and 3 arithmetic operations. When not defined, a runtime + * count of set bits state is maintained. Upto 32 Bytes per 1024 IDs is needed. + * In a 4K max ID allocator, up to 128Bytes are hence used per instantiation. + * In a memory limited system e.g. dongle builds, a CPU for memory tradeoff may + * be used by defining BCM_MWBMAP_USE_CNTSETBITS. + * + * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array + * size is fixed. No intention to support larger than 4K indice allocation. ID + * allocators for ranges smaller than 4K will have a wastage of only 12Bytes + * with savings in not having to use an indirect access, had it been dynamically + * allocated. + */ +#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */ + +#define BCM_MWBMAP_BITS_WORD (NBITS(uint32)) +#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD) +#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD) +#define BCM_MWBMAP_SHIFT_OP (5) +#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1)) +#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP) +#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP) + +/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */ +#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl)) +#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr)) + +#if defined(BCM_MWBMAP_DEBUG) +#define BCM_MWBMAP_AUDIT(mwb) \ + do { \ + ASSERT((mwb != NULL) && \ + (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \ + bcm_mwbmap_audit(mwb); \ + } while (0) +#define MWBMAP_ASSERT(exp) ASSERT(exp) +#define MWBMAP_DBG(x) printf x +#else /* !BCM_MWBMAP_DEBUG */ +#define BCM_MWBMAP_AUDIT(mwb) do {} while (0) +#define MWBMAP_ASSERT(exp) do {} while (0) +#define MWBMAP_DBG(x) +#endif /* !BCM_MWBMAP_DEBUG */ + + +typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */ + uint16 wmaps; /* Total number of words in free wd bitmap */ + uint16 imaps; /* Total number of words in free id bitmap */ + int32 ifree; /* Count of free indices. 
Used only in audits */ + uint16 total; /* Total indices managed by multiword bitmap */ + + void * magic; /* Audit handle parameter from user */ + + uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */ +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */ +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + + uint32 id_bitmap[0]; /* Second level bitmap */ +} bcm_mwbmap_t; + +/* Incarnate a hierarchical multiword bitmap based small index allocator. */ +struct bcm_mwbmap * +bcm_mwbmap_init(osl_t *osh, uint32 items_max) +{ + struct bcm_mwbmap * mwbmap_p; + uint32 wordix, size, words, extra; + + /* Implementation Constraint: Uses 32bit word bitmap */ + MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U); + MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U); + MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX)); + MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U); + + ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX); + + /* Determine the number of words needed in the multiword bitmap */ + extra = BCM_MWBMAP_MODOP(items_max); + words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U); + + /* Allocate runtime state of multiword bitmap */ + /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */ + size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words); + mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size); + if (mwbmap_p == (bcm_mwbmap_t *)NULL) { + ASSERT(0); + goto error1; + } + memset(mwbmap_p, 0, size); + + /* Initialize runtime multiword bitmap state */ + mwbmap_p->imaps = (uint16)words; + mwbmap_p->ifree = (int32)items_max; + mwbmap_p->total = (uint16)items_max; + + /* Setup magic, for use in audit of handle */ + mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p); + + /* Setup the second level bitmap of free indices */ + /* Mark all indices as available */ + for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) { + mwbmap_p->id_bitmap[wordix] = (uint32)(~0U); +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD; +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + } + + /* Ensure that extra indices are tagged as un-available */ + if (extra) { /* fixup the free ids in last bitmap and wd_count */ + uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1]; + *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */ +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + } + + /* Setup the first level bitmap hierarchy */ + extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps); + words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U); + + mwbmap_p->wmaps = (uint16)words; + + for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++) + mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U); + if (extra) { + uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1]; + *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ + } + + return mwbmap_p; + +error1: + return BCM_MWBMAP_INVALID_HDL; +} + +/* Release resources used by multiword bitmap based small index allocator. */ +void +bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap) + + (sizeof(uint32) * mwbmap_p->imaps)); + return; +} + +/* Allocate a unique small index using a multiword bitmap index allocator. 
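+ * Returns BCM_MWBMAP_INVALID_IDX when no free index is left.
+ * Typical usage (sketch): hdl = bcm_mwbmap_init(osh, items_max);
+ * idx = bcm_mwbmap_alloc(hdl); ... ; bcm_mwbmap_free(hdl, idx);
+ * bcm_mwbmap_fini(osh, hdl);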
*/ +uint32 BCMFASTPATH +bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 wordix, bitmap; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + /* Start with the first hierarchy */ + for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) { + + bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */ + + if (bitmap != 0U) { + + uint32 count, bitix, *bitmap_p; + + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + + /* clear all except trailing 1 */ + bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); + MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == + bcm_count_leading_zeros(bitmap)); + bitix = (BCM_MWBMAP_BITS_WORD - 1) + - bcm_count_leading_zeros(bitmap); /* use asm clz */ + wordix = BCM_MWBMAP_MULOP(wordix) + bitix; + + /* Clear bit if wd count is 0, without conditional branch */ +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1; +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + mwbmap_p->wd_count[wordix]--; + count = mwbmap_p->wd_count[wordix]; + MWBMAP_ASSERT(count == + (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1)); +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + MWBMAP_ASSERT(count >= 0); + + /* clear wd_bitmap bit if id_map count is 0 */ + bitmap = (count == 0) << bitix; + + MWBMAP_DBG(( + "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count)); + + *bitmap_p ^= bitmap; + + /* Use bitix in the second hierarchy */ + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */ + MWBMAP_ASSERT(bitmap != 0U); + + /* clear all except trailing 1 */ + bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); + MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == + bcm_count_leading_zeros(bitmap)); + bitix = BCM_MWBMAP_MULOP(wordix) + + (BCM_MWBMAP_BITS_WORD - 1) + - bcm_count_leading_zeros(bitmap); /* use asm clz */ + + mwbmap_p->ifree--; /* decrement system wide free count */ + MWBMAP_ASSERT(mwbmap_p->ifree >= 0); + + MWBMAP_DBG(( + "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, + mwbmap_p->ifree)); + + *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */ + + return bitix; + } + } + + ASSERT(mwbmap_p->ifree == 0); + + return BCM_MWBMAP_INVALID_IDX; +} + +/* Force an index at a specified position to be in use */ +void +bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 count, wordix, bitmap, *bitmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + ASSERT(bitix < mwbmap_p->total); + + /* Start with second hierarchy */ + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix)); + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + ASSERT((*bitmap_p & bitmap) == bitmap); + + mwbmap_p->ifree--; /* update free count */ + ASSERT(mwbmap_p->ifree >= 0); + + MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, + mwbmap_p->ifree)); + + *bitmap_p ^= bitmap; /* mark as in use */ + + /* Update first hierarchy */ + bitix = wordix; + + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]); +#else /* ! 
BCM_MWBMAP_USE_CNTSETBITS */ + mwbmap_p->wd_count[bitix]--; + count = mwbmap_p->wd_count[bitix]; + MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix])); +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + MWBMAP_ASSERT(count >= 0); + + bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix); + + MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d", + BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap, + (*bitmap_p) ^ bitmap, count)); + + *bitmap_p ^= bitmap; /* mark as in use */ + + return; +} + +/* Free a previously allocated index back into the multiword bitmap allocator */ +void BCMFASTPATH +bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 wordix, bitmap, *bitmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + ASSERT(bitix < mwbmap_p->total); + + /* Start with second level hierarchy */ + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap = (1U << BCM_MWBMAP_MODOP(bitix)); + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + ASSERT((*bitmap_p & bitmap) == 0U); /* ASSERT not a double free */ + + mwbmap_p->ifree++; /* update free count */ + ASSERT(mwbmap_p->ifree <= mwbmap_p->total); + + MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, + mwbmap_p->ifree)); + + *bitmap_p |= bitmap; /* mark as available */ + + /* Now update first level hierarchy */ + + bitix = wordix; + + wordix = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */ + bitmap = (1U << BCM_MWBMAP_MODOP(bitix)); + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[bitix]++; +#endif + +#if defined(BCM_MWBMAP_DEBUG) + { + uint32 count; +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]); +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + count = mwbmap_p->wd_count[bitix]; + MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix])); +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + + MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD); + + MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count)); + } +#endif /* BCM_MWBMAP_DEBUG */ + + *bitmap_p |= bitmap; + + return; +} + +/* Fetch the toal number of free indices in the multiword bitmap allocator */ +uint32 +bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + ASSERT(mwbmap_p->ifree >= 0); + + return mwbmap_p->ifree; +} + +/* Determine whether an index is inuse or free */ +bool +bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 wordix, bitmap; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + ASSERT(bitix < mwbmap_p->total); + + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap = (1U << BCM_MWBMAP_MODOP(bitix)); + + return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U); +} + +/* Debug dump a multiword bitmap allocator */ +void +bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl) +{ + uint32 ix, count; + bcm_mwbmap_t * mwbmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p, + mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total); + for (ix = 0U; ix < mwbmap_p->wmaps; ix++) { + printf("\tWDMAP:%2u. 
0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]); + bcm_bitprint32(mwbmap_p->wd_bitmap[ix]); + printf("\n"); + } + for (ix = 0U; ix < mwbmap_p->imaps; ix++) { +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]); +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + count = mwbmap_p->wd_count[ix]; + MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix])); +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count); + bcm_bitprint32(mwbmap_p->id_bitmap[ix]); + printf("\n"); + } + + return; +} + +/* Audit a hierarchical multiword bitmap */ +void +bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p; + + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) { + + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + + for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) { + if ((*bitmap_p) & (1 << bitix)) { + idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix; +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]); +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + count = mwbmap_p->wd_count[idmap_ix]; + ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix])); +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + ASSERT(count != 0U); + free_cnt += count; + } + } + } + + ASSERT((int)free_cnt == mwbmap_p->ifree); +} +/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */ + +/* Simple 16bit Id allocator using a stack implementation. */ +typedef struct id16_map { + uint32 failures; /* count of failures */ + void *dbg; /* debug placeholder */ + uint16 total; /* total number of ids managed by allocator */ + uint16 start; /* start value of 16bit ids to be managed */ + int stack_idx; /* index into stack of available ids */ + uint16 stack[0]; /* stack of 16 bit ids */ +} id16_map_t; + +#define ID16_MAP_SZ(items) (sizeof(id16_map_t) + \ + (sizeof(uint16) * (items))) + +#if defined(BCM_DBG) + +/* Uncomment BCM_DBG_ID16 to debug double free */ +/* #define BCM_DBG_ID16 */ + +typedef struct id16_map_dbg { + uint16 total; + bool avail[0]; +} id16_map_dbg_t; +#define ID16_MAP_DBG_SZ(items) (sizeof(id16_map_dbg_t) + \ + (sizeof(bool) * (items))) +#define ID16_MAP_MSG(x) print x +#else +#define ID16_MAP_MSG(x) +#endif /* BCM_DBG */ + +void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */ +id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16) +{ + uint16 idx, val16; + id16_map_t * id16_map; + + ASSERT(total_ids > 0); + + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. + */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); + + id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids)); + if (id16_map == NULL) { + return NULL; + } + + id16_map->total = total_ids; + id16_map->start = start_val16; + id16_map->failures = 0; + id16_map->dbg = NULL; + + /* + * Populate stack with 16bit id values, commencing with start_val16. + * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map. 
+ */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids)); + + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return (void *)id16_map; +} + +void * /* Destruct an id16 allocator instance */ +id16_map_fini(osl_t *osh, void * id16_map_hndl) +{ + uint16 total_ids; + id16_map_t * id16_map; + + if (id16_map_hndl == NULL) + return NULL; + + id16_map = (id16_map_t *)id16_map_hndl; + + total_ids = id16_map->total; + ASSERT(total_ids > 0); + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids)); + id16_map->dbg = NULL; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + id16_map->total = 0; + MFREE(osh, id16_map, ID16_MAP_SZ(total_ids)); + + return NULL; +} + +void +id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16) +{ + uint16 idx, val16; + id16_map_t * id16_map; + + ASSERT(total_ids > 0); + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. + */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); + + id16_map = (id16_map_t *)id16_map_hndl; + if (id16_map == NULL) { + return; + } + + id16_map->total = total_ids; + id16_map->start = start_val16; + id16_map->failures = 0; + + /* Populate stack with 16bit id values, commencing with start_val16 */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ +} + +uint16 BCMFASTPATH /* Allocate a unique 16bit id */ +id16_map_alloc(void * id16_map_hndl) +{ + uint16 val16; + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + + ASSERT(id16_map->total > 0); + + if (id16_map->stack_idx < 0) { + id16_map->failures++; + return ID16_INVALID; + } + + val16 = id16_map->stack[id16_map->stack_idx]; + id16_map->stack_idx--; + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); + + if (id16_map->dbg) { /* Validate val16 */ + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE); + id16_map_dbg->avail[val16 - id16_map->start] = FALSE; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return val16; +} + + +void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */ +id16_map_free(void * id16_map_hndl, uint16 val16) +{ + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + +#if 
defined(BCM_DBG) && defined(BCM_DBG_ID16) + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); + + if (id16_map->dbg) { /* Validate val16 */ + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE); + id16_map_dbg->avail[val16 - id16_map->start] = TRUE; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + id16_map->stack_idx++; + id16_map->stack[id16_map->stack_idx] = val16; +} + +uint32 /* Returns number of failures to allocate an unique id16 */ +id16_map_failures(void * id16_map_hndl) +{ + ASSERT(id16_map_hndl != NULL); + return ((id16_map_t *)id16_map_hndl)->failures; +} + +bool +id16_map_audit(void * id16_map_hndl) +{ + int idx; + int insane = 0; + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + + ASSERT(id16_map->stack_idx >= -1); + ASSERT(id16_map->stack_idx < (int)id16_map->total); + + if (id16_map->start == ID16_UNDEFINED) + goto done; + + for (idx = 0; idx <= id16_map->stack_idx; idx++) { + ASSERT(id16_map->stack[idx] >= id16_map->start); + ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total)); + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + uint16 val16 = id16_map->stack[idx]; + if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16] != TRUE) { + insane |= 1; + ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n", + id16_map_hndl, idx, val16)); + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + uint16 avail = 0; /* Audit available ids counts */ + for (idx = 0; idx < id16_map_dbg->total; idx++) { + if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx16] == TRUE) + avail++; + } + if (avail && (avail != (id16_map->stack_idx + 1))) { + insane |= 1; + ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n", + id16_map_hndl, avail, id16_map->stack_idx)); + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + +done: + /* invoke any other system audits */ + return (!!insane); +} +/* END: Simple id16 allocator */ + + +#endif + +/* calculate a >> b; and returns only lower 32 bits */ +void +bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b) +{ + uint32 a1 = a_high, a0 = a_low, r0 = 0; + + if (b == 0) { + r0 = a_low; + *r = r0; + return; + } + + if (b < 32) { + a0 = a0 >> b; + a1 = a1 & ((1 << b) - 1); + a1 = a1 << (32 - b); + r0 = a0 | a1; + *r = r0; + return; + } else { + r0 = a1 >> (b - 32); + *r = r0; + return; + } + +} + +/* calculate a + b where a is a 64 bit number and b is a 32 bit number */ +void +bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset) +{ + uint32 r1_lo = *r_lo; + (*r_lo) += offset; + if (*r_lo < r1_lo) + (*r_hi) ++; +} + +/* calculate a - b where a is a 64 bit number and b is a 32 bit number */ +void +bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset) +{ + uint32 r1_lo = *r_lo; + (*r_lo) -= offset; + if (*r_lo > r1_lo) + (*r_hi) --; +} + +#ifdef DEBUG_COUNTER +#if (OSL_SYSUPTIME_SUPPORT == TRUE) +void counter_printlog(counter_tbl_t *ctr_tbl) +{ + uint32 now; + + if (!ctr_tbl->enabled) + return; + + now = OSL_SYSUPTIME(); + + if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) { + uint8 i = 0; + printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print); + + for (i = 0; i < ctr_tbl->needed_cnt; i++) { + printf(" %u", ctr_tbl->cnt[i]); + } + printf("\n"); + + ctr_tbl->prev_log_print = now; + bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint)); 
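+			/* counters are reset after each dump so the next
+			 * log_print_interval window starts from zero.
+			 */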
+ } +} +#else +/* OSL_SYSUPTIME is not supported so no way to get time */ +#define counter_printlog(a) do {} while (0) +#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */ +#endif /* DEBUG_COUNTER */ + +#if defined(BCMDRIVER) && !defined(_CFEZ_) +void +dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size) +{ + uint32 mem_size; + mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); + if (pool) + MFREE(osh, pool, mem_size); +} +dll_pool_t * +dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size) +{ + uint32 mem_size, i; + dll_pool_t * dll_pool_p; + dll_t * elem_p; + + ASSERT(elem_size > sizeof(dll_t)); + + mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); + + if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) { + printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n", + elems_max, elem_size); + ASSERT(0); + return dll_pool_p; + } + + dll_init(&dll_pool_p->free_list); + dll_pool_p->elems_max = elems_max; + dll_pool_p->elem_size = elem_size; + + elem_p = dll_pool_p->elements; + for (i = 0; i < elems_max; i++) { + dll_append(&dll_pool_p->free_list, elem_p); + elem_p = (dll_t *)((uintptr)elem_p + elem_size); + } + + dll_pool_p->free_count = elems_max; + + return dll_pool_p; +} + + +void * +dll_pool_alloc(dll_pool_t * dll_pool_p) +{ + dll_t * elem_p; + + if (dll_pool_p->free_count == 0) { + ASSERT(dll_empty(&dll_pool_p->free_list)); + return NULL; + } + + elem_p = dll_head_p(&dll_pool_p->free_list); + dll_delete(elem_p); + dll_pool_p->free_count -= 1; + + return (void *)elem_p; +} + +void +dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p) +{ + dll_t * node_p = (dll_t *)elem_p; + dll_prepend(&dll_pool_p->free_list, node_p); + dll_pool_p->free_count += 1; +} + + +void +dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p) +{ + dll_t * node_p = (dll_t *)elem_p; + dll_append(&dll_pool_p->free_list, node_p); + dll_pool_p->free_count += 1; +} + +#endif diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c new file mode 100644 index 000000000000..be884cc33cc1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c @@ -0,0 +1,1253 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmwifi_channels.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include +#include + +#ifdef BCMDRIVER +#include +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#else +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +#endif /* BCMDRIVER */ + +#include + +#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) +#include /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */ +#endif + +/* Definitions for D11AC capable Chanspec type */ + +/* Chanspec ASCII representation with 802.11ac capability: + * [ 'g'] ['/' []['/'<1st80channel>'-'<2nd80channel>]] + * + * : + * (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively. + * Default value is 2g if channel <= 14, otherwise 5g. + * : + * channel number of the 5MHz, 10MHz, 20MHz channel, + * or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel. + * : + * (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20. + * : + * (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower. + * + * For 2.4GHz band 40MHz channels, the same primary channel may be the + * upper sideband for one 40MHz channel, and the lower sideband for an + * overlapping 40MHz channel. The U/L disambiguates which 40MHz channel + * is being specified. + * + * For 40MHz in the 5GHz band and all channel bandwidths greater than + * 40MHz, the U/L specificaion is not allowed since the channels are + * non-overlapping and the primary sub-band is derived from its + * position in the wide bandwidth channel. + * + * <1st80Channel>: + * <2nd80Channel>: + * Required for 80+80, otherwise not allowed. + * Specifies the center channel of the first and second 80MHz band. + * + * In its simplest form, it is a 20MHz channel number, with the implied band + * of 2.4GHz if channel number <= 14, and 5GHz otherwise. 
+ * + * To allow for backward compatibility with scripts, the old form for + * 40MHz channels is also allowed: + * + * : + * primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz + * : + * "U" for upper, "L" for lower (or lower case "u" "l") + * + * 5 GHz Examples: + * Chanspec BW Center Ch Channel Range Primary Ch + * 5g8 20MHz 8 - - + * 52 20MHz 52 - - + * 52/40 40MHz 54 52-56 52 + * 56/40 40MHz 54 52-56 56 + * 52/80 80MHz 58 52-64 52 + * 56/80 80MHz 58 52-64 56 + * 60/80 80MHz 58 52-64 60 + * 64/80 80MHz 58 52-64 64 + * 52/160 160MHz 50 36-64 52 + * 36/160 160MGz 50 36-64 36 + * 36/80+80/42-106 80+80MHz 42,106 36-48,100-112 36 + * + * 2 GHz Examples: + * Chanspec BW Center Ch Channel Range Primary Ch + * 2g8 20MHz 8 - - + * 8 20MHz 8 - - + * 6 20MHz 6 - - + * 6/40l 40MHz 8 6-10 6 + * 6l 40MHz 8 6-10 6 + * 6/40u 40MHz 4 2-6 6 + * 6u 40MHz 4 2-6 6 + */ + +/* bandwidth ASCII string */ +static const char *wf_chspec_bw_str[] = +{ + "5", + "10", + "20", + "40", + "80", + "160", + "80+80", +#ifdef WL11ULB + "2.5" +#else /* WL11ULB */ + "na" +#endif /* WL11ULB */ +}; + +static const uint8 wf_chspec_bw_mhz[] = +{5, 10, 20, 40, 80, 160, 160}; + +#define WF_NUM_BW \ + (sizeof(wf_chspec_bw_mhz)/sizeof(uint8)) + +/* 40MHz channels in 5GHz band */ +static const uint8 wf_5g_40m_chans[] = +{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159}; +#define WF_NUM_5G_40M_CHANS \ + (sizeof(wf_5g_40m_chans)/sizeof(uint8)) + +/* 80MHz channels in 5GHz band */ +static const uint8 wf_5g_80m_chans[] = +{42, 58, 106, 122, 138, 155}; +#define WF_NUM_5G_80M_CHANS \ + (sizeof(wf_5g_80m_chans)/sizeof(uint8)) + +/* 160MHz channels in 5GHz band */ +static const uint8 wf_5g_160m_chans[] = +{50, 114}; +#define WF_NUM_5G_160M_CHANS \ + (sizeof(wf_5g_160m_chans)/sizeof(uint8)) + + +/* convert bandwidth from chanspec to MHz */ +static uint +bw_chspec_to_mhz(chanspec_t chspec) +{ + uint bw; + + bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT; + return (bw >= WF_NUM_BW ? 
0 : wf_chspec_bw_mhz[bw]); +} + +/* bw in MHz, return the channel count from the center channel to the + * the channel at the edge of the band + */ +static uint8 +center_chan_to_edge(uint bw) +{ + /* edge channels separated by BW - 10MHz on each side + * delta from cf to edge is half of that, + * MHz to channel num conversion is 5MHz/channel + */ + return (uint8)(((bw - 20) / 2) / 5); +} + +/* return channel number of the low edge of the band + * given the center channel and BW + */ +static uint8 +channel_low_edge(uint center_ch, uint bw) +{ + return (uint8)(center_ch - center_chan_to_edge(bw)); +} + +/* return side band number given center channel and control channel + * return -1 on error + */ +static int +channel_to_sb(uint center_ch, uint ctl_ch, uint bw) +{ + uint lowest = channel_low_edge(center_ch, bw); + uint sb; + + if ((ctl_ch - lowest) % 4) { + /* bad ctl channel, not mult 4 */ + return -1; + } + + sb = ((ctl_ch - lowest) / 4); + + /* sb must be a index to a 20MHz channel in range */ + if (sb >= (bw / 20)) { + /* ctl_ch must have been too high for the center_ch */ + return -1; + } + + return sb; +} + +/* return control channel given center channel and side band */ +static uint8 +channel_to_ctl_chan(uint center_ch, uint bw, uint sb) +{ + return (uint8)(channel_low_edge(center_ch, bw) + sb * 4); +} + +/* return index of 80MHz channel from channel number + * return -1 on error + */ +static int +channel_80mhz_to_id(uint ch) +{ + uint i; + for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) { + if (ch == wf_5g_80m_chans[i]) + return i; + } + + return -1; +} + +/* wrapper function for wf_chspec_ntoa. In case of an error it puts + * the original chanspec in the output buffer, prepended with "invalid". + * Can be directly used in print routines as it takes care of null + */ +char * +wf_chspec_ntoa_ex(chanspec_t chspec, char *buf) +{ + if (wf_chspec_ntoa(chspec, buf) == NULL) + snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec); + return buf; +} + +/* given a chanspec and a string buffer, format the chanspec as a + * string, and return the original pointer a. + * Min buffer length must be CHANSPEC_STR_LEN. + * On error return NULL + */ +char * +wf_chspec_ntoa(chanspec_t chspec, char *buf) +{ + const char *band; + uint ctl_chan; + + if (wf_chspec_malformed(chspec)) + return NULL; + + band = ""; + + /* check for non-default band spec */ + if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) || + (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL)) + band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g"; + + /* ctl channel */ + ctl_chan = wf_chspec_ctlchan(chspec); + + /* bandwidth and ctl sideband */ + if (CHSPEC_IS20(chspec)) { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan); + } else if (!CHSPEC_IS8080(chspec)) { + const char *bw; + const char *sb = ""; + + bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT]; + +#ifdef CHANSPEC_NEW_40MHZ_FORMAT + /* ctl sideband string if needed for 2g 40MHz */ + if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) { + sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l"; + } + + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb); +#else + /* ctl sideband string instead of BW for 40MHz */ + if (CHSPEC_IS40(chspec)) { + sb = CHSPEC_SB_UPPER(chspec) ? 
"u" : "l"; + snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb); + } else { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw); + } +#endif /* CHANSPEC_NEW_40MHZ_FORMAT */ + + } else { + /* 80+80 */ + uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT; + uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT; + + /* convert to channel number */ + chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0; + chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0; + + /* Outputs a max of CHANSPEC_STR_LEN chars including '\0' */ + snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2); + } + + return (buf); +} + +static int +read_uint(const char **p, unsigned int *num) +{ + unsigned long val; + char *endp = NULL; + + val = strtoul(*p, &endp, 10); + /* if endp is the initial pointer value, then a number was not read */ + if (endp == *p) + return 0; + + /* advance the buffer pointer to the end of the integer string */ + *p = endp; + /* return the parsed integer */ + *num = (unsigned int)val; + + return 1; +} + +/* given a chanspec string, convert to a chanspec. + * On error return 0 + */ +chanspec_t +wf_chspec_aton(const char *a) +{ + chanspec_t chspec; + uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb; + uint num, ctl_ch; + uint ch1, ch2; + char c, sb_ul = '\0'; + int i; + + bw = 20; + chspec_sb = 0; + chspec_ch = ch1 = ch2 = 0; + + /* parse channel num or band */ + if (!read_uint(&a, &num)) + return 0; + /* if we are looking at a 'g', then the first number was a band */ + c = tolower((int)a[0]); + if (c == 'g') { + a++; /* consume the char */ + + /* band must be "2" or "5" */ + if (num == 2) + chspec_band = WL_CHANSPEC_BAND_2G; + else if (num == 5) + chspec_band = WL_CHANSPEC_BAND_5G; + else + return 0; + + /* read the channel number */ + if (!read_uint(&a, &ctl_ch)) + return 0; + + c = tolower((int)a[0]); + } + else { + /* first number is channel, use default for band */ + ctl_ch = num; + chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + } + + if (c == '\0') { + /* default BW of 20MHz */ + chspec_bw = WL_CHANSPEC_BW_20; + goto done_read; + } + + a ++; /* consume the 'u','l', or '/' */ + + /* check 'u'/'l' */ + if (c == 'u' || c == 'l') { + sb_ul = c; + chspec_bw = WL_CHANSPEC_BW_40; + goto done_read; + } + + /* next letter must be '/' */ + if (c != '/') + return 0; + + /* read bandwidth */ + if (!read_uint(&a, &bw)) + return 0; + + /* convert to chspec value */ + if (bw == 2) { + chspec_bw = WL_CHANSPEC_BW_2P5; + } else if (bw == 5) { + chspec_bw = WL_CHANSPEC_BW_5; + } else if (bw == 10) { + chspec_bw = WL_CHANSPEC_BW_10; + } else if (bw == 20) { + chspec_bw = WL_CHANSPEC_BW_20; + } else if (bw == 40) { + chspec_bw = WL_CHANSPEC_BW_40; + } else if (bw == 80) { + chspec_bw = WL_CHANSPEC_BW_80; + } else if (bw == 160) { + chspec_bw = WL_CHANSPEC_BW_160; + } else { + return 0; + } + + /* So far we have g/ + * Can now be followed by u/l if bw = 40, + * or '+80' if bw = 80, to make '80+80' bw, + * or '.5' if bw = 2.5 to make '2.5' bw . 
+ */ + + c = tolower((int)a[0]); + + /* if we have a 2g/40 channel, we should have a l/u spec now */ + if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) { + if (c == 'u' || c == 'l') { + a ++; /* consume the u/l char */ + sb_ul = c; + goto done_read; + } + } + + /* check for 80+80 */ + if (c == '+') { + /* 80+80 */ + const char plus80[] = "80/"; + + /* must be looking at '+80/' + * check and consume this string. + */ + chspec_bw = WL_CHANSPEC_BW_8080; + + a ++; /* consume the char '+' */ + + /* consume the '80/' string */ + for (i = 0; i < 3; i++) { + if (*a++ != plus80[i]) { + return 0; + } + } + + /* read primary 80MHz channel */ + if (!read_uint(&a, &ch1)) + return 0; + + /* must followed by '-' */ + if (a[0] != '-') + return 0; + a ++; /* consume the char */ + + /* read secondary 80MHz channel */ + if (!read_uint(&a, &ch2)) + return 0; + } else if (c == '.') { + /* 2.5 */ + /* must be looking at '.5' + * check and consume this string. + */ + chspec_bw = WL_CHANSPEC_BW_2P5; + + a ++; /* consume the char '.' */ + + /* consume the '5' string */ + if (*a++ != '5') { + return 0; + } + } + +done_read: + /* skip trailing white space */ + while (a[0] == ' ') { + a ++; + } + + /* must be end of string */ + if (a[0] != '\0') + return 0; + + /* Now have all the chanspec string parts read; + * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2. + * chspec_band and chspec_bw are chanspec values. + * Need to convert ctl_ch, sb_ul, and ch1,ch2 into + * a center channel (or two) and sideband. + */ + + /* if a sb u/l string was given, just use that, + * guaranteed to be bw = 40 by sting parse. + */ + if (sb_ul != '\0') { + if (sb_ul == 'l') { + chspec_ch = UPPER_20_SB(ctl_ch); + chspec_sb = WL_CHANSPEC_CTL_SB_LLL; + } else if (sb_ul == 'u') { + chspec_ch = LOWER_20_SB(ctl_ch); + chspec_sb = WL_CHANSPEC_CTL_SB_LLU; + } + } + /* if the bw is 20, center and sideband are trivial */ + else if (BW_LE20(chspec_bw)) { + chspec_ch = ctl_ch; + chspec_sb = WL_CHANSPEC_CTL_SB_NONE; + } + /* if the bw is 40/80/160, not 80+80, a single method + * can be used to to find the center and sideband + */ + else if (chspec_bw != WL_CHANSPEC_BW_8080) { + /* figure out ctl sideband based on ctl channel and bandwidth */ + const uint8 *center_ch = NULL; + int num_ch = 0; + int sb = -1; + + if (chspec_bw == WL_CHANSPEC_BW_40) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + } else { + return 0; + } + + for (i = 0; i < num_ch; i ++) { + sb = channel_to_sb(center_ch[i], ctl_ch, bw); + if (sb >= 0) { + chspec_ch = center_ch[i]; + chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; + break; + } + } + + /* check for no matching sb/center */ + if (sb < 0) { + return 0; + } + } + /* Otherwise, bw is 80+80. Figure out channel pair and sb */ + else { + int ch1_id = 0, ch2_id = 0; + int sb; + + /* look up the channel ID for the specified channel numbers */ + ch1_id = channel_80mhz_to_id(ch1); + ch2_id = channel_80mhz_to_id(ch2); + + /* validate channels */ + if (ch1_id < 0 || ch2_id < 0) + return 0; + + /* combine 2 channel IDs in channel field of chspec */ + chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) | + ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT)); + + /* figure out primary 20 MHz sideband */ + + /* is the primary channel contained in the 1st 80MHz channel? 
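+		 * If it is not, channel_to_sb() below returns -1 and the string is
+		 * rejected: the primary 20MHz channel must lie within the first
+		 * (segment 0) 80MHz channel.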
*/ + sb = channel_to_sb(ch1, ctl_ch, bw); + if (sb < 0) { + /* no match for primary channel 'ctl_ch' in segment0 80MHz channel */ + return 0; + } + + chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; + } + + chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb); + + if (wf_chspec_malformed(chspec)) + return 0; + + return chspec; +} + +/* + * Verify the chanspec is using a legal set of parameters, i.e. that the + * chanspec specified a band, bw, ctl_sb and channel and that the + * combination could be legal given any set of circumstances. + * RETURNS: TRUE is the chanspec is malformed, false if it looks good. + */ +bool +wf_chspec_malformed(chanspec_t chanspec) +{ + uint chspec_bw = CHSPEC_BW(chanspec); + uint chspec_ch = CHSPEC_CHANNEL(chanspec); + + /* must be 2G or 5G band */ + if (CHSPEC_IS2G(chanspec)) { + /* must be valid bandwidth */ + if (!BW_LE40(chspec_bw)) { + return TRUE; + } + } else if (CHSPEC_IS5G(chanspec)) { + if (chspec_bw == WL_CHANSPEC_BW_8080) { + uint ch1_id, ch2_id; + + /* channel IDs in 80+80 must be in range */ + ch1_id = CHSPEC_CHAN1(chanspec); + ch2_id = CHSPEC_CHAN2(chanspec); + if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS) + return TRUE; + + } else if (BW_LE160(chspec_bw)) { + if (chspec_ch > MAXCHANNEL) { + return TRUE; + } + } else { + /* invalid bandwidth */ + return TRUE; + } + } else { + /* must be 2G or 5G band */ + return TRUE; + } + + /* side band needs to be consistent with bandwidth */ + if (BW_LE20(chspec_bw)) { + if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL) + return TRUE; + } else if (chspec_bw == WL_CHANSPEC_BW_40) { + if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU) + return TRUE; + } else if (chspec_bw == WL_CHANSPEC_BW_80 || + chspec_bw == WL_CHANSPEC_BW_8080) { + if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU) + return TRUE; + } + else if (chspec_bw == WL_CHANSPEC_BW_160) { + ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU); + } + return FALSE; +} + +/* + * Verify the chanspec specifies a valid channel according to 802.11. + * RETURNS: TRUE if the chanspec is a valid 802.11 channel + */ +bool +wf_chspec_valid(chanspec_t chanspec) +{ + uint chspec_bw = CHSPEC_BW(chanspec); + uint chspec_ch = CHSPEC_CHANNEL(chanspec); + + if (wf_chspec_malformed(chanspec)) + return FALSE; + + if (CHSPEC_IS2G(chanspec)) { + /* must be valid bandwidth and channel range */ + if (BW_LE20(chspec_bw)) { + if (chspec_ch >= 1 && chspec_ch <= 14) + return TRUE; + } else if (chspec_bw == WL_CHANSPEC_BW_40) { + if (chspec_ch >= 3 && chspec_ch <= 11) + return TRUE; + } + } else if (CHSPEC_IS5G(chanspec)) { + if (chspec_bw == WL_CHANSPEC_BW_8080) { + uint16 ch1, ch2; + + ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)]; + ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)]; + + /* the two channels must be separated by more than 80MHz by VHT req */ + if ((ch2 > ch1 + CH_80MHZ_APART) || + (ch1 > ch2 + CH_80MHZ_APART)) + return TRUE; + } else { + const uint8 *center_ch; + uint num_ch, i; + + if (BW_LE40(chspec_bw)) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + } else { + /* invalid bandwidth */ + return FALSE; + } + + /* check for a valid center channel */ + if (BW_LE20(chspec_bw)) { + /* We don't have an array of legal 20MHz 5G channels, but they are + * each side of the legal 40MHz channels. 
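As a brief illustration of the string formats wf_chspec_aton() accepts, the following sketch (not part of the patch; it assumes the chanspec_t type and WL_CHANSPEC_* macros declared in bcmwifi_channels.h further down) shows a few representative inputs and the chanspecs they would yield:

	chanspec_t cs20 = wf_chspec_aton("36");     /* 5 GHz channel 36, 20 MHz */
	chanspec_t cs40 = wf_chspec_aton("36l");    /* 40 MHz, center 38, control channel 36 (lower sideband) */
	chanspec_t cs80 = wf_chspec_aton("36/80");  /* 80 MHz, center 42, control channel 36 */
	chanspec_t cs2g = wf_chspec_aton("2g6");    /* 2.4 GHz channel 6, 20 MHz */

Each call returns 0 for a malformed string, so callers are expected to check the result before using it.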
Check the chanspec + * channel against either side of the 40MHz channels. + */ + for (i = 0; i < num_ch; i ++) { + if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) || + chspec_ch == (uint)UPPER_20_SB(center_ch[i])) + break; /* match found */ + } + + if (i == num_ch) { + /* check for channel 165 which is not the side band + * of 40MHz 5G channel + */ + if (chspec_ch == 165) + i = 0; + + /* check for legacy JP channels on failure */ + if (chspec_ch == 34 || chspec_ch == 38 || + chspec_ch == 42 || chspec_ch == 46) + i = 0; + } + } else { + /* check the chanspec channel to each legal channel */ + for (i = 0; i < num_ch; i ++) { + if (chspec_ch == center_ch[i]) + break; /* match found */ + } + } + + if (i < num_ch) { + /* match found */ + return TRUE; + } + } + } + + return FALSE; +} + +/* + * This function returns the channel number that control traffic is being sent on, for 20MHz + * channels this is just the channel number, for 40MHZ, 80MHz, 160MHz channels it is the 20MHZ + * sideband depending on the chanspec selected + */ +uint8 +wf_chspec_ctlchan(chanspec_t chspec) +{ + uint center_chan; + uint bw_mhz; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* Is there a sideband ? */ + if (CHSPEC_BW_LE20(chspec)) { + return CHSPEC_CHANNEL(chspec); + } else { + sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + + if (CHSPEC_IS8080(chspec)) { + /* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband + * (LL, LU, UL, LU) for the 80 MHz frequency segment 0. + */ + uint chan_id = CHSPEC_CHAN1(chspec); + + bw_mhz = 80; + + /* convert from channel index to channel number */ + center_chan = wf_5g_80m_chans[chan_id]; + } + else { + bw_mhz = bw_chspec_to_mhz(chspec); + center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT; + } + + return (channel_to_ctl_chan(center_chan, bw_mhz, sb)); + } +} + +/* given a chanspec, return the bandwidth string */ +char * +wf_chspec_to_bw_str(chanspec_t chspec) +{ + return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)]; +} + +/* + * This function returns the chanspec of the control channel of a given chanspec + */ +chanspec_t +wf_chspec_ctlchspec(chanspec_t chspec) +{ + chanspec_t ctl_chspec = chspec; + uint8 ctl_chan; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* Is there a sideband ? */ + if (!CHSPEC_BW_LE20(chspec)) { + ctl_chan = wf_chspec_ctlchan(chspec); + ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20; + ctl_chspec |= CHSPEC_BAND(chspec); + } + return ctl_chspec; +} + +/* return chanspec given control channel and bandwidth + * return 0 on error + */ +uint16 +wf_channel2chspec(uint ctl_ch, uint bw) +{ + uint16 chspec; + const uint8 *center_ch = NULL; + int num_ch = 0; + int sb = -1; + int i = 0; + + chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? 
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + + chspec |= bw; + + if (bw == WL_CHANSPEC_BW_40) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + bw = 40; + } else if (bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + bw = 80; + } else if (bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + bw = 160; + } else if (BW_LE20(bw)) { + chspec |= ctl_ch; + return chspec; + } else { + return 0; + } + + for (i = 0; i < num_ch; i ++) { + sb = channel_to_sb(center_ch[i], ctl_ch, bw); + if (sb >= 0) { + chspec |= center_ch[i]; + chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT); + break; + } + } + + /* check for no matching sb/center */ + if (sb < 0) { + return 0; + } + + return chspec; +} + +/* + * This function returns the chanspec for the primary 40MHz of an 80MHz channel. + * The control sideband specifies the same 20MHz channel that the 80MHz channel is using + * as the primary 20MHz channel. + */ +extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec) +{ + chanspec_t chspec40 = chspec; + uint center_chan; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */ + if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) { + chspec = wf_chspec_primary80_chspec(chspec); + } + + /* determine primary 40 MHz sub-channel of an 80 MHz chanspec */ + if (CHSPEC_IS80(chspec)) { + center_chan = CHSPEC_CHANNEL(chspec); + sb = CHSPEC_CTL_SB(chspec); + + if (sb < WL_CHANSPEC_CTL_SB_UL) { + /* Primary 40MHz is on lower side */ + center_chan -= CH_20MHZ_APART; + /* sideband bits are the same for LL/LU and L/U */ + } else { + /* Primary 40MHz is on upper side */ + center_chan += CH_20MHZ_APART; + /* sideband bits need to be adjusted by UL offset */ + sb -= WL_CHANSPEC_CTL_SB_UL; + } + + /* Create primary 40MHz chanspec */ + chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 | + sb | center_chan); + } + + return chspec40; +} + +/* + * Return the channel number for a given frequency and base frequency. + * The returned channel number is relative to the given base frequency. + * If the given base frequency is zero, a base frequency of 5 GHz is assumed for + * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz. + * + * Frequency is specified in MHz. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for + * 2.4 GHz and 5 GHz bands. + * + * The returned channel will be in the range [1, 14] in the 2.4 GHz band + * and [0, 200] otherwise. + * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the + * frequency is not a 2.4 GHz channel, or if the frequency is not and even + * multiple of 5 MHz from the base frequency to the base plus 1 GHz. 
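As a hedged worked example of the primary-40 derivation in wf_chspec_primary40_chspec() above (a sketch only, using the WL_CHANSPEC_* macros from bcmwifi_channels.h later in this patch): an 80 MHz chanspec with center channel 42 and control sideband LL has its primary 20 MHz channel at 36, so the primary 40 MHz sub-channel is the lower half of the 80 MHz channel.

	chanspec_t cs80 = (chanspec_t)(42 | WL_CHANSPEC_BAND_5G |
	                               WL_CHANSPEC_BW_80 | WL_CHANSPEC_CTL_SB_LLL);
	chanspec_t cs40 = wf_chspec_primary40_chspec(cs80);
	/* cs40 encodes center channel 38 (42 - CH_20MHZ_APART), 40 MHz bandwidth,
	 * lower sideband, i.e. control channel 36, matching the original chanspec.
	 */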
+ * + * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2 + */ +int +wf_mhz2channel(uint freq, uint start_factor) +{ + int ch = -1; + uint base; + int offset; + + /* take the default channel start frequency */ + if (start_factor == 0) { + if (freq >= 2400 && freq <= 2500) + start_factor = WF_CHAN_FACTOR_2_4_G; + else if (freq >= 5000 && freq <= 6000) + start_factor = WF_CHAN_FACTOR_5_G; + } + + if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) + return 14; + + base = start_factor / 2; + + /* check that the frequency is in 1GHz range of the base */ + if ((freq < base) || (freq > base + 1000)) + return -1; + + offset = freq - base; + ch = offset / 5; + + /* check that frequency is a 5MHz multiple from the base */ + if (offset != (ch * 5)) + return -1; + + /* restricted channel range check for 2.4G */ + if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13)) + return -1; + + return ch; +} + +/* + * Return the center frequency in MHz of the given channel and base frequency. + * The channel number is interpreted relative to the given base frequency. + * + * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G + * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands. + * The channel range of [1, 14] is only checked for a start_factor of + * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2). + * Odd start_factors produce channels on .5 MHz boundaries, in which case + * the answer is rounded down to an integral MHz. + * -1 is returned for an out of range channel. + * + * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2 + */ +int +wf_channel2mhz(uint ch, uint start_factor) +{ + int freq; + + if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) || + (ch > 200)) + freq = -1; + else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) + freq = 2484; + else + freq = ch * 5 + start_factor / 2; + + return freq; +} + +static const uint16 sidebands[] = { + WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU, + WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU, + WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU, + WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU +}; + +/* + * Returns the chanspec 80Mhz channel corresponding to the following input + * parameters + * + * primary_channel - primary 20Mhz channel + * center_channel - center frequecny of the 80Mhz channel + * + * The center_channel can be one of {42, 58, 106, 122, 138, 155} + * + * returns INVCHANSPEC in case of error + */ +chanspec_t +wf_chspec_80(uint8 center_channel, uint8 primary_channel) +{ + + chanspec_t chanspec = INVCHANSPEC; + chanspec_t chanspec_cur; + uint i; + + for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) { + chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]); + if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) { + chanspec = chanspec_cur; + break; + } + } + /* If the loop ended early, we are good, otherwise we did not + * find a 80MHz chanspec with the given center_channel that had a primary channel + *matching the given primary_channel. + */ + return chanspec; +} + +/* + * Returns the 80+80 chanspec corresponding to the following input parameters + * + * primary_20mhz - Primary 20 MHz channel + * chan0 - center channel number of one frequency segment + * chan1 - center channel number of the other frequency segment + * + * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}. 
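As a quick check of the channel/frequency arithmetic implemented by wf_mhz2channel() and wf_channel2mhz() above (illustrative only; the WF_CHAN_FACTOR_* constants are defined in bcmwifi_channels.h later in this patch):

	int f1 = wf_channel2mhz(36, WF_CHAN_FACTOR_5_G);   /* 36 * 5 + 10000/2 = 5180 MHz */
	int f2 = wf_channel2mhz(6, WF_CHAN_FACTOR_2_4_G);  /* 6 * 5 + 4814/2   = 2437 MHz */
	int ch = wf_mhz2channel(5180, 0);                  /* base defaults to 5 GHz -> channel 36 */
	int bad = wf_mhz2channel(5183, 0);                 /* not a 5 MHz multiple of the base -> -1 */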
+ * The primary channel must be contained in one of the 80MHz channels. This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to IEEE802.11ac section 22.3.14 "Channelization". + */ +chanspec_t +wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1) +{ + int sb = 0; + uint16 chanspec = 0; + int chan0_id = 0, chan1_id = 0; + int seg0, seg1; + + chan0_id = channel_80mhz_to_id(chan0); + chan1_id = channel_80mhz_to_id(chan1); + + /* make sure the channel numbers were valid */ + if (chan0_id == -1 || chan1_id == -1) + return INVCHANSPEC; + + /* does the primary channel fit with the 1st 80MHz channel ? */ + sb = channel_to_sb(chan0, primary_20mhz, 80); + if (sb >= 0) { + /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */ + seg0 = chan0_id; + seg1 = chan1_id; + } else { + /* no, so does the primary channel fit with the 2nd 80MHz channel ? */ + sb = channel_to_sb(chan1, primary_20mhz, 80); + if (sb < 0) { + /* no match for ctl_ch to either 80MHz center channel */ + return INVCHANSPEC; + } + /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */ + seg0 = chan1_id; + seg1 = chan0_id; + } + + chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) | + (seg1 << WL_CHANSPEC_CHAN2_SHIFT) | + (sb << WL_CHANSPEC_CTL_SB_SHIFT) | + WL_CHANSPEC_BW_8080 | + WL_CHANSPEC_BAND_5G); + + return chanspec; +} + +/* + * This function returns the 80Mhz channel for the given id. + */ +static uint8 +wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id) +{ + if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS) + return wf_5g_80m_chans[chan_80Mhz_id]; + + return 0; +} + +/* + * Returns the primary 80 Mhz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved + * + * returns -1 in case the provided channel is 20/40 Mhz chanspec + */ + +uint8 +wf_chspec_primary80_channel(chanspec_t chanspec) +{ + uint8 primary80_chan; + + if (CHSPEC_IS80(chanspec)) { + primary80_chan = CHSPEC_CHANNEL(chanspec); + } + else if (CHSPEC_IS8080(chanspec)) { + /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */ + primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec)); + } + else if (CHSPEC_IS160(chanspec)) { + uint8 center_chan = CHSPEC_CHANNEL(chanspec); + uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + + /* based on the sb value primary 80 channel can be retrieved + * if sb is in range 0 to 3 the lower band is the 80Mhz primary band + */ + if (sb < 4) { + primary80_chan = center_chan - CH_40MHZ_APART; + } + /* if sb is in range 4 to 7 the upper band is the 80Mhz primary band */ + else + { + primary80_chan = center_chan + CH_40MHZ_APART; + } + } + else { + /* for 20 and 40 Mhz */ + primary80_chan = -1; + } + return primary80_chan; +} + +/* + * Returns the secondary 80 Mhz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved + * + * returns -1 in case the provided channel is 20/40/80 Mhz chanspec + */ +uint8 +wf_chspec_secondary80_channel(chanspec_t chanspec) +{ + uint8 secondary80_chan; + + if (CHSPEC_IS8080(chanspec)) { + secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec)); + } + else if (CHSPEC_IS160(chanspec)) { + uint8 center_chan = CHSPEC_CHANNEL(chanspec); + uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + + /* based on the sb value secondary 80 channel can be retrieved + * if sb is in range 0 to 3 upper band is the 
secondary 80Mhz band + */ + if (sb < 4) { + secondary80_chan = center_chan + CH_40MHZ_APART; + } + /* if sb is in range 4 to 7 the lower band is the secondary 80Mhz band */ + else + { + secondary80_chan = center_chan - CH_40MHZ_APART; + } + } + else { + /* for 20, 40, and 80 Mhz */ + secondary80_chan = -1; + } + return secondary80_chan; +} + +/* + * This function returns the chanspec for the primary 80MHz of an 160MHz or 80+80 channel. + * + * chanspec - Input chanspec for which the primary 80Mhz chanspec has to be retreived + * + * returns the input chanspec in case the provided chanspec is an 80 MHz chanspec + * returns INVCHANSPEC in case the provided channel is 20/40 MHz chanspec + */ +chanspec_t +wf_chspec_primary80_chspec(chanspec_t chspec) +{ + chanspec_t chspec80; + uint center_chan; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + if (CHSPEC_IS80(chspec)) { + chspec80 = chspec; + } + else if (CHSPEC_IS8080(chspec)) { + + /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */ + center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec)); + + sb = CHSPEC_CTL_SB(chspec); + + /* Create primary 80MHz chanspec */ + chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); + } + else if (CHSPEC_IS160(chspec)) { + center_chan = CHSPEC_CHANNEL(chspec); + sb = CHSPEC_CTL_SB(chspec); + + if (sb < WL_CHANSPEC_CTL_SB_ULL) { + /* Primary 80MHz is on lower side */ + center_chan -= CH_40MHZ_APART; + } + else { + /* Primary 80MHz is on upper side */ + center_chan += CH_40MHZ_APART; + sb -= WL_CHANSPEC_CTL_SB_ULL; + } + /* Create primary 80MHz chanspec */ + chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); + } + else { + chspec80 = INVCHANSPEC; + } + + return chspec80; +} + +#ifdef WL11AC_80P80 +uint8 +wf_chspec_channel(chanspec_t chspec) +{ + if (CHSPEC_IS8080(chspec)) { + return wf_chspec_primary80_channel(chspec); + } + else { + return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)); + } +} +#endif /* WL11AC_80P80 */ diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h new file mode 100644 index 000000000000..186c0e18ee92 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h @@ -0,0 +1,631 @@ +/* + * Misc utility routines for WL and Apps + * This header file housing the define and function prototype use by + * both the wl driver, tools & Apps. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmwifi_channels.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _bcmwifi_channels_h_ +#define _bcmwifi_channels_h_ + + +/* A chanspec holds the channel number, band, bandwidth and control sideband */ +typedef uint16 chanspec_t; + +/* channel defines */ +#define CH_UPPER_SB 0x01 +#define CH_LOWER_SB 0x02 +#define CH_EWA_VALID 0x04 +#define CH_80MHZ_APART 16 +#define CH_40MHZ_APART 8 +#define CH_20MHZ_APART 4 +#define CH_10MHZ_APART 2 +#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ +#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ + +/* maximum # channels the s/w supports */ +#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above, + * this is that + 1 rounded up to a multiple of NBBY (8). + * DO NOT MAKE it > 255: channels are uint8's all over + */ +#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */ + +/* channel bitvec */ +typedef struct { + uint8 vec[MAXCHANNEL/8]; /* bitvec of channels */ +} chanvec_t; + +/* make sure channel num is within valid range */ +#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM) + +#define CHSPEC_CTLOVLP(sp1, sp2, sep) \ + (ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep)) + +/* All builds use the new 11ac ratespec/chanspec */ +#undef D11AC_IOTYPES +#define D11AC_IOTYPES + +#define WL_CHANSPEC_CHAN_MASK 0x00ff +#define WL_CHANSPEC_CHAN_SHIFT 0 +#define WL_CHANSPEC_CHAN1_MASK 0x000f +#define WL_CHANSPEC_CHAN1_SHIFT 0 +#define WL_CHANSPEC_CHAN2_MASK 0x00f0 +#define WL_CHANSPEC_CHAN2_SHIFT 4 + +#define WL_CHANSPEC_CTL_SB_MASK 0x0700 +#define WL_CHANSPEC_CTL_SB_SHIFT 8 +#define WL_CHANSPEC_CTL_SB_LLL 0x0000 +#define WL_CHANSPEC_CTL_SB_LLU 0x0100 +#define WL_CHANSPEC_CTL_SB_LUL 0x0200 +#define WL_CHANSPEC_CTL_SB_LUU 0x0300 +#define WL_CHANSPEC_CTL_SB_ULL 0x0400 +#define WL_CHANSPEC_CTL_SB_ULU 0x0500 +#define WL_CHANSPEC_CTL_SB_UUL 0x0600 +#define WL_CHANSPEC_CTL_SB_UUU 0x0700 +#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL +#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU +#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL + +#define WL_CHANSPEC_BW_MASK 0x3800 +#define WL_CHANSPEC_BW_SHIFT 11 +#define WL_CHANSPEC_BW_5 0x0000 +#define WL_CHANSPEC_BW_10 0x0800 +#define WL_CHANSPEC_BW_20 0x1000 +#define WL_CHANSPEC_BW_40 0x1800 +#define WL_CHANSPEC_BW_80 0x2000 +#define WL_CHANSPEC_BW_160 0x2800 +#define WL_CHANSPEC_BW_8080 0x3000 +#define WL_CHANSPEC_BW_2P5 0x3800 + +#define WL_CHANSPEC_BAND_MASK 0xc000 +#define WL_CHANSPEC_BAND_SHIFT 14 +#define WL_CHANSPEC_BAND_2G 0x0000 +#define WL_CHANSPEC_BAND_3G 0x4000 +#define WL_CHANSPEC_BAND_4G 0x8000 +#define WL_CHANSPEC_BAND_5G 0xc000 +#define INVCHANSPEC 255 +#define MAX_CHANSPEC 0xFFFF + +/* channel defines */ +#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \ + ((channel) - CH_10MHZ_APART) : 0) +#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \ + ((channel) + CH_10MHZ_APART) : 0) + +#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0) +#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? 
\ + ((channel) + 3 * CH_10MHZ_APART) : 0) +#define LU_20_SB(channel) LOWER_20_SB(channel) +#define UL_20_SB(channel) UPPER_20_SB(channel) + +#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART) +#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART) +#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) +#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH2P5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_2P5 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_5 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH10MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_10 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ + ((channel) + CH_20MHZ_APART) : 0) +#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ + ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ + WL_CHANSPEC_BAND_5G)) +#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G) +#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G) +#define CHBW_CHSPEC(bw, channel) (chanspec_t)((chanspec_t)(channel) | (bw) | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) + +/* simple MACROs to get different fields of chanspec */ +#ifdef WL11AC_80P80 +#define CHSPEC_CHANNEL(chspec) wf_chspec_channel(chspec) +#else +#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) +#endif +#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT +#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT +#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) +#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) + +#ifdef WL11N_20MHZONLY + +#define CHSPEC_IS2P5(chspec) 0 +#define CHSPEC_IS5(chspec) 0 +#define CHSPEC_IS10(chspec) 0 +#define CHSPEC_IS20(chspec) 1 +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) 0 +#endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) 0 +#endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) 0 +#endif +#ifndef CHSPEC_IS8080 +#define CHSPEC_IS8080(chspec) 0 +#endif +#define BW_LE20(bw) TRUE +#define CHSPEC_ISLE20(chspec) TRUE +#else /* !WL11N_20MHZONLY */ + +#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5) +#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5) +#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) +#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) +#endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80) +#endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == 
WL_CHANSPEC_BW_160) +#endif +#ifndef CHSPEC_IS8080 +#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080) +#endif + +#ifdef WL11ULB +#define BW_LT20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \ + ((bw) == WL_CHANSPEC_BW_5) || \ + ((bw) == WL_CHANSPEC_BW_10)) +#define CHSPEC_BW_LT20(chspec) (BW_LT20(CHSPEC_BW(chspec))) +/* This MACRO is strictly to avoid abandons in existing code with ULB feature and is in no way + * optimial to use. Should be replaced with CHSPEC_BW_LE() instead + */ +#define BW_LE20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \ + ((bw) == WL_CHANSPEC_BW_5) || \ + ((bw) == WL_CHANSPEC_BW_10) || \ + ((bw) == WL_CHANSPEC_BW_20)) +#define CHSPEC_ISLE20(chspec) (BW_LE20(CHSPEC_BW(chspec))) + +#else /* WL11ULB */ +#define BW_LE20(bw) ((bw) == WL_CHANSPEC_BW_20) +#define CHSPEC_ISLE20(chspec) (CHSPEC_IS20(chspec)) +#endif /* WL11ULB */ +#endif /* !WL11N_20MHZONLY */ + +#define BW_LE40(bw) (BW_LE20(bw) || ((bw) == WL_CHANSPEC_BW_40)) +#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80)) +#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160)) +#define CHSPEC_BW_LE20(chspec) (BW_LE20(CHSPEC_BW(chspec))) +#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) +#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) +#define CHSPEC_SB_UPPER(chspec) \ + ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)) +#define CHSPEC_SB_LOWER(chspec) \ + ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)) +#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G) + +/** + * Number of chars needed for wf_chspec_ntoa() destination character buffer. + */ +#define CHANSPEC_STR_LEN 20 + + +#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\ + CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080) + +/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>), comparisons can be made +* as simple numeric comparisons, with the exception that 160 is the same BW as 80+80, +* but have different numeric values; (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080). +* +* The LT/LE/GT/GE macros check first checks whether both chspec bandwidth and bw are 160 wide. +* If both chspec bandwidth and bw is not 160 wide, then the comparison is made. 
+*/ +#ifdef WL11ULB +#define CHSPEC_BW_GE(chspec, bw) \ + (((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) && \ + (!(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5))) +#else /* WL11ULB */ +#define CHSPEC_BW_GE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) +#endif /* WL11ULB */ + +#ifdef WL11ULB +#define CHSPEC_BW_LE(chspec, bw) \ + (((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) || \ + (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5)) +#else /* WL11ULB */ +#define CHSPEC_BW_LE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) +#endif /* WL11ULB */ + +#ifdef WL11ULB +#define CHSPEC_BW_GT(chspec, bw) \ + ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) && \ + (CHSPEC_BW(chspec) != WL_CHANSPEC_BW_2P5)) +#else /* WL11ULB */ +#define CHSPEC_BW_GT(chspec, bw) \ + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) +#endif /* WL11ULB */ + +#ifdef WL11ULB +#define CHSPEC_BW_LT(chspec, bw) \ + ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) || \ + ((CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5))) +#else /* WL11ULB */ +#define CHSPEC_BW_LT(chspec, bw) \ + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) +#endif /* WL11ULB */ + +/* Legacy Chanspec defines + * These are the defines for the previous format of the chanspec_t + */ +#define WL_LCHANSPEC_CHAN_MASK 0x00ff +#define WL_LCHANSPEC_CHAN_SHIFT 0 + +#define WL_LCHANSPEC_CTL_SB_MASK 0x0300 +#define WL_LCHANSPEC_CTL_SB_SHIFT 8 +#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100 +#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200 +#define WL_LCHANSPEC_CTL_SB_NONE 0x0300 + +#define WL_LCHANSPEC_BW_MASK 0x0C00 +#define WL_LCHANSPEC_BW_SHIFT 10 +#define WL_LCHANSPEC_BW_10 0x0400 +#define WL_LCHANSPEC_BW_20 0x0800 +#define WL_LCHANSPEC_BW_40 0x0C00 + +#define WL_LCHANSPEC_BAND_MASK 0xf000 +#define WL_LCHANSPEC_BAND_SHIFT 12 +#define WL_LCHANSPEC_BAND_5G 0x1000 +#define WL_LCHANSPEC_BAND_2G 0x2000 + +#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK)) +#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK) +#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK) +#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK) +#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10) +#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20) +#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40) +#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G) +#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G) + +#define LCHSPEC_SB_UPPER(chspec) \ + ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \ + (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)) +#define LCHSPEC_SB_LOWER(chspec) \ + ((((chspec) 
& WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \ + (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)) + +#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band))) + +#define CH20MHZ_LCHSPEC(channel) \ + (chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \ + WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G)) + +/* + * WF_CHAN_FACTOR_* constants are used to calculate channel frequency + * given a channel number. + * chan_freq = chan_factor * 500Mhz + chan_number * 5 + */ + +/** + * Channel Factor for the starting frequence of 2.4 GHz channels. + * The value corresponds to 2407 MHz. + */ +#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */ + +/** + * Channel Factor for the starting frequence of 5 GHz channels. + * The value corresponds to 5000 MHz. + */ +#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */ + +/** + * Channel Factor for the starting frequence of 4.9 GHz channels. + * The value corresponds to 4000 MHz. + */ +#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */ + +#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */ + +/** + * No of sub-band vlaue of the specified Mhz chanspec + */ +#define WF_NUM_SIDEBANDS_40MHZ 2 +#define WF_NUM_SIDEBANDS_80MHZ 4 +#define WF_NUM_SIDEBANDS_8080MHZ 4 +#define WF_NUM_SIDEBANDS_160MHZ 8 + +/** + * Convert chanspec to ascii string + * + * @param chspec chanspec format + * @param buf ascii string of chanspec + * + * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes + * Original chanspec in case of error + * + * @see CHANSPEC_STR_LEN + */ +extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf); + +/** + * Convert chanspec to ascii string + * + * @param chspec chanspec format + * @param buf ascii string of chanspec + * + * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes + * NULL in case of error + * + * @see CHANSPEC_STR_LEN + */ +extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf); + +/** + * Convert ascii string to chanspec + * + * @param a pointer to input string + * + * @return >= 0 if successful or 0 otherwise + */ +extern chanspec_t wf_chspec_aton(const char *a); + +/** + * Verify the chanspec fields are valid. + * + * Verify the chanspec is using a legal set field values, i.e. that the chanspec + * specified a band, bw, ctl_sb and channel and that the combination could be + * legal given some set of circumstances. + * + * @param chanspec input chanspec to verify + * + * @return TRUE if the chanspec is malformed, FALSE if it looks good. + */ +extern bool wf_chspec_malformed(chanspec_t chanspec); + +/** + * Verify the chanspec specifies a valid channel according to 802.11. + * + * @param chanspec input chanspec to verify + * + * @return TRUE if the chanspec is a valid 802.11 channel + */ +extern bool wf_chspec_valid(chanspec_t chanspec); + +/** + * Return the primary (control) channel. + * + * This function returns the channel number of the primary 20MHz channel. For + * 20MHz channels this is just the channel number. For 40MHz or wider channels + * it is the primary 20MHz channel specified by the chanspec. + * + * @param chspec input chanspec + * + * @return Returns the channel number of the primary 20MHz channel + */ +extern uint8 wf_chspec_ctlchan(chanspec_t chspec); + +/* + * Return the bandwidth string. + * + * This function returns the bandwidth string for the passed chanspec. 
+ * + * @param chspec input chanspec + * + * @return Returns the bandwidth string + */ +extern char * wf_chspec_to_bw_str(chanspec_t chspec); + +/** + * Return the primary (control) chanspec. + * + * This function returns the chanspec of the primary 20MHz channel. For 20MHz + * channels this is just the chanspec. For 40MHz or wider channels it is the + * chanspec of the primary 20MHZ channel specified by the chanspec. + * + * @param chspec input chanspec + * + * @return Returns the chanspec of the primary 20MHz channel + */ +extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec); + +/** + * Return a channel number corresponding to a frequency. + * + * This function returns the chanspec for the primary 40MHz of an 80MHz channel. + * The control sideband specifies the same 20MHz channel that the 80MHz channel is using + * as the primary 20MHz channel. + */ +extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec); + +/* + * Return the channel number for a given frequency and base frequency. + * The returned channel number is relative to the given base frequency. + * If the given base frequency is zero, a base frequency of 5 GHz is assumed for + * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz. + * + * Frequency is specified in MHz. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for + * 2.4 GHz and 5 GHz bands. + * + * The returned channel will be in the range [1, 14] in the 2.4 GHz band + * and [0, 200] otherwise. + * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the + * frequency is not a 2.4 GHz channel, or if the frequency is not and even + * multiple of 5 MHz from the base frequency to the base plus 1 GHz. + * + * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2 + * + * @param freq frequency in MHz + * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz + * + * @return Returns a channel number + * + * @see WF_CHAN_FACTOR_2_4_G + * @see WF_CHAN_FACTOR_5_G + */ +extern int wf_mhz2channel(uint freq, uint start_factor); + +/** + * Return the center frequency in MHz of the given channel and base frequency. + * + * Return the center frequency in MHz of the given channel and base frequency. + * The channel number is interpreted relative to the given base frequency. + * + * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for + * 2.4 GHz and 5 GHz bands. + * The channel range of [1, 14] is only checked for a start_factor of + * WF_CHAN_FACTOR_2_4_G (4814). + * Odd start_factors produce channels on .5 MHz boundaries, in which case + * the answer is rounded down to an integral MHz. + * -1 is returned for an out of range channel. + * + * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2 + * + * @param channel input channel number + * @param start_factor base frequency in 500 kHz units, e.g. 
10000 for 5 GHz + * + * @return Returns a frequency in MHz + * + * @see WF_CHAN_FACTOR_2_4_G + * @see WF_CHAN_FACTOR_5_G + */ +extern int wf_channel2mhz(uint channel, uint start_factor); + +/** + * Returns the chanspec 80Mhz channel corresponding to the following input + * parameters + * + * primary_channel - primary 20Mhz channel + * center_channel - center frequecny of the 80Mhz channel + * + * The center_channel can be one of {42, 58, 106, 122, 138, 155} + * + * returns INVCHANSPEC in case of error + */ +extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel); + +/** + * Convert ctl chan and bw to chanspec + * + * @param ctl_ch channel + * @param bw bandwidth + * + * @return > 0 if successful or 0 otherwise + * + */ +extern uint16 wf_channel2chspec(uint ctl_ch, uint bw); + +extern uint wf_channel2freq(uint channel); +extern uint wf_freq2channel(uint freq); + +/* + * Returns the 80+80 MHz chanspec corresponding to the following input parameters + * + * primary_20mhz - Primary 20 MHz channel + * chan0_80MHz - center channel number of one frequency segment + * chan1_80MHz - center channel number of the other frequency segment + * + * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}. + * The primary channel must be contained in one of the 80MHz channels. This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to IEEE802.11ac section 22.3.14 "Channelization". + */ +extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz, + uint8 chan0_80Mhz, uint8 chan1_80Mhz); + +/* + * Returns the primary 80 Mhz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved + * + * returns -1 in case the provided channel is 20/40 Mhz chanspec + */ +extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec); + +/* + * Returns the secondary 80 Mhz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved + * + * returns -1 in case the provided channel is 20/40 Mhz chanspec + */ +extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec); + +/* + * This function returns the chanspec for the primary 80MHz of an 160MHz or 80+80 channel. + */ +extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec); + +#ifdef WL11AC_80P80 +/* + * This function returns the centre chanel for the given chanspec. 
+ * In case of 80+80 chanspec it returns the primary 80 Mhz centre channel + */ +extern uint8 wf_chspec_channel(chanspec_t chspec); +#endif +#endif /* _bcmwifi_channels_h_ */ diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h new file mode 100644 index 000000000000..1329e9bc80da --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h @@ -0,0 +1,793 @@ +/* + * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_rates.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _bcmwifi_rates_h_ +#define _bcmwifi_rates_h_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +#define WL_RATESET_SZ_DSSS 4 +#define WL_RATESET_SZ_OFDM 8 +#define WL_RATESET_SZ_VHT_MCS 10 +#define WL_RATESET_SZ_VHT_MCS_P 12 + +#if defined(WLPROPRIETARY_11N_RATES) +#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS +#else +#define WL_RATESET_SZ_HT_MCS 8 +#endif + +#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */ + +#define WL_TX_CHAINS_MAX 4 + +#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */ + +/* Transmit channel bandwidths */ +typedef enum wl_tx_bw { + WL_TX_BW_20, + WL_TX_BW_40, + WL_TX_BW_80, + WL_TX_BW_20IN40, + WL_TX_BW_20IN80, + WL_TX_BW_40IN80, + WL_TX_BW_160, + WL_TX_BW_20IN160, + WL_TX_BW_40IN160, + WL_TX_BW_80IN160, + WL_TX_BW_ALL, + WL_TX_BW_8080, + WL_TX_BW_8080CHAN2, + WL_TX_BW_20IN8080, + WL_TX_BW_40IN8080, + WL_TX_BW_80IN8080, + WL_TX_BW_2P5, + WL_TX_BW_5, + WL_TX_BW_10 +} wl_tx_bw_t; + + +/* + * Transmit modes. + * Not all modes are listed here, only those required for disambiguation. e.g. 
SPEXP is not listed + */ +typedef enum wl_tx_mode { + WL_TX_MODE_NONE, + WL_TX_MODE_STBC, + WL_TX_MODE_CDD, + WL_TX_MODE_TXBF, + WL_NUM_TX_MODES +} wl_tx_mode_t; + + +/* Number of transmit chains */ +typedef enum wl_tx_chains { + WL_TX_CHAINS_1 = 1, + WL_TX_CHAINS_2, + WL_TX_CHAINS_3, + WL_TX_CHAINS_4 +} wl_tx_chains_t; + + +/* Number of transmit streams */ +typedef enum wl_tx_nss { + WL_TX_NSS_1 = 1, + WL_TX_NSS_2, + WL_TX_NSS_3, + WL_TX_NSS_4 +} wl_tx_nss_t; + + +/* This enum maps each rate to a CLM index */ + +typedef enum clm_rates { + /************ + * 1 chain * + ************ + */ + + /* 1 Stream */ + WL_RATE_1X1_DSSS_1 = 0, + WL_RATE_1X1_DSSS_2 = 1, + WL_RATE_1X1_DSSS_5_5 = 2, + WL_RATE_1X1_DSSS_11 = 3, + + WL_RATE_1X1_OFDM_6 = 4, + WL_RATE_1X1_OFDM_9 = 5, + WL_RATE_1X1_OFDM_12 = 6, + WL_RATE_1X1_OFDM_18 = 7, + WL_RATE_1X1_OFDM_24 = 8, + WL_RATE_1X1_OFDM_36 = 9, + WL_RATE_1X1_OFDM_48 = 10, + WL_RATE_1X1_OFDM_54 = 11, + + WL_RATE_1X1_MCS0 = 12, + WL_RATE_1X1_MCS1 = 13, + WL_RATE_1X1_MCS2 = 14, + WL_RATE_1X1_MCS3 = 15, + WL_RATE_1X1_MCS4 = 16, + WL_RATE_1X1_MCS5 = 17, + WL_RATE_1X1_MCS6 = 18, + WL_RATE_1X1_MCS7 = 19, + WL_RATE_P_1X1_MCS87 = 20, + WL_RATE_P_1X1_MCS88 = 21, + + WL_RATE_1X1_VHT0SS1 = 12, + WL_RATE_1X1_VHT1SS1 = 13, + WL_RATE_1X1_VHT2SS1 = 14, + WL_RATE_1X1_VHT3SS1 = 15, + WL_RATE_1X1_VHT4SS1 = 16, + WL_RATE_1X1_VHT5SS1 = 17, + WL_RATE_1X1_VHT6SS1 = 18, + WL_RATE_1X1_VHT7SS1 = 19, + WL_RATE_1X1_VHT8SS1 = 20, + WL_RATE_1X1_VHT9SS1 = 21, + WL_RATE_P_1X1_VHT10SS1 = 22, + WL_RATE_P_1X1_VHT11SS1 = 23, + + + /************ + * 2 chains * + ************ + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_DSSS_1 = 24, + WL_RATE_1X2_DSSS_2 = 25, + WL_RATE_1X2_DSSS_5_5 = 26, + WL_RATE_1X2_DSSS_11 = 27, + + WL_RATE_1X2_CDD_OFDM_6 = 28, + WL_RATE_1X2_CDD_OFDM_9 = 29, + WL_RATE_1X2_CDD_OFDM_12 = 30, + WL_RATE_1X2_CDD_OFDM_18 = 31, + WL_RATE_1X2_CDD_OFDM_24 = 32, + WL_RATE_1X2_CDD_OFDM_36 = 33, + WL_RATE_1X2_CDD_OFDM_48 = 34, + WL_RATE_1X2_CDD_OFDM_54 = 35, + + WL_RATE_1X2_CDD_MCS0 = 36, + WL_RATE_1X2_CDD_MCS1 = 37, + WL_RATE_1X2_CDD_MCS2 = 38, + WL_RATE_1X2_CDD_MCS3 = 39, + WL_RATE_1X2_CDD_MCS4 = 40, + WL_RATE_1X2_CDD_MCS5 = 41, + WL_RATE_1X2_CDD_MCS6 = 42, + WL_RATE_1X2_CDD_MCS7 = 43, + WL_RATE_P_1X2_CDD_MCS87 = 44, + WL_RATE_P_1X2_CDD_MCS88 = 45, + + WL_RATE_1X2_VHT0SS1 = 36, + WL_RATE_1X2_VHT1SS1 = 37, + WL_RATE_1X2_VHT2SS1 = 38, + WL_RATE_1X2_VHT3SS1 = 39, + WL_RATE_1X2_VHT4SS1 = 40, + WL_RATE_1X2_VHT5SS1 = 41, + WL_RATE_1X2_VHT6SS1 = 42, + WL_RATE_1X2_VHT7SS1 = 43, + WL_RATE_1X2_VHT8SS1 = 44, + WL_RATE_1X2_VHT9SS1 = 45, + WL_RATE_P_1X2_VHT10SS1 = 46, + WL_RATE_P_1X2_VHT11SS1 = 47, + + /* 2 Streams */ + WL_RATE_2X2_STBC_MCS0 = 48, + WL_RATE_2X2_STBC_MCS1 = 49, + WL_RATE_2X2_STBC_MCS2 = 50, + WL_RATE_2X2_STBC_MCS3 = 51, + WL_RATE_2X2_STBC_MCS4 = 52, + WL_RATE_2X2_STBC_MCS5 = 53, + WL_RATE_2X2_STBC_MCS6 = 54, + WL_RATE_2X2_STBC_MCS7 = 55, + WL_RATE_P_2X2_STBC_MCS87 = 56, + WL_RATE_P_2X2_STBC_MCS88 = 57, + + WL_RATE_2X2_STBC_VHT0SS1 = 48, + WL_RATE_2X2_STBC_VHT1SS1 = 49, + WL_RATE_2X2_STBC_VHT2SS1 = 50, + WL_RATE_2X2_STBC_VHT3SS1 = 51, + WL_RATE_2X2_STBC_VHT4SS1 = 52, + WL_RATE_2X2_STBC_VHT5SS1 = 53, + WL_RATE_2X2_STBC_VHT6SS1 = 54, + WL_RATE_2X2_STBC_VHT7SS1 = 55, + WL_RATE_2X2_STBC_VHT8SS1 = 56, + WL_RATE_2X2_STBC_VHT9SS1 = 57, + WL_RATE_P_2X2_STBC_VHT10SS1 = 58, + WL_RATE_P_2X2_STBC_VHT11SS1 = 59, + + WL_RATE_2X2_SDM_MCS8 = 60, + WL_RATE_2X2_SDM_MCS9 = 61, + WL_RATE_2X2_SDM_MCS10 = 62, + WL_RATE_2X2_SDM_MCS11 = 63, + WL_RATE_2X2_SDM_MCS12 = 64, + WL_RATE_2X2_SDM_MCS13 = 65, + 
WL_RATE_2X2_SDM_MCS14 = 66, + WL_RATE_2X2_SDM_MCS15 = 67, + WL_RATE_P_2X2_SDM_MCS99 = 68, + WL_RATE_P_2X2_SDM_MCS100 = 69, + + WL_RATE_2X2_VHT0SS2 = 60, + WL_RATE_2X2_VHT1SS2 = 61, + WL_RATE_2X2_VHT2SS2 = 62, + WL_RATE_2X2_VHT3SS2 = 63, + WL_RATE_2X2_VHT4SS2 = 64, + WL_RATE_2X2_VHT5SS2 = 65, + WL_RATE_2X2_VHT6SS2 = 66, + WL_RATE_2X2_VHT7SS2 = 67, + WL_RATE_2X2_VHT8SS2 = 68, + WL_RATE_2X2_VHT9SS2 = 69, + WL_RATE_P_2X2_VHT10SS2 = 70, + WL_RATE_P_2X2_VHT11SS2 = 71, + + /**************************** + * TX Beamforming, 2 chains * + **************************** + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_TXBF_OFDM_6 = 72, + WL_RATE_1X2_TXBF_OFDM_9 = 73, + WL_RATE_1X2_TXBF_OFDM_12 = 74, + WL_RATE_1X2_TXBF_OFDM_18 = 75, + WL_RATE_1X2_TXBF_OFDM_24 = 76, + WL_RATE_1X2_TXBF_OFDM_36 = 77, + WL_RATE_1X2_TXBF_OFDM_48 = 78, + WL_RATE_1X2_TXBF_OFDM_54 = 79, + + WL_RATE_1X2_TXBF_MCS0 = 80, + WL_RATE_1X2_TXBF_MCS1 = 81, + WL_RATE_1X2_TXBF_MCS2 = 82, + WL_RATE_1X2_TXBF_MCS3 = 83, + WL_RATE_1X2_TXBF_MCS4 = 84, + WL_RATE_1X2_TXBF_MCS5 = 85, + WL_RATE_1X2_TXBF_MCS6 = 86, + WL_RATE_1X2_TXBF_MCS7 = 87, + WL_RATE_P_1X2_TXBF_MCS87 = 88, + WL_RATE_P_1X2_TXBF_MCS88 = 89, + + WL_RATE_1X2_TXBF_VHT0SS1 = 80, + WL_RATE_1X2_TXBF_VHT1SS1 = 81, + WL_RATE_1X2_TXBF_VHT2SS1 = 82, + WL_RATE_1X2_TXBF_VHT3SS1 = 83, + WL_RATE_1X2_TXBF_VHT4SS1 = 84, + WL_RATE_1X2_TXBF_VHT5SS1 = 85, + WL_RATE_1X2_TXBF_VHT6SS1 = 86, + WL_RATE_1X2_TXBF_VHT7SS1 = 87, + WL_RATE_1X2_TXBF_VHT8SS1 = 88, + WL_RATE_1X2_TXBF_VHT9SS1 = 89, + WL_RATE_P_1X2_TXBF_VHT10SS1 = 90, + WL_RATE_P_1X2_TXBF_VHT11SS1 = 91, + + /* 2 Streams */ + WL_RATE_2X2_TXBF_SDM_MCS8 = 92, + WL_RATE_2X2_TXBF_SDM_MCS9 = 93, + WL_RATE_2X2_TXBF_SDM_MCS10 = 94, + WL_RATE_2X2_TXBF_SDM_MCS11 = 95, + WL_RATE_2X2_TXBF_SDM_MCS12 = 96, + WL_RATE_2X2_TXBF_SDM_MCS13 = 97, + WL_RATE_2X2_TXBF_SDM_MCS14 = 98, + WL_RATE_2X2_TXBF_SDM_MCS15 = 99, + WL_RATE_P_2X2_TXBF_SDM_MCS99 = 100, + WL_RATE_P_2X2_TXBF_SDM_MCS100 = 101, + + WL_RATE_2X2_TXBF_VHT0SS2 = 92, + WL_RATE_2X2_TXBF_VHT1SS2 = 93, + WL_RATE_2X2_TXBF_VHT2SS2 = 94, + WL_RATE_2X2_TXBF_VHT3SS2 = 95, + WL_RATE_2X2_TXBF_VHT4SS2 = 96, + WL_RATE_2X2_TXBF_VHT5SS2 = 97, + WL_RATE_2X2_TXBF_VHT6SS2 = 98, + WL_RATE_2X2_TXBF_VHT7SS2 = 99, + WL_RATE_2X2_TXBF_VHT8SS2 = 100, + WL_RATE_2X2_TXBF_VHT9SS2 = 101, + WL_RATE_P_2X2_TXBF_VHT10SS2 = 102, + WL_RATE_P_2X2_TXBF_VHT11SS2 = 103, + + + /************ + * 3 chains * + ************ + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_DSSS_1 = 104, + WL_RATE_1X3_DSSS_2 = 105, + WL_RATE_1X3_DSSS_5_5 = 106, + WL_RATE_1X3_DSSS_11 = 107, + + WL_RATE_1X3_CDD_OFDM_6 = 108, + WL_RATE_1X3_CDD_OFDM_9 = 109, + WL_RATE_1X3_CDD_OFDM_12 = 110, + WL_RATE_1X3_CDD_OFDM_18 = 111, + WL_RATE_1X3_CDD_OFDM_24 = 112, + WL_RATE_1X3_CDD_OFDM_36 = 113, + WL_RATE_1X3_CDD_OFDM_48 = 114, + WL_RATE_1X3_CDD_OFDM_54 = 115, + + WL_RATE_1X3_CDD_MCS0 = 116, + WL_RATE_1X3_CDD_MCS1 = 117, + WL_RATE_1X3_CDD_MCS2 = 118, + WL_RATE_1X3_CDD_MCS3 = 119, + WL_RATE_1X3_CDD_MCS4 = 120, + WL_RATE_1X3_CDD_MCS5 = 121, + WL_RATE_1X3_CDD_MCS6 = 122, + WL_RATE_1X3_CDD_MCS7 = 123, + WL_RATE_P_1X3_CDD_MCS87 = 124, + WL_RATE_P_1X3_CDD_MCS88 = 125, + + WL_RATE_1X3_VHT0SS1 = 116, + WL_RATE_1X3_VHT1SS1 = 117, + WL_RATE_1X3_VHT2SS1 = 118, + WL_RATE_1X3_VHT3SS1 = 119, + WL_RATE_1X3_VHT4SS1 = 120, + WL_RATE_1X3_VHT5SS1 = 121, + WL_RATE_1X3_VHT6SS1 = 122, + WL_RATE_1X3_VHT7SS1 = 123, + WL_RATE_1X3_VHT8SS1 = 124, + WL_RATE_1X3_VHT9SS1 = 125, + WL_RATE_P_1X3_VHT10SS1 = 126, + WL_RATE_P_1X3_VHT11SS1 = 127, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_STBC_MCS0 = 128, + 
WL_RATE_2X3_STBC_MCS1 = 129, + WL_RATE_2X3_STBC_MCS2 = 130, + WL_RATE_2X3_STBC_MCS3 = 131, + WL_RATE_2X3_STBC_MCS4 = 132, + WL_RATE_2X3_STBC_MCS5 = 133, + WL_RATE_2X3_STBC_MCS6 = 134, + WL_RATE_2X3_STBC_MCS7 = 135, + WL_RATE_P_2X3_STBC_MCS87 = 136, + WL_RATE_P_2X3_STBC_MCS88 = 137, + + WL_RATE_2X3_STBC_VHT0SS1 = 128, + WL_RATE_2X3_STBC_VHT1SS1 = 129, + WL_RATE_2X3_STBC_VHT2SS1 = 130, + WL_RATE_2X3_STBC_VHT3SS1 = 131, + WL_RATE_2X3_STBC_VHT4SS1 = 132, + WL_RATE_2X3_STBC_VHT5SS1 = 133, + WL_RATE_2X3_STBC_VHT6SS1 = 134, + WL_RATE_2X3_STBC_VHT7SS1 = 135, + WL_RATE_2X3_STBC_VHT8SS1 = 136, + WL_RATE_2X3_STBC_VHT9SS1 = 137, + WL_RATE_P_2X3_STBC_VHT10SS1 = 138, + WL_RATE_P_2X3_STBC_VHT11SS1 = 139, + + WL_RATE_2X3_SDM_MCS8 = 140, + WL_RATE_2X3_SDM_MCS9 = 141, + WL_RATE_2X3_SDM_MCS10 = 142, + WL_RATE_2X3_SDM_MCS11 = 143, + WL_RATE_2X3_SDM_MCS12 = 144, + WL_RATE_2X3_SDM_MCS13 = 145, + WL_RATE_2X3_SDM_MCS14 = 146, + WL_RATE_2X3_SDM_MCS15 = 147, + WL_RATE_P_2X3_SDM_MCS99 = 148, + WL_RATE_P_2X3_SDM_MCS100 = 149, + + WL_RATE_2X3_VHT0SS2 = 140, + WL_RATE_2X3_VHT1SS2 = 141, + WL_RATE_2X3_VHT2SS2 = 142, + WL_RATE_2X3_VHT3SS2 = 143, + WL_RATE_2X3_VHT4SS2 = 144, + WL_RATE_2X3_VHT5SS2 = 145, + WL_RATE_2X3_VHT6SS2 = 146, + WL_RATE_2X3_VHT7SS2 = 147, + WL_RATE_2X3_VHT8SS2 = 148, + WL_RATE_2X3_VHT9SS2 = 149, + WL_RATE_P_2X3_VHT10SS2 = 150, + WL_RATE_P_2X3_VHT11SS2 = 151, + + /* 3 Streams */ + WL_RATE_3X3_SDM_MCS16 = 152, + WL_RATE_3X3_SDM_MCS17 = 153, + WL_RATE_3X3_SDM_MCS18 = 154, + WL_RATE_3X3_SDM_MCS19 = 155, + WL_RATE_3X3_SDM_MCS20 = 156, + WL_RATE_3X3_SDM_MCS21 = 157, + WL_RATE_3X3_SDM_MCS22 = 158, + WL_RATE_3X3_SDM_MCS23 = 159, + WL_RATE_P_3X3_SDM_MCS101 = 160, + WL_RATE_P_3X3_SDM_MCS102 = 161, + + WL_RATE_3X3_VHT0SS3 = 152, + WL_RATE_3X3_VHT1SS3 = 153, + WL_RATE_3X3_VHT2SS3 = 154, + WL_RATE_3X3_VHT3SS3 = 155, + WL_RATE_3X3_VHT4SS3 = 156, + WL_RATE_3X3_VHT5SS3 = 157, + WL_RATE_3X3_VHT6SS3 = 158, + WL_RATE_3X3_VHT7SS3 = 159, + WL_RATE_3X3_VHT8SS3 = 160, + WL_RATE_3X3_VHT9SS3 = 161, + WL_RATE_P_3X3_VHT10SS3 = 162, + WL_RATE_P_3X3_VHT11SS3 = 163, + + + /**************************** + * TX Beamforming, 3 chains * + **************************** + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_TXBF_OFDM_6 = 164, + WL_RATE_1X3_TXBF_OFDM_9 = 165, + WL_RATE_1X3_TXBF_OFDM_12 = 166, + WL_RATE_1X3_TXBF_OFDM_18 = 167, + WL_RATE_1X3_TXBF_OFDM_24 = 168, + WL_RATE_1X3_TXBF_OFDM_36 = 169, + WL_RATE_1X3_TXBF_OFDM_48 = 170, + WL_RATE_1X3_TXBF_OFDM_54 = 171, + + WL_RATE_1X3_TXBF_MCS0 = 172, + WL_RATE_1X3_TXBF_MCS1 = 173, + WL_RATE_1X3_TXBF_MCS2 = 174, + WL_RATE_1X3_TXBF_MCS3 = 175, + WL_RATE_1X3_TXBF_MCS4 = 176, + WL_RATE_1X3_TXBF_MCS5 = 177, + WL_RATE_1X3_TXBF_MCS6 = 178, + WL_RATE_1X3_TXBF_MCS7 = 179, + WL_RATE_P_1X3_TXBF_MCS87 = 180, + WL_RATE_P_1X3_TXBF_MCS88 = 181, + + WL_RATE_1X3_TXBF_VHT0SS1 = 172, + WL_RATE_1X3_TXBF_VHT1SS1 = 173, + WL_RATE_1X3_TXBF_VHT2SS1 = 174, + WL_RATE_1X3_TXBF_VHT3SS1 = 175, + WL_RATE_1X3_TXBF_VHT4SS1 = 176, + WL_RATE_1X3_TXBF_VHT5SS1 = 177, + WL_RATE_1X3_TXBF_VHT6SS1 = 178, + WL_RATE_1X3_TXBF_VHT7SS1 = 179, + WL_RATE_1X3_TXBF_VHT8SS1 = 180, + WL_RATE_1X3_TXBF_VHT9SS1 = 181, + WL_RATE_P_1X3_TXBF_VHT10SS1 = 182, + WL_RATE_P_1X3_TXBF_VHT11SS1 = 183, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_TXBF_SDM_MCS8 = 184, + WL_RATE_2X3_TXBF_SDM_MCS9 = 185, + WL_RATE_2X3_TXBF_SDM_MCS10 = 186, + WL_RATE_2X3_TXBF_SDM_MCS11 = 187, + WL_RATE_2X3_TXBF_SDM_MCS12 = 188, + WL_RATE_2X3_TXBF_SDM_MCS13 = 189, + WL_RATE_2X3_TXBF_SDM_MCS14 = 190, + WL_RATE_2X3_TXBF_SDM_MCS15 = 191, + WL_RATE_P_2X3_TXBF_SDM_MCS99 = 
192, + WL_RATE_P_2X3_TXBF_SDM_MCS100 = 193, + + WL_RATE_2X3_TXBF_VHT0SS2 = 184, + WL_RATE_2X3_TXBF_VHT1SS2 = 185, + WL_RATE_2X3_TXBF_VHT2SS2 = 186, + WL_RATE_2X3_TXBF_VHT3SS2 = 187, + WL_RATE_2X3_TXBF_VHT4SS2 = 188, + WL_RATE_2X3_TXBF_VHT5SS2 = 189, + WL_RATE_2X3_TXBF_VHT6SS2 = 190, + WL_RATE_2X3_TXBF_VHT7SS2 = 191, + WL_RATE_2X3_TXBF_VHT8SS2 = 192, + WL_RATE_2X3_TXBF_VHT9SS2 = 193, + WL_RATE_P_2X3_TXBF_VHT10SS2 = 194, + WL_RATE_P_2X3_TXBF_VHT11SS2 = 195, + + /* 3 Streams */ + WL_RATE_3X3_TXBF_SDM_MCS16 = 196, + WL_RATE_3X3_TXBF_SDM_MCS17 = 197, + WL_RATE_3X3_TXBF_SDM_MCS18 = 198, + WL_RATE_3X3_TXBF_SDM_MCS19 = 199, + WL_RATE_3X3_TXBF_SDM_MCS20 = 200, + WL_RATE_3X3_TXBF_SDM_MCS21 = 201, + WL_RATE_3X3_TXBF_SDM_MCS22 = 202, + WL_RATE_3X3_TXBF_SDM_MCS23 = 203, + WL_RATE_P_3X3_TXBF_SDM_MCS101 = 204, + WL_RATE_P_3X3_TXBF_SDM_MCS102 = 205, + + WL_RATE_3X3_TXBF_VHT0SS3 = 196, + WL_RATE_3X3_TXBF_VHT1SS3 = 197, + WL_RATE_3X3_TXBF_VHT2SS3 = 198, + WL_RATE_3X3_TXBF_VHT3SS3 = 199, + WL_RATE_3X3_TXBF_VHT4SS3 = 200, + WL_RATE_3X3_TXBF_VHT5SS3 = 201, + WL_RATE_3X3_TXBF_VHT6SS3 = 202, + WL_RATE_3X3_TXBF_VHT7SS3 = 203, + WL_RATE_3X3_TXBF_VHT8SS3 = 204, + WL_RATE_3X3_TXBF_VHT9SS3 = 205, + WL_RATE_P_3X3_TXBF_VHT10SS3 = 206, + WL_RATE_P_3X3_TXBF_VHT11SS3 = 207, + + + /************ + * 4 chains * + ************ + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_DSSS_1 = 208, + WL_RATE_1X4_DSSS_2 = 209, + WL_RATE_1X4_DSSS_5_5 = 210, + WL_RATE_1X4_DSSS_11 = 211, + + WL_RATE_1X4_CDD_OFDM_6 = 212, + WL_RATE_1X4_CDD_OFDM_9 = 213, + WL_RATE_1X4_CDD_OFDM_12 = 214, + WL_RATE_1X4_CDD_OFDM_18 = 215, + WL_RATE_1X4_CDD_OFDM_24 = 216, + WL_RATE_1X4_CDD_OFDM_36 = 217, + WL_RATE_1X4_CDD_OFDM_48 = 218, + WL_RATE_1X4_CDD_OFDM_54 = 219, + + WL_RATE_1X4_CDD_MCS0 = 220, + WL_RATE_1X4_CDD_MCS1 = 221, + WL_RATE_1X4_CDD_MCS2 = 222, + WL_RATE_1X4_CDD_MCS3 = 223, + WL_RATE_1X4_CDD_MCS4 = 224, + WL_RATE_1X4_CDD_MCS5 = 225, + WL_RATE_1X4_CDD_MCS6 = 226, + WL_RATE_1X4_CDD_MCS7 = 227, + WL_RATE_P_1X4_CDD_MCS87 = 228, + WL_RATE_P_1X4_CDD_MCS88 = 229, + + WL_RATE_1X4_VHT0SS1 = 220, + WL_RATE_1X4_VHT1SS1 = 221, + WL_RATE_1X4_VHT2SS1 = 222, + WL_RATE_1X4_VHT3SS1 = 223, + WL_RATE_1X4_VHT4SS1 = 224, + WL_RATE_1X4_VHT5SS1 = 225, + WL_RATE_1X4_VHT6SS1 = 226, + WL_RATE_1X4_VHT7SS1 = 227, + WL_RATE_1X4_VHT8SS1 = 228, + WL_RATE_1X4_VHT9SS1 = 229, + WL_RATE_P_1X4_VHT10SS1 = 230, + WL_RATE_P_1X4_VHT11SS1 = 231, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_STBC_MCS0 = 232, + WL_RATE_2X4_STBC_MCS1 = 233, + WL_RATE_2X4_STBC_MCS2 = 234, + WL_RATE_2X4_STBC_MCS3 = 235, + WL_RATE_2X4_STBC_MCS4 = 236, + WL_RATE_2X4_STBC_MCS5 = 237, + WL_RATE_2X4_STBC_MCS6 = 238, + WL_RATE_2X4_STBC_MCS7 = 239, + WL_RATE_P_2X4_STBC_MCS87 = 240, + WL_RATE_P_2X4_STBC_MCS88 = 241, + + WL_RATE_2X4_STBC_VHT0SS1 = 232, + WL_RATE_2X4_STBC_VHT1SS1 = 233, + WL_RATE_2X4_STBC_VHT2SS1 = 234, + WL_RATE_2X4_STBC_VHT3SS1 = 235, + WL_RATE_2X4_STBC_VHT4SS1 = 236, + WL_RATE_2X4_STBC_VHT5SS1 = 237, + WL_RATE_2X4_STBC_VHT6SS1 = 238, + WL_RATE_2X4_STBC_VHT7SS1 = 239, + WL_RATE_2X4_STBC_VHT8SS1 = 240, + WL_RATE_2X4_STBC_VHT9SS1 = 241, + WL_RATE_P_2X4_STBC_VHT10SS1 = 242, + WL_RATE_P_2X4_STBC_VHT11SS1 = 243, + + WL_RATE_2X4_SDM_MCS8 = 244, + WL_RATE_2X4_SDM_MCS9 = 245, + WL_RATE_2X4_SDM_MCS10 = 246, + WL_RATE_2X4_SDM_MCS11 = 247, + WL_RATE_2X4_SDM_MCS12 = 248, + WL_RATE_2X4_SDM_MCS13 = 249, + WL_RATE_2X4_SDM_MCS14 = 250, + WL_RATE_2X4_SDM_MCS15 = 251, + WL_RATE_P_2X4_SDM_MCS99 = 252, + WL_RATE_P_2X4_SDM_MCS100 = 253, + + WL_RATE_2X4_VHT0SS2 = 244, + WL_RATE_2X4_VHT1SS2 = 245, + WL_RATE_2X4_VHT2SS2 
= 246, + WL_RATE_2X4_VHT3SS2 = 247, + WL_RATE_2X4_VHT4SS2 = 248, + WL_RATE_2X4_VHT5SS2 = 249, + WL_RATE_2X4_VHT6SS2 = 250, + WL_RATE_2X4_VHT7SS2 = 251, + WL_RATE_2X4_VHT8SS2 = 252, + WL_RATE_2X4_VHT9SS2 = 253, + WL_RATE_P_2X4_VHT10SS2 = 254, + WL_RATE_P_2X4_VHT11SS2 = 255, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_SDM_MCS16 = 256, + WL_RATE_3X4_SDM_MCS17 = 257, + WL_RATE_3X4_SDM_MCS18 = 258, + WL_RATE_3X4_SDM_MCS19 = 259, + WL_RATE_3X4_SDM_MCS20 = 260, + WL_RATE_3X4_SDM_MCS21 = 261, + WL_RATE_3X4_SDM_MCS22 = 262, + WL_RATE_3X4_SDM_MCS23 = 263, + WL_RATE_P_3X4_SDM_MCS101 = 264, + WL_RATE_P_3X4_SDM_MCS102 = 265, + + WL_RATE_3X4_VHT0SS3 = 256, + WL_RATE_3X4_VHT1SS3 = 257, + WL_RATE_3X4_VHT2SS3 = 258, + WL_RATE_3X4_VHT3SS3 = 259, + WL_RATE_3X4_VHT4SS3 = 260, + WL_RATE_3X4_VHT5SS3 = 261, + WL_RATE_3X4_VHT6SS3 = 262, + WL_RATE_3X4_VHT7SS3 = 263, + WL_RATE_3X4_VHT8SS3 = 264, + WL_RATE_3X4_VHT9SS3 = 265, + WL_RATE_P_3X4_VHT10SS3 = 266, + WL_RATE_P_3X4_VHT11SS3 = 267, + + + /* 4 Streams */ + WL_RATE_4X4_SDM_MCS24 = 268, + WL_RATE_4X4_SDM_MCS25 = 269, + WL_RATE_4X4_SDM_MCS26 = 270, + WL_RATE_4X4_SDM_MCS27 = 271, + WL_RATE_4X4_SDM_MCS28 = 272, + WL_RATE_4X4_SDM_MCS29 = 273, + WL_RATE_4X4_SDM_MCS30 = 274, + WL_RATE_4X4_SDM_MCS31 = 275, + WL_RATE_P_4X4_SDM_MCS103 = 276, + WL_RATE_P_4X4_SDM_MCS104 = 277, + + WL_RATE_4X4_VHT0SS4 = 268, + WL_RATE_4X4_VHT1SS4 = 269, + WL_RATE_4X4_VHT2SS4 = 270, + WL_RATE_4X4_VHT3SS4 = 271, + WL_RATE_4X4_VHT4SS4 = 272, + WL_RATE_4X4_VHT5SS4 = 273, + WL_RATE_4X4_VHT6SS4 = 274, + WL_RATE_4X4_VHT7SS4 = 275, + WL_RATE_4X4_VHT8SS4 = 276, + WL_RATE_4X4_VHT9SS4 = 277, + WL_RATE_P_4X4_VHT10SS4 = 278, + WL_RATE_P_4X4_VHT11SS4 = 279, + + + /**************************** + * TX Beamforming, 4 chains * + **************************** + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_TXBF_OFDM_6 = 280, + WL_RATE_1X4_TXBF_OFDM_9 = 281, + WL_RATE_1X4_TXBF_OFDM_12 = 282, + WL_RATE_1X4_TXBF_OFDM_18 = 283, + WL_RATE_1X4_TXBF_OFDM_24 = 284, + WL_RATE_1X4_TXBF_OFDM_36 = 285, + WL_RATE_1X4_TXBF_OFDM_48 = 286, + WL_RATE_1X4_TXBF_OFDM_54 = 287, + + WL_RATE_1X4_TXBF_MCS0 = 288, + WL_RATE_1X4_TXBF_MCS1 = 289, + WL_RATE_1X4_TXBF_MCS2 = 290, + WL_RATE_1X4_TXBF_MCS3 = 291, + WL_RATE_1X4_TXBF_MCS4 = 292, + WL_RATE_1X4_TXBF_MCS5 = 293, + WL_RATE_1X4_TXBF_MCS6 = 294, + WL_RATE_1X4_TXBF_MCS7 = 295, + WL_RATE_P_1X4_TXBF_MCS87 = 296, + WL_RATE_P_1X4_TXBF_MCS88 = 297, + + WL_RATE_1X4_TXBF_VHT0SS1 = 288, + WL_RATE_1X4_TXBF_VHT1SS1 = 289, + WL_RATE_1X4_TXBF_VHT2SS1 = 290, + WL_RATE_1X4_TXBF_VHT3SS1 = 291, + WL_RATE_1X4_TXBF_VHT4SS1 = 292, + WL_RATE_1X4_TXBF_VHT5SS1 = 293, + WL_RATE_1X4_TXBF_VHT6SS1 = 294, + WL_RATE_1X4_TXBF_VHT7SS1 = 295, + WL_RATE_1X4_TXBF_VHT8SS1 = 296, + WL_RATE_1X4_TXBF_VHT9SS1 = 297, + WL_RATE_P_1X4_TXBF_VHT10SS1 = 298, + WL_RATE_P_1X4_TXBF_VHT11SS1 = 299, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_TXBF_SDM_MCS8 = 300, + WL_RATE_2X4_TXBF_SDM_MCS9 = 301, + WL_RATE_2X4_TXBF_SDM_MCS10 = 302, + WL_RATE_2X4_TXBF_SDM_MCS11 = 303, + WL_RATE_2X4_TXBF_SDM_MCS12 = 304, + WL_RATE_2X4_TXBF_SDM_MCS13 = 305, + WL_RATE_2X4_TXBF_SDM_MCS14 = 306, + WL_RATE_2X4_TXBF_SDM_MCS15 = 307, + WL_RATE_P_2X4_TXBF_SDM_MCS99 = 308, + WL_RATE_P_2X4_TXBF_SDM_MCS100 = 309, + + WL_RATE_2X4_TXBF_VHT0SS2 = 300, + WL_RATE_2X4_TXBF_VHT1SS2 = 301, + WL_RATE_2X4_TXBF_VHT2SS2 = 302, + WL_RATE_2X4_TXBF_VHT3SS2 = 303, + WL_RATE_2X4_TXBF_VHT4SS2 = 304, + WL_RATE_2X4_TXBF_VHT5SS2 = 305, + WL_RATE_2X4_TXBF_VHT6SS2 = 306, + WL_RATE_2X4_TXBF_VHT7SS2 = 307, + WL_RATE_2X4_TXBF_VHT8SS2 = 308, + WL_RATE_2X4_TXBF_VHT9SS2 = 309, + 
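	/* Note: within each chain/stream group, the 11n (SDM_MCS*/CDD_MCS*) names and the
	 * 11ac (VHT*SS*) names share the same numeric indices -- e.g. WL_RATE_2X4_TXBF_SDM_MCS8
	 * and WL_RATE_2X4_TXBF_VHT0SS2 are both 300 -- while the WL_RATE_P_* entries appear to
	 * extend each group beyond the standard MCS/VHT set. */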
WL_RATE_P_2X4_TXBF_VHT10SS2 = 310, + WL_RATE_P_2X4_TXBF_VHT11SS2 = 311, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_TXBF_SDM_MCS16 = 312, + WL_RATE_3X4_TXBF_SDM_MCS17 = 313, + WL_RATE_3X4_TXBF_SDM_MCS18 = 314, + WL_RATE_3X4_TXBF_SDM_MCS19 = 315, + WL_RATE_3X4_TXBF_SDM_MCS20 = 316, + WL_RATE_3X4_TXBF_SDM_MCS21 = 317, + WL_RATE_3X4_TXBF_SDM_MCS22 = 318, + WL_RATE_3X4_TXBF_SDM_MCS23 = 319, + WL_RATE_P_3X4_TXBF_SDM_MCS101 = 320, + WL_RATE_P_3X4_TXBF_SDM_MCS102 = 321, + + WL_RATE_3X4_TXBF_VHT0SS3 = 312, + WL_RATE_3X4_TXBF_VHT1SS3 = 313, + WL_RATE_3X4_TXBF_VHT2SS3 = 314, + WL_RATE_3X4_TXBF_VHT3SS3 = 315, + WL_RATE_3X4_TXBF_VHT4SS3 = 316, + WL_RATE_3X4_TXBF_VHT5SS3 = 317, + WL_RATE_3X4_TXBF_VHT6SS3 = 318, + WL_RATE_3X4_TXBF_VHT7SS3 = 319, + WL_RATE_P_3X4_TXBF_VHT8SS3 = 320, + WL_RATE_P_3X4_TXBF_VHT9SS3 = 321, + WL_RATE_P_3X4_TXBF_VHT10SS3 = 322, + WL_RATE_P_3X4_TXBF_VHT11SS3 = 323, + + /* 4 Streams */ + WL_RATE_4X4_TXBF_SDM_MCS24 = 324, + WL_RATE_4X4_TXBF_SDM_MCS25 = 325, + WL_RATE_4X4_TXBF_SDM_MCS26 = 326, + WL_RATE_4X4_TXBF_SDM_MCS27 = 327, + WL_RATE_4X4_TXBF_SDM_MCS28 = 328, + WL_RATE_4X4_TXBF_SDM_MCS29 = 329, + WL_RATE_4X4_TXBF_SDM_MCS30 = 330, + WL_RATE_4X4_TXBF_SDM_MCS31 = 331, + WL_RATE_P_4X4_TXBF_SDM_MCS103 = 332, + WL_RATE_P_4X4_TXBF_SDM_MCS104 = 333, + + WL_RATE_4X4_TXBF_VHT0SS4 = 324, + WL_RATE_4X4_TXBF_VHT1SS4 = 325, + WL_RATE_4X4_TXBF_VHT2SS4 = 326, + WL_RATE_4X4_TXBF_VHT3SS4 = 327, + WL_RATE_4X4_TXBF_VHT4SS4 = 328, + WL_RATE_4X4_TXBF_VHT5SS4 = 329, + WL_RATE_4X4_TXBF_VHT6SS4 = 330, + WL_RATE_4X4_TXBF_VHT7SS4 = 331, + WL_RATE_P_4X4_TXBF_VHT8SS4 = 332, + WL_RATE_P_4X4_TXBF_VHT9SS4 = 333, + WL_RATE_P_4X4_TXBF_VHT10SS4 = 334, + WL_RATE_P_4X4_TXBF_VHT11SS4 = 335 + +} clm_rates_t; + +/* Number of rate codes */ +#define WL_NUMRATES 336 + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _bcmwifi_rates_h_ */ diff --git a/drivers/net/wireless/bcmdhd/bcmxtlv.c b/drivers/net/wireless/bcmdhd/bcmxtlv.c new file mode 100644 index 000000000000..26cfb9ac264a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmxtlv.c @@ -0,0 +1,457 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmxtlv.c 527361 2015-01-17 01:48:34Z $ + */ + +#include + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#else /* !BCMDRIVER */ + #include /* AS!!! 
*/ +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +INLINE void* MALLOCZ(void *o, size_t s) { BCM_REFERENCE(o); return calloc(1, s); } +INLINE void MFREE(void *o, void *p, size_t s) { BCM_REFERENCE(o); BCM_REFERENCE(s); free(p); } +#endif /* !BCMDRIVER */ + +#include +#include + +static INLINE int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts) +{ + return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + BCM_XTLV_HDR_SIZE, 4) + : (dlen + BCM_XTLV_HDR_SIZE)); +} + +bcm_xtlv_t * +bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts) +{ + int sz; + /* advance to next elt */ + sz = BCM_XTLV_SIZE(elt, opts); + elt = (bcm_xtlv_t*)((uint8 *)elt + sz); + *buflen -= sz; + + /* validate next elt */ + if (!bcm_valid_xtlv(elt, *buflen, opts)) + return NULL; + + return elt; +} + +int +bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts) +{ + if (!tlv_buf || !buf || !len) + return BCME_BADARG; + + tlv_buf->opts = opts; + tlv_buf->size = len; + tlv_buf->head = buf; + tlv_buf->buf = buf; + return BCME_OK; +} + +uint16 +bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return 0; + return (uint16)(tbuf->buf - tbuf->head); +} +uint16 +bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return 0; + return tbuf->size - bcm_xtlv_buf_len(tbuf); +} +uint8 * +bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return NULL; + return tbuf->buf; +} +uint8 * +bcm_xtlv_head(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return NULL; + return tbuf->head; +} +int +bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(dlen, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(dlen); + memcpy(xtlv->data, data, dlen); + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(1, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + xtlv->data[0] = data; + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(2, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + htol16_ua_store(data, xtlv->data); + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(4, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + htol32_ua_store(data, xtlv->data); + tbuf->buf += size; + return BCME_OK; +} + +/* + * upacks xtlv record from buf checks the type + * copies data to callers buffer + * advances tlv pointer to next record + * caller's resposible for dst space check + */ +int +bcm_unpack_xtlv_entry(uint8 
**tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst, + bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + uint16 len; + uint16 type; + + ASSERT(ptlv); + /* tlv header is always packed in LE order */ + len = ltoh16(ptlv->len); + type = ltoh16(ptlv->id); + if (len == 0) { + /* z-len tlv headers: allow, but don't process */ + printf("z-len, skip unpack\n"); + } else { + if ((type != xpct_type) || + (len > xpct_len)) { + printf("xtlv_unpack Error: found[type:%d,len:%d] != xpct[type:%d,len:%d]\n", + type, len, xpct_type, xpct_len); + return BCME_BADARG; + } + /* copy tlv record to caller's buffer */ + memcpy(dst, ptlv->data, ptlv->len); + } + *tlv_buf += BCM_XTLV_SIZE(ptlv, opts); + return BCME_OK; +} + +/* + * packs user data into tlv record + * advances tlv pointer to next xtlv slot + * buflen is used for tlv_buf space check + */ +int +bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src, + bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + int size; + + ASSERT(ptlv); + ASSERT(src); + + size = bcm_xtlv_size_for_data(len, opts); + + /* make sure the record fits in the caller's tlv buffer */ + if (size > *buflen) { + printf("bcm_pack_xtlv_entry: no space tlv_buf: requested:%d, available:%d\n", + size, *buflen); + return BCME_BADLEN; + } + ptlv->id = htol16(type); + ptlv->len = htol16(len); + + /* copy callers data */ + memcpy(ptlv->data, src, len); + + /* advance callers pointer to tlv buff */ + *tlv_buf += size; + /* decrement the len */ + *buflen -= (uint16)size; + return BCME_OK; +} + +/* + * unpack all xtlv records from the buffer and issue a callback + * to the set function, one call per found tlv record + */ +int +bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_xtlv_unpack_cbfn_t *cbfn) +{ + uint16 len; + uint16 type; + int res = BCME_OK; + int size; + bcm_xtlv_t *ptlv; + int sbuflen = buflen; + + ASSERT(!buflen || tlv_buf); + ASSERT(!buflen || cbfn); + + while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) { + ptlv = (bcm_xtlv_t *)tlv_buf; + + /* tlv header is always packed in LE order */ + len = ltoh16(ptlv->len); + type = ltoh16(ptlv->id); + + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + /* check for possible buffer overrun */ + if (sbuflen < 0) + break; + + if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK) + break; + tlv_buf += size; + } + return res; +} + +int +bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next, + int *outlen) +{ + int res = BCME_OK; + uint16 tlv_id; + uint16 tlv_len; + uint8 *startp; + uint8 *endp; + uint8 *buf; + bool more; + int size; + + ASSERT(get_next && pack_next); + + buf = (uint8 *)tlv_buf; + startp = buf; + endp = (uint8 *)buf + buflen; + more = TRUE; + while (more && (buf < endp)) { + more = get_next(ctx, &tlv_id, &tlv_len); + size = bcm_xtlv_size_for_data(tlv_len, opts); + if ((buf + size) >= endp) { + res = BCME_BUFTOOSHORT; + goto done; + } + + htol16_ua_store(tlv_id, buf); + htol16_ua_store(tlv_len, buf + sizeof(tlv_id)); + pack_next(ctx, tlv_id, tlv_len, buf + BCM_XTLV_HDR_SIZE); + buf += size; + } + + if (more) + res = BCME_BUFTOOSHORT; + +done: + if (outlen) { + *outlen = (int)(buf - startp); + } + return res; +} + +/* + * pack xtlv buffer from memory according to xtlv_desc_t + */ +int +bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + uint8
*ptlv = (uint8 *)*tlv_buf; + + while (items->type != 0) { + if ((res = bcm_pack_xtlv_entry(&ptlv, + buflen, items->type, + items->len, items->ptr, opts) != BCME_OK)) { + break; + } + items++; + } + *tlv_buf = ptlv; /* update the external pointer */ + return res; +} + +/* + * unpack xtlv buffer to memory according to xtlv_desc_t + * + */ +int +bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items, bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + bcm_xtlv_t *elt; + + elt = bcm_valid_xtlv((bcm_xtlv_t *)tlv_buf, *buflen, opts) ? (bcm_xtlv_t *)tlv_buf : NULL; + if (!elt || !items) { + res = BCME_BADARG; + return res; + } + + for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) { + /* find matches in desc_t items */ + xtlv_desc_t *dst_desc = items; + uint16 len = ltoh16(elt->len); + + while (dst_desc->type != 0) { + if (ltoh16(elt->id) == dst_desc->type) { + if (len != dst_desc->len) { + res = BCME_BADLEN; + } else { + memcpy(dst_desc->ptr, elt->data, len); + } + break; + } + dst_desc++; + } + } + + if (res == BCME_OK && *buflen != 0) + res = BCME_BUFTOOSHORT; + + return res; +} + +/* + * return data pointer of a given ID from xtlv buffer. + * If the specified xTLV ID is found, on return *data_len_out will contain + * the the data length of the xTLV ID. + */ +void * +bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen_out, bcm_xtlv_opts_t opts) +{ + void *retptr = NULL; + uint16 type, len; + int size; + bcm_xtlv_t *ptlv; + int sbuflen = buflen; + + while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) { + ptlv = (bcm_xtlv_t *)tlv_buf; + + /* tlv header is always packed in LE order */ + type = ltoh16(ptlv->id); + len = ltoh16(ptlv->len); + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + /* check for possible buffer overrun */ + if (sbuflen < 0) { + printf("%s %d: Invalid sbuflen %d\n", + __FUNCTION__, __LINE__, sbuflen); + break; + } + + if (id == type) { + retptr = ptlv->data; + if (datalen_out) { + *datalen_out = len; + } + break; + } + tlv_buf += size; + } + + return retptr; +} + +int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int size; /* entire size of the XTLV including header, data, and optional padding */ + int len; /* XTLV's value real length wthout padding */ + + len = BCM_XTLV_LEN(elt); + + size = bcm_xtlv_size_for_data(len, opts); + + return size; +} diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h new file mode 100644 index 000000000000..fbc6269a7594 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd.h @@ -0,0 +1,1607 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd.h 610267 2016-01-06 16:03:53Z $ + */ + +/**************** + * Common types * + */ + +#ifndef _dhd_h_ +#define _dhd_h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ +#include +/* The kernel threading is sdio-specific */ +struct task_struct; +struct sched_param; +int setScheduler(struct task_struct *p, int policy, struct sched_param *param); +int get_scheduler_policy(struct task_struct *p); +#define MAX_EVENT 16 + +#define ALL_INTERFACES 0xff + +#include +#include + +#if defined(BCMWDF) +#include +#include +#endif /* (BCMWDF) */ + +#ifdef DEBUG_DPC_THREAD_WATCHDOG +#define MAX_RESCHED_CNT 600 +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT)) +#define WL_VENDOR_EXT_SUPPORT +#endif /* 3.13.0 <= LINUX_KERNEL_VERSION < 3.18.0 || CONFIG_BCMDHD_VENDOR_EXT */ +#if defined(CONFIG_ANDROID) && defined(WL_VENDOR_EXT_SUPPORT) +#if !defined(GSCAN_SUPPORT) +#define GSCAN_SUPPORT +#endif +#endif /* CONFIG_ANDROID && WL_VENDOR_EXT_SUPPORT */ + +#if defined(KEEP_ALIVE) +/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ +#define KEEP_ALIVE_PERIOD 55000 +#define NULL_PKT_STR "null_pkt" +#endif /* KEEP_ALIVE */ + +/* Forward decls */ +struct dhd_bus; +struct dhd_prot; +struct dhd_info; +struct dhd_ioctl; + +/* The level of bus communication with the dongle */ +enum dhd_bus_state { + DHD_BUS_DOWN, /* Not ready for frame transfers */ + DHD_BUS_LOAD, /* Download access only (CPU reset) */ + DHD_BUS_DATA, /* Ready for frame transfers */ + DHD_BUS_SUSPEND, /* Bus has been suspended */ + DHD_BUS_DOWN_IN_PROGRESS, /* Bus going Down */ +}; + +/* + * Bit fields to Indicate clean up process that wait till they are finished. 
+ * Future synchronizable processes can add their bit filed below and update + * their functionalities accordingly + */ +#define DHD_BUS_BUSY_IN_TX 0x01 +#define DHD_BUS_BUSY_IN_SEND_PKT 0x02 +#define DHD_BUS_BUSY_IN_DPC 0x04 +#define DHD_BUS_BUSY_IN_WD 0x08 +#define DHD_BUS_BUSY_IN_IOVAR 0x10 +#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20 +#define DHD_BUS_BUSY_IN_SUSPEND 0x40 +#define DHD_BUS_BUSY_IN_RESUME 0x80 +#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100 +#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200 +#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400 +#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \ + DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \ + DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) + +/* Download Types */ +typedef enum download_type { + FW, + NVRAM +} download_type_t; + + +/* For supporting multiple interfaces */ +#define DHD_MAX_IFS 16 +#define DHD_DEL_IF -0xE +#define DHD_BAD_IF -0xF + +enum dhd_op_flags { +/* Firmware requested operation mode */ + DHD_FLAG_STA_MODE = (1 << (0)), /* STA only */ + DHD_FLAG_HOSTAP_MODE = (1 << (1)), /* SOFTAP only */ + DHD_FLAG_P2P_MODE = (1 << (2)), /* P2P Only */ + /* STA + P2P */ + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE), + DHD_FLAG_CONCURR_MULTI_CHAN_MODE = (1 << (4)), /* STA + P2P */ + /* Current P2P mode for P2P connection */ + DHD_FLAG_P2P_GC_MODE = (1 << (5)), + DHD_FLAG_P2P_GO_MODE = (1 << (6)), + DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */ + DHD_FLAG_IBSS_MODE = (1 << (8)), + DHD_FLAG_MFG_MODE = (1 << (9)), + DHD_FLAG_RSDB_MODE = (1 << (10)), + DHD_FLAG_MP2P_MODE = (1 << (11)) +}; + +#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \ + (dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1) + +/* Max sequential TX/RX Control timeouts to set HANG event */ +#ifndef MAX_CNTL_TX_TIMEOUT +#define MAX_CNTL_TX_TIMEOUT 2 +#endif /* MAX_CNTL_TX_TIMEOUT */ +#ifndef MAX_CNTL_RX_TIMEOUT +#define MAX_CNTL_RX_TIMEOUT 1 +#endif /* MAX_CNTL_RX_TIMEOUT */ + +#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */ +#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. 
Unassoc Active setting from DHD */ +#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ + +#ifndef POWERUP_MAX_RETRY +#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */ +#endif +#ifndef POWERUP_WAIT_MS +#define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */ +#endif +#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */ +#ifdef DHD_DEBUG +#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */ +#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */ +#endif + +#define FW_VER_STR_LEN 128 + +enum dhd_bus_wake_state { + WAKE_LOCK_OFF, + WAKE_LOCK_PRIV, + WAKE_LOCK_DPC, + WAKE_LOCK_IOCTL, + WAKE_LOCK_DOWNLOAD, + WAKE_LOCK_TMOUT, + WAKE_LOCK_WATCHDOG, + WAKE_LOCK_LINK_DOWN_TMOUT, + WAKE_LOCK_PNO_FIND_TMOUT, + WAKE_LOCK_SOFTAP_SET, + WAKE_LOCK_SOFTAP_STOP, + WAKE_LOCK_SOFTAP_START, + WAKE_LOCK_SOFTAP_THREAD +}; + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF, + DHD_PREALLOC_DATABUF, + DHD_PREALLOC_OSL_BUF, +#if defined(STATIC_WL_PRIV_STRUCT) + DHD_PREALLOC_WIPHY_ESCAN0 = 5, +#endif /* STATIC_WL_PRIV_STRUCT */ + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, + DHD_PREALLOC_IF_FLOW_LKUP = 9, + DHD_PREALLOC_MEMDUMP_BUF = 10, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15 +}; + +enum dhd_dongledump_mode { + DUMP_DISABLED = 0, + DUMP_MEMONLY, + DUMP_MEMFILE, + DUMP_MEMFILE_BUGON, + DUMP_MEMFILE_MAX +}; + +enum dhd_dongledump_type { + DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, + DUMP_TYPE_D3_ACK_TIMEOUT, + DUMP_TYPE_DONGLE_TRAP, + DUMP_TYPE_MEMORY_CORRUPTION, + DUMP_TYPE_PKTID_AUDIT_FAILURE, + DUMP_TYPE_SCAN_TIMEOUT, + DUMP_TYPE_SCAN_BUSY, + DUMP_TYPE_BY_SYSDUMP, + DUMP_TYPE_BY_LIVELOCK, + DUMP_TYPE_AP_LINKUP_FAILURE +}; + +enum dhd_hang_reason { + HANG_REASON_MASK = 0x8000, + HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, + HANG_REASON_DONGLE_TRAP = 0x8002, + HANG_REASON_D3_ACK_TIMEOUT = 0x8003, + HANG_REASON_BUS_DOWN = 0x8004, + HANG_REASON_PCIE_LINK_DOWN = 0x8005, + HANG_REASON_MSGBUF_LIVELOCK = 0x8006, + HANG_REASON_P2P_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_HT_AVAIL_ERROR = 0x8008, + HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, + HANG_REASON_MAX = 0x800a +}; + +enum dhd_rsdb_scan_features { + /* Downgraded scan feature for AP active */ + RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01, + /* Downgraded scan feature for P2P Discovery */ + RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02, + /* Enable channel pruning for ROAM SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10, + /* Enable channel pruning for any SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20 +}; + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + +/** + * DMA-able buffer parameters + * - dmaaddr_t is 32bits on a 32bit host. + * dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr + * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer. 
+ */ +typedef struct dhd_dma_buf { + void *va; /* virtual address of buffer */ + uint32 len; /* user requested buffer length */ + dmaaddr_t pa; /* physical address of buffer */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ + uint32 _alloced; /* actual size of buffer allocated with align and pad */ +} dhd_dma_buf_t; + +/* host reordering packts logic */ +/* followed the structure to hold the reorder buffers (void **p) */ +typedef struct reorder_info { + void **p; + uint8 flow_id; + uint8 cur_idx; + uint8 exp_idx; + uint8 max_idx; + uint8 pend_pkts; +} reorder_info_t; + +#ifdef DHDTCPACK_SUPPRESS + +enum { + /* TCPACK suppress off */ + TCPACK_SUP_OFF, + /* Replace TCPACK in txq when new coming one has higher ACK number. */ + TCPACK_SUP_REPLACE, + /* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA. + * This will give benefits to Half-Duplex bus interface(e.g. SDIO) that + * 1. we are able to read TCP DATA packets first from the bus + * 2. TCPACKs that don't need to hurry delivered remains longer in TXQ so can be suppressed. + */ + TCPACK_SUP_DELAYTX, + TCPACK_SUP_HOLD, + TCPACK_SUP_LAST_MODE +}; +#endif /* DHDTCPACK_SUPPRESS */ + + +/* + * Accumulating the queue lengths of all flowring queues in a parent object, + * to assert flow control, when the cummulative queue length crosses an upper + * threshold defined on a parent object. Upper threshold may be maintained + * at a station level, at an interface level, or at a dhd instance. + * + * cumm_ctr_t abstraction: + * cumm_ctr_t abstraction may be enhanced to use an object with a hysterisis + * pause on/off threshold callback. + * All macros use the address of the cummulative length in the parent objects. + * + * BCM_GMAC3 builds use a single perimeter lock, as opposed to a per queue lock. + * Cummulative counters in parent objects may be updated without spinlocks. + * + * In non BCM_GMAC3, if a cummulative queue length is desired across all flows + * belonging to either of (a station, or an interface or a dhd instance), then + * an atomic operation is required using an atomic_t cummulative counters or + * using a spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct. + */ + +/* Cummulative length not supported. 
*/ +typedef uint32 cumm_ctr_t; +#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen)) +#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */ +#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */ +#define DHD_CUMM_CTR_INIT(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_INCR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_DECR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); + +/* DMA'ing r/w indices for rings supported */ +#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */ +#define DMA_INDX_ENAB(dma_indxsup) 0 +#elif defined BCM_INDX_DMA /* FW gets r/w indices from Host memory */ +#define DMA_INDX_ENAB(dma_indxsup) 1 +#else /* r/w indices in TCM or host memory based on FW/Host agreement */ +#define DMA_INDX_ENAB(dma_indxsup) dma_indxsup +#endif /* BCM_INDX_TCM */ + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) +struct tdls_peer_node { + uint8 addr[ETHER_ADDR_LEN]; + struct tdls_peer_node *next; +}; +typedef struct tdls_peer_node tdls_peer_node_t; +typedef struct { + tdls_peer_node_t *node; + uint8 tdls_peer_count; +} tdls_peer_tbl_t; +#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ + +#ifdef DHD_LOG_DUMP +/* below structure describe ring buffer. */ +struct dhd_log_dump_buf +{ + spinlock_t lock; + unsigned int wraparound; + unsigned long max; + unsigned int remain; + char* present; + char* front; + char* buffer; +}; + +#define DHD_LOG_DUMP_BUFFER_SIZE (1024 * 1024) +#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 + +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#endif /* DHD_LOG_DUMP */ +#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/log/" + +/* Common structure for module and instance linkage */ +typedef struct dhd_pub { + /* Linkage ponters */ + osl_t *osh; /* OSL handle */ + struct dhd_bus *bus; /* Bus module handle */ + struct dhd_prot *prot; /* Protocol module handle */ + struct dhd_info *info; /* Info module handle */ + + /* to NDIS developer, the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!! 
+ */ + + + /* Internal dhd items */ + bool up; /* Driver up/down (to OS) */ + bool txoff; /* Transmit flow-controlled */ + bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ + enum dhd_bus_state busstate; + uint dhd_bus_busy_state; /* Bus busy state */ + uint hdrlen; /* Total DHD header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ + uint rxsz; /* Rx buffer size bus module should use */ + uint8 wme_dp; /* wme discard priority */ + + /* Dongle media info */ + bool iswl; /* Dongle-resident driver is wl */ + ulong drv_version; /* Version of dongle-resident driver */ + struct ether_addr mac; /* MAC address obtained from dongle */ + dngl_stats_t dstats; /* Stats for dongle-based data */ + + /* Additional stats for the bus level */ + ulong tx_packets; /* Data packets sent to dongle */ + ulong tx_dropped; /* Data packets dropped in dhd */ + ulong tx_multicast; /* Multicast data packets sent to dongle */ + ulong tx_errors; /* Errors in sending data to dongle */ + ulong tx_ctlpkts; /* Control packets sent to dongle */ + ulong tx_ctlerrs; /* Errors sending control frames to dongle */ + ulong rx_packets; /* Packets sent up the network interface */ + ulong rx_multicast; /* Multicast packets sent up the network interface */ + ulong rx_errors; /* Errors processing rx data packets */ + ulong rx_ctlpkts; /* Control frames processed from dongle */ + ulong rx_ctlerrs; /* Errors in processing rx control frames */ + ulong rx_dropped; /* Packets dropped locally (no memory) */ + ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */ + ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ + ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */ + ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */ + ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */ + ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ + ulong fc_packets; /* Number of flow control pkts recvd */ + + /* Last error return */ + int bcmerror; + uint tickcnt; + + /* Last error from dongle */ + int dongle_error; + + uint8 country_code[WLC_CNTRY_BUF_SZ]; + + /* Suspend disable flag and "in suspend" flag */ + int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ + int in_suspend; /* flag set to 1 when early suspend called */ +#ifdef PNO_SUPPORT + int pno_enable; /* pno status : "1" is pno enable */ + int pno_suspend; /* pno suspend status : "1" is pno suspended */ +#endif /* PNO_SUPPORT */ + /* DTIM skip value, default 0(or 1) means wake each DTIM + * 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3) + */ + int suspend_bcn_li_dtim; /* bcn_li_dtim value in suspend mode */ +#ifdef PKT_FILTER_SUPPORT + int early_suspended; /* Early suspend status */ + int dhcp_in_progress; /* DHCP period */ +#endif + + /* Pkt filter defination */ + char * pktfilter[100]; + int pktfilter_count; + + wl_country_t dhd_cspec; /* Current Locale info */ +#ifdef CUSTOM_COUNTRY_CODE + u32 dhd_cflags; +#endif /* CUSTOM_COUNTRY_CODE */ + bool force_country_change; + char eventmask[WL_EVENTING_MASK_LEN]; + int op_mode; /* STA, HostAPD, WFD, SoftAP */ + +/* Set this to 1 to use a seperate interface (p2p0) for p2p operations. 
+ * For ICS MR1 releases it should be disable to be compatable with ICS MR1 Framework + * see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile + */ +/* #define WL_ENABLE_P2P_IF 1 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ + struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ +#endif + +#ifdef PROP_TXSTATUS + bool wlfc_enabled; + int wlfc_mode; + void* wlfc_state; + /* + Mode in which the dhd flow control shall operate. Must be set before + traffic starts to the device. + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + 3 - Only AMPDU hostreorder used. no wlfc. + */ + uint8 proptxstatus_mode; + bool proptxstatus_txoff; + bool proptxstatus_module_ignore; + bool proptxstatus_credit_ignore; + bool proptxstatus_txstatus_ignore; + + bool wlfc_rxpkt_chk; + /* + * implement below functions in each platform if needed. + */ + /* platform specific function whether to skip flow control */ + bool (*skip_fc)(void); + /* platform specific function for wlfc_enable and wlfc_deinit */ + void (*plat_init)(void *dhd); + void (*plat_deinit)(void *dhd); +#ifdef DHD_WLFC_THREAD + bool wlfc_thread_go; + struct task_struct* wlfc_thread; + wait_queue_head_t wlfc_wqhead; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + void *pno_state; +#endif +#ifdef RTT_SUPPORT + void *rtt_state; +#endif + bool dongle_isolation; + bool dongle_trap_occured; /* flag for sending HANG event to upper layer */ + int hang_was_sent; + int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ + int txcnt_timeout; /* counter txcnt timeout to send HANG */ +#ifdef BCMPCIE + int d3ackcnt_timeout; /* counter d3ack timeout to send HANG */ +#endif /* BCMPCIE */ + bool hang_report; /* enable hang report by default */ + uint16 hang_reason; /* reason codes for HANG event */ +#ifdef WLMEDIA_HTSF + uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */ +#endif +#ifdef WLTDLS + bool tdls_enable; +#endif + struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS]; + #define WLC_IOCTL_MAXBUF_FWCAP 512 + char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP]; + #define MAXSKBPEND 1024 + void *skbbuf[MAXSKBPEND]; + uint32 store_idx; + uint32 sent_idx; +#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; /* TCPACK suppress mode */ + void *tcpack_sup_module; /* TCPACK suppress module */ + uint32 tcpack_sup_ratio; + uint32 tcpack_sup_delay; +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(ARP_OFFLOAD_SUPPORT) + uint32 arp_version; +#endif +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool dhd_bug_on; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ +#ifdef CUSTOM_SET_CPUCORE + struct task_struct * current_dpc; + struct task_struct * current_rxf; + int chan_isvht80; +#endif /* CUSTOM_SET_CPUCORE */ + + + void *sta_pool; /* pre-allocated pool of sta objects */ + void *staid_allocator; /* allocator of sta indexes */ +#ifdef PCIE_FULL_DONGLE + bool flow_rings_inited; /* set this flag after initializing flow rings */ +#endif /* PCIE_FULL_DONGLE */ + void *flowid_allocator; /* unique flowid allocator */ + void *flow_ring_table; /* flow ring table, include prot and bus info */ + void *if_flow_lkup; /* per interface flowid lkup hash table */ + void *flowid_lock; /* per os lock for flowid info protection */ + void *flowring_list_lock; /* per os lock for flowring list protection */ + uint32 num_flow_rings; + cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */ 
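	/* The flowid/flow_ring members above back the per-flow TX queues described in the
	 * cumm_ctr_t comment earlier in this header: cumm_ctr holds the parent-level
	 * cumulative queue length used to assert flow control once it crosses the upper
	 * threshold. */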
+ uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ + uint8 flow_prio_map[NUMPRIO]; + uint8 flow_prio_map_type; + char enable_log[MAX_EVENT]; + bool dma_d2h_ring_upd_support; + bool dma_h2d_ring_upd_support; + +#ifdef DHD_WMF + bool wmf_ucast_igmp; +#ifdef DHD_IGMP_UCQUERY + bool wmf_ucast_igmp_query; +#endif +#ifdef DHD_UCAST_UPNP + bool wmf_ucast_upnp; +#endif +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */ +#endif /* DHD_L2_FILTER */ + uint8 *soc_ram; + uint32 soc_ram_length; + uint32 memdump_type; +#ifdef DHD_FW_COREDUMP + uint32 memdump_enabled; +#endif /* DHD_FW_COREDUMP */ +#ifdef PCIE_FULL_DONGLE +#ifdef WLTDLS + tdls_peer_tbl_t peer_tbl; +#endif /* WLTDLS */ +#endif /* PCIE_FULL_DONGLE */ +#ifdef CACHE_FW_IMAGES + char *cached_fw; + int cached_fw_length; + char *cached_nvram; + int cached_nvram_length; +#endif +#ifdef WLTDLS + uint32 tdls_mode; +#endif +#ifdef DHD_LOSSLESS_ROAMING + uint8 dequeue_prec_map; +#endif + struct mutex wl_up_lock; + bool is_fw_download_done; +#ifdef DHD_LOG_DUMP + struct dhd_log_dump_buf dld_buf; + unsigned int dld_enable; +#endif /* DHD_LOG_DUMP */ +} dhd_pub_t; + +#if defined(PCIE_FULL_DONGLE) + +/* Packet Tag for PCIE Full Dongle DHD */ +typedef struct dhd_pkttag_fd { + uint16 flowid; /* Flowring Id */ + uint16 dataoff; /* start of packet */ + uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */ + dmaaddr_t pa; /* physical address */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ +} dhd_pkttag_fd_t; + +/* Packet Tag for DHD PCIE Full Dongle */ +#define DHD_PKTTAG_FD(pkt) ((dhd_pkttag_fd_t *)(PKTTAG(pkt))) + +#define DHD_PKT_GET_FLOWID(pkt) ((DHD_PKTTAG_FD(pkt))->flowid) +#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \ + DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid) + +#define DHD_PKT_GET_DATAOFF(pkt) ((DHD_PKTTAG_FD(pkt))->dataoff) +#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \ + DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff) + +#define DHD_PKT_GET_DMA_LEN(pkt) ((DHD_PKTTAG_FD(pkt))->dma_len) +#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \ + DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len) + +#define DHD_PKT_GET_PA(pkt) ((DHD_PKTTAG_FD(pkt))->pa) +#define DHD_PKT_SET_PA(pkt, pkt_pa) \ + DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa) + +#define DHD_PKT_GET_DMAH(pkt) ((DHD_PKTTAG_FD(pkt))->dmah) +#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \ + DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah) + +#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma) +#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \ + DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma) +#endif /* PCIE_FULL_DONGLE */ + +#if defined(BCMWDF) +typedef struct { + dhd_pub_t *dhd_pub; +} dhd_workitem_context_t; + +WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context) +#endif /* (BCMWDF) */ + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + + #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define _DHD_PM_RESUME_WAIT(a, b) do {\ + int retry = 0; \ + SMP_RD_BARRIER_DEPENDS(); \ + while (dhd_mmc_suspend && retry++ != b) { \ + SMP_RD_BARRIER_DEPENDS(); \ + wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \ + } \ + } while (0) + #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200) + #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) + #define DHD_PM_RESUME_RETURN_ERROR(a) do { \ + if (dhd_mmc_suspend) { \ + printf("%s[%d]: mmc is still in suspend state!!!\n", \ + 
__FUNCTION__, __LINE__); \ + return a; \ + } \ + } while (0) + #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) + + #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9999; \ + while ((exp) && (countdown >= 10000)) { \ + wait_event_interruptible_timeout(a, FALSE, 1); \ + countdown -= 10000; \ + } \ + } while (0) + + #else + + #define DHD_PM_RESUME_WAIT_INIT(a) + #define DHD_PM_RESUME_WAIT(a) + #define DHD_PM_RESUME_WAIT_FOREVER(a) + #define DHD_PM_RESUME_RETURN_ERROR(a) + #define DHD_PM_RESUME_RETURN + + #define DHD_SPINWAIT_SLEEP_INIT(a) + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) { \ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ + } while (0) + + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#ifndef OSL_SLEEP +#define OSL_SLEEP(ms) OSL_DELAY(ms*1000) +#endif /* OSL_SLEEP */ + +#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */ + +#ifdef PNO_SUPPORT +int dhd_pno_clean(dhd_pub_t *dhd); +#endif /* PNO_SUPPORT */ +/* + * Wake locks are an Android power management concept. They are used by applications and services + * to request CPU resources. + */ +extern int dhd_os_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wake_unlock(dhd_pub_t *pub); +extern int dhd_event_wake_lock(dhd_pub_t *pub); +extern int dhd_event_wake_unlock(dhd_pub_t *pub); +extern int dhd_os_wake_lock_waive(dhd_pub_t *pub); +extern int dhd_os_wake_lock_restore(dhd_pub_t *pub); +extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); +extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub); +extern int dhd_os_wd_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub); +extern void dhd_os_wake_lock_init(struct dhd_info *dhd); +extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd); +#ifdef BCMPCIE_OOB_HOST_WAKE +extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK +extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub); +#endif /* BCMPCIE_SCAN_WAKELOCK */ + +inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) +#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub) +#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub) +#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) +#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \ + dhd_os_wake_lock_rx_timeout_enable(pub, val) +#define 
DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \ + dhd_os_wake_lock_ctrl_timeout_enable(pub, val) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \ + dhd_os_wake_lock_ctrl_timeout_cancel(pub) +#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub) +#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub) +#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd); +#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd); + +#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub) +#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub) + +#ifdef BCMPCIE_OOB_HOST_WAKE +#define OOB_WAKE_LOCK_TIMEOUT 500 +#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) +#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK +#ifdef DHD_DEBUG_SCAN_WAKELOCK +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + printf("call wake_lock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \ + do { \ + printf("call wake_unlock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_unlock(pub); \ + } while (0) +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub) +#endif /* DHD_DEBUG_SCAN_WAKELOCK */ +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) +#endif /* DHD_USE_SCAN_WAKELOCK */ +#define DHD_PACKET_TIMEOUT_MS 500 +#define DHD_EVENT_TIMEOUT_MS 1500 +#define SCAN_WAKE_LOCK_TIMEOUT 10000 + +/* Enum for IOCTL recieved status */ +typedef enum dhd_ioctl_recieved_status +{ + IOCTL_WAIT = 0, + IOCTL_RETURN_ON_SUCCESS, + IOCTL_RETURN_ON_TRAP, + IOCTL_RETURN_ON_BUS_STOP +} dhd_ioctl_recieved_status_t; + +/* interface operations (register, remove) should be atomic, use this lock to prevent race + * condition among wifi on/off and interface operation functions + */ +void dhd_net_if_lock(struct net_device *dev); +void dhd_net_if_unlock(struct net_device *dev); + + +typedef enum dhd_attach_states +{ + DHD_ATTACH_STATE_INIT = 0x0, + DHD_ATTACH_STATE_NET_ALLOC = 0x1, + DHD_ATTACH_STATE_DHD_ALLOC = 0x2, + DHD_ATTACH_STATE_ADD_IF = 0x4, + DHD_ATTACH_STATE_PROT_ATTACH = 0x8, + DHD_ATTACH_STATE_WL_ATTACH = 0x10, + DHD_ATTACH_STATE_THREADS_CREATED = 0x20, + DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40, + DHD_ATTACH_STATE_CFG80211 = 0x80, + DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100, + DHD_ATTACH_STATE_DONE = 0x200 +} dhd_attach_states_t; + +/* Value -1 means we are unsuccessful in creating the kthread. */ +#define DHD_PID_KT_INVALID -1 +/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */ +#define DHD_PID_KT_TL_INVALID -2 + +/* + * Exported from dhd OS modules (dhd_linux/dhd_ndis) + */ + +/* Indication from bus module regarding presence/insertion of dongle. + * Return dhd_pub_t pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. 
+ */ +extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen); +#if defined(WLP2P) && defined(WL_CFG80211) +/* To allow attach/detach calls corresponding to p2p0 interface */ +extern int dhd_attach_p2p(dhd_pub_t *); +extern int dhd_detach_p2p(dhd_pub_t *); +#endif /* WLP2P && WL_CFG80211 */ +extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock); + +/* Indication from bus module regarding removal/absence of dongle */ +extern void dhd_detach(dhd_pub_t *dhdp); +extern void dhd_free(dhd_pub_t *dhdp); +extern void dhd_clear(dhd_pub_t *dhdp); + +/* Indication from bus module to change flow-control state */ +extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on); + +/* Store the status of a connection attempt for later retrieval by an iovar */ +extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason); + +extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); + +/* Receive frame for delivery to OS. Callee disposes of rxp. */ +extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan); + +/* Return pointer to interface name */ +extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); + +/* Request scheduling of the bus dpc */ +extern void dhd_sched_dpc(dhd_pub_t *dhdp); + +/* Notify tx completion */ +extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); +extern void dhd_dpc_enable(dhd_pub_t *dhdp); + +#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */ +#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */ +#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */ +#define WIFI_FEATURE_P2P 0x0008 /* Wifi-Direct */ +#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */ +#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */ +#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */ +#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */ +#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */ +#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */ +#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */ +#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */ +#define WIFI_FEATURE_TDLS 0x1000 /* Tunnel directed link setup */ +#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */ +#define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */ +#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */ +#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */ + +#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3 + +extern int dhd_dev_get_feature_set(struct net_device *dev); +extern int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num); +#ifdef CUSTOM_FORCE_NODFS_FLAG +extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs); +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +/* OS independent layer functions */ +extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub); +extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub); +extern int dhd_os_proto_block(dhd_pub_t * pub); +extern int dhd_os_proto_unblock(dhd_pub_t * pub); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); +extern unsigned int dhd_os_get_ioctl_resp_timeout(void); +extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub); +extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub); +extern int dhd_wakeup_ioctl_event(dhd_pub_t *pub, 
dhd_ioctl_recieved_status_t reason); + +#define DHD_OS_IOCTL_RESP_LOCK(x) +#define DHD_OS_IOCTL_RESP_UNLOCK(x) + + +extern int dhd_os_get_image_block(char * buf, int len, void * image); +extern void * dhd_os_open_image(char * filename); +extern void dhd_os_close_image(void * image); +extern void dhd_os_wd_timer(void *bus, uint wdtick); +#ifdef DHD_PCIE_RUNTIMEPM +extern void dhd_os_runtimepm_timer(void *bus, uint tick); +#endif /* DHD_PCIE_RUNTIMEPM */ +extern void dhd_os_sdlock(dhd_pub_t * pub); +extern void dhd_os_sdunlock(dhd_pub_t * pub); +extern void dhd_os_sdlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); +#ifdef DHDTCPACK_SUPPRESS +extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub); +extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags); +#endif /* DHDTCPACK_SUPPRESS */ + +extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr); +extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff); +extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf); +#ifdef CUSTOM_COUNTRY_CODE +extern void get_customized_country_code(void *adapter, char *country_iso_code, +wl_country_t *cspec, u32 flags); +#else +extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec); +#endif /* CUSTOM_COUNTRY_CODE */ +extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); +extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret); +extern int dhd_os_send_hang_message(dhd_pub_t *dhdp); +extern void dhd_set_version_info(dhd_pub_t *pub, char *fw); +extern bool dhd_os_check_if_up(dhd_pub_t *pub); +extern int dhd_os_check_wakelock(dhd_pub_t *pub); +extern int dhd_os_check_wakelock_all(dhd_pub_t *pub); +extern int dhd_get_instance(dhd_pub_t *pub); +#ifdef CUSTOM_SET_CPUCORE +extern void dhd_set_cpucore(dhd_pub_t *dhd, int set); +#endif /* CUSTOM_SET_CPUCORE */ + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +#ifdef SUPPORT_AP_POWERSAVE +extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); +#endif + +#if defined(DHD_FW_COREDUMP) +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); +#endif /* DHD_FW_COREDUMP */ + +#ifdef SUPPORT_AP_POWERSAVE +extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); +#endif /* SUPPORT_AP_POWERSAVE */ + + +#ifdef PKT_FILTER_SUPPORT +#define DHD_UNICAST_FILTER_NUM 0 +#define DHD_BROADCAST_FILTER_NUM 1 +#define DHD_MULTICAST4_FILTER_NUM 2 +#define DHD_MULTICAST6_FILTER_NUM 3 +#define DHD_MDNS_FILTER_NUM 4 +#define DHD_ARP_FILTER_NUM 5 +extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); +extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd); +extern int net_os_enable_packet_filter(struct net_device *dev, int val); +extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); +#endif /* PKT_FILTER_SUPPORT */ + +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +extern bool dhd_support_sta_mode(dhd_pub_t *dhd); + +extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); + +typedef struct { + uint32 limit; /* Expiration time (usec) */ + uint32 increment; /* Current expiration increment (usec) */ + uint32 elapsed; /* Current elapsed 
time (usec) */ + uint32 tick; /* O/S tick time (usec) */ +} dhd_timeout_t; + +#ifdef SHOW_LOGTRACE +typedef struct { + int num_fmts; + char **fmts; + char *raw_fmts; + char *raw_sstr; + uint32 ramstart; + uint32 rodata_start; + uint32 rodata_end; + char *rom_raw_sstr; + uint32 rom_ramstart; + uint32 rom_rodata_start; + uint32 rom_rodata_end; +} dhd_event_log_t; +#endif /* SHOW_LOGTRACE */ + +extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); +extern int dhd_timeout_expired(dhd_timeout_t *tmo); + +extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); +extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net); +extern struct net_device * dhd_idx2net(void *pub, int ifidx); +extern int net_os_send_hang_message(struct net_device *dev); +extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num); +extern bool dhd_wowl_cap(void *bus); + +extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, size_t pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern int wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr); + +extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); +extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, + int ifindex); +extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx); +extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx); +extern void dhd_common_init(osl_t *osh); + +extern int dhd_do_driver_init(struct net_device *net); +extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name); +extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock); +extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); +extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); +extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx); +extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len); + +/* Send packet to dongle via data channel */ +extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt); + +/* send up locally generated event */ +extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +/* Send event to host */ +extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +#ifdef LOG_INTO_TCPDUMP +extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len); +#endif /* LOG_INTO_TCPDUMP */ +extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag); +extern uint dhd_bus_status(dhd_pub_t *dhdp); +extern int dhd_bus_start(dhd_pub_t *dhdp); +extern int dhd_bus_suspend(dhd_pub_t *dhdpub); +extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage); +extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size); +extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line); +extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval); +#if defined(BCMSDIO) || defined(BCMPCIE) +extern uint dhd_bus_chip_id(dhd_pub_t *dhdp); +extern uint 
dhd_bus_chiprev_id(dhd_pub_t *dhdp); +extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp); +#endif /* defined(BCMSDIO) || defined(BCMPCIE) */ + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +/* OS spin lock API */ +extern void *dhd_os_spin_lock_init(osl_t *osh); +extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock); +extern unsigned long dhd_os_spin_lock(void *lock); +void dhd_os_spin_unlock(void *lock, unsigned long flags); + +/* + * Manage sta objects in an interface. Interface is identified by an ifindex and + * sta(s) within an interface are managed using a MacAddress of the sta. + */ +struct dhd_sta; +extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea); +extern void dhd_del_sta(void *pub, int ifidx, void *ea); +extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val); +#if defined(BCM_GMAC3) +extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val); +#endif +extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx); +extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_d3ack_wake(dhd_pub_t * pub); +extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition); +extern int dhd_os_busbusy_wake(dhd_pub_t * pub); + +extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd); +extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set); +typedef enum cust_gpio_modes { + WLAN_RESET_ON, + WLAN_RESET_OFF, + WLAN_POWER_ON, + WLAN_POWER_OFF +} cust_gpio_modes_t; + +extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); +/* + * Insmod parameters for debug/test + */ + +/* Watchdog timer interval */ +extern uint dhd_watchdog_ms; + +#ifdef DHD_PCIE_RUNTIMEPM +extern uint dhd_runtimepm_ms; +#endif /* DHD_PCIE_RUNTIMEPM */ + +#if defined(DHD_DEBUG) +/* Console output poll interval */ +extern uint dhd_console_ms; +extern uint wl_msg_level; +#endif /* defined(DHD_DEBUG) */ + +extern uint dhd_slpauto; + +/* Use interrupts */ +extern uint dhd_intr; + +/* Use polling */ +extern uint dhd_poll; + +/* ARP offload agent mode */ +extern uint dhd_arp_mode; + +/* ARP offload enable */ +extern uint dhd_arp_enable; + +/* Pkt filter enable control */ +extern uint dhd_pkt_filter_enable; + +/* Pkt filter init setup */ +extern uint dhd_pkt_filter_init; + +/* Pkt filter mode control */ +extern uint dhd_master_mode; + +/* Roaming mode control */ +extern uint dhd_roam_disable; + +/* Radio up/down control */ +extern uint dhd_radio_up; + +/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */ +extern int dhd_idletime; +#ifdef DHD_USE_IDLECOUNT +#define DHD_IDLETIME_TICKS 5 +#else +#define DHD_IDLETIME_TICKS 1 +#endif /* DHD_USE_IDLECOUNT */ + +/* SDIO Drive Strength */ +extern uint dhd_sdiod_drive_strength; + +/* triggers bcm_bprintf to print to kernel log */ +extern bool bcm_bprintf_bypass; + +/* Override to force tx queueing all the time */ +extern uint dhd_force_tx_queueing; +/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ +#define DEFAULT_KEEP_ALIVE_VALUE 55000 /* msec */ +#ifndef CUSTOM_KEEP_ALIVE_SETTING +#define CUSTOM_KEEP_ALIVE_SETTING DEFAULT_KEEP_ALIVE_VALUE +#endif /* DEFAULT_KEEP_ALIVE_VALUE */ + +#define NULL_PKT_STR "null_pkt" + +/* hooks for custom glom setting option via Makefile */ +#define DEFAULT_GLOM_VALUE -1 
+#ifndef CUSTOM_GLOM_SETTING +#define CUSTOM_GLOM_SETTING DEFAULT_GLOM_VALUE +#endif +#define WL_AUTO_ROAM_TRIGGER -75 +/* hooks for custom Roaming Trigger setting via Makefile */ +#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */ +#define DEFAULT_ROAM_TRIGGER_SETTING -1 +#ifndef CUSTOM_ROAM_TRIGGER_SETTING +#define CUSTOM_ROAM_TRIGGER_SETTING DEFAULT_ROAM_TRIGGER_VALUE +#endif + +/* hooks for custom Roaming Delta setting via Makefile */ +#define DEFAULT_ROAM_DELTA_VALUE 10 /* dBm default roam delta all band */ +#define DEFAULT_ROAM_DELTA_SETTING -1 +#ifndef CUSTOM_ROAM_DELTA_SETTING +#define CUSTOM_ROAM_DELTA_SETTING DEFAULT_ROAM_DELTA_VALUE +#endif + +/* hooks for custom PNO Event wake lock to guarantee enough time + for the Platform to detect the Event before the system is suspended +*/ +#define DEFAULT_PNO_EVENT_LOCK_xTIME 2 /* multiple of DHD_PACKET_TIMEOUT_MS */ +#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME +#define CUSTOM_PNO_EVENT_LOCK_xTIME DEFAULT_PNO_EVENT_LOCK_xTIME +#endif +/* hooks for custom dhd_dpc_prio setting option via Makefile */ +#define DEFAULT_DHP_DPC_PRIO 1 +#ifndef CUSTOM_DPC_PRIO_SETTING +#define CUSTOM_DPC_PRIO_SETTING DEFAULT_DHP_DPC_PRIO +#endif + +#ifndef CUSTOM_LISTEN_INTERVAL +#define CUSTOM_LISTEN_INTERVAL LISTEN_INTERVAL +#endif /* CUSTOM_LISTEN_INTERVAL */ + +#define DEFAULT_SUSPEND_BCN_LI_DTIM 3 +#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM +#define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM +#endif + +#ifndef CUSTOM_RXF_PRIO_SETTING +#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1) +#endif + +#define DEFAULT_WIFI_TURNOFF_DELAY 0 +#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY + +#define DEFAULT_WIFI_TURNON_DELAY 200 +#ifndef WIFI_TURNON_DELAY +#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY +#endif /* WIFI_TURNON_DELAY */ + +#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */ +#ifndef CUSTOM_DHD_WATCHDOG_MS +#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS +#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */ + +#define DEFAULT_ASSOC_RETRY_MAX 3 +#ifndef CUSTOM_ASSOC_RETRY_MAX +#define CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX +#endif /* DEFAULT_ASSOC_RETRY_MAX */ + + +#ifdef WLTDLS +#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING +#define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* 60 sec to tear down TDLS if not active */ +#endif +#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH +#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */ +#endif +#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW +#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */ +#endif +#endif /* WLTDLS */ + +#define DEFAULT_BCN_TIMEOUT 8 +#ifndef CUSTOM_BCN_TIMEOUT +#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT +#endif + +#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon for DTIM skip */ +#ifndef MAX_DTIM_ALLOWED_INTERVAL +#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */ +#endif +#define NO_DTIM_SKIP 1 +#ifdef SDTEST +/* Echo packet generator (SDIO), pkts/s */ +extern uint dhd_pktgen; + +/* Echo packet len (0 => sawtooth, max 1800) */ +extern uint dhd_pktgen_len; +#define MAX_PKTGEN_LEN 1800 +#endif + + +/* optionally set by a module_param_string() */ +#define MOD_PARAM_PATHLEN 2048 +#define MOD_PARAM_INFOLEN 512 + +#ifdef SOFTAP +extern char fw_path2[MOD_PARAM_PATHLEN]; +#endif + +/* Flag to indicate if we should download firmware on driver load */ +extern uint dhd_download_fw_on_driverload; +extern int 
allow_delay_fwdl; + + +extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); +extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); + +#define IFLOCK_INIT(lock) *lock = 0 +#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \ + NdisStallExecution(1); +#define IFUNLOCK(lock) InterlockedExchange((lock), 0) +#define IFLOCK_FREE(lock) +#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL)) +#ifdef ARP_OFFLOAD_SUPPORT +#define MAX_IPV4_ENTRIES 8 +void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode); +void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable); + +/* dhd_common ARP offload wrappers */ +void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx); +void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx); +int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx); +void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx); +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef WLTDLS +int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac); +int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode); +#ifdef PCIE_FULL_DONGLE +void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr); +#endif /* PCIE_FULL_DONGLE */ +#endif /* WLTDLS */ +/* Neighbor Discovery Offload Support */ +extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable); +int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx); +int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx); +/* ioctl processing for nl80211 */ +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf); + +void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path); +void dhd_set_bus_state(void *bus, uint32 state); + +/* Remove proper pkts (either one no-frag pkt or whole fragmented pkts) */ +typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ); +extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn); + +#ifdef PROP_TXSTATUS +int dhd_os_wlfc_block(dhd_pub_t *pub); +int dhd_os_wlfc_unblock(dhd_pub_t *pub); +extern const uint8 prio2fifo[]; +#endif /* PROP_TXSTATUS */ + +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail); +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size); + +int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) +#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE) +#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size) +#else +#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size) +#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size) +#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */ + +#ifdef USE_WFA_CERT_CONF +enum { + SET_PARAM_BUS_TXGLOM_MODE, + SET_PARAM_ROAMOFF, +#ifdef USE_WL_FRAMEBURST + SET_PARAM_FRAMEBURST, +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + SET_PARAM_TXBF, +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + SET_PARAM_PROPTX, + SET_PARAM_PROPTXMODE, +#endif /* PROP_TXSTATUS */ + PARAM_LAST_VALUE +}; +extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val); +#endif /* USE_WFA_CERT_CONF */ + +#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0) +#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0) + +extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub); +extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); + 
+/** Miscellaneous DHD Spin Locks */ + +/* Disable router 3GMAC bypass path perimeter lock */ +#define DHD_PERIM_LOCK(dhdp) do {} while (0) +#define DHD_PERIM_UNLOCK(dhdp) do {} while (0) +#define DHD_PERIM_LOCK_ALL(processor_id) do {} while (0) +#define DHD_PERIM_UNLOCK_ALL(processor_id) do {} while (0) + +/* Enable DHD general spin lock/unlock */ +#define DHD_GENERAL_LOCK(dhdp, flags) \ + (flags) = dhd_os_general_spin_lock(dhdp) +#define DHD_GENERAL_UNLOCK(dhdp, flags) \ + dhd_os_general_spin_unlock((dhdp), (flags)) + +/* Enable DHD flowring spin lock/unlock */ +#define DHD_FLOWRING_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD common flowring info spin lock/unlock */ +#define DHD_FLOWID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWID_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD common flowring list spin lock/unlock */ +#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp); + + +#ifdef DHD_L2_FILTER +extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val); +#endif /* DHD_L2_FILTER */ + +typedef struct wl_io_pport { + dhd_pub_t *dhd_pub; + uint ifidx; +} wl_io_pport_t; + +typedef struct wl_evt_pport { + dhd_pub_t *dhd_pub; + int *ifidx; + void *pktdata; + void **data_ptr; + void *raw_event; +} wl_evt_pport_t; + +extern void *dhd_pub_shim(dhd_pub_t *dhd_pub); +#ifdef DHD_FW_COREDUMP +void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length); +#endif /* DHD_FW_COREDUMP */ + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable); +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); +void custom_rps_map_clear(struct netdev_rx_queue *queue); +#define PRIMARY_INF 0 +#define VIRTUAL_INF 1 +#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \ + defined(CONFIG_SOC_EXYNOS8890) +#define RPS_CPUS_MASK "10" +#define RPS_CPUS_MASK_P2P "10" +#define RPS_CPUS_MASK_IBSS "10" +#define RPS_CPUS_WLAN_CORE_ID 4 +#else +#define RPS_CPUS_MASK "6" +#define RPS_CPUS_MASK_P2P "6" +#define RPS_CPUS_MASK_IBSS "6" +#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ +#endif + +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length); + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length); + +#define dhd_is_device_removed(x) FALSE +#define dhd_os_ind_firmware_stall(x) + +#ifdef DHD_FW_COREDUMP +extern void dhd_get_memdump_info(dhd_pub_t *dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG +extern void dhd_get_assert_info(dhd_pub_t *dhd); +#endif /* BCMASSERT_LOG */ + + +#if defined(DHD_LB_STATS) +#include +extern void dhd_lb_stats_init(dhd_pub_t *dhd); +extern void 
dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp); +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); +#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +/* Reset is called from common layer so it takes dhd_pub_t as argument */ +#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_CLR(x) (x) = 0U +#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 +#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \ + { \ + int cpu = get_cpu(); put_cpu(); \ + DHD_LB_STATS_INCR(x[cpu]); \ + } +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x) +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp) +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp) +#else /* !DHD_LB_STATS */ +#define DHD_LB_STATS_NOOP do { /* noop */ } while (0) +#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#endif /* !DHD_LB_STATS */ + +#ifdef DHD_PCIE_RUNTIMEPM +extern bool dhd_runtimepm_state(dhd_pub_t *dhd); +extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr); +extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr); +extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp); +extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp); +/* Disable the Runtime PM and wake up if the bus is already in suspend */ +#define DHD_DISABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_disable(dhdp); \ +} while (0); + +/* Enable the Runtime PM */ +#define DHD_ENABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_enable(dhdp); \ +} while (0); +#else +#define DHD_DISABLE_RUNTIME_PM(dhdp) +#define DHD_ENABLE_RUNTIME_PM(dhdp) +#endif /* DHD_PCIE_RUNTIMEPM */ + +extern void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs); + +/* + * Enable this macro if you want to track the calls to wake lock + * These records can be printed using the following command + * cat /sys/bcm-dhd/wklock_trace + * DHD_TRACE_WAKE_LOCK is supported on Linux 2.6.0 and later + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#undef DHD_TRACE_WAKE_LOCK +#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */ + +#if defined(DHD_TRACE_WAKE_LOCK) +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif + +extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); + +#ifdef ENABLE_TEMP_THROTTLING +#define 
TEMP_THROTTLE_CONTROL_BIT 0xf //Enable all feature. +#endif /* ENABLE_TEMP_THROTTLING */ + +#ifdef DHD_PKTID_AUDIT_ENABLED +void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +#endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c new file mode 100644 index 000000000000..dc24edbb5c30 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bta.c @@ -0,0 +1,340 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_bta.c 514727 2014-11-12 03:02:48Z $ + */ +#error "WLBTAMP is not defined" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +#ifdef SEND_HCI_CMD_VIA_IOCTL +#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE + +/* Send HCI cmd via wl iovar HCI_cmd to the dongle. 
*/ +int +dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) +{ + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; + uint8 buf[BTA_HCI_CMD_MAX_LEN + 16]; + uint len = sizeof(buf); + wl_ioctl_t ioc; + + if (cmd_len < HCI_CMD_PREAMBLE_SIZE) + return BCME_BADLEN; + + if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len) + return BCME_BADLEN; + + len = bcm_mkiovar("HCI_cmd", + (char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len); + + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = TRUE; + + return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len); +} +#else /* !SEND_HCI_CMD_VIA_IOCTL */ + +static void +dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh) +{ + int prec; + struct pktq *q; + uint count = 0; + + q = dhd_bus_txq(pub->bus); + if (q == NULL) + return; + + DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh)); + + dhd_os_sdlock_txq(pub); + + /* Walk through the txq and toss all HCI ACL data packets */ + PKTQ_PREC_ITER(q, prec) { + void *head_pkt = NULL; + + while (pktq_ppeek(q, prec) != head_pkt) { + void *pkt = pktq_pdeq(q, prec); + int ifidx; + + dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL); + + if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) { + struct ether_header *eh = + (struct ether_header *)PKTDATA(pub->osh, pkt); + + if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) { + struct dot11_llc_snap_header *lsh = + (struct dot11_llc_snap_header *)&eh[1]; + + if (bcmp(lsh, BT_SIG_SNAP_MPROT, + DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && + ntoh16(lsh->type) == BTA_PROT_L2CAP) { + amp_hci_ACL_data_t *ACL_data = + (amp_hci_ACL_data_t *)&lsh[1]; + uint16 handle = ltoh16(ACL_data->handle); + + if (HCI_ACL_DATA_HANDLE(handle) == llh) { + PKTFREE(pub->osh, pkt, TRUE); + count ++; + continue; + } + } + } + } + + dhd_prot_hdrpush(pub, ifidx, pkt); + + if (head_pkt == NULL) + head_pkt = pkt; + pktq_penq(q, prec, pkt); + } + } + + dhd_os_sdunlock_txq(pub); + + DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh)); +} + +/* Handle HCI cmd locally. + * Return 0: continue to send the cmd across SDIO + * < 0: stop, fail + * > 0: stop, succuess + */ +static int +_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd) +{ + int status = 0; + + switch (ltoh16_ua((uint8 *)&cmd->opcode)) { + case HCI_Enhanced_Flush: { + eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms; + dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh)); + break; + } + default: + break; + } + + return status; +} + +/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. 
*/ +int +dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) +{ + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + osl_t *osh = pub->osh; + uint len; + void *p; + int status; + + if (cmd_len < HCI_CMD_PREAMBLE_SIZE) { + DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len)); + return BCME_BADLEN; + } + + if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) { + DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n", + len, cmd_len)); + /* return BCME_BADLEN; */ + } + + p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); + if (p == NULL) { + DHD_ERROR(("dhd_bta_docmd: out of memory\n")); + return BCME_NOMEM; + } + + + /* intercept and handle the HCI cmd locally */ + if ((status = _dhd_bta_docmd(pub, cmd)) > 0) + return 0; + else if (status < 0) + return status; + + /* copy in HCI cmd */ + PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); + bcopy(cmd, PKTDATA(osh, p), len); + + /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ + PKTPUSH(osh, p, RFC1042_HDR_LEN); + eh = (struct ether_header *)PKTDATA(osh, p); + bzero(eh->ether_dhost, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(eh->ether_dhost); + bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); + eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); + lsh->type = 0; + + return dhd_sendpkt(pub, 0, p); +} +#endif /* !SEND_HCI_CMD_VIA_IOCTL */ + +/* Send HCI ACL data to dongle via data channel */ +int +dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len) +{ + amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf; + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + osl_t *osh = pub->osh; + uint len; + void *p; + + if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) { + DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len)); + return BCME_BADLEN; + } + + if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) { + DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n", + len, data_len)); + /* return BCME_BADLEN; */ + } + + p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); + if (p == NULL) { + DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n")); + return BCME_NOMEM; + } + + + /* copy in HCI ACL data header and HCI ACL data */ + PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); + bcopy(data, PKTDATA(osh, p), len); + + /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ + PKTPUSH(osh, p, RFC1042_HDR_LEN); + eh = (struct ether_header *)PKTDATA(osh, p); + bzero(eh->ether_dhost, ETHER_ADDR_LEN); + bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); + eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); + lsh->type = HTON16(BTA_PROT_L2CAP); + + return dhd_sendpkt(pub, 0, p); +} + +/* txcomplete callback */ +void +dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success) +{ + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp); + amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN); + uint16 handle = ltoh16(ACL_data->handle); + uint16 llh = HCI_ACL_DATA_HANDLE(handle); + + wl_event_msg_t event; + uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)]; + amp_hci_event_t *evt; + num_completed_data_blocks_evt_parms_t *parms; + + uint16 len = 
HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t); + + /* update the event struct */ + memset(&event, 0, sizeof(event)); + event.version = hton16(BCM_EVENT_MSG_VERSION); + event.event_type = hton32(WLC_E_BTA_HCI_EVENT); + event.status = 0; + event.reason = 0; + event.auth_type = 0; + event.datalen = hton32(len); + event.flags = 0; + + /* generate Number of Completed Blocks event */ + evt = (amp_hci_event_t *)data; + evt->ecode = HCI_Number_of_Completed_Data_Blocks; + evt->plen = sizeof(num_completed_data_blocks_evt_parms_t); + + parms = (num_completed_data_blocks_evt_parms_t *)evt->parms; + htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks); + parms->num_handles = 1; + htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle); + parms->completed[0].pkts = 1; + parms->completed[0].blocks = 1; + + dhd_sendup_event_common(dhdp, &event, data); +} + +/* event callback */ +void +dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len) +{ + amp_hci_event_t *evt = (amp_hci_event_t *)data_buf; + + ASSERT(dhdp); + ASSERT(evt); + + switch (evt->ecode) { + case HCI_Command_Complete: { + cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms; + switch (ltoh16_ua((uint8 *)&parms->opcode)) { + case HCI_Read_Data_Block_Size: { + read_data_block_size_evt_parms_t *parms2 = + (read_data_block_size_evt_parms_t *)parms->parms; + dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num); + break; + } + } + break; + } + + case HCI_Flush_Occurred: { + flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms; + dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle)); + break; + } + default: + break; + } +} diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h new file mode 100644 index 000000000000..df9d1f91b9ce --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bta.h @@ -0,0 +1,42 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_bta.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef __dhd_bta_h__ +#define __dhd_bta_h__ + +struct dhd_pub; + +extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len); + +extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len); + +extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len); +extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success); + + +#endif /* __dhd_bta_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h new file mode 100644 index 000000000000..05ac0345cea7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bus.h @@ -0,0 +1,213 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_bus.h 602721 2015-11-27 10:32:48Z $ + */ + +#ifndef _dhd_bus_h_ +#define _dhd_bus_h_ + +/* + * Exported from dhd bus module (dhd_usb, dhd_sdio) + */ + +/* Indicate (dis)interest in finding dongles. */ +extern int dhd_bus_register(void); +extern void dhd_bus_unregister(void); + +/* Download firmware image and nvram image */ +extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *fw_path, char *nv_path); + +/* Stop bus module: clear pending frames, disable data flow */ +extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); + +/* Initialize bus module: prepare for communication w/dongle */ +extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex); + +/* Get the Bus Idle Time */ +extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime); + +/* Set the Bus Idle Time */ +extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); + +/* Send a data frame to the dongle. Callee disposes of txp. */ +#ifdef BCMPCIE +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); +#else +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); +#endif + + +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. 
+ */ +extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen); +extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen); + +/* Watchdog timer function */ +extern bool dhd_bus_watchdog(dhd_pub_t *dhd); + +extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable); +extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub); +extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub); +extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub); + +#if defined(DHD_DEBUG) +/* Device console input function */ +extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); +#endif /* defined(DHD_DEBUG) */ + +/* Deferred processing for the bus, return TRUE requests reschedule */ +extern bool dhd_bus_dpc(struct dhd_bus *bus); +extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg); + + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add bus dump output to a buffer */ +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Clear any bus counters */ +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); + +/* return the dongle chipid */ +extern uint dhd_bus_chip(struct dhd_bus *bus); + +/* return the dongle chiprev */ +extern uint dhd_bus_chiprev(struct dhd_bus *bus); + +/* Set user-specified nvram parameters. */ +extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params); + +extern void *dhd_bus_pub(struct dhd_bus *bus); +extern void *dhd_bus_txq(struct dhd_bus *bus); +extern void *dhd_bus_sih(struct dhd_bus *bus); +extern uint dhd_bus_hdrlen(struct dhd_bus *bus); +#ifdef BCMSDIO +extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val); +/* return sdio io status */ +extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus); +#else +#define dhd_bus_set_dotxinrx(a, b) do {} while (0) +#endif + +#define DHD_SET_BUS_STATE_DOWN(_bus) do { \ + (_bus)->dhd->busstate = DHD_BUS_DOWN; \ +} while (0) + +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +extern int dhd_bus_reg_sdio_notify(void* semaphore); +extern void dhd_bus_unreg_sdio_notify(void); +extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable); +extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, + uint32 *slot_num); + +#ifdef BCMPCIE +enum { + /* Scratch buffer confiuguration update */ + D2H_DMA_SCRATCH_BUF, + D2H_DMA_SCRATCH_BUF_LEN, + + /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */ + H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */ + H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */ + D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */ + D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */ + + /* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */ + H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */ + H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */ + D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */ + D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */ + + /* H2D and D2H Mailbox data update */ + H2D_MB_DATA, + D2H_MB_DATA, + + /* (Common) MsgBuf Ring configuration 
update */ + RING_BUF_ADDR, /* update ring base address to dongle */ + RING_ITEM_LEN, /* update ring item size to dongle */ + RING_MAX_ITEMS, /* update ring max items to dongle */ + + /* Update of WR or RD index, for a MsgBuf Ring */ + RING_RD_UPD, /* update ring read index from/to dongle */ + RING_WR_UPD, /* update ring write index from/to dongle */ + + TOTAL_LFRAG_PACKET_CNT, + MAX_HOST_RXBUFS +}; + +typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32); +extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type, + uint16 ringid); +extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value); +extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid); +extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus); +extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count); +extern void dhd_bus_start_queue(struct dhd_bus *bus); +extern void dhd_bus_stop_queue(struct dhd_bus *bus); + +extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus); +extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus, + void * data, uint16 flowid); +extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus, + void * data, uint8 flowid); +extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status); +extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus); +extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs); +extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); + + +extern int dhdpcie_bus_clock_start(struct dhd_bus *bus); +extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus); +extern int dhdpcie_bus_enable_device(struct dhd_bus *bus); +extern int dhdpcie_bus_disable_device(struct dhd_bus *bus); +extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus); +extern void dhdpcie_bus_free_resource(struct dhd_bus *bus); +extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus); +extern int dhd_bus_release_dongle(struct dhd_bus *bus); +extern int dhd_bus_request_irq(struct dhd_bus *bus); + + +#ifdef DHD_FW_COREDUMP +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#endif /* DHD_FW_COREDUMP */ + +#endif /* BCMPCIE */ +#endif /* _dhd_bus_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_buzzz.h b/drivers/net/wireless/bcmdhd/dhd_buzzz.h new file mode 100644 index 000000000000..a5422d58d7ef --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_buzzz.h @@ -0,0 +1,37 @@ +#ifndef _DHD_BUZZZ_H_INCLUDED_ +#define _DHD_BUZZZ_H_INCLUDED_ + +/* + * Broadcom logging system - Empty implementaiton + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special 
exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_buzzz.h 591283 2015-10-07 11:52:00Z $ + */ + +#define dhd_buzzz_attach() do { /* noop */ } while (0) +#define dhd_buzzz_detach() do { /* noop */ } while (0) +#define dhd_buzzz_panic(x) do { /* noop */ } while (0) +#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0) + +#endif /* _DHD_BUZZZ_H_INCLUDED_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c new file mode 100644 index 000000000000..66a311069d09 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c @@ -0,0 +1,817 @@ +/* + * DHD Protocol Module for CDC and BDC. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cdc.c 596022 2015-10-29 11:02:47Z $ + * + * BDC is like CDC, except it includes a header for data packets to convey + * packet priority over the bus, and flags (e.g. to indicate checksum status + * for dongle offload.) + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + + +#ifdef PROP_TXSTATUS +#include +#include +#endif + + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ +#define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE + * defined in dhd_sdio.c (amount of header tha might be added) + * plus any space that might be needed for alignment padding. 
+ */ +#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for + * round off at the end of buffer + */ + +typedef struct dhd_prot { + uint16 reqid; + uint8 pending; + uint32 lastcmd; + uint8 bus_header[BUS_HEADER_LEN]; + cdc_ioctl_t msg; + unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN]; +} dhd_prot_t; + + +static int +dhdcdc_msg(dhd_pub_t *dhd) +{ + int err = 0; + dhd_prot_t *prot = dhd->prot; + int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_OS_WAKE_LOCK(dhd); + + /* NOTE : cdc->msg.len holds the desired length of the buffer to be + * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area + * is actually sent to the dongle + */ + if (len > CDC_MAX_MSG_SIZE) + len = CDC_MAX_MSG_SIZE; + + /* Send request */ + err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len); + + DHD_OS_WAKE_UNLOCK(dhd); + return err; +} + +static int +dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) +{ + int ret; + int cdc_len = len + sizeof(cdc_ioctl_t); + dhd_prot_t *prot = dhd->prot; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + do { + ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len); + if (ret < 0) + break; + } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id); + + return ret; +} + +static int +dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0, retries = 0; + uint32 id, flags = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR && buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT); + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + if (!dhd->hang_was_sent) + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + goto done; + } + +retry: + /* wait for interrupt and get first fragment */ + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if ((id < prot->reqid) && (++retries < RETRIES)) + goto retry; + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Copy info buffer */ + if (buf) + { + if (ret < (int)len) + len = ret; + memcpy(buf, (void*) prot->buf, len); + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + + +static int +dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0; + uint32 flags, id; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", 
__FUNCTION__, cmd, len)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + if (cmd == WLC_SET_PM) { + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET; + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret)); + goto done; + } + + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + + +int +dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + dhd_prot_t *prot = dhd->prot; + int ret = -1; + uint8 action; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + if (prot->pending == TRUE) { + DHD_ERROR(("CDC packet is pending!!!! 
cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", + ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, + (unsigned long)prot->lastcmd)); + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + DHD_TRACE(("iovar cmd=%s\n", (char*)buf)); + } + goto done; + } + + prot->pending = TRUE; + prot->lastcmd = ioc->cmd; + action = ioc->set; + if (action & WL_IOCTL_ACTION_SET) + ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + else { + ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret - sizeof(cdc_ioctl_t); + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) + ret = 0; + else { + cdc_ioctl_t *msg = &prot->msg; + ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */ + } + + /* Intercept the wme_dp ioctl here */ + if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + prot->pending = FALSE; + +done: + + return ret; +} + +int +dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +void +dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + if (!dhdp || !dhdp->prot) + return; + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); +#ifdef PROP_TXSTATUS + dhd_wlfc_dump(dhdp, strbuf); +#endif +} + +/* The FreeBSD PKTPUSH could change the packet buf pinter + so we need to make it changable +*/ +#define PKTBUF pktbuf +void +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ +#ifdef BDC + struct bdc_header *h; +#endif /* BDC */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Push BDC header used to convey priority for buses that don't */ + + PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN); + + h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF); + + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(PKTBUF)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = 0; +#endif /* BDC */ + BDC_SET_IF_IDX(h, ifidx); +} +#undef PKTBUF /* Only defined in the above routine */ + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + uint hdrlen = 0; +#ifdef BDC + /* Length of BDC(+WLFC) headers pushed */ + hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4); +#endif + return hdrlen; +} + +int +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info, + uint *reorder_info_len) +{ +#ifdef BDC + struct bdc_header *h; +#endif + uint8 data_offset = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + if (reorder_info_len) + *reorder_info_len = 0; + /* Pop BDC header used to convey priority for buses that don't */ + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + if (!ifidx) { + /* for tx packet, skip the analysis */ + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); + goto exit; + } + + *ifidx = BDC_GET_IF_IDX(h); + + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { + DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + if 
(((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1) + h->dataOffset = 0; + else + return BCME_ERROR; + } + + if (h->flags & BDC_FLAG_SUM_GOOD) { + DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + PKTSETSUMGOOD(pktbuf, TRUE); + } + + PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK)); + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); +#endif /* BDC */ + + +#ifdef PROP_TXSTATUS + if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) { + /* + - parse txstatus only for packets that came from the firmware + */ + dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2), + reorder_buf_info, reorder_info_len); + + } +#endif /* PROP_TXSTATUS */ + +exit: + PKTPULL(dhd->osh, pktbuf, (data_offset << 2)); + return 0; +} + + +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + dhd_prot_t *cdc; + + if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(cdc, 0, sizeof(dhd_prot_t)); + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) { + DHD_ERROR(("dhd_prot_t is not correctly defined\n")); + goto fail; + } + + dhd->prot = cdc; +#ifdef BDC + dhd->hdrlen += BDC_HEADER_LEN; +#endif + dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN; + return 0; + +fail: + if (cdc != NULL) + DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t)); + return BCME_NOMEM; +} + +/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ +#ifdef PROP_TXSTATUS + dhd_wlfc_deinit(dhd); +#endif + DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t)); + dhd->prot = NULL; +} + +void +dhd_prot_dstats(dhd_pub_t *dhd) +{ + /* copy bus stats */ + + dhd->dstats.tx_packets = dhd->tx_packets; + dhd->dstats.tx_errors = dhd->tx_errors; + dhd->dstats.rx_packets = dhd->rx_packets; + dhd->dstats.rx_errors = dhd->rx_errors; + dhd->dstats.rx_dropped = dhd->rx_dropped; + dhd->dstats.multicast = dhd->rx_multicast; + return; +} + +int +dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) + goto done; + + + dhd_process_cid_mac(dhd, TRUE); + + ret = dhd_preinit_ioctls(dhd); + + if (!ret) + dhd_process_cid_mac(dhd, FALSE); + + /* Always assumes wl for now */ + dhd->iswl = TRUE; + +done: + return ret; +} + +int dhd_prot_init(dhd_pub_t *dhd) +{ + return BCME_OK; +} + +void +dhd_prot_stop(dhd_pub_t *dhd) +{ +/* Nothing to do for CDC */ +} + + +static void +dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt, + uint32 *pkt_count, void **pplast, uint8 start, uint8 end) +{ + void *plast = NULL, *p; + uint32 pkt_cnt = 0; + + if (ptr->pend_pkts == 0) { + DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__)); + *pplast = NULL; + *pkt_count = 0; + *pkt = NULL; + return; + } + do { + p = (void *)(ptr->p[start]); + ptr->p[start] = NULL; + + if (p != NULL) { + if (plast == NULL) + *pkt = p; + else + PKTSETNEXT(osh, plast, p); + + plast = p; + pkt_cnt++; + } + start++; + if (start > ptr->max_idx) + start = 0; + } while (start != end); + *pplast = plast; + *pkt_count = pkt_cnt; + 
ptr->pend_pkts -= (uint8)pkt_cnt; +} + +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count) +{ + uint8 flow_id, max_idx, cur_idx, exp_idx; + struct reorder_info *ptr; + uint8 flags; + void *cur_pkt, *plast = NULL; + uint32 cnt = 0; + + if (pkt == NULL) { + if (pkt_count != NULL) + *pkt_count = 0; + return 0; + } + + flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET]; + flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET]; + + DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags, + reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET], + reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET], + reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET])); + + /* validate flags and flow id */ + if (flags == 0xFF) { + DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__)); + *pkt_count = 1; + return 0; + } + + cur_pkt = *pkt; + *pkt = NULL; + + ptr = dhd->reorder_bufs[flow_id]; + if (flags & WLHOST_REORDERDATA_DEL_FLOW) { + uint32 buf_size = sizeof(struct reorder_info); + + DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n", + __FUNCTION__, flow_id)); + + if (ptr == NULL) { + DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n", + __FUNCTION__, flow_id)); + *pkt_count = 1; + *pkt = cur_pkt; + return 0; + } + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + /* set it to the last packet */ + if (plast) { + PKTSETNEXT(dhd->osh, plast, cur_pkt); + cnt++; + } + else { + if (cnt != 0) { + DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n", + __FUNCTION__, cnt)); + } + *pkt = cur_pkt; + cnt = 1; + } + buf_size += ((ptr->max_idx + 1) * sizeof(void *)); + MFREE(dhd->osh, ptr, buf_size); + dhd->reorder_bufs[flow_id] = NULL; + *pkt_count = cnt; + return 0; + } + /* all the other cases depend on the existance of the reorder struct for that flow id */ + if (ptr == NULL) { + uint32 buf_size_alloc = sizeof(reorder_info_t); + max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + + buf_size_alloc += ((max_idx + 1) * sizeof(void*)); + /* allocate space to hold the buffers, index etc */ + + DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n", + __FUNCTION__, buf_size_alloc, flow_id, max_idx)); + ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc); + if (ptr == NULL) { + DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__)); + *pkt_count = 1; + return 0; + } + bzero(ptr, buf_size_alloc); + dhd->reorder_bufs[flow_id] = ptr; + ptr->p = (void *)(ptr+1); + ptr->max_idx = max_idx; + } + if (flags & WLHOST_REORDERDATA_NEW_HOLE) { + DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__)); + if (ptr->pend_pkts) { + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + ptr->pend_pkts = 0; + } + ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + ptr->p[ptr->cur_idx] = cur_pkt; + ptr->pend_pkts++; + *pkt_count = cnt; + } + else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) { + cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + + if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if 
(ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + ptr->cur_idx = cur_idx; + DHD_REORDER(("%s: fill up a hole..pending packets is %d\n", + __FUNCTION__, ptr->pend_pkts)); + *pkt_count = 0; + *pkt = NULL; + } + else if (ptr->exp_idx == cur_idx) { + /* got the right one ..flush from cur to exp and update exp */ + DHD_REORDER(("%s: got the right one now, cur_idx is %d\n", + __FUNCTION__, cur_idx)); + if (ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: Error buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + + ptr->cur_idx = cur_idx; + ptr->exp_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + cur_idx, exp_idx); + *pkt_count = cnt; + DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n", + __FUNCTION__, cnt, ptr->pend_pkts)); + } + else { + uint8 end_idx; + bool flush_current = FALSE; + /* both cur and exp are moved now .. */ + DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n", + __FUNCTION__, flow_id, ptr->cur_idx, cur_idx, + ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, end_idx); + + if (cur_idx == ptr->max_idx) { + if (exp_idx == 0) + flush_current = TRUE; + } else { + if (exp_idx == cur_idx + 1) + flush_current = TRUE; + } + if (flush_current) { + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + } + else { + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + } + ptr->exp_idx = exp_idx; + ptr->cur_idx = cur_idx; + *pkt_count = cnt; + } + } + else { + uint8 end_idx; + /* no real packet but update to exp_seq...that means explicit window move */ + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n", + __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx); + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + *pkt_count = cnt; + /* set the new expected idx */ + ptr->exp_idx = exp_idx; + } + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c new file mode 100644 index 000000000000..390747fe101d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c @@ -0,0 +1,268 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also 
meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cfg80211.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include + +#include +#include +#include +#include + +#ifdef PKT_FILTER_SUPPORT +#include +#include +#endif + +extern struct bcm_cfg80211 *g_bcm_cfg; + +#ifdef PKT_FILTER_SUPPORT +extern uint dhd_pkt_filter_enable; +extern uint dhd_master_mode; +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif + +static int dhd_dongle_up = FALSE; + +#include +#include +#include +#include +#include +#include + +static s32 wl_dongle_up(struct net_device *ndev); +static s32 wl_dongle_down(struct net_device *ndev); + +/** + * Function implementations + */ + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg) +{ + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (!dhd_dongle_up) { + WL_ERR(("Dongle is already down\n")); + return err; + } + + ndev = bcmcfg_to_prmry_ndev(cfg); + wl_dongle_down(ndev); + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode |= val; + WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode)); +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arp_version == 1) { + /* IF P2P is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, false); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + return 0; +} + +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE); + WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode)); + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arp_version == 1) { + /* IF P2P is disabled, enable arpoe back for STA mode. 
*/ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, true); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + return 0; +} + +struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, + uint8 *mac, uint8 bssidx, char *dngl_name) +{ + return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name); +} + +int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) +{ + return dhd_register_if(cfg->pub, ifidx, FALSE); +} + +int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) +{ + return dhd_remove_if(cfg->pub, ifidx, FALSE); +} + +struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev) +{ + if (ndev) { + if (ndev->ieee80211_ptr) { + kfree(ndev->ieee80211_ptr); + ndev->ieee80211_ptr = NULL; + } + free_netdev(ndev); + return NULL; + } + + return ndev; +} + +void dhd_netdev_free(struct net_device *ndev) +{ +#ifdef WL_CFG80211 + ndev = dhd_cfg80211_netdev_free(ndev); +#endif + if (ndev) + free_netdev(ndev); +} + +static s32 +wl_dongle_up(struct net_device *ndev) +{ + s32 err = 0; + u32 up = 0; + + err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + } + return err; +} + +static s32 +wl_dongle_down(struct net_device *ndev) +{ + s32 err = 0; + u32 down = 0; + + err = wldev_ioctl(ndev, WLC_DOWN, &down, sizeof(down), true); + if (unlikely(err)) { + WL_ERR(("WLC_DOWN error (%d)\n", err)); + } + return err; +} + + +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg) +{ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (dhd_dongle_up) { + WL_ERR(("Dongle is already up\n")); + return err; + } + + ndev = bcmcfg_to_prmry_ndev(cfg); + + err = wl_dongle_up(ndev); + if (unlikely(err)) { + WL_ERR(("wl_dongle_up failed\n")); + goto default_conf_out; + } + dhd_dongle_up = true; + +default_conf_out: + + return err; + +} + +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, + const struct bcm_nlmsg_hdr *nlioc, void *buf) +{ + struct net_device *ndev = NULL; + dhd_pub_t *dhd; + dhd_ioctl_t ioc = { 0 }; + int ret = 0; + int8 index; + + WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); + + dhd = cfg->pub; + DHD_OS_WAKE_LOCK(dhd); + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->hang_was_sent) { + WL_ERR(("HANG was sent up earlier\n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); + DHD_OS_WAKE_UNLOCK(dhd); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + + ndev = wdev_to_wlc_ndev(wdev, cfg); + index = dhd_net2idx(dhd->info, ndev); + if (index == DHD_BAD_IF) { + WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); + ret = BCME_ERROR; + goto done; + } + + ioc.cmd = nlioc->cmd; + ioc.len = nlioc->len; + ioc.set = nlioc->set; + ioc.driver = nlioc->magic; + ret = dhd_ioctl_process(dhd, index, &ioc, buf); + if (ret) { + WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); + ret = OSL_ERROR(ret); + goto done; + } + +done: + DHD_OS_WAKE_UNLOCK(dhd); + return ret; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h new file mode 100644 index 000000000000..cae7cc9b247b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h @@ -0,0 +1,54 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written 
software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cfg80211.h 591285 2015-10-07 11:56:29Z $ + */ + + +#ifndef __DHD_CFG80211__ +#define __DHD_CFG80211__ + +#include +#include +#include + +#ifndef WL_ERR +#define WL_ERR CFG80211_ERR +#endif +#ifndef WL_TRACE +#define WL_TRACE CFG80211_TRACE +#endif + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val); +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg); +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg); +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data); + +#endif /* __DHD_CFG80211__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c b/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c new file mode 100644 index 000000000000..c72f8299aadb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c @@ -0,0 +1,174 @@ +/* + * Linux cfg80211 vendor command/event handlers of DHD + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_cfg_vendor.c 525516 2015-01-09 23:12:53Z $ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef VENDOR_EXT_SUPPORT +static int dhd_cfgvendor_priv_string_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + const struct bcm_nlmsg_hdr *nlioc = data; + struct net_device *ndev = NULL; + struct bcm_cfg80211 *cfg; + struct sk_buff *reply; + void *buf = NULL, *cur; + dhd_pub_t *dhd; + dhd_ioctl_t ioc = { 0 }; + int ret = 0, ret_len, payload, msglen; + int maxmsglen = PAGE_SIZE - 0x100; + int8 index; + + WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); + DHD_ERROR(("entry: cmd = %d\n", nlioc->cmd)); + + cfg = wiphy_priv(wiphy); + dhd = cfg->pub; + + DHD_OS_WAKE_LOCK(dhd); + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->hang_was_sent) { + WL_ERR(("HANG was sent up earlier\n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); + DHD_OS_WAKE_UNLOCK(dhd); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + + len -= sizeof(struct bcm_nlmsg_hdr); + ret_len = nlioc->len; + if (ret_len > 0 || len > 0) { + if (len > DHD_IOCTL_MAXLEN) { + WL_ERR(("oversize input buffer %d\n", len)); + len = DHD_IOCTL_MAXLEN; + } + if (ret_len > DHD_IOCTL_MAXLEN) { + WL_ERR(("oversize return buffer %d\n", ret_len)); + ret_len = DHD_IOCTL_MAXLEN; + } + payload = max(ret_len, len) + 1; + buf = vzalloc(payload); + if (!buf) { + DHD_OS_WAKE_UNLOCK(dhd); + return -ENOMEM; + } + memcpy(buf, (void *)nlioc + nlioc->offset, len); + *(char *)(buf + len) = '\0'; + } + + ndev = wdev_to_wlc_ndev(wdev, cfg); + index = dhd_net2idx(dhd->info, ndev); + if (index == DHD_BAD_IF) { + WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); + ret = BCME_ERROR; + goto done; + } + + ioc.cmd = nlioc->cmd; + ioc.len = nlioc->len; + ioc.set = nlioc->set; + ioc.driver = nlioc->magic; + ret = dhd_ioctl_process(dhd, index, &ioc, buf); + if (ret) { + WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); + ret = OSL_ERROR(ret); + goto done; + } + + cur = buf; + while (ret_len > 0) { + msglen = nlioc->len > maxmsglen ? 
maxmsglen : ret_len; + ret_len -= msglen; + payload = msglen + sizeof(msglen); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + if (!reply) { + WL_ERR(("Failed to allocate reply msg\n")); + ret = -ENOMEM; + break; + } + + if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) || + nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) { + kfree_skb(reply); + ret = -ENOBUFS; + break; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) { + WL_ERR(("testmode reply failed:%d\n", ret)); + break; + } + cur += msglen; + } + +done: + vfree(buf); + DHD_OS_WAKE_UNLOCK(dhd); + return ret; +} + +const struct wiphy_vendor_command dhd_cfgvendor_cmds [] = { + { + { + .vendor_id = OUI_BRCM, + .subcmd = BRCM_VENDOR_SCMD_PRIV_STR + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = dhd_cfgvendor_priv_string_handler + }, +}; + +int cfgvendor_attach(struct wiphy *wiphy) +{ + wiphy->vendor_commands = dhd_cfgvendor_cmds; + wiphy->n_vendor_commands = ARRAY_SIZE(dhd_cfgvendor_cmds); + + return 0; +} + +int cfgvendor_detach(struct wiphy *wiphy) +{ + wiphy->vendor_commands = NULL; + wiphy->n_vendor_commands = 0; + + return 0; +} +#endif /* VENDOR_EXT_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c new file mode 100644 index 000000000000..55bc9ebc992a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_common.c @@ -0,0 +1,3557 @@ +/* + * Broadcom Dongle Host Driver (DHD), common DHD core. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_common.c 609263 2015-12-31 16:21:33Z $ + */ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef SHOW_LOGTRACE +#include +#endif /* SHOW_LOGTRACE */ + +#ifdef BCMPCIE +#include +#endif + +#include +#include +#include +#include + +#ifdef WL_CFG80211 +#include +#endif +#ifdef PNO_SUPPORT +#include +#endif + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#ifdef PROP_TXSTATUS +#include +#include +#endif + +#ifdef DHD_WMF +#include +#include +#endif /* DHD_WMF */ + +#ifdef DHD_L2_FILTER +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + + +#ifdef WLMEDIA_HTSF +extern void htsf_update(struct dhd_info *dhd, void *data); +#endif + +#ifdef DHD_LOG_DUMP +int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL; +#else +int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL; +#endif /* DHD_LOG_DUMP */ + + +#if defined(WL_WLC_SHIM) +#include +#else +#endif /* WL_WLC_SHIM */ + +#include + +#ifdef SOFTAP +char fw_path2[MOD_PARAM_PATHLEN]; +extern bool softap_enabled; +#endif + +/* Last connection success/failure status */ +uint32 dhd_conn_event; +uint32 dhd_conn_status; +uint32 dhd_conn_reason; + +#if defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) +static int check_event_log_sequence_number(uint32 seq_no); +#endif /* defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) */ +extern int dhd_iscan_request(void * dhdp, uint16 action); +extern void dhd_ind_scan_confirm(void *h, bool status); +extern int dhd_iscan_in_progress(void *h); +void dhd_iscan_lock(void); +void dhd_iscan_unlock(void); +extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx); +#if !defined(AP) && defined(WLP2P) +extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd); +#endif + +extern int dhd_socram_dump(struct dhd_bus *bus); + +bool ap_cfg_running = FALSE; +bool ap_fw_loaded = FALSE; + +/* Version string to report */ +#ifdef DHD_DEBUG +#ifndef SRCBASE +#define SRCBASE "drivers/net/wireless/bcmdhd" +#endif +#define DHD_COMPILED "\nCompiled in " SRCBASE +#endif /* DHD_DEBUG */ + +#if defined(DHD_DEBUG) +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; +#else +const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from "; +#endif +char fw_version[FW_VER_STR_LEN] = "\0"; + +void dhd_set_timer(void *bus, uint wdtick); + + + +/* IOVar table */ +enum { + IOV_VERSION = 1, + IOV_MSGLEVEL, + IOV_BCMERRORSTR, + IOV_BCMERROR, + IOV_WDTICK, + IOV_DUMP, + IOV_CLEARCOUNTS, + IOV_LOGDUMP, + IOV_LOGCAL, + IOV_LOGSTAMP, + IOV_GPIOOB, + IOV_IOCTLTIMEOUT, +#if defined(DHD_DEBUG) + IOV_CONS, + IOV_DCONSOLE_POLL, + IOV_DHD_JOIN_TIMEOUT_DBG, + IOV_SCAN_TIMEOUT, +#endif /* defined(DHD_DEBUG) */ +#ifdef PROP_TXSTATUS + IOV_PROPTXSTATUS_ENABLE, + IOV_PROPTXSTATUS_MODE, + IOV_PROPTXSTATUS_OPT, + IOV_PROPTXSTATUS_MODULE_IGNORE, + IOV_PROPTXSTATUS_CREDIT_IGNORE, + IOV_PROPTXSTATUS_TXSTATUS_IGNORE, + IOV_PROPTXSTATUS_RXPKT_CHK, +#endif /* PROP_TXSTATUS */ + IOV_BUS_TYPE, +#ifdef WLMEDIA_HTSF + IOV_WLPKTDLYSTAT_SZ, +#endif + IOV_CHANGEMTU, + IOV_HOSTREORDER_FLOWS, +#ifdef DHDTCPACK_SUPPRESS + IOV_TCPACK_SUPPRESS, +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + IOV_WMF_BSS_ENAB, + IOV_WMF_UCAST_IGMP, + IOV_WMF_MCAST_DATA_SENDUP, +#ifdef WL_IGMP_UCQUERY + IOV_WMF_UCAST_IGMP_QUERY, +#endif /* WL_IGMP_UCQUERY */ +#ifdef 
DHD_UCAST_UPNP + IOV_WMF_UCAST_UPNP, +#endif /* DHD_UCAST_UPNP */ +#endif /* DHD_WMF */ + IOV_AP_ISOLATE, +#ifdef DHD_L2_FILTER + IOV_DHCP_UNICAST, + IOV_BLOCK_PING, + IOV_PROXY_ARP, + IOV_GRAT_ARP, +#endif /* DHD_L2_FILTER */ +#ifdef DHD_PSTA + IOV_PSTA, +#endif /* DHD_PSTA */ + IOV_CFG80211_OPMODE, + IOV_ASSERT_TYPE, + IOV_LMTEST, + IOV_LAST +}; + +const bcm_iovar_t dhd_iovars[] = { + {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) }, +#ifdef DHD_DEBUG + {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ + {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN }, + {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 }, + {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 }, + {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, +#ifdef DHD_DEBUG + {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 }, + {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 }, +#endif + {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 }, + {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 }, + {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 }, +#ifdef PROP_TXSTATUS + {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_BOOL, 0 }, + /* + set the proptxtstatus operation mode: + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + */ + {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 }, + {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 }, + {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 }, + {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 }, + {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 }, + {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, IOVT_BOOL, 0 }, +#endif /* PROP_TXSTATUS */ + {"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0}, +#ifdef WLMEDIA_HTSF + {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 }, +#endif + {"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 }, + {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER, + (WLHOST_REORDERDATA_MAXFLOWS + 1) }, +#ifdef DHDTCPACK_SUPPRESS + {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, IOVT_UINT8, 0 }, +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, IOVT_BOOL, 0 }, + {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, IOVT_BOOL, 0 }, + {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, IOVT_BOOL, 0 }, +#ifdef WL_IGMP_UCQUERY + {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), IOVT_BOOL, 0 }, +#endif /* WL_IGMP_UCQUERY */ +#ifdef DHD_UCAST_UPNP + {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 }, +#endif /* DHD_UCAST_UPNP */ +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + {"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 }, +#endif /* DHD_L2_FILTER */ + {"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0}, +#ifdef DHD_L2_FILTER + {"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0}, + {"proxy_arp", IOV_PROXY_ARP, (0), IOVT_BOOL, 0}, + {"grat_arp", IOV_GRAT_ARP, (0), IOVT_BOOL, 0}, +#endif /* DHD_L2_FILTER */ +#ifdef DHD_PSTA + /* PSTA/PSR Mode configuration. 
0: DISABLED 1: PSTA 2: PSR */ + {"psta", IOV_PSTA, 0, IOVT_UINT32, 0}, +#endif /* DHD_PSTA */ + {"op_mode", IOV_CFG80211_OPMODE, 0, IOVT_UINT32, 0 }, + {"assert_type", IOV_ASSERT_TYPE, (0), IOVT_UINT32, 0}, + {"lmtest", IOV_LMTEST, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +#define DHD_IOVAR_BUF_SIZE 128 + +#ifdef DHD_FW_COREDUMP +void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length) +{ + if (dhd_pub->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd_pub, dhd_pub->soc_ram, dhd_pub->soc_ram_length); +#else + MFREE(dhd_pub->osh, dhd_pub->soc_ram, dhd_pub->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhd_pub->soc_ram = NULL; + dhd_pub->soc_ram_length = 0; + } + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, + DHD_PREALLOC_MEMDUMP_RAM, length); + memset(dhd_pub->soc_ram, 0, length); +#else + dhd_pub->soc_ram = (uint8*) MALLOCZ(dhd_pub->osh, length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (dhd_pub->soc_ram == NULL) { + DHD_ERROR(("%s: Failed to allocate memory for fw crash snapshot.\n", + __FUNCTION__)); + return; + } + + dhd_pub->soc_ram_length = length; + memcpy(dhd_pub->soc_ram, buffer, length); +} +#endif /* DHD_FW_COREDUMP */ + +/* to NDIS developers: the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!! + */ + +static int +dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) +{ + char eabuf[ETHER_ADDR_STR_LEN]; + + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + if (!dhdp || !dhdp->prot || !buf) + return BCME_ERROR; + + bcm_binit(strbuf, buf, buflen); + + /* Base DHD info */ + bcm_bprintf(strbuf, "%s\n", dhd_version); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n", + dhdp->up, dhdp->txoff, dhdp->busstate); + bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n", + dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz); + bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n", + dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf)); + bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt); + + bcm_bprintf(strbuf, "dongle stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n", + dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes, + dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped); + bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n", + dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes, + dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped); + bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast); + + bcm_bprintf(strbuf, "bus stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n", + dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors); + bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n", + dhdp->tx_ctlpkts, dhdp->tx_ctlerrs); + bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n", + dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors); + bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n", + dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped); + bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n", + dhdp->rx_readahead_cnt, dhdp->tx_realloc); + bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n", + dhdp->tx_pktgetfail, dhdp->rx_pktgetfail); 
+ bcm_bprintf(strbuf, "\n"); + + /* Add any prot info */ + dhd_prot_dump(dhdp, strbuf); + bcm_bprintf(strbuf, "\n"); + + /* Add any bus info */ + dhd_bus_dump(dhdp, strbuf); + + +#if defined(DHD_LB_STATS) + dhd_lb_stats_dump(dhdp, strbuf); +#endif /* DHD_LB_STATS */ + + return (!strbuf->size ? BCME_BUFTOOSHORT : 0); +} + +void +dhd_dump_to_kernelog(dhd_pub_t *dhdp) +{ + char buf[512]; + + DHD_ERROR(("F/W version: %s\n", fw_version)); + bcm_bprintf_bypass = TRUE; + dhd_dump(dhdp, buf, sizeof(buf)); + bcm_bprintf_bypass = FALSE; +} + +int +dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx) +{ + wl_ioctl_t ioc; + + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len); +} + +int +dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + /* memset(iovbuf, 0, sizeof(iovbuf)); */ + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (!ret) { + *pval = ltoh32(*((uint*)iovbuf)); + } else { + DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + int lval = htol32(val); + + /* memset(iovbuf, 0, sizeof(iovbuf)); */ + if (bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (ret) { + DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) +{ + int ret = BCME_ERROR; + unsigned long flags; + + if (dhd_os_proto_block(dhd_pub)) + { +#ifdef DHD_LOG_DUMP + int slen, i, val, rem; + long int lval; + char *pval, *pos, *msg; + char tmp[64]; +#endif /* DHD_LOG_DUMP */ + DHD_GENERAL_LOCK(dhd_pub, flags); + if (dhd_pub->busstate == DHD_BUS_DOWN || + dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR; + DHD_GENERAL_UNLOCK(dhd_pub, flags); + +#ifdef DHD_LOG_DUMP + /* WLC_GET_VAR */ + if (ioc->cmd == WLC_GET_VAR) { + memset(tmp, 0, sizeof(tmp)); + bcopy(ioc->buf, tmp, strlen(ioc->buf) + 1); + } +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ +#if defined(WL_WLC_SHIM) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + + wl_io_pport_t io_pport; + io_pport.dhd_pub = dhd_pub; + io_pport.ifidx = ifidx; + + ret = wl_shim_ioctl(shim, ioc, len, &io_pport); + if (ret != BCME_OK) { + DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n", + __FUNCTION__, ioc->cmd, ret)); + } + } +#else + ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); +#endif /* defined(WL_WLC_SHIM) */ + + if (ret && dhd_pub->up) { + /* Send hang event only if dhd_open() was success */ + dhd_os_check_hang(dhd_pub, ifidx, ret); + } + + if (ret == -ETIMEDOUT 
&& !dhd_pub->up) { + DHD_ERROR(("%s: 'resumed on timeout' error occurred " + "before the interface was brought up\n", __FUNCTION__)); + dhd_pub->busstate = DHD_BUS_DOWN; + } + + DHD_GENERAL_LOCK(dhd_pub, flags); + dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR; + dhd_os_busbusy_wake(dhd_pub); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + + dhd_os_proto_unblock(dhd_pub); + +#ifdef DHD_LOG_DUMP + if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) { + lval = 0; + slen = strlen(ioc->buf) + 1; + msg = (char*)ioc->buf; + if (ioc->cmd == WLC_GET_VAR) { + bcopy(msg, &lval, sizeof(long int)); + msg = tmp; + } else { + bcopy((msg + slen), &lval, sizeof(long int)); + } + DHD_ERROR_EX(("%s: cmd: %d, msg: %s, val: 0x%lx, len: %d, set: %d\n", + ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, msg, lval, ioc->len, ioc->set)); + } else { + slen = ioc->len; + if (ioc->buf != NULL) { + val = *(int*)ioc->buf; + pval = (char*)ioc->buf; + pos = tmp; + rem = sizeof(tmp); + memset(tmp, 0, sizeof(tmp)); + for (i = 0; i < slen; i++) { + pos += snprintf(pos, rem, "%02x ", pval[i]); + rem = sizeof(tmp) - (int)(pos - tmp); + if (rem <= 0) { + break; + } + } + DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, val: %d(%s), len: %d, set: %d\n", + ioc->cmd, val, tmp, ioc->len, ioc->set)); + } else { + DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); + } + } +#endif /* DHD_LOG_DUMP */ + } + + return ret; +} + +uint wl_get_port_num(wl_io_pport_t *io_pport) +{ + return 0; +} + +/* Get bssidx from iovar params + * Input: dhd_pub - pointer to dhd_pub_t + * params - IOVAR params + * Output: idx - BSS index + * val - pointer to the IOVAR arguments + */ +static int +dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val) +{ + char *prefix = "bsscfg:"; + uint32 bssidx; + + if (!(strncmp(params, prefix, strlen(prefix)))) { + /* per bss setting should be prefixed with 'bsscfg:' */ + char *p = (char *)params + strlen(prefix); + + /* Skip Name */ + while (*p != '\0') + p++; + /* consider null */ + p = p + 1; + bcopy(p, &bssidx, sizeof(uint32)); + /* Get corresponding dhd index */ + bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx)); + + if (bssidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: Wrong bssidx provided\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* skip bss idx */ + p += sizeof(uint32); + *val = p; + *idx = bssidx; + } else { + DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#if defined(DHD_DEBUG) && defined(BCMDHDUSB) +/* USB Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + DHD_TRACE(("%s \n", __FUNCTION__)); + + return dhd_iovar(dhd, 0, "cons", msg, msglen, 1); + +} +#endif /* DHD_DEBUG && BCMDHDUSB */ + +static int +dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + switch (actionid) { + case IOV_GVAL(IOV_VERSION): + /* Need to have checked buffer length */ + bcm_strncpy_s((char*)arg, len, dhd_version, len); + break; + + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)dhd_msg_level; + bcopy(&int_val, arg, 
val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): +#ifdef WL_CFG80211 + /* Enable DHD and WL logs in oneshot */ + if (int_val & DHD_WL_VAL2) + wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2)); + else if (int_val & DHD_WL_VAL) + wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG); + if (!(int_val & DHD_WL_VAL2)) +#endif /* WL_CFG80211 */ + dhd_msg_level = int_val; + break; + case IOV_GVAL(IOV_BCMERRORSTR): + bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN); + ((char *)arg)[BCME_STRLEN - 1] = 0x00; + break; + + case IOV_GVAL(IOV_BCMERROR): + int_val = (int32)dhd_pub->bcmerror; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_WDTICK): + int_val = (int32)dhd_watchdog_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WDTICK): + if (!dhd_pub->up) { + bcmerror = BCME_NOTUP; + break; + } + + if (CUSTOM_DHD_WATCHDOG_MS == 0 && int_val == 0) { + dhd_watchdog_ms = (uint)int_val; + } + + dhd_os_wd_timer(dhd_pub, (uint)int_val); + break; + + case IOV_GVAL(IOV_DUMP): + bcmerror = dhd_dump(dhd_pub, arg, len); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_DCONSOLE_POLL): + int_val = (int32)dhd_console_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DCONSOLE_POLL): + dhd_console_ms = (uint)int_val; + break; + + case IOV_SVAL(IOV_CONS): + if (len > 0) + bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); + break; +#endif /* DHD_DEBUG */ + + case IOV_SVAL(IOV_CLEARCOUNTS): + dhd_pub->tx_packets = dhd_pub->rx_packets = 0; + dhd_pub->tx_errors = dhd_pub->rx_errors = 0; + dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0; + dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; + dhd_pub->tx_dropped = 0; + dhd_pub->rx_dropped = 0; + dhd_pub->tx_pktgetfail = 0; + dhd_pub->rx_pktgetfail = 0; + dhd_pub->rx_readahead_cnt = 0; + dhd_pub->tx_realloc = 0; + dhd_pub->wd_dpc_sched = 0; + memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats)); + dhd_bus_clearcounts(dhd_pub); +#ifdef PROP_TXSTATUS + /* clear proptxstatus related counters */ + dhd_wlfc_clear_counts(dhd_pub); +#endif /* PROP_TXSTATUS */ + DHD_LB_STATS_RESET(dhd_pub); + break; + + + case IOV_GVAL(IOV_IOCTLTIMEOUT): { + int_val = (int32)dhd_os_get_ioctl_resp_timeout(); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_IOCTLTIMEOUT): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_os_set_ioctl_resp_timeout((unsigned int)int_val); + break; + } + + +#ifdef PROP_TXSTATUS + case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + int_val = wlfc_enab ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + + /* wlfc is already set as desired */ + if (wlfc_enab == (int_val == 0 ? 
FALSE : TRUE)) + goto exit; + + if (int_val == TRUE) + bcmerror = dhd_wlfc_init(dhd_pub); + else + bcmerror = dhd_wlfc_deinit(dhd_pub); + + break; + } + case IOV_GVAL(IOV_PROPTXSTATUS_MODE): + bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODE): + dhd_wlfc_set_mode(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + dhd_wlfc_set_module_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + dhd_wlfc_set_credit_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val); + break; + +#endif /* PROP_TXSTATUS */ + + case IOV_GVAL(IOV_BUS_TYPE): + /* The dhd application queries the driver to check if its usb or sdio. */ +#ifdef BCMDHDUSB + int_val = BUS_TYPE_USB; +#endif +#ifdef BCMSDIO + int_val = BUS_TYPE_SDIO; +#endif +#ifdef PCIE_FULL_DONGLE + int_val = BUS_TYPE_PCIE; +#endif + bcopy(&int_val, arg, val_size); + break; + + +#ifdef WLMEDIA_HTSF + case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ): + int_val = dhd_pub->htsfdlystat_sz; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ): + dhd_pub->htsfdlystat_sz = int_val & 0xff; + printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz); + break; +#endif + case IOV_SVAL(IOV_CHANGEMTU): + int_val &= 0xffff; + bcmerror = dhd_change_mtu(dhd_pub, int_val, 0); + break; + + case IOV_GVAL(IOV_HOSTREORDER_FLOWS): + { + uint i = 0; + uint8 *ptr = (uint8 *)arg; + uint8 count = 0; + + ptr++; + for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) { + if (dhd_pub->reorder_bufs[i] != NULL) { + *ptr = dhd_pub->reorder_bufs[i]->flow_id; + ptr++; + count++; + } + } + ptr = (uint8 *)arg; + *ptr = count; + break; + } +#ifdef DHDTCPACK_SUPPRESS + case IOV_GVAL(IOV_TCPACK_SUPPRESS): { + int_val = (uint32)dhd_pub->tcpack_sup_mode; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_TCPACK_SUPPRESS): { + bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val); + break; + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + case IOV_GVAL(IOV_WMF_BSS_ENAB): { + uint32 bssidx; + dhd_wmf_t *wmf; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + wmf = dhd_wmf_conf(dhd_pub, bssidx); + int_val = wmf->wmf_enable ? 
1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_WMF_BSS_ENAB): { + /* Enable/Disable WMF */ + uint32 bssidx; + dhd_wmf_t *wmf; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + wmf = dhd_wmf_conf(dhd_pub, bssidx); + if (wmf->wmf_enable == int_val) + break; + if (int_val) { + /* Enable WMF */ + if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) { + DHD_ERROR(("%s: Error in creating WMF instance\n", + __FUNCTION__)); + break; + } + if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) { + DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__)); + break; + } + wmf->wmf_enable = TRUE; + } else { + /* Disable WMF */ + wmf->wmf_enable = FALSE; + dhd_wmf_stop(dhd_pub, bssidx); + dhd_wmf_instance_del(dhd_pub, bssidx); + } + break; + } + case IOV_GVAL(IOV_WMF_UCAST_IGMP): + int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_IGMP): + if (dhd_pub->wmf_ucast_igmp == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_igmp = int_val; + else + bcmerror = BCME_RANGE; + break; + case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP): + int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP): + dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val); + break; + +#ifdef WL_IGMP_UCQUERY + case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY): + int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY): + if (dhd_pub->wmf_ucast_igmp_query == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_igmp_query = int_val; + else + bcmerror = BCME_RANGE; + break; +#endif /* WL_IGMP_UCQUERY */ +#ifdef DHD_UCAST_UPNP + case IOV_GVAL(IOV_WMF_UCAST_UPNP): + int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_UPNP): + if (dhd_pub->wmf_ucast_upnp == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_upnp = int_val; + else + bcmerror = BCME_RANGE; + break; +#endif /* DHD_UCAST_UPNP */ +#endif /* DHD_WMF */ + + +#ifdef DHD_L2_FILTER + case IOV_GVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 
1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_PING): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_block_ping_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_BLOCK_PING): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } + case IOV_GVAL(IOV_PROXY_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_parp_status(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROXY_ARP): { + uint32 bssidx; + char *val; + char iobuf[32]; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + bcopy(val, &int_val, sizeof(int_val)); + + /* Issue a iovar request to WL to update the proxy arp capability bit + * in the Extended Capability IE of beacons/probe responses. + */ + bcm_mkiovar("proxy_arp_advertise", val, sizeof(int_val), iobuf, + sizeof(iobuf)); + bcmerror = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, iobuf, + sizeof(iobuf), TRUE, bssidx); + + if (bcmerror == BCME_OK) { + dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0); + } + break; + } + case IOV_GVAL(IOV_GRAT_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_grat_arp_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_GRAT_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 
1 : 0); + break; + } +#endif /* DHD_L2_FILTER */ + case IOV_GVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_ap_isolate(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_ap_isolate(dhd_pub, bssidx, int_val); + break; + } +#ifdef DHD_PSTA + case IOV_GVAL(IOV_PSTA): { + int_val = dhd_get_psta_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PSTA): { + if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) { + dhd_set_psta_mode(dhd_pub, int_val); + } else { + bcmerror = BCME_RANGE; + } + break; + } +#endif /* DHD_PSTA */ + case IOV_GVAL(IOV_CFG80211_OPMODE): { + int_val = (int32)dhd_pub->op_mode; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CFG80211_OPMODE): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_pub->op_mode = int_val; + break; + } + + case IOV_GVAL(IOV_ASSERT_TYPE): + int_val = g_assert_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ASSERT_TYPE): + g_assert_type = (uint32)int_val; + break; + + + case IOV_GVAL(IOV_LMTEST): { + *(uint32 *)arg = (uint32)lmtest; + break; + } + + case IOV_SVAL(IOV_LMTEST): { + uint32 val = *(uint32 *)arg; + if (val > 50) + bcmerror = BCME_BADARG; + else { + lmtest = (uint)val; + DHD_ERROR(("%s: lmtest %s\n", + __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON")); + } + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror)); + return bcmerror; +} + +/* Store the status of a connection attempt for later retrieval by an iovar */ +void +dhd_store_conn_status(uint32 event, uint32 status, uint32 reason) +{ + /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID + * because an encryption/rsn mismatch results in both events, and + * the important information is in the WLC_E_PRUNE. + */ + if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL && + dhd_conn_event == WLC_E_PRUNE)) { + dhd_conn_event = event; + dhd_conn_status = status; + dhd_conn_reason = reason; + } +} + +bool +dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec) +{ + void *p; + int eprec = -1; /* precedence to evict from */ + bool discard_oldest; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(q, prec) && !pktq_full(q)) { + pktq_penq(q, prec, pkt); + return TRUE; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(q, prec)) + eprec = prec; + else if (pktq_full(q)) { + p = pktq_peek_tail(q, &eprec); + ASSERT(p); + if (eprec > prec || eprec < 0) + return FALSE; + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktq_pempty(q, eprec)); + discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec); + if (eprec == prec && !discard_oldest) + return FALSE; /* refuse newer (incoming) packet */ + /* Evict packet according to discard policy */ + p = discard_oldest ? 
pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec); + ASSERT(p); +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + PKTFREE(dhdp->osh, p, TRUE); + } + + /* Enqueue */ + p = pktq_penq(q, prec, pkt); + ASSERT(p); + + return TRUE; +} + +/* + * Functions to drop proper pkts from queue: + * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only + * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts + * If can't find pkts matching upper 2 cases, drop first pkt anyway + */ +bool +dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn) +{ + struct pktq_prec *q = NULL; + void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL; + pkt_frag_t frag_info; + + ASSERT(dhdp && pq); + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + if (p == NULL) + return FALSE; + + while (p) { + frag_info = pkt_frag_info(dhdp->osh, p); + if (frag_info == DHD_PKT_FRAG_NONE) { + break; + } else if (frag_info == DHD_PKT_FRAG_FIRST) { + if (first) { + /* No last frag pkt, use prev as last */ + last = prev; + break; + } else { + first = p; + prev_first = prev; + } + } else if (frag_info == DHD_PKT_FRAG_LAST) { + if (first) { + last = p; + break; + } + } + + prev = p; + p = PKTLINK(p); + } + + if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) { + /* Not found matching pkts, use oldest */ + prev = NULL; + p = q->head; + frag_info = 0; + } + + if (frag_info == DHD_PKT_FRAG_NONE) { + first = last = p; + prev_first = prev; + } + + p = first; + while (p) { + next = PKTLINK(p); + q->len--; + pq->len--; + + PKTSETLINK(p, NULL); + + if (fn) + fn(dhdp, prec, p, TRUE); + + if (p == last) + break; + + p = next; + } + + if (prev_first == NULL) { + if ((q->head = next) == NULL) + q->tail = NULL; + } else { + PKTSETLINK(prev_first, next); + if (!next) + q->tail = prev_first; + } + + return TRUE; +} + +static int +dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int bcmerror = 0; + int val_size; + const bcm_iovar_t *vi = NULL; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + + bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +int +dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) +{ + int bcmerror = 0; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!buf) { + return BCME_BADARG; + } + + dhd_os_dhdiovar_lock(dhd_pub); + switch (ioc->cmd) { + case DHD_GET_MAGIC: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_MAGIC; + break; + + case DHD_GET_VERSION: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_VERSION; + break; + + case DHD_GET_VAR: + case DHD_SET_VAR: + { + char *arg; + uint arglen; + + DHD_GENERAL_LOCK(dhd_pub, flags); + if (dhd_pub->busstate == DHD_BUS_DOWN || + dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + /* In platforms like FC19, the FW download is done via IOCTL + * and should not return error for IOCTLs fired before FW + * Download is done + */ + if (dhd_pub->is_fw_download_done) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return -ENODEV; + } + } + dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR; + DHD_GENERAL_UNLOCK(dhd_pub, flags); +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* scan past the name to any arguments */ + for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) + ; + + if (*arg) { + bcmerror = BCME_BUFTOOSHORT; + goto unlock_exit; + } + + /* account for the NUL terminator */ + arg++, arglen--; + + /* call with the appropriate arguments */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, + buf, buflen, IOV_GET); + } else { + bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, + arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + /* not in generic table, try protocol module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, + arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + /* if still not found, try bus module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + arg, arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + } + goto unlock_exit; + + default: + bcmerror = BCME_UNSUPPORTED; + } + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; + +unlock_exit: + DHD_GENERAL_LOCK(dhd_pub, flags); + dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR; + dhd_os_busbusy_wake(dhd_pub); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; +} + +#ifdef SHOW_EVENTS +#ifdef SHOW_LOGTRACE + +#define MAX_NO_OF_ARG 16 + +#define FMTSTR_SIZE 132 +#define SIZE_LOC_STR 50 +#define MIN_DLEN 4 +#define TAG_BYTES 12 +#define TAG_WORDS 3 +#define ROMSTR_SIZE 200 + + +static int +check_event_log_sequence_number(uint32 seq_no) +{ + int32 diff; + uint32 ret; + static uint32 logtrace_seqnum_prev = 0; + + diff = ntoh32(seq_no)-logtrace_seqnum_prev; + switch (diff) + { + case 0: + ret = -1; /* duplicate packet . 
drop */ + break; + + case 1: + ret =0; /* in order */ + break; + + default: + if ((ntoh32(seq_no) == 0) && + (logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */ + ret = 0; + } else { + + if (diff > 0) { + DHD_EVENT(("WLC_E_TRACE:" + "Event lost (log) seqnum %d nblost %d\n", + ntoh32(seq_no), (diff-1))); + } else { + DHD_EVENT(("WLC_E_TRACE:" + "Event Packets coming out of order!!\n")); + } + ret = 0; + } + } + + logtrace_seqnum_prev = ntoh32(seq_no); + + return ret; +} + +static void +dhd_eventmsg_print(dhd_pub_t *dhd_pub, void *event_data, void *raw_event_ptr, + uint datalen, const char *event_name) +{ + msgtrace_hdr_t hdr; + uint32 nblost; + uint8 count; + char *s, *p; + static uint32 seqnum_prev = 0; + uint32 *log_ptr = NULL; + uchar *buf; + event_log_hdr_t event_hdr; + uint32 i; + int32 j; + + dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr; + + char fmtstr_loc_buf[FMTSTR_SIZE] = {0}; + char (*str_buf)[SIZE_LOC_STR] = NULL; + char * str_tmpptr = NULL; + uint32 addr = 0; + uint32 **hdr_ptr = NULL; + uint32 h_i = 0; + uint32 hdr_ptr_len = 0; + + typedef union { + uint32 val; + char * addr; + } u_arg; + u_arg arg[MAX_NO_OF_ARG] = {{0}}; + char *c_ptr = NULL; + char rom_log_str[ROMSTR_SIZE] = {0}; + uint32 rom_str_len = 0; + + BCM_REFERENCE(arg); + + if (!DHD_FWLOG_ON()) + return; + + buf = (uchar *) event_data; + memcpy(&hdr, buf, MSGTRACE_HDRLEN); + + if (hdr.version != MSGTRACE_VERSION) { + DHD_EVENT(("\nMACEVENT: %s [unsupported version --> " + "dhd version:%d dongle version:%d]\n", + event_name, MSGTRACE_VERSION, hdr.version)); + /* Reset datalen to avoid display below */ + datalen = 0; + return; + } + + if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) { + /* There are 2 bytes available at the end of data */ + buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; + + if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { + DHD_FWLOG(("WLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr.discarded_bytes), + ntoh32(hdr.discarded_printf))); + } + + nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; + if (nblost > 0) { + DHD_FWLOG(("WLC_E_TRACE:" + "[Event lost (msg) --> seqnum %d nblost %d\n", + ntoh32(hdr.seqnum), nblost)); + } + seqnum_prev = ntoh32(hdr.seqnum); + + /* Display the trace buffer. Advance from + * \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + p = (char *)&buf[MSGTRACE_HDRLEN]; + while (*p != '\0' && (s = strstr(p, "\n")) != NULL) { + *s = '\0'; + DHD_FWLOG(("[FWLOG] %s\n", p)); + p = s+1; + } + if (*p) + DHD_FWLOG(("[FWLOG] %s", p)); + + /* Reset datalen to avoid display below */ + datalen = 0; + + } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) { + /* Let the standard event printing work for now */ + uint32 timestamp, seq, pktlen; + + if (check_event_log_sequence_number(hdr.seqnum)) { + + DHD_EVENT(("%s: WLC_E_TRACE:" + "[Event duplicate (log) %d] dropping!!\n", + __FUNCTION__, hdr.seqnum)); + return; /* drop duplicate events */ + } + + p = (char *)&buf[MSGTRACE_HDRLEN]; + datalen -= MSGTRACE_HDRLEN; + pktlen = ltoh16(*((uint16 *)p)); + seq = ltoh16(*((uint16 *)(p + 2))); + p += MIN_DLEN; + datalen -= MIN_DLEN; + timestamp = ltoh32(*((uint32 *)p)); + BCM_REFERENCE(pktlen); + BCM_REFERENCE(seq); + BCM_REFERENCE(timestamp); + + /* + * Allocating max possible number of event TAGs in the received buffer + * considering that each event requires minimum of TAG_BYTES. 
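+	 * A rough worked example (payload size chosen for illustration, using the
+	 * TAG_BYTES value defined above): with TAG_BYTES = 12 and datalen = 1200,
+	 * at most (1200 / 12) + 1 = 101 headers can be present, so hdr_ptr_len
+	 * below becomes 101 * sizeof(uint32 *).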
+ */ + hdr_ptr_len = ((datalen/TAG_BYTES)+1) * sizeof(uint32*); + + if ((raw_event->fmts)) { + if (!(str_buf = MALLOCZ(dhd_pub->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)))) { + DHD_ERROR(("%s: malloc failed str_buf \n", __FUNCTION__)); + } + } + + if (!(hdr_ptr = MALLOCZ(dhd_pub->osh, hdr_ptr_len))) { + DHD_ERROR(("%s: malloc failed hdr_ptr \n", __FUNCTION__)); + } + + + DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[No.%d]: timestamp 0x%08x length = %d\n", + seq, timestamp, pktlen)); + + /* (raw_event->fmts) has value */ + + log_ptr = (uint32 *) (p + datalen); + + /* Store all hdr pointer while parsing from last of the log buffer + * sample format of + * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639 + * 001d3c54 00000064 00000064 035d6d89 0c580439 + * in above example 0c580439 -- 39 is tag , 04 is count, 580c is format number + * all these uint32 values comes in reverse order as group as EL data + * while decoding we can parse only from last to first + */ + + while (datalen > MIN_DLEN) { + log_ptr--; + datalen -= MIN_DLEN; + event_hdr.t = *log_ptr; + /* + * Check for partially overriten entries + */ + if (log_ptr - (uint32 *) p < event_hdr.count) { + break; + } + /* + * Check argument count (only when format is valid) + */ + if ((event_hdr.count > MAX_NO_OF_ARG) && + (event_hdr.fmt_num != 0xffff)) { + break; + } + /* + * Check for end of the Frame. + */ + if (event_hdr.tag == EVENT_LOG_TAG_NULL) { + continue; + } + log_ptr[0] = event_hdr.t; + if (h_i < (hdr_ptr_len / sizeof(uint32*))) { + hdr_ptr[h_i++] = log_ptr; + } + + /* Now place the header at the front + * and copy back. + */ + log_ptr -= event_hdr.count; + + c_ptr = NULL; + datalen = datalen - (event_hdr.count * MIN_DLEN); + } + datalen = 0; + + /* print all log using stored hdr pointer in reverse order of EL data + * which is actually print older log first and then other in order + */ + + for (j = (h_i-1); j >= 0; j--) { + if (!(hdr_ptr[j])) { + break; + } + + event_hdr.t = *hdr_ptr[j]; + + log_ptr = hdr_ptr[j]; + + /* Now place the header at the front + * and copy back. 
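+			 * Each log entry is laid out as <count payload words><header word>,
+			 * so stepping back event_hdr.count words from the stored header
+			 * pointer lands on the first payload word of that entry.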
+ */ + log_ptr -= event_hdr.count; + + if (event_hdr.tag == EVENT_LOG_TAG_ROM_PRINTF) { + + rom_str_len = ((event_hdr.count)-1) * sizeof(uint32); + + if (rom_str_len >= (ROMSTR_SIZE -1)) { + rom_str_len = ROMSTR_SIZE - 1; + } + + /* copy all ascii data for ROM printf to local string */ + memcpy(rom_log_str, log_ptr, rom_str_len); + /* add end of line at last */ + rom_log_str[rom_str_len] = '\0'; + + DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", + log_ptr[event_hdr.count - 1], rom_log_str)); + + /* Add newline if missing */ + if (rom_log_str[strlen(rom_log_str) - 1] != '\n') { + DHD_EVENT(("\n")); + } + + memset(rom_log_str, 0, ROMSTR_SIZE); + + continue; + } + + /* + * Check For Special Time Stamp Packet + */ + if (event_hdr.tag == EVENT_LOG_TAG_TS) { + DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", + log_ptr[event_hdr.count-1], log_ptr[0], log_ptr[1])); + continue; + } + + /* Simply print out event dump buffer (fmt_num = 0xffff) */ + if (!str_buf || event_hdr.fmt_num == 0xffff) { + /* + * Print out raw value if unable to interpret + */ +#ifdef DHD_LOG_DUMP + char buf[256]; + char *pos = buf; + memset(buf, 0, sizeof(buf)); + pos += snprintf(pos, 256, +#else + DHD_MSGTRACE_LOG(( +#endif /* DHD_LOG_DUMP */ + "EVENT_LOG_BUF[0x%08x]: tag=%d len=%d fmt=%04x", + log_ptr[event_hdr.count-1], event_hdr.tag, + event_hdr.count, event_hdr.fmt_num +#ifdef DHD_LOG_DUMP +); +#else +)); +#endif /* DHD_LOG_DUMP */ + + for (count = 0; count < (event_hdr.count-1); count++) { +#ifdef DHD_LOG_DUMP + if (strlen(buf) >= (256 - 1)) { + DHD_MSGTRACE_LOG(("%s\n", buf)); + memset(buf, 0, sizeof(buf)); + pos = buf; + } + pos += snprintf(pos, (256 - (int)(pos-buf)), + " %08x", log_ptr[count]); +#else + if (count % 8 == 0) + DHD_MSGTRACE_LOG(("\n\t%08x", log_ptr[count])); + else + DHD_MSGTRACE_LOG((" %08x", log_ptr[count])); +#endif /* DHD_LOG_DUMP */ + } +#ifdef DHD_LOG_DUMP + DHD_MSGTRACE_LOG(("%s\n", buf)); +#else + DHD_MSGTRACE_LOG(("\n")); +#endif /* DHD_LOG_DUMP */ + continue; + } + + /* Copy the format string to parse %s and add "EVENT_LOG: */ + if ((event_hdr.fmt_num >> 2) < raw_event->num_fmts) { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, + "EVENT_LOG[0x%08x]: %s", log_ptr[event_hdr.count-1], + raw_event->fmts[event_hdr.fmt_num >> 2]); + c_ptr = fmtstr_loc_buf; + } else { + DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__)); + continue; + } + + for (count = 0; count < (event_hdr.count-1); count++) { + if (c_ptr != NULL) { + if ((c_ptr = strstr(c_ptr, "%")) != NULL) { + c_ptr++; + } + } + + if ((c_ptr != NULL) && (*c_ptr == 's')) { + if ((raw_event->raw_sstr) && + ((log_ptr[count] > raw_event->rodata_start) && + (log_ptr[count] < raw_event->rodata_end))) { + /* ram static string */ + addr = log_ptr[count] - raw_event->rodata_start; + str_tmpptr = raw_event->raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else if ((raw_event->rom_raw_sstr) && + ((log_ptr[count] > + raw_event->rom_rodata_start) && + (log_ptr[count] < + raw_event->rom_rodata_end))) { + /* rom static string */ + addr = log_ptr[count] - raw_event->rom_rodata_start; + str_tmpptr = raw_event->rom_raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else { + /* + * Dynamic string OR + * No data for static string. + * So store all string's address as string. 
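+					 * e.g. a %s argument of 0x1ffc2a40 (an address chosen purely
+					 * for illustration) that falls outside both rodata ranges is
+					 * rendered as the literal token "(s)0x1ffc2a40" in the
+					 * formatted line.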
+ */ + snprintf(str_buf[count], SIZE_LOC_STR, "(s)0x%x", + log_ptr[count]); + arg[count].addr = str_buf[count]; + } + } else { + /* Other than string */ + arg[count].val = log_ptr[count]; + } + } + + DHD_MSGTRACE_LOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], + arg[11], arg[12], arg[13], arg[14], arg[15])); + + if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') { + /* Add newline if missing */ + DHD_MSGTRACE_LOG(("\n")); + } + + memset(fmtstr_loc_buf, 0, FMTSTR_SIZE); + + for (i = 0; i < MAX_NO_OF_ARG; i++) { + arg[i].addr = 0; + } + for (i = 0; i < MAX_NO_OF_ARG; i++) { + memset(str_buf[i], 0, SIZE_LOC_STR); + } + + } + DHD_MSGTRACE_LOG(("\n")); + + if (str_buf) { + MFREE(dhd_pub->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR)); + } + + if (hdr_ptr) { + MFREE(dhd_pub->osh, hdr_ptr, hdr_ptr_len); + } + } +} + +#endif /* SHOW_LOGTRACE */ + +static void +wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, + void *raw_event_ptr, char *eventmask) +{ + uint i, status, reason; + bool group = FALSE, flush_txq = FALSE, link = FALSE; + bool host_data = FALSE; /* prints event data after the case when set */ + const char *auth_str; + const char *event_name; + uchar *buf; + char err_msg[256], eabuf[ETHER_ADDR_STR_LEN]; + uint event_type, flags, auth_type, datalen; + + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + reason = ntoh32(event->reason); + BCM_REFERENCE(reason); + auth_type = ntoh32(event->auth_type); + datalen = ntoh32(event->datalen); + + /* debug dump of event messages */ + snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x", + (uchar)event->addr.octet[0]&0xff, + (uchar)event->addr.octet[1]&0xff, + (uchar)event->addr.octet[2]&0xff, + (uchar)event->addr.octet[3]&0xff, + (uchar)event->addr.octet[4]&0xff, + (uchar)event->addr.octet[5]&0xff); + + event_name = bcmevent_get_name(event_type); + BCM_REFERENCE(event_name); + + if (flags & WLC_EVENT_MSG_LINK) + link = TRUE; + if (flags & WLC_EVENT_MSG_GROUP) + group = TRUE; + if (flags & WLC_EVENT_MSG_FLUSHTXQ) + flush_txq = TRUE; + + switch (event_type) { + case WLC_E_START: + case WLC_E_DEAUTH: + case WLC_E_DISASSOC: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC: + case WLC_E_REASSOC: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n", + event_name, eabuf, (int)reason)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", + event_name, eabuf, (int)status)); + } + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason)); + break; + + case WLC_E_AUTH: + case WLC_E_AUTH_IND: + if (auth_type == DOT11_OPEN_SYSTEM) + auth_str = "Open System"; + else if (auth_type == DOT11_SHARED_KEY) + auth_str = "Shared Key"; + else { + snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type); + auth_str = err_msg; + } + if (event_type == WLC_E_AUTH_IND) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str)); + } else if (status == 
WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n", + event_name, eabuf, auth_str, (int)reason)); + } + BCM_REFERENCE(auth_str); + + break; + + case WLC_E_JOIN: + case WLC_E_ROAM: + case WLC_E_SET_SSID: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", + event_name, (int)status)); + } + break; + + case WLC_E_BEACON_RX: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status)); + } + break; + + case WLC_E_LINK: + DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN")); + BCM_REFERENCE(link); + break; + + case WLC_E_MIC_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n", + event_name, eabuf, group, flush_txq)); + BCM_REFERENCE(group); + BCM_REFERENCE(flush_txq); + break; + + case WLC_E_ICV_ERROR: + case WLC_E_UNICAST_DECODE_ERROR: + case WLC_E_MULTICAST_DECODE_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", + event_name, eabuf)); + break; + + case WLC_E_TXFAIL: + DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status)); + break; + + case WLC_E_ASSOC_REQ_IE: + case WLC_E_ASSOC_RESP_IE: + case WLC_E_PMKID_CACHE: + case WLC_E_SCAN_COMPLETE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); + break; + + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_NONE: + case WLC_E_PFN_SCAN_ALLGONE: + case WLC_E_PFN_GSCAN_FULL_RESULT: + case WLC_E_PFN_SWC: + DHD_EVENT(("PNOEVENT: %s\n", event_name)); + break; + + case WLC_E_PSK_SUP: + case WLC_E_PRUNE: + DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; + +#ifdef WIFI_ACT_FRAME + case WLC_E_ACTION_FRAME: + DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf)); + break; +#endif /* WIFI_ACT_FRAME */ + +#ifdef SHOW_LOGTRACE + case WLC_E_TRACE: + { + dhd_eventmsg_print(dhd_pub, event_data, raw_event_ptr, datalen, event_name); + break; + } +#endif /* SHOW_LOGTRACE */ + + case WLC_E_RSSI: + DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data)))); + break; + + case WLC_E_SERVICE_FOUND: + case WLC_E_P2PO_ADD_DEVICE: + case WLC_E_P2PO_DEL_DEVICE: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + +#ifdef BT_WIFI_HANDOBER + case WLC_E_BT_WIFI_HANDOVER_REQ: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; +#endif + + case WLC_E_CCA_CHAN_QUAL: + if (datalen) { + buf = (uchar *) event_data; + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d, " + "channel 0x%02x \n", event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, *(buf + 4))); + } + break; + case WLC_E_ESCAN_RESULT: + { +#ifndef DHD_IFDEBUG + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n", + event_name, event_type, eabuf, (int)status)); +#endif + } + break; + default: + DHD_EVENT(("MACEVENT: 
%s %d, MAC %s, status %d, reason %d, auth %d\n", + event_name, event_type, eabuf, (int)status, (int)reason, + (int)auth_type)); + break; + } + + /* show any appended data if message level is set to bytes or host_data is set */ + if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) { + buf = (uchar *) event_data; + BCM_REFERENCE(buf); + DHD_EVENT((" data (%d) : ", datalen)); + for (i = 0; i < datalen; i++) + DHD_EVENT((" 0x%02x ", *buf++)); + DHD_EVENT(("\n")); + } +} +#endif /* SHOW_EVENTS */ + +/* Stub for now. Will become real function as soon as shim + * is being integrated to Android, Linux etc. + */ +int +wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport) +{ + return BCME_OK; +} + +int +wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, void **data_ptr, void *raw_event) +{ + wl_evt_pport_t evt_pport; + wl_event_msg_t event; + + /* make sure it is a BRCM event pkt and record event data */ + int ret = wl_host_event_get_data(pktdata, &event, data_ptr); + if (ret != BCME_OK) { + return ret; + } + + /* convert event from network order to host order */ + wl_event_to_host_order(&event); + + /* record event params to evt_pport */ + evt_pport.dhd_pub = dhd_pub; + evt_pport.ifidx = ifidx; + evt_pport.pktdata = pktdata; + evt_pport.data_ptr = data_ptr; + evt_pport.raw_event = raw_event; + +#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + ASSERT(shim); + ret = wl_shim_event_process(shim, &event, &evt_pport); + } +#else + ret = wl_event_process_default(&event, &evt_pport); +#endif + + return ret; +} + +/* Check whether packet is a BRCM event pkt. If it is, record event data. */ +int +wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr) +{ + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + + if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) { + DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */ + if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) { + DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__)); + return BCME_ERROR; + } + + *data_ptr = &pvt_data[1]; + + /* memcpy since BRCM event pkt may be unaligned. 
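+	 * A plain struct assignment could issue aligned loads and fault on
+	 * architectures that trap unaligned access, hence the memcpy below.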
*/ + memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t)); + + return BCME_OK; +} + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, size_t pktlen, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + bcm_event_t *pvt_data; + uint8 *event_data; + uint32 type, status, datalen; + uint16 flags; + int evlen; + + /* make sure it is a BRCM event pkt and record event data */ + int ret = wl_host_event_get_data(pktdata, event, data_ptr); + if (ret != BCME_OK) { + return ret; + } + + if (pktlen < sizeof(bcm_event_t)) + return (BCME_ERROR); + + pvt_data = (bcm_event_t *)pktdata; + event_data = *data_ptr; + + type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + + datalen = ntoh32_ua((void *)&event->datalen); + if (datalen > pktlen) + return (BCME_ERROR); + + evlen = datalen + sizeof(bcm_event_t); + if (evlen > pktlen) { + return (BCME_ERROR); + } + + switch (type) { +#ifdef PROP_TXSTATUS + case WLC_E_FIFO_CREDIT_MAP: + dhd_wlfc_enable(dhd_pub); + dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data); + WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): " + "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1], + event_data[2], + event_data[3], event_data[4], event_data[5])); + break; + + case WLC_E_BCMC_CREDIT_SUPPORT: + dhd_wlfc_BCMCCredit_support_event(dhd_pub); + break; +#endif + + case WLC_E_IF: + { + struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data; + + /* Ignore the event if NOIF is set */ + if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) { + DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n")); + return (BCME_UNSUPPORTED); + } +#ifdef PCIE_FULL_DONGLE + dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx, + ifevent->opcode, ifevent->role); +#endif +#ifdef PROP_TXSTATUS + { + uint8* ea = pvt_data->eth.ether_dhost; + WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, " + "[%02x:%02x:%02x:%02x:%02x:%02x]\n", + ifevent->ifidx, + ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"), + ((ifevent->role == 0) ? "STA":"AP "), + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5])); + (void)ea; + + if (ifevent->opcode == WLC_E_IF_CHANGE) + dhd_wlfc_interface_event(dhd_pub, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + ifevent->ifidx, ifevent->role, ea); + else + dhd_wlfc_interface_event(dhd_pub, + ((ifevent->opcode == WLC_E_IF_ADD) ? 
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL), + ifevent->ifidx, ifevent->role, ea); + + /* dhd already has created an interface by default, for 0 */ + if (ifevent->ifidx == 0) + break; + } +#endif /* PROP_TXSTATUS */ + + if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) { + if (ifevent->opcode == WLC_E_IF_ADD) { + if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname, + event->addr.octet)) { + + DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); + return (BCME_ERROR); + } + } else if (ifevent->opcode == WLC_E_IF_DEL) { + dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); + } else if (ifevent->opcode == WLC_E_IF_CHANGE) { +#ifdef WL_CFG80211 + wl_cfg80211_notify_ifchange(ifevent->ifidx, + event->ifname, event->addr.octet, ifevent->bssidx); +#endif /* WL_CFG80211 */ + } + } else { +#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211) + DHD_ERROR(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); +#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */ + } + /* send up the if event: btamp user needs it */ + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + break; + } + +#ifdef WLMEDIA_HTSF + case WLC_E_HTSFSYNC: + htsf_update(dhd_pub->info, event_data); + break; +#endif /* WLMEDIA_HTSF */ + case WLC_E_NDIS_LINK: + break; + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */ + case WLC_E_PFN_NET_LOST: + break; +#if defined(PNO_SUPPORT) + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BEST_BATCHING: + dhd_pno_event_handler(dhd_pub, event, (void *)event_data); + break; +#endif + /* These are what external supplicant/authenticator wants */ + case WLC_E_ASSOC_IND: + case WLC_E_AUTH_IND: + case WLC_E_REASSOC_IND: + dhd_findadd_sta(dhd_pub, + dhd_ifname2idx(dhd_pub->info, event->ifname), + &event->addr.octet); + break; +#if defined(DHD_FW_COREDUMP) + case WLC_E_PSM_WATCHDOG: + DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__)); + if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) { + DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__)); + } + break; +#endif + case WLC_E_LINK: +#ifdef PCIE_FULL_DONGLE + if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname), (uint8)flags) != BCME_OK) + break; + if (!flags) { + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); + } + /* fall through */ +#endif + case WLC_E_DEAUTH: + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("%s: Link event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); +#ifdef PCIE_FULL_DONGLE + if (type != WLC_E_LINK) { + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); + uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex); + uint8 del_sta = TRUE; +#ifdef WL_CFG80211 + if (role == WLC_E_IF_ROLE_STA && !wl_cfg80211_is_roam_offload() && + !wl_cfg80211_is_event_from_connected_bssid(event, *ifidx)) { + del_sta = FALSE; + } +#endif /* WL_CFG80211 */ + + if (del_sta) { + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + if (role == WLC_E_IF_ROLE_STA) { + dhd_flow_rings_delete(dhd_pub, ifindex); + } else { + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + &event->addr.octet[0]); + } + } + } +#endif /* 
PCIE_FULL_DONGLE */ + /* fall through */ + default: + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); + BCM_REFERENCE(flags); + BCM_REFERENCE(status); + + break; + } + +#ifdef SHOW_EVENTS + if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { + wl_show_host_event(dhd_pub, event, + (void *)event_data, raw_event, dhd_pub->enable_log); + } +#endif /* SHOW_EVENTS */ + + return (BCME_OK); +} + +void +dhd_print_buf(void *pbuf, int len, int bytes_per_line) +{ +#ifdef DHD_DEBUG + int i, j = 0; + unsigned char *buf = pbuf; + + if (bytes_per_line == 0) { + bytes_per_line = len; + } + + for (i = 0; i < len; i++) { + printf("%2.2x", *buf++); + j++; + if (j == bytes_per_line) { + printf("\n"); + j = 0; + } else { + printf(":"); + } + } + printf("\n"); +#endif /* DHD_DEBUG */ +} +#ifndef strtoul +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#endif + +#ifdef PKT_FILTER_SUPPORT +/* Convert user's input in hex pattern to byte-size mask */ +static int +wl_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + bcm_strncpy_s(num, sizeof(num), src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +} + +void +dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) +{ + char *argv[8]; + int i = 0; + const char *str; + int buf_len; + int str_len; + char *arg_save = 0, *arg_org = 0; + int rc; + char buf[32] = {0}; + wl_pkt_filter_enable_t enable_parm; + wl_pkt_filter_enable_t * pkt_filterp; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + arg_org = arg_save; + memcpy(arg_save, arg, strlen(arg) + 1); + + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_enable"; + str_len = strlen(str); + bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1); + buf[ sizeof(buf) - 1 ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1); + + /* Parse packet filter id. */ + enable_parm.id = htod32(strtoul(argv[i], NULL, 0)); + + /* Parse enable/disable value. */ + enable_parm.enable = htod32(enable); + + buf_len += sizeof(enable_parm); + memcpy((char *)pkt_filterp, + &enable_parm, + sizeof(enable_parm)); + + /* Enable/disable the specified filter. */ + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + + /* Contorl the master mode */ + rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode", + master_mode, WLC_SET_VAR, TRUE, 0); + rc = rc >= 0 ? 
0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); +} + +void +dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) +{ + const char *str; + wl_pkt_filter_t pkt_filter; + wl_pkt_filter_t *pkt_filterp; + int buf_len; + int str_len; + int rc; + uint32 mask_size; + uint32 pattern_size; + char *argv[8], * buf = 0; + int i = 0; + char *arg_save = 0, *arg_org = 0; +#define BUF_SIZE 2048 + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + arg_org = arg_save; + + if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + memcpy(arg_save, arg, strlen(arg) + 1); + + if (strlen(arg) > BUF_SIZE) { + DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf))); + goto fail; + } + + argv[i] = bcmstrtok(&arg_save, " ", 0); + while (argv[i++]) + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_add"; + str_len = strlen(str); + bcm_strncpy_s(buf, BUF_SIZE, str, str_len); + buf[ str_len ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Polarity not provided\n")); + goto fail; + } + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Filter type not provided\n")); + goto fail; + } + + /* Parse filter type. */ + pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. */ + mask_size = + htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. */ + pattern_size = + htod32(wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size])); + + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); + + /* Keep-alive attributes are set in local variable (keep_alive_pkt), and + ** then memcpy'ed into buffer (keep_alive_pktp) since there is no + ** guarantee that the buffer is properly aligned. + */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 
0 : rc; + + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); + + if (buf) + MFREE(dhd->osh, buf, BUF_SIZE); +} + +void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) +{ + int ret; + + ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete", + id, WLC_SET_VAR, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n", + __FUNCTION__, id, ret)); + } +} +#endif /* PKT_FILTER_SUPPORT */ + +/* ========================== */ +/* ==== ARP OFFLOAD SUPPORT = */ +/* ========================== */ +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol", + arp_mode, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", + __FUNCTION__, arp_mode, retcode)); + else + DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n", + __FUNCTION__, arp_mode)); +} + +void +dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe", + arp_enable, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n", + __FUNCTION__, arp_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); + if (arp_enable) { + uint32 version; + retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version", + &version, WLC_GET_VAR, FALSE, 0); + if (retcode) { + DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n", + __FUNCTION__, retcode)); + dhd->arp_version = 1; + } + else { + DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version)); + dhd->arp_version = version; + } + } +} + +void +dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return; + } + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return; + } + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, + sizeof(ipaddr), iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return; + } + retcode = 
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: sARP H ipaddr entry added \n", + __FUNCTION__)); +} + +int +dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) +{ + int retcode, i; + int iov_len; + uint32 *ptr32 = buf; + bool clr_bottom = FALSE; + + if (!buf) + return -1; + if (dhd == NULL) return -1; + if (dhd->arp_version == 1) + idx = 0; + + iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen); + BCM_REFERENCE(iov_len); + retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx); + + if (retcode) { + DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n", + __FUNCTION__, retcode)); + + return -1; + } + + /* clean up the buf, ascii reminder */ + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (!clr_bottom) { + if (*ptr32 == 0) + clr_bottom = TRUE; + } else { + *ptr32 = 0; + } + ptr32++; + } + + return 0; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* + * Neighbor Discovery Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up/goes down + */ +int +dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable) +{ + int retcode; + + if (dhd == NULL) + return -1; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe", + ndo_enable, WLC_SET_VAR, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n", + __FUNCTION__, ndo_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabed ndo offload to %d\n", + __FUNCTION__, ndo_enable)); + + return retcode; +} + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up + */ +int +dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr, + IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry added \n", + __FUNCTION__)); + + return retcode; +} +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface goes down + */ +int +dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip_clear", NULL, + 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry removed \n", + __FUNCTION__)); + + return retcode; +} + + + +/* + * returns = TRUE if associated, FALSE if not associated + */ +bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval) +{ + char bssid[6], zbuf[6]; + int ret = -1; + + bzero(bssid, 6); + bzero(zbuf, 6); + + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, + ETHER_ADDR_LEN, FALSE, ifidx); + DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret)); + + if (ret == 
BCME_NOTASSOCIATED) { + DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret)); + } + + if (retval) + *retval = ret; + + if (ret < 0) + return FALSE; + + if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) { + DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__)); + return FALSE; + } + return TRUE; +} + +/* Function to estimate possible DTIM_SKIP value */ +int +dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) +{ + int bcn_li_dtim = 1; /* deafult no dtim skip setting */ + int ret = -1; + int dtim_period = 0; + int ap_beacon = 0; +#ifndef ENABLE_MAX_DTIM_IN_SUSPEND + int allowed_skip_dtim_cnt = 0; +#endif /* !ENABLE_MAX_DTIM_IN_SUSPEND */ + /* Check if associated */ + if (dhd_is_associated(dhd, 0, NULL) == FALSE) { + DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* read associated AP beacon interval */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD, + &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) { + DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* read associated ap's dtim setup */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD, + &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* if not assocated just eixt */ + if (dtim_period == 0) { + goto exit; + } + +#ifdef ENABLE_MAX_DTIM_IN_SUSPEND + bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period)); + if (bcn_li_dtim == 0) { + bcn_li_dtim = 1; + } +#else /* ENABLE_MAX_DTIM_IN_SUSPEND */ + /* attemp to use platform defined dtim skip interval */ + bcn_li_dtim = dhd->suspend_bcn_li_dtim; + + /* check if sta listen interval fits into AP dtim */ + if (dtim_period > CUSTOM_LISTEN_INTERVAL) { + /* AP DTIM to big for our Listen Interval : no dtim skiping */ + bcn_li_dtim = NO_DTIM_SKIP; + DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", + __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL)); + goto exit; + } + + if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) { + allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon); + bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? 
allowed_skip_dtim_cnt : NO_DTIM_SKIP; + } + + if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) { + /* Round up dtim_skip to fit into STAs Listen Interval */ + bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period); + DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); + } +#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ + + DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n", + __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL)); + +exit: + return bcn_li_dtim; +} + +/* Check if the mode supports STA MODE */ +bool dhd_support_sta_mode(dhd_pub_t *dhd) +{ + +#ifdef WL_CFG80211 + if (!(dhd->op_mode & DHD_FLAG_STA_MODE)) + return FALSE; + else +#endif /* WL_CFG80211 */ + return TRUE; +} + +#if defined(KEEP_ALIVE) +int dhd_keep_alive_onoff(dhd_pub_t *dhd) +{ + char buf[32] = {0}; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0}; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + + if (!dhd_support_sta_mode(dhd)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(buf, str, sizeof(buf) - 1); + buf[ sizeof(buf) - 1 ] = '\0'; + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); + mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data)); + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. 
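+	 * The resulting iovar buffer is laid out as
+	 *   "mkeep_alive\0" | wl_mkeep_alive_pkt_t (fixed part only)
+	 * i.e. buf_len = strlen("mkeep_alive") + 1 + WL_MKEEP_ALIVE_FIXED_LEN,
+	 * matching how buf_len is accumulated above.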
+ */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + + return res; +} +#endif /* defined(KEEP_ALIVE) */ +/* Android ComboSCAN support */ + +/* + * data parsing from ComboScan tlv list +*/ +int +wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token, + int input_size, int *bytes_left) +{ + char* str; + uint16 short_temp; + uint32 int_temp; + + if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + str = *list_str; + + /* Clean all dest bytes */ + memset(dst, 0, dst_size); + while (*bytes_left > 0) { + + if (str[0] != token) { + DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n", + __FUNCTION__, token, str[0], *bytes_left)); + return -1; + } + + *bytes_left -= 1; + str += 1; + + if (input_size == 1) { + memcpy(dst, str, input_size); + } + else if (input_size == 2) { + memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)), + input_size); + } + else if (input_size == 4) { + memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)), + input_size); + } + + *bytes_left -= input_size; + str += input_size; + *list_str = str; + return 1; + } + return 1; +} + +/* + * channel list parsing from cscan tlv list +*/ +int +wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, + int channel_num, int *bytes_left) +{ + char* str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + str = *list_str; + + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) { + *list_str = str; + DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* All channels */ + channel_list[idx] = 0x0; + } + else { + channel_list[idx] = (uint16)str[0]; + DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx])); + } + *bytes_left -= 1; + str += 1; + + if (idx++ > 255) { + DHD_ERROR(("%s Too many channels \n", __FUNCTION__)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* + * SSIDs list parsing from cscan tlv list + */ +int +wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left) +{ + char* str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + str = *list_str; + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_SSID_IE) { + *list_str = str; + DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + + /* Get proper CSCAN_TLV_TYPE_SSID_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* Broadcast SSID */ + ssid[idx].SSID_len = 0; + memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN); + *bytes_left -= 1; + str += 1; + + DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left)); + } + else if (str[0] <= DOT11_MAX_SSID_LEN) { + /* Get proper SSID size */ + ssid[idx].SSID_len = str[0]; + *bytes_left -= 1; + str += 1; + + /* Get SSID */ + if (ssid[idx].SSID_len > *bytes_left) { + DHD_ERROR(("%s out of memory range len=%d but left=%d\n", + __FUNCTION__, ssid[idx].SSID_len, *bytes_left)); + return -1; + } + + memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len); + + 
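+			/* advance past the SSID bytes just copied */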
*bytes_left -= ssid[idx].SSID_len; + str += ssid[idx].SSID_len; + ssid[idx].hidden = TRUE; + + DHD_TRACE(("%s :size=%d left=%d\n", + (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left)); + } + else { + DHD_ERROR(("### SSID size more that %d\n", str[0])); + return -1; + } + + if (idx++ > max) { + DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* Parse a comma-separated list from list_str into ssid array, starting + * at index idx. Max specifies size of the ssid array. Parses ssids + * and returns updated idx; if idx >= max not all fit, the excess have + * not been copied. Returns -1 on empty string, or on ssid too long. + */ +int +wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max) +{ + char* str, *ptr; + + if ((list_str == NULL) || (*list_str == NULL)) + return -1; + + for (str = *list_str; str != NULL; str = ptr) { + + /* check for next TAG */ + if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) { + *list_str = str + strlen(GET_CHANNEL); + return idx; + } + + if ((ptr = strchr(str, ',')) != NULL) { + *ptr++ = '\0'; + } + + if (strlen(str) > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN)); + return -1; + } + + if (strlen(str) == 0) + ssid[idx].SSID_len = 0; + + if (idx < max) { + bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID)); + strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1); + ssid[idx].SSID_len = strlen(str); + } + idx++; + } + return idx; +} + +/* + * Parse channel list from iwpriv CSCAN + */ +int +wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) +{ + int num; + int val; + char* str; + char* endptr = NULL; + + if ((list_str == NULL)||(*list_str == NULL)) + return -1; + + str = *list_str; + num = 0; + while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) { + val = (int)strtoul(str, &endptr, 0); + if (endptr == str) { + printf("could not parse channel number starting at" + " substring \"%s\" in list:\n%s\n", + str, *list_str); + return -1; + } + str = endptr + strspn(endptr, " ,"); + + if (num == channel_num) { + DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n", + channel_num, *list_str)); + return -1; + } + + channel_list[num++] = (uint16)val; + } + *list_str = str; + return num; +} + + +/* Given filename and download type, returns a buffer pointer and length + * for download to f/w. Type can be FW or NVRAM. + * + */ +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length) + +{ + int ret = BCME_ERROR; + int len = 0; + int file_len; + void *image = NULL; + uint8 *buf = NULL; + + /* Point to cache if available. 
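+	 * When CACHE_FW_IMAGES is defined, the first successful call stores the
+	 * buffer and its length in dhd->cached_fw / dhd->cached_nvram; later
+	 * calls reuse that copy and skip the file read below.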
*/ +#ifdef CACHE_FW_IMAGES + if (component == FW) { + if (dhd->cached_fw_length) { + len = dhd->cached_fw_length; + buf = dhd->cached_fw; + } + } else if (component == NVRAM) { + if (dhd->cached_nvram_length) { + len = dhd->cached_nvram_length; + buf = dhd->cached_nvram; + } + } else { + return ret; + } +#endif + /* No Valid cache found on this call */ + if (!len) { + file_len = *length; + *length = 0; + + if (file_path) { + image = dhd_os_open_image(file_path); + if (image == NULL) { + goto err; + } + } + + buf = MALLOCZ(dhd->osh, file_len); + if (buf == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, file_len)); + goto err; + } + + /* Download image */ + len = dhd_os_get_image_block(buf, file_len, image); + if ((len <= 0 || len > file_len)) { + MFREE(dhd->osh, buf, file_len); + goto err; + } + } + + ret = BCME_OK; + *length = len; + *buffer = buf; + + /* Cache if first call. */ +#ifdef CACHE_FW_IMAGES + if (component == FW) { + if (!dhd->cached_fw_length) { + dhd->cached_fw = buf; + dhd->cached_fw_length = len; + } + } else if (component == NVRAM) { + if (!dhd->cached_nvram_length) { + dhd->cached_nvram = buf; + dhd->cached_nvram_length = len; + } + } +#endif + +err: + if (image) + dhd_os_close_image(image); + + return ret; +} + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length) +{ +#ifdef CACHE_FW_IMAGES + return; +#endif + MFREE(dhd->osh, buffer, length); +} +/* Parse EAPOL 4 way handshake messages */ +void +dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction) +{ + unsigned char type; + int pair, ack, mic, kerr, req, sec, install; + unsigned short us_tmp; + type = dump_data[18]; + if (type == 2 || type == 254) { + us_tmp = (dump_data[19] << 8) | dump_data[20]; + pair = 0 != (us_tmp & 0x08); + ack = 0 != (us_tmp & 0x80); + mic = 0 != (us_tmp & 0x100); + kerr = 0 != (us_tmp & 0x400); + req = 0 != (us_tmp & 0x800); + sec = 0 != (us_tmp & 0x200); + install = 0 != (us_tmp & 0x40); + if (!sec && !mic && ack && !install && pair && !kerr && !req) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M1 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else if (pair && !install && !ack && mic && !sec && !kerr && !req) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M2 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else if (pair && ack && mic && sec && !kerr && !req) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M3 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else if (pair && !install && !ack && mic && sec && !req && !kerr) { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M4 of 4way\n", + ifname, direction ? "TX" : "RX")); + } else { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", + ifname, direction ? "TX" : "RX", + dump_data[14], dump_data[15], dump_data[30])); + } + } else { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", + ifname, direction ? 
"TX" : "RX", + dump_data[14], dump_data[15], dump_data[30])); + } +} diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c new file mode 100644 index 000000000000..43a714041d2a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c @@ -0,0 +1,309 @@ +/* + * Customer code to add GPIO control during WLAN start/stop + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_custom_gpio.c 591129 2015-10-07 05:22:14Z $ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#if defined(WL_WIRELESS_EXT) +#include +#endif + +#define WL_ERROR(x) printf x +#define WL_TRACE(x) + +#if defined(OOB_INTR_ONLY) + +#if defined(BCMLXSDMMC) +extern int sdioh_mmc_irq(int irq); +#endif /* (BCMLXSDMMC) */ + +/* Customer specific Host GPIO defintion */ +static int dhd_oob_gpio_num = -1; + +module_param(dhd_oob_gpio_num, int, 0644); +MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); + +/* This function will return: + * 1) return : Host gpio interrupt number per customer platform + * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge + * + * NOTE : + * Customer should check his platform definitions + * and his Host Interrupt spec + * to figure out the proper setting for his platform. + * Broadcom provides just reference settings as example. 
+ * + */ +int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) +{ + int host_oob_irq = 0; + +#if defined(CUSTOMER_HW2) + host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr); + +#else +#if defined(CUSTOM_OOB_GPIO_NUM) + if (dhd_oob_gpio_num < 0) { + dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM; + } +#endif /* CUSTOMER_OOB_GPIO_NUM */ + + if (dhd_oob_gpio_num < 0) { + WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n", + __FUNCTION__)); + return (dhd_oob_gpio_num); + } + + WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", + __FUNCTION__, dhd_oob_gpio_num)); + +#endif + + return (host_oob_irq); +} +#endif + +/* Customer function to control hw specific wlan gpios */ +int +dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff) +{ + int err = 0; + + return err; +} + +#ifdef GET_CUSTOM_MAC_ENABLE +/* Function to get custom MAC address */ +int +dhd_custom_get_mac_address(void *adapter, unsigned char *buf) +{ + int ret = 0; + + WL_TRACE(("%s Enter\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + + /* Customer access to MAC address stored outside of DHD driver */ +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + ret = wifi_platform_get_mac_addr(adapter, buf); +#endif + +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + + return ret; +} +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#if !defined(WL_WIRELESS_EXT) +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; +#endif /* WL_WIRELESS_EXT */ + +/* Customized Locale table : OPTIONAL feature */ +const struct cntry_locales_custom translate_custom_table[] = { +/* Table should be filled out based on custom platform regulatory requirement */ +#ifdef EXAMPLE_TABLE + {"", "XY", 4}, /* Universal if Country code is unknown or empty */ + {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */ + {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */ + {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */ + {"AT", "EU", 5}, + {"BE", "EU", 5}, + {"BG", "EU", 5}, + {"CY", "EU", 5}, + {"CZ", "EU", 5}, + {"DK", "EU", 5}, + {"EE", "EU", 5}, + {"FI", "EU", 5}, + {"FR", "EU", 5}, + {"DE", "EU", 5}, + {"GR", "EU", 5}, + {"HU", "EU", 5}, + {"IE", "EU", 5}, + {"IT", "EU", 5}, + {"LV", "EU", 5}, + {"LI", "EU", 5}, + {"LT", "EU", 5}, + {"LU", "EU", 5}, + {"MT", "EU", 5}, + {"NL", "EU", 5}, + {"PL", "EU", 5}, + {"PT", "EU", 5}, + {"RO", "EU", 5}, + {"SK", "EU", 5}, + {"SI", "EU", 5}, + {"ES", "EU", 5}, + {"SE", "EU", 5}, + {"GB", "EU", 5}, + {"KR", "XY", 3}, + {"AU", "XY", 3}, + {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */ + {"TW", "XY", 3}, + {"AR", "XY", 3}, + {"MX", "XY", 3}, + {"IL", "IL", 0}, + {"CH", "CH", 0}, + {"TR", "TR", 0}, + {"NO", "NO", 0}, +#endif /* EXMAPLE_TABLE */ +#if defined(CUSTOMER_HW2) +#if defined(BCM4335_CHIP) + {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ +#endif + {"AE", "AE", 1}, + {"AR", "AR", 1}, + {"AT", "AT", 1}, + {"AU", "AU", 2}, + {"BE", "BE", 1}, + {"BG", "BG", 1}, + {"BN", "BN", 1}, + {"CA", "CA", 2}, + {"CH", "CH", 1}, + {"CY", "CY", 1}, + {"CZ", "CZ", 1}, + {"DE", "DE", 3}, + {"DK", "DK", 1}, + {"EE", "EE", 1}, + {"ES", "ES", 1}, + {"FI", "FI", 1}, + 
{"FR", "FR", 1}, + {"GB", "GB", 1}, + {"GR", "GR", 1}, + {"HR", "HR", 1}, + {"HU", "HU", 1}, + {"IE", "IE", 1}, + {"IS", "IS", 1}, + {"IT", "IT", 1}, + {"ID", "ID", 1}, + {"JP", "JP", 8}, + {"KR", "KR", 24}, + {"KW", "KW", 1}, + {"LI", "LI", 1}, + {"LT", "LT", 1}, + {"LU", "LU", 1}, + {"LV", "LV", 1}, + {"MA", "MA", 1}, + {"MT", "MT", 1}, + {"MX", "MX", 1}, + {"NL", "NL", 1}, + {"NO", "NO", 1}, + {"PL", "PL", 1}, + {"PT", "PT", 1}, + {"PY", "PY", 1}, + {"RO", "RO", 1}, + {"SE", "SE", 1}, + {"SI", "SI", 1}, + {"SK", "SK", 1}, + {"TR", "TR", 7}, + {"TW", "TW", 1}, + {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */ + {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */ + {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */ + {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */ + {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */ + {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */ + {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */ +#ifdef BCM4330_CHIP + {"RU", "RU", 1}, + {"US", "US", 5} +#endif +#endif +}; + + +/* Customized Locale convertor +* input : ISO 3166-1 country abbreviation +* output: customized cspec +*/ +#ifdef CUSTOM_COUNTRY_CODE +void get_customized_country_code(void *adapter, char *country_iso_code, + wl_country_t *cspec, u32 flags) +#else +void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +#endif /* CUSTOM_COUNTRY_CODE */ +{ +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + + struct cntry_locales_custom *cloc_ptr; + + if (!cspec) + return; +#ifdef CUSTOM_COUNTRY_CODE + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, + flags); +#else + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); +#endif /* CUSTOM_COUNTRY_CODE */ + if (cloc_ptr) { + strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = cloc_ptr->custom_locale_rev; + } + return; +#else + int size, i; + + size = ARRAYSIZE(translate_custom_table); + + if (cspec == 0) + return; + + if (size == 0) + return; + + for (i = 0; i < size; i++) { + if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { + memcpy(cspec->ccode, + translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[i].custom_locale_rev; + return; + } + } +#ifdef EXAMPLE_TABLE + /* if no country code matched return first universal code from translate_custom_table */ + memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[0].custom_locale_rev; +#endif /* EXMAPLE_TABLE */ + return; +#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) */ +} diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h new file mode 100644 index 000000000000..44899819a918 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h @@ -0,0 +1,193 @@ +/* + * Debug/trace/assert driver definitions for Dongle Host Driver. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_dbg.h 598059 2015-11-07 07:31:52Z $ + */ + +#ifndef _dhd_dbg_ +#define _dhd_dbg_ + +#if defined(DHD_DEBUG) +#ifdef DHD_LOG_DUMP +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) +#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) +#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) +#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0) +#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) +#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) +#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#ifdef DHD_LOG_DUMP +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) +#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) +#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) +#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0) +#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0) +#ifdef DHD_LOG_DUMP +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } 
\ +} while (0) +#else +#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) +#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) +#define DHD_IOV_INFO(args) do {if (dhd_msg_level & DHD_IOV_INFO_VAL) printf args;} while (0) + +#ifdef DHD_LOG_DUMP +#define DHD_ERROR_EX(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_LOG_DUMP */ + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#else +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#endif /* CUSTOMER_HW4_DEBUG */ + +#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) +#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) +#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL) +#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL) +#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL) +#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL) +#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL) +#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL) +#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) +#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) +#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) +#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) +#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) +#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL) +#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL) +#define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL) +#define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL) +#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL) +#define DHD_IOV_INFO_ON() (dhd_msg_level & DHD_IOV_INFO_VAL) + +#else /* defined(BCMDBG) || defined(DHD_DEBUG) */ + +#define DHD_ERROR(args) do {printf args;} while (0) +#define DHD_TRACE(args) +#define DHD_INFO(args) +#define DHD_DATA(args) +#define DHD_CTL(args) +#define DHD_TIMER(args) +#define DHD_HDRS(args) +#define DHD_BYTES(args) +#define DHD_INTR(args) +#define DHD_GLOM(args) +#define DHD_EVENT(args) +#define DHD_BTA(args) +#define DHD_ISCAN(args) +#define DHD_ARPOE(args) +#define DHD_REORDER(args) +#define DHD_PNO(args) +#define DHD_MSGTRACE_LOG(args) +#define DHD_FWLOG(args) +#define DHD_IOV_INFO(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#else +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#endif /* CUSTOMER_HW4_DEBUG */ + +#define DHD_ERROR_ON() 0 +#define DHD_TRACE_ON() 0 +#define DHD_INFO_ON() 0 +#define DHD_DATA_ON() 0 +#define DHD_CTL_ON() 0 +#define DHD_TIMER_ON() 0 +#define DHD_HDRS_ON() 0 +#define DHD_BYTES_ON() 0 +#define DHD_INTR_ON() 0 +#define DHD_GLOM_ON() 0 +#define DHD_EVENT_ON() 0 +#define DHD_BTA_ON() 0 +#define DHD_ISCAN_ON() 0 +#define DHD_ARPOE_ON() 0 +#define DHD_REORDER_ON() 0 +#define DHD_NOCHECKDIED_ON() 0 +#define DHD_PNO_ON() 0 +#define DHD_FWLOG_ON() 0 +#define DHD_IOV_INFO_ON() 0 + +#endif + +#define DHD_LOG(args) + +#define DHD_BLOG(cp, size) + +#define DHD_NONE(args) +extern int dhd_msg_level; + +/* Defines msg bits */ +#include + +#endif /* _dhd_dbg_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.c 
b/drivers/net/wireless/bcmdhd/dhd_flowring.c new file mode 100644 index 000000000000..759dd0ed8bcf --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_flowring.c @@ -0,0 +1,964 @@ +/* + * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level + * + * Flow rings are transmit traffic (=propagating towards antenna) related entities + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $ + */ + + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue); + +static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + +static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + +static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da, uint16 *flowid); +int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); + +#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) +#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) + +#ifdef DHD_LOSSLESS_ROAMING +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; +#else +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; +#endif +const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + +/** Queue overflow throttle. 
Return value: TRUE if throttle needs to be applied */ +static INLINE int +dhd_flow_queue_throttle(flow_queue_t *queue) +{ + return DHD_FLOW_QUEUE_FULL(queue); +} + +int BCMFASTPATH +dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt) +{ + return BCME_NORESOURCE; +} + +/** Returns flow ring given a flowid */ +flow_ring_node_t * +dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + ASSERT(dhdp != (dhd_pub_t*)NULL); + ASSERT(flowid < dhdp->num_flow_rings); + + flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]); + + ASSERT(flow_ring_node->flowid == flowid); + return flow_ring_node; +} + +/** Returns 'backup' queue given a flowid */ +flow_queue_t * +dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + return &flow_ring_node->queue; +} + +/* Flow ring's queue management functions */ + +/** Initialize a flow ring's queue, called on driver initialization. */ +void +dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) +{ + ASSERT((queue != NULL) && (max > 0)); + + dll_init(&queue->list); + queue->head = queue->tail = NULL; + queue->len = 0; + + /* Set queue's threshold and queue's parent cummulative length counter */ + ASSERT(max > 1); + DHD_FLOW_QUEUE_SET_MAX(queue, max); + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max); + DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr); + + queue->failures = 0U; + queue->cb = &dhd_flow_queue_overflow; +} + +/** Register an enqueue overflow callback handler */ +void +dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb) +{ + ASSERT(queue != NULL); + queue->cb = cb; +} + +/** + * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on + * to the flow ring itself. 
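+ *
+ * If the queue is already full (len >= max) the registered overflow callback
+ * is invoked instead and queue->failures is incremented; the default
+ * callback, dhd_flow_queue_overflow(), simply returns BCME_NORESOURCE.
+ * One possible producer path, sketched here only for illustration (it assumes
+ * a packet that has already been tagged with a valid flowid):
+ *
+ *   flow_queue_t *q = dhd_flow_queue(dhdp, flowid);
+ *   if (dhd_flow_queue_enqueue(dhdp, q, pkt) != BCME_OK)
+ *           PKTFREE(dhdp->osh, pkt, TRUE);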
+ */ +int BCMFASTPATH +dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) +{ + int ret = BCME_OK; + + ASSERT(queue != NULL); + + if (dhd_flow_queue_throttle(queue)) { + queue->failures++; + ret = (*queue->cb)(queue, pkt); + goto done; + } + + if (queue->head) { + FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt); + } else { + queue->head = pkt; + } + + FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); + + queue->tail = pkt; /* at tail */ + + queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + +done: + return ret; +} + +/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */ +void * BCMFASTPATH +dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) +{ + void * pkt; + + ASSERT(queue != NULL); + + pkt = queue->head; /* from head */ + + if (pkt == NULL) { + ASSERT((queue->len == 0) && (queue->tail == NULL)); + goto done; + } + + queue->head = FLOW_QUEUE_PKT_NEXT(pkt); + if (queue->head == NULL) + queue->tail = NULL; + + queue->len--; + /* decrement parent's cummulative length */ + DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + + FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */ + +done: + return pkt; +} + +/** Reinsert a dequeued 802.3 packet back at the head */ +void BCMFASTPATH +dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) +{ + if (queue->head == NULL) { + queue->tail = pkt; + } + + FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head); + queue->head = pkt; + queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); +} + +/** Fetch the backup queue for a flowring, and assign flow control thresholds */ +void +dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr) +{ + flow_queue_t * queue; + + ASSERT(dhdp != (dhd_pub_t*)NULL); + ASSERT(queue_budget > 1); + ASSERT(cumm_threshold > 1); + ASSERT(cumm_ctr != (void*)NULL); + + queue = dhd_flow_queue(dhdp, flowid); + + DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */ + + /* Set the queue's parent threshold and cummulative counter */ + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold); + DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr); +} + +/** Initializes data structures of multiple flow rings */ +int +dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) +{ + uint32 idx; + uint32 flow_ring_table_sz; + uint32 if_flow_lkup_sz = 0; + void * flowid_allocator; + flow_ring_table_t *flow_ring_table = NULL; + if_flow_lkup_t *if_flow_lkup = NULL; + void *lock = NULL; + void *list_lock = NULL; + unsigned long flags; + + DHD_INFO(("%s\n", __FUNCTION__)); + + /* Construct a 16bit flowid allocator */ + flowid_allocator = id16_map_init(dhdp->osh, + num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED); + if (flowid_allocator == NULL) { + DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Allocate a flow ring table, comprising of requested number of rings */ + flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t)); + flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz); + if (flow_ring_table == NULL) { + DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__)); + goto fail; + } + + /* Initialize flow ring table state */ + DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr); + bzero((uchar *)flow_ring_table, flow_ring_table_sz); + for (idx = 0; idx < num_flow_rings; idx++) { + flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED; + 
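+		/* every ring starts out CLOSED; its flowid is simply its index in the table */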
flow_ring_table[idx].flowid = (uint16)idx; + flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh); + if (flow_ring_table[idx].lock == NULL) { + DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__)); + goto fail; + } + + dll_init(&flow_ring_table[idx].list); + + /* Initialize the per flow ring backup queue */ + dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue, + FLOW_RING_QUEUE_THRESHOLD); + } + + /* Allocate per interface hash table (for fast lookup from interface to flow ring) */ + if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; + if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp, + DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz); + if (if_flow_lkup == NULL) { + DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__)); + goto fail; + } + + /* Initialize per interface hash table */ + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + int hash_ix; + if_flow_lkup[idx].status = 0; + if_flow_lkup[idx].role = 0; + for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++) + if_flow_lkup[idx].fl_hash[hash_ix] = NULL; + } + + lock = dhd_os_spin_lock_init(dhdp->osh); + if (lock == NULL) + goto fail; + + list_lock = dhd_os_spin_lock_init(dhdp->osh); + if (list_lock == NULL) + goto lock_fail; + + dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP; + bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); +#ifdef DHD_LOSSLESS_ROAMING + dhdp->dequeue_prec_map = ALLPRIO; +#endif + /* Now populate into dhd pub */ + DHD_FLOWID_LOCK(lock, flags); + dhdp->num_flow_rings = num_flow_rings; + dhdp->flowid_allocator = (void *)flowid_allocator; + dhdp->flow_ring_table = (void *)flow_ring_table; + dhdp->if_flow_lkup = (void *)if_flow_lkup; + dhdp->flowid_lock = lock; + dhdp->flow_rings_inited = TRUE; + dhdp->flowring_list_lock = list_lock; + DHD_FLOWID_UNLOCK(lock, flags); + + DHD_INFO(("%s done\n", __FUNCTION__)); + return BCME_OK; + +lock_fail: + /* deinit the spinlock */ + dhd_os_spin_lock_deinit(dhdp->osh, lock); + +fail: + /* Destruct the per interface flow lkup table */ + if (if_flow_lkup != NULL) { + DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz); + } + if (flow_ring_table != NULL) { + for (idx = 0; idx < num_flow_rings; idx++) { + if (flow_ring_table[idx].lock != NULL) + dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + } + MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); + } + id16_map_fini(dhdp->osh, flowid_allocator); + + return BCME_NOMEM; +} + +/** Deinit Flow Ring specific data structures */ +void dhd_flow_rings_deinit(dhd_pub_t *dhdp) +{ + uint16 idx; + uint32 flow_ring_table_sz; + uint32 if_flow_lkup_sz; + flow_ring_table_t *flow_ring_table; + unsigned long flags; + void *lock; + + DHD_INFO(("dhd_flow_rings_deinit\n")); + + if (!(dhdp->flow_rings_inited)) { + DHD_ERROR(("dhd_flow_rings not initialized!\n")); + return; + } + + if (dhdp->flow_ring_table != NULL) { + + ASSERT(dhdp->num_flow_rings > 0); + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + dhdp->flow_ring_table = NULL; + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + for (idx = 0; idx < dhdp->num_flow_rings; idx++) { + if (flow_ring_table[idx].active) { + dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue)); + + /* Deinit flow ring queue locks before destroying flow ring table */ + dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + flow_ring_table[idx].lock = NULL; + + } + + /* Destruct the flow ring table */ + 
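+		/* flow_ring_table_t is a typedef of flow_ring_node_t, so this matches the size allocated in dhd_flow_rings_init() */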
flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t); + MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); + } + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + + /* Destruct the per interface flow lkup table */ + if (dhdp->if_flow_lkup != NULL) { + if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; + bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz); + DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz); + dhdp->if_flow_lkup = NULL; + } + + /* Destruct the flowid allocator */ + if (dhdp->flowid_allocator != NULL) + dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator); + + dhdp->num_flow_rings = 0U; + bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + lock = dhdp->flowid_lock; + dhdp->flowid_lock = NULL; + + DHD_FLOWID_UNLOCK(lock, flags); + dhd_os_spin_lock_deinit(dhdp->osh, lock); + + dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock); + dhdp->flowring_list_lock = NULL; + + ASSERT(dhdp->if_flow_lkup == NULL); + ASSERT(dhdp->flowid_allocator == NULL); + ASSERT(dhdp->flow_ring_table == NULL); + dhdp->flow_rings_inited = FALSE; +} + +/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */ +uint8 +dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex) +{ + if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + ASSERT(if_flow_lkup); + return if_flow_lkup[ifindex].role; +} + +#ifdef WLTDLS +bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da) +{ + tdls_peer_node_t *cur = dhdp->peer_tbl.node; + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + return TRUE; + } + cur = cur->next; + } + return FALSE; +} +#endif /* WLTDLS */ + +/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */ +static INLINE uint16 +dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) +{ + int hash; + bool ismcast = FALSE; + flow_hash_info_t *cur; + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + ASSERT(if_flow_lkup); + + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { +#ifdef WLTDLS + if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) && + is_tdls_destination(dhdp, da)) { + hash = DHD_FLOWRING_HASHINDEX(da, prio); + cur = if_flow_lkup[ifindex].fl_hash[hash]; + while (cur != NULL) { + if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) { + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return cur->flowid; + } + cur = cur->next; + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return FLOWID_INVALID; + } +#endif /* WLTDLS */ + cur = if_flow_lkup[ifindex].fl_hash[prio]; + if (cur) { + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return cur->flowid; + } + } else { + + if (ETHER_ISMULTI(da)) { + ismcast = TRUE; + hash = 0; + } else { + hash = DHD_FLOWRING_HASHINDEX(da, prio); + } + + cur = if_flow_lkup[ifindex].fl_hash[hash]; + + while (cur) { + if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) || + (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) && + (cur->flow_info.tid == prio))) { + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return cur->flowid; + } + cur = cur->next; + } + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__)); + return FLOWID_INVALID; +} /* dhd_flowid_find */ + +/** Create unique Flow ID, called when a flow ring is created. 
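+ * A 16-bit id is taken from the id16 allocator and a flow_hash_info_t node is
+ * linked into the per-interface lookup table: STA (non-TDLS) traffic is keyed
+ * by priority alone, everything else by DHD_FLOWRING_HASHINDEX(da, prio),
+ * with hash slot 0 reserved for broadcast/multicast destinations.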
*/ +static INLINE uint16 +dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) +{ + flow_hash_info_t *fl_hash_node, *cur; + if_flow_lkup_t *if_flow_lkup; + int hash; + uint16 flowid; + unsigned long flags; + + fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t)); + memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da)); + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + ASSERT(dhdp->flowid_allocator != NULL); + flowid = id16_map_alloc(dhdp->flowid_allocator); + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + if (flowid == FLOWID_INVALID) { + MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t)); + DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__)); + return FLOWID_INVALID; + } + + fl_hash_node->flowid = flowid; + fl_hash_node->flow_info.tid = prio; + fl_hash_node->flow_info.ifindex = ifindex; + fl_hash_node->next = NULL; + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { + /* For STA non TDLS dest we allocate entry based on prio only */ +#ifdef WLTDLS + if (dhdp->peer_tbl.tdls_peer_count && + (is_tdls_destination(dhdp, da))) { + hash = DHD_FLOWRING_HASHINDEX(da, prio); + cur = if_flow_lkup[ifindex].fl_hash[hash]; + if (cur) { + while (cur->next) { + cur = cur->next; + } + cur->next = fl_hash_node; + } else { + if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node; + } + } else +#endif /* WLTDLS */ + if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node; + } else { + + /* For bcast/mcast assign first slot in in interface */ + hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio); + cur = if_flow_lkup[ifindex].fl_hash[hash]; + if (cur) { + while (cur->next) { + cur = cur->next; + } + cur->next = fl_hash_node; + } else + if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node; + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid)); + + return fl_hash_node->flowid; +} /* dhd_flowid_alloc */ + +/** Get flow ring ID, if not present try to create one */ +static INLINE int +dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da, uint16 *flowid) +{ + uint16 id; + flow_ring_node_t *flow_ring_node; + flow_ring_table_t *flow_ring_table; + unsigned long flags; + int ret; + + DHD_INFO(("%s\n", __FUNCTION__)); + + if (!dhdp->flow_ring_table) { + return BCME_ERROR; + } + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + + id = dhd_flowid_find(dhdp, ifindex, prio, sa, da); + + if (id == FLOWID_INVALID) { + + if_flow_lkup_t *if_flow_lkup; + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + if (!if_flow_lkup[ifindex].status) + return BCME_ERROR; + + + id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da); + if (id == FLOWID_INVALID) { + DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n", + __FUNCTION__, ifindex, if_flow_lkup[ifindex].status)); + return BCME_ERROR; + } + + /* register this flowid in dhd_pub */ + dhd_add_flowid(dhdp, ifindex, prio, da, id); + + ASSERT(id < dhdp->num_flow_rings); + + flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Init Flow info */ + memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa)); + memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da)); + flow_ring_node->flow_info.tid = prio; + flow_ring_node->flow_info.ifindex = ifindex; + 
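+		/* mark the node allocated and PENDING; it is expected to move to OPEN once the dongle responds to the create request issued below */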
flow_ring_node->active = TRUE; + flow_ring_node->status = FLOW_RING_STATUS_PENDING; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Create and inform device about the new flow */ + if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node) + != BCME_OK) { + DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id)); + return BCME_ERROR; + } + + *flowid = id; + return BCME_OK; + } else { + /* if the Flow id was found in the hash */ + ASSERT(id < dhdp->num_flow_rings); + + flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* + * If the flow_ring_node is in Open State or Status pending state then + * we can return the Flow id to the caller.If the flow_ring_node is in + * FLOW_RING_STATUS_PENDING this means the creation is in progress and + * hence the packets should be queued. + * + * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or + * FLOW_RING_STATUS_CLOSED, then we should return Error. + * Note that if the flowing is being deleted we would mark it as + * FLOW_RING_STATUS_DELETE_PENDING. Now before Dongle could respond and + * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets. + * We should drop the packets in that case. + * The decission to return OK should NOT be based on 'active' variable, beause + * active is made TRUE when a flow_ring_node gets allocated and is made + * FALSE when the flow ring gets removed and does not reflect the True state + * of the Flow ring. + */ + if (flow_ring_node->status == FLOW_RING_STATUS_OPEN || + flow_ring_node->status == FLOW_RING_STATUS_PENDING) { + *flowid = id; + ret = BCME_OK; + } else { + *flowid = FLOWID_INVALID; + ret = BCME_ERROR; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + return ret; + + } /* Flow Id found in the hash */ +} /* dhd_flowid_lookup */ + +/** + * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to + * select the flowring to send the packet to the dongle. + */ +int BCMFASTPATH +dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) +{ + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + struct ether_header *eh = (struct ether_header *)pktdata; + uint16 flowid; + + ASSERT(ifindex < DHD_MAX_IFS); + + if (ifindex >= DHD_MAX_IFS) { + return BCME_BADARG; + } + + if (!dhdp->flowid_allocator) { + DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost, + &flowid) != BCME_OK) { + return BCME_ERROR; + } + + DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid)); + + /* Tag the packet with flowid */ + DHD_PKT_SET_FLOWID(pktbuf, flowid); + return BCME_OK; +} + +void +dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) +{ + int hashix; + bool found = FALSE; + flow_hash_info_t *cur, *prev; + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) { + + cur = if_flow_lkup[ifindex].fl_hash[hashix]; + + if (cur) { + if (cur->flowid == flowid) { + found = TRUE; + } + + prev = NULL; + while (!found && cur) { + if (cur->flowid == flowid) { + found = TRUE; + break; + } + prev = cur; + cur = cur->next; + } + if (found) { + if (!prev) { + if_flow_lkup[ifindex].fl_hash[hashix] = cur->next; + } else { + prev->next = cur->next; + } + + /* deregister flowid from dhd_pub. 
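+			 * and hand the id back to the 16-bit allocator before the hash node is freed.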
*/ + dhd_del_flowid(dhdp, ifindex, flowid); + + id16_map_free(dhdp->flowid_allocator, flowid); + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t)); + + return; + } + } + } + + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n", + __FUNCTION__, flowid)); +} /* dhd_flowid_free */ + +/** + * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle + * indicates that a wireless link has gone down. + */ +void +dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + + DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + for (id = 0; id < dhdp->num_flow_rings; id++) { + if (flow_ring_table[id].active && + (flow_ring_table[id].flow_info.ifindex == ifindex)) { + dhd_bus_flow_ring_delete_request(dhdp->bus, + (void *) &flow_ring_table[id]); + } + } +} + +/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */ +void +dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + + DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + for (id = 0; id < dhdp->num_flow_rings; id++) { + if (flow_ring_table[id].active && + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && + (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) { + DHD_INFO(("%s: deleting flowid %d\n", + __FUNCTION__, flow_ring_table[id].flowid)); + dhd_bus_flow_ring_delete_request(dhdp->bus, + (void *) &flow_ring_table[id]); + } + } +} + +/** Handles interface ADD, CHANGE, DEL indications from the dongle */ +void +dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, + uint8 op, uint8 role) +{ + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + DHD_INFO(("%s: ifindex %u op %u role is %u \n", + __FUNCTION__, ifindex, op, role)); + if (!dhdp->flowid_allocator) { + DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); + return; + } + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) { + + if_flow_lkup[ifindex].role = role; + + if (role != WLC_E_IF_ROLE_STA) { + if_flow_lkup[ifindex].status = TRUE; + DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n", + __FUNCTION__, ifindex, role)); + /* Create Mcast Flow */ + } + } else if (op == WLC_E_IF_DEL) { + if_flow_lkup[ifindex].status = FALSE; + DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n", + __FUNCTION__, ifindex, role)); + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); +} + +/** Handles a STA 'link' indication from the dongle */ +int +dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) +{ + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return BCME_BADARG; + + DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, 
status)); + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { + if (status) + if_flow_lkup[ifindex].status = TRUE; + else + if_flow_lkup[ifindex].status = FALSE; + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + return BCME_OK; +} + +/** Update flow priority mapping, called on IOVAR */ +int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) +{ + uint16 flowid; + flow_ring_node_t *flow_ring_node; + + if (map > DHD_FLOW_PRIO_LLR_MAP) + return BCME_BADOPTION; + + /* Check if we need to change prio map */ + if (map == dhdp->flow_prio_map_type) + return BCME_OK; + + /* If any ring is active we cannot change priority mapping for flow rings */ + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (flow_ring_node->active) + return BCME_EPERM; + } + + /* Inform firmware about new mapping type */ + if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE)) + return BCME_ERROR; + + /* update internal structures */ + dhdp->flow_prio_map_type = map; + if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP) + bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + else + bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + return BCME_OK; +} + +/** Inform firmware on updated flow priority mapping, called on IOVAR */ +int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set) +{ + uint8 iovbuf[24]; + if (!set) { + bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) { + DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__)); + return BCME_ERROR; + } + *map = iovbuf[0]; + return BCME_OK; + } + bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { + DHD_ERROR(("%s: failed to set fl_prio_map \n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.h b/drivers/net/wireless/bcmdhd/dhd_flowring.h new file mode 100644 index 000000000000..7c36de5459bf --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_flowring.h @@ -0,0 +1,235 @@ +/* + * @file Header file describing the flow rings DHD interfaces. + * + * Flow rings are transmit traffic (=propagating towards antenna) related entities. + * + * Provides type definitions and function prototypes used to create, delete and manage flow rings at + * high level. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_flowring.h 591285 2015-10-07 11:56:29Z $ + */ + + +/**************** + * Common types * + */ + +#ifndef _dhd_flowrings_h_ +#define _dhd_flowrings_h_ + +/* Max pkts held in a flow ring's backup queue */ +#define FLOW_RING_QUEUE_THRESHOLD (2048) + +/* Number of H2D common rings */ +#define FLOW_RING_COMMON BCMPCIE_H2D_COMMON_MSGRINGS + +#define FLOWID_INVALID (ID16_INVALID) +#define FLOWID_RESERVED (FLOW_RING_COMMON) + +#define FLOW_RING_STATUS_OPEN 0 +#define FLOW_RING_STATUS_PENDING 1 +#define FLOW_RING_STATUS_CLOSED 2 +#define FLOW_RING_STATUS_DELETE_PENDING 3 +#define FLOW_RING_STATUS_FLUSH_PENDING 4 +#define FLOW_RING_STATUS_STA_FREEING 5 + +#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 + +#define DHD_FLOW_PRIO_AC_MAP 0 +#define DHD_FLOW_PRIO_TID_MAP 1 +#define DHD_FLOW_PRIO_LLR_MAP 2 + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_pkttag_fr { + uint16 flowid; + uint16 ifid; + int dataoff; + dmaaddr_t physaddr; + uint32 pa_len; + +} dhd_pkttag_fr_t; + +#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow)) +#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) +#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset)) +#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) +#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) + +#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid) +#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) +#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff) +#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) +#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) + +/* Hashing a MacAddress for lkup into a per interface flow hash table */ +#define DHD_FLOWRING_HASH_SIZE 256 +#define DHD_FLOWRING_HASHINDEX(ea, prio) \ + ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \ + % DHD_FLOWRING_HASH_SIZE) + +#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role) +#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) +#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA) +#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) +#define DHD_FLOW_RING(dhdp, flowid) \ + (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) + +struct flow_queue; + +/* Flow Ring Queue Enqueue overflow callback */ +typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt); + +/** + * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred + * between queue and ring. A packet from the host stack is first added to the queue, and in a later + * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the + * flow ring are device owned. 
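+ * Besides its own budget (max), every queue also points at a parent
+ * cumulative length counter (clen_ptr) with an associated threshold, so rings
+ * that share a resource (e.g. dhdp->cumm_ctr) can be flow-controlled as a
+ * group; see dhd_flow_ring_config_thresholds().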
+ */ +typedef struct flow_queue { + dll_t list; /* manage a flowring queue in a double linked list */ + void * head; /* first packet in the queue */ + void * tail; /* last packet in the queue */ + uint16 len; /* number of packets in the queue */ + uint16 max; /* maximum or min budget (used in cumm) */ + uint32 threshold; /* parent's cummulative length threshold */ + void * clen_ptr; /* parent's cummulative length counter */ + uint32 failures; /* enqueue failures due to queue overflow */ + flow_queue_cb_t cb; /* callback invoked on threshold crossing */ +} flow_queue_t; + +#define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len) +#define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max) +#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold) +#define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0) +#define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures) + +#define DHD_FLOW_QUEUE_AVAIL(queue) ((int)((queue)->max - (queue)->len)) +#define DHD_FLOW_QUEUE_FULL(queue) ((queue)->len >= (queue)->max) + +#define DHD_FLOW_QUEUE_OVFL(queue, budget) \ + (((queue)->len) > budget) + +#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \ + ((queue)->max) = ((budget) - 1) + +/* Queue's cummulative threshold. */ +#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \ + ((queue)->threshold) = ((cumm_threshold) - 1) + +/* Queue's cummulative length object accessor. */ +#define DHD_FLOW_QUEUE_CLEN_PTR(queue) ((queue)->clen_ptr) + +/* Set a queue's cumm_len point to a parent's cumm_ctr_t cummulative length */ +#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \ + ((queue)->clen_ptr) = (void *)(parent_clen_ptr) + +/* see wlfc_proto.h for tx status details */ +#define DHD_FLOWRING_MAXSTATUS_MSGS 5 +#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) +/** each flow ring is dedicated to a tid/sa/da combination */ +typedef struct flow_info { + uint8 tid; + uint8 ifindex; + char sa[ETHER_ADDR_LEN]; + char da[ETHER_ADDR_LEN]; +} flow_info_t; + +/** a flow ring is used for outbound (towards antenna) 802.3 packets */ +typedef struct flow_ring_node { + dll_t list; /* manage a constructed flowring in a dll, must be at first place */ + flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */ + bool active; + uint8 status; + /* + * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For + * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation. 
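+	 * status tracks the ring's lifecycle (PENDING while a create request is
+	 * outstanding, OPEN once usable, DELETE_PENDING/CLOSED on teardown),
+	 * whereas 'active' only records that the node has been allocated.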
+ */ + uint16 flowid; + flow_info_t flow_info; + void *prot_info; + void *lock; /* lock for flowring access protection */ +} flow_ring_node_t; + +typedef flow_ring_node_t flow_ring_table_t; + +typedef struct flow_hash_info { + uint16 flowid; + flow_info_t flow_info; + struct flow_hash_info *next; +} flow_hash_info_t; + +typedef struct if_flow_lkup { + bool status; + uint8 role; /* Interface role: STA/AP */ + flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */ +} if_flow_lkup_t; + +static INLINE flow_ring_node_t * +dhd_constlist_to_flowring(dll_t *item) +{ + return ((flow_ring_node_t *)item); +} + +/* Exported API */ + +/* Flow ring's queue management functions */ +extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid); +extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid); + +extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max); +extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb); +extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); +extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue); +extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); + +extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr); +extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings); + +extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp); + +extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, + void *pktbuf); + +extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid); + +extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex); + +extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, + char *addr); + +/* Handle Interface ADD, DEL operations */ +extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, + uint8 op, uint8 role); + +/* Handle a STA interface link status update */ +extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, + uint8 status); +extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set); +extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map); + +extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex); +#endif /* _dhd_flowrings_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.c b/drivers/net/wireless/bcmdhd/dhd_ip.c new file mode 100644 index 000000000000..96c5a23d3823 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_ip.c @@ -0,0 +1,1287 @@ +/* + * IP Packet Parser Module. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_ip.c 569132 2015-07-07 09:09:33Z $ + */ +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#ifdef DHDTCPACK_SUPPRESS +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* special values */ +/* 802.3 llc/snap header */ +static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; + +pkt_frag_t pkt_frag_info(osl_t *osh, void *p) +{ + uint8 *frame; + int length; + uint8 *pt; /* Pointer to type field */ + uint16 ethertype; + struct ipv4_hdr *iph; /* IP frame pointer */ + int ipl; /* IP frame length */ + uint16 iph_frag; + + ASSERT(osh && p); + + frame = PKTDATA(osh, p); + length = PKTLEN(osh, p); + + /* Process Ethernet II or SNAP-encapsulated 802.3 frames */ + if (length < ETHER_HDR_LEN) { + DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) { + /* Frame is Ethernet II */ + pt = frame + ETHER_TYPE_OFFSET; + } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN && + !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) { + pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN; + } else { + DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + + /* Skip VLAN tag, if any */ + if (ethertype == ETHER_TYPE_8021Q) { + pt += VLAN_TAG_LEN; + + if (pt + ETHER_TYPE_LEN > frame + length) { + DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + } + + if (ethertype != ETHER_TYPE_IP) { + DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n", + __FUNCTION__, ethertype, length)); + return DHD_PKT_FRAG_NONE; + } + + iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN); + ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame)); + + /* We support IPv4 only */ + if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) { + DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl)); + return DHD_PKT_FRAG_NONE; + } + + iph_frag = ntoh16(iph->frag); + + if (iph_frag & IPV4_FRAG_DONT) { + return DHD_PKT_FRAG_NONE; + } else if ((iph_frag & IPV4_FRAG_MORE) == 0) { + return DHD_PKT_FRAG_LAST; + } else { + return (iph_frag & IPV4_FRAG_OFFSET_MASK)? 
DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST; + } +} + +#ifdef DHDTCPACK_SUPPRESS + +typedef struct { + void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */ + void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */ + int ifidx; + uint8 supp_cnt; + dhd_pub_t *dhdp; + struct timer_list timer; +} tcpack_info_t; + +typedef struct _tdata_psh_info_t { + uint32 end_seq; /* end seq# of a received TCP PSH DATA pkt */ + struct _tdata_psh_info_t *next; /* next pointer of the link chain */ +} tdata_psh_info_t; + +typedef struct { + struct { + uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */ + uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */ + } ip_addr; + struct { + uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */ + uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */ + } tcp_port; + tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */ + tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */ + uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */ +} tcpdata_info_t; + +/* TCPACK SUPPRESS module */ +typedef struct { + int tcpack_info_cnt; + tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM]; /* Info of TCP ACK to send */ + int tcpdata_info_cnt; + tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM]; /* Info of received TCP DATA */ + tdata_psh_info_t *tdata_psh_info_pool; /* Pointer to tdata_psh_info elements pool */ + tdata_psh_info_t *tdata_psh_info_free; /* free tdata_psh_info elements chain in pool */ +#ifdef DHDTCPACK_SUP_DBG + int psh_info_enq_num; /* Number of free TCP PSH DATA info elements in pool */ +#endif /* DHDTCPACK_SUP_DBG */ +} tcpack_sup_module_t; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1}; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + +static void +_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod, + tdata_psh_info_t *tdata_psh_info) +{ + if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) { + DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod, tdata_psh_info)); + return; + } + + ASSERT(tdata_psh_info->next == NULL); + tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free; + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num++; +#endif +} + +static tdata_psh_info_t* +_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info = NULL; + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod)); + return NULL; + } + + tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free; + if (tdata_psh_info == NULL) + DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__)); + else { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num--; +#endif /* DHDTCPACK_SUP_DBG */ + } + + return tdata_psh_info; +} + +#ifdef BCMSDIO +static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info_pool = NULL; + uint i; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) + return BCME_ERROR; + + ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL); + ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL); + + tdata_psh_info_pool = + MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * 
TCPDATA_PSH_INFO_MAXNUM); + + if (tdata_psh_info_pool == NULL) + return BCME_NOMEM; + bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num = 0; +#endif /* DHDTCPACK_SUP_DBG */ + + /* Enqueue newly allocated tcpdata psh info elements to the pool */ + for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++) + _tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]); + + ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL); + tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool; + + return BCME_OK; +} + +static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + uint i; + tdata_psh_info_t *tdata_psh_info; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n", + __FUNCTION__, __LINE__)); + return; + } + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + /* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */ + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + } + tcpdata_info->tdata_psh_info_tail = NULL; + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + i = 0; + /* Be sure we recollected all tdata_psh_info elements */ + while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; + i++; + } + ASSERT(i == TCPDATA_PSH_INFO_MAXNUM); + MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool, + sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); + tcpack_sup_mod->tdata_psh_info_pool = NULL; + + return; +} +#endif /* BCMSDIO */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void dhd_tcpack_send(struct timer_list *t) +#else +static void dhd_tcpack_send(ulong data) +#endif +{ + tcpack_sup_module_t *tcpack_sup_mod; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + tcpack_info_t *cur_tbl = from_timer(cur_tbl, t, timer);; +#else + tcpack_info_t *cur_tbl = (tcpack_info_t *)data; +#endif + dhd_pub_t *dhdp; + int ifidx; + void* pkt; + unsigned long flags; + + if (!cur_tbl) { + return; + } + + dhdp = cur_tbl->dhdp; + if (!dhdp) { + return; + } + + flags = dhd_os_tcpacklock(dhdp); + + tcpack_sup_mod = dhdp->tcpack_sup_module; + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + return; + } + pkt = cur_tbl->pkt_in_q; + ifidx = cur_tbl->ifidx; + if (!pkt) { + dhd_os_tcpackunlock(dhdp, flags); + return; + } + cur_tbl->pkt_in_q = NULL; + cur_tbl->pkt_ether_hdr = NULL; + cur_tbl->ifidx = 0; + cur_tbl->supp_cnt = 0; + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!! 
tcp_ack_info_cnt %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); + } + + dhd_os_tcpackunlock(dhdp, flags); + + dhd_sendpkt(dhdp, ifidx, pkt); +} + +int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) +{ + int ret = BCME_OK; + unsigned long flags; + + flags = dhd_os_tcpacklock(dhdp); + + if (dhdp->tcpack_sup_mode == mode) { + DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode)); + goto exit; + } + + if (mode >= TCPACK_SUP_LAST_MODE || +#ifndef BCMSDIO + mode == TCPACK_SUP_DELAYTX || +#endif /* !BCMSDIO */ + FALSE) { + DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode)); + ret = BCME_BADARG; + goto exit; + } + + DHD_TRACE(("%s: %d -> %d\n", + __FUNCTION__, dhdp->tcpack_sup_mode, mode)); + +#ifdef BCMSDIO + /* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */ + if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) { + tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; + /* We won't need tdata_psh_info pool and tcpddata_info_tbl anymore */ + _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod); + tcpack_sup_mod->tcpdata_info_cnt = 0; + bzero(tcpack_sup_mod->tcpdata_info_tbl, + sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM); + /* For half duplex bus interface, tx precedes rx by default */ + if (dhdp->bus) + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + } +#endif /* BCMSDIO */ + dhdp->tcpack_sup_mode = mode; + + if (mode == TCPACK_SUP_OFF) { + ASSERT(dhdp->tcpack_sup_module != NULL); + /* Clean up timer/data structure for any remaining/pending packet or timer. */ + dhd_tcpack_info_tbl_clean(dhdp); + MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t)); + dhdp->tcpack_sup_module = NULL; + goto exit; + } + + if (dhdp->tcpack_sup_module == NULL) { + tcpack_sup_module_t *tcpack_sup_mod = + MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t)); + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__)); + dhdp->tcpack_sup_mode = TCPACK_SUP_OFF; + ret = BCME_NOMEM; + goto exit; + } + bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t)); + dhdp->tcpack_sup_module = tcpack_sup_mod; + } + +#ifdef BCMSDIO + if (mode == TCPACK_SUP_DELAYTX) { + ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module); + if (ret != BCME_OK) + DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret)); + else if (dhdp->bus) + dhd_bus_set_dotxinrx(dhdp->bus, FALSE); + } +#endif /* BCMSDIO */ + + if (mode == TCPACK_SUP_HOLD) { + int i; + tcpack_sup_module_t *tcpack_sup_mod = + (tcpack_sup_module_t *)dhdp->tcpack_sup_module; + dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) + { + tcpack_sup_mod->tcpack_info_tbl[i].dhdp = dhdp; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&tcpack_sup_mod->tcpack_info_tbl[i].timer, dhd_tcpack_send, 0); +#else + init_timer(&tcpack_sup_mod->tcpack_info_tbl[i].timer); + tcpack_sup_mod->tcpack_info_tbl[i].timer.data = + (ulong)&tcpack_sup_mod->tcpack_info_tbl[i]; + tcpack_sup_mod->tcpack_info_tbl[i].timer.function = dhd_tcpack_send; +#endif + } + } + +exit: + dhd_os_tcpackunlock(dhdp, flags); + return ret; +} + +void +dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp) +{ + tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; + int i; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + flags = dhd_os_tcpacklock(dhdp); + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + 
dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) { + PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q, + TRUE); + tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL; + tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL; + tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0; + tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0; + } + } + } else { + tcpack_sup_mod->tcpack_info_cnt = 0; + bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM); + } + + dhd_os_tcpackunlock(dhdp, flags); + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer); + } + } + +exit: + return; +} + +inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt) +{ + uint8 i; + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int tbl_cnt; + int ret = BCME_OK; + void *pdata; + uint32 pktlen; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + pdata = PKTDATA(dhdp->osh, pkt); + pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata); + + if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, pktlen)); + goto exit; + } + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tbl_cnt = tcpack_sup_mod->tcpack_info_cnt; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM); + + for (i = 0; i < tbl_cnt; i++) { + if (tcpack_info_tbl[i].pkt_in_q == pkt) { + DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n", + __FUNCTION__, __LINE__, pkt, i, tbl_cnt)); + /* This pkt is being transmitted so remove the tcp_ack_info of it. */ + if (i < tbl_cnt - 1) { + bcopy(&tcpack_info_tbl[tbl_cnt - 1], + &tcpack_info_tbl[i], sizeof(tcpack_info_t)); + } + bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t)); + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!! 
tcp_ack_info_cnt %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); + ret = BCME_ERROR; + } + break; + } + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr, + uint8 *tcp_hdr, uint32 tcp_ack_num) +{ + tcpack_sup_module_t *tcpack_sup_mod; + int i; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info = NULL; + bool ret = FALSE; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + goto exit; + } + + DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), + tcp_ack_num)); + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; + DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, i, + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tcpdata_info_tmp->tcp_port.src), + ntoh16_ua(tcpdata_info_tmp->tcp_port.dst))); + + /* If either IP address or TCP port number does not match, skip. */ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 && + memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET], + tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 && + memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET], + tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) { + tcpdata_info = tcpdata_info_tmp; + break; + } + } + + if (tcpdata_info == NULL) { + DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__)); + goto exit; + } + + if (tcpdata_info->tdata_psh_info_head == NULL) { + DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__)); + } + + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) { + DHD_TRACE(("%s %d: PSH ACKED! 
%u >= %u\n", + __FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq)); + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + ret = TRUE; + } else + break; + } + if (tdata_psh_info == NULL) + tcpdata_info->tdata_psh_info_tail = NULL; + +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + +exit: + return ret; +} + +bool +dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i; + bool ret = FALSE; + bool set_dotxinrx = TRUE; + unsigned long flags; + + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
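+ * Only a segment whose flag byte is exactly TCP_FLAG_ACK and which carries no
+ * TCP payload (checked further below against the IP total length) is a
+ * candidate for suppression; SYN/FIN/RST/PSH or data-bearing segments are
+ * always sent out untouched.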
*/ + if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) { + DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n", + __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET])); + goto exit; + } + + new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]); + new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet has TCP data, so just send */ + if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len); + + new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + DHD_TRACE(("%s %d: TCP ACK with zero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ + flags = dhd_os_tcpacklock(dhdp); +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + counter_printlog(&tack_tbl); + tack_tbl.cnt[0]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + tcpack_sup_mod = dhdp->tcpack_sup_module; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) { + /* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[5]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else + set_dotxinrx = FALSE; + + for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) { + void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */ + uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; + uint32 old_ip_hdr_len, old_tcp_hdr_len; + uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ + + if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { + DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt)); + break; + } + + if (PKTDATA(dhdp->osh, oldpkt) == NULL) { + DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt)); + break; + } + + old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr; + old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN; + old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); + old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; + old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]); + + DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* If either of IP address or TCP port number does not match, skip. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
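+ * Because of that layout a single memcmp over IPV4_ADDR_LEN * 2 bytes (and one
+ * over TCP_PORT_LEN * 2 bytes) compares source and destination in one pass.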
+ */ + if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], + &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || + memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], + &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) + continue; + + old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) { + /* New packet has higher TCP ACK number, so it replaces the old packet */ + if (new_ip_hdr_len == old_ip_hdr_len && + new_tcp_hdr_len == old_tcp_hdr_len) { + ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0); + bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len); + PKTFREE(dhdp->osh, pkt, FALSE); + DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n", + __FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num)); +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[2]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + ret = TRUE; + } else { +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[6]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + DHD_TRACE(("%s %d: lenth mismatch %d != %d || %d != %d" + " ACK %u -> %u\n", __FUNCTION__, __LINE__, + new_ip_hdr_len, old_ip_hdr_len, + new_tcp_hdr_len, old_tcp_hdr_len, + old_tcpack_num, new_tcp_ack_num)); + } + } else if (new_tcp_ack_num == old_tcpack_num) { + set_dotxinrx = TRUE; + /* TCPACK retransmission */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[3]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n", + __FUNCTION__, __LINE__, old_tcpack_num, oldpkt, + new_tcp_ack_num, pkt)); + } + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) { + /* No TCPACK packet with the same IP addr and TCP port is found + * in tcp_ack_info_tbl. So add this packet to the table. 
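+ * Up to TCPACK_INFO_MAXNUM streams can be tracked at once; if no slot is free
+ * (the else branch below) the ACK is left untouched and sent out normally.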
+ */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + tcpack_sup_mod->tcpack_info_cnt)); + + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt; + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr; + tcpack_sup_mod->tcpack_info_cnt++; +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[1]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + ASSERT(i == tcpack_sup_mod->tcpack_info_cnt); + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + /* Unless TCPACK_SUP_DELAYTX, dotxinrx is alwasy TRUE, so no need to set here */ + if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx) + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + + return ret; +} + +bool +dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *ether_hdr; /* Ethernet header of the new packet */ + uint16 ether_type; /* Ethernet type of the new packet */ + uint8 *ip_hdr; /* IP header of the new packet */ + uint8 *tcp_hdr; /* TCP header of the new packet */ + uint32 ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint16 ip_total_len; /* Total length of IP packet for the new packet */ + uint32 tcp_hdr_len; /* TCP header length of the new packet */ + uint32 tcp_seq_num; /* TCP sequence number of the new packet */ + uint16 tcp_data_len; /* TCP DATA length that excludes IP and TCP headers */ + uint32 end_tcp_seq_num; /* TCP seq number of the last byte in the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info; + + int i; + bool ret = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + ether_type = ether_hdr[12] << 8 | ether_hdr[13]; + + if (ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type)); + + ip_hdr = ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + ip_hdr_len = IPV4_HLEN(ip_hdr); + if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! 
ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr))); + goto exit; + } + + tcp_hdr = ip_hdr + ip_hdr_len; + cur_framelen -= ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]); + tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet is mere TCP ACK, so do nothing */ + if (ip_total_len == ip_hdr_len + tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len); + + if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) { + DHD_TRACE(("%s %d: Not interested TCP DATA packet\n", __FUNCTION__, __LINE__)); + goto exit; + } + + DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), + tcp_hdr[TCP_FLAGS_OFFSET])); + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + /* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */ + i = 0; + while (i < tcpack_sup_mod->tcpdata_info_cnt) { + tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; + uint32 now_in_ms = OSL_SYSUPTIME(); + DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, i, + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tdata_info_tmp->tcp_port.src), + ntoh16_ua(tdata_info_tmp->tcp_port.dst))); + + /* If both IP address and TCP port number match, we found it so break. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
+ */ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) { + tcpdata_info = tdata_info_tmp; + tcpdata_info->last_used_time = now_in_ms; + break; + } + + if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) { + tdata_psh_info_t *tdata_psh_info_tmp; + tcpdata_info_t *last_tdata_info; + + while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) { + tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next; + tdata_psh_info_tmp->next = NULL; + DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n", + __FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq)); + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp); + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + tcpack_sup_mod->tcpdata_info_cnt--; + ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0); + + last_tdata_info = + &tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt]; + if (i < tcpack_sup_mod->tcpdata_info_cnt) { + ASSERT(last_tdata_info != tdata_info_tmp); + bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t)); + } + bzero(last_tdata_info, sizeof(tcpdata_info_t)); + DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt)); + /* Don't increase "i" here, so that the prev last tcpdata_info is checked */ + } else + i++; + } + + tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]); + tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len; + end_tcp_seq_num = tcp_seq_num + tcp_data_len; + + if (tcpdata_info == NULL) { + ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt); + if (i >= TCPDATA_INFO_MAXNUM) { + DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + + /* No TCP flow with the same IP addr and TCP port is found + * in tcp_data_info_tbl. So add this flow to the table. + */ + DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
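+ * While scanning, any flow entry that has been idle for longer than
+ * TCPDATA_INFO_TIMEOUT ms is recycled below: its queued PSH records are
+ * returned to the pool and the last table entry is copied into the freed slot.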
+ */ + bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr, + IPV4_ADDR_LEN * 2); + bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port, + TCP_PORT_LEN * 2); + + tcpdata_info->last_used_time = OSL_SYSUPTIME(); + tcpack_sup_mod->tcpdata_info_cnt++; + } + + ASSERT(tcpdata_info != NULL); + + tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod); +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + if (tdata_psh_info == NULL) { + DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tdata_psh_info->end_seq = end_tcp_seq_num; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[4]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n", + __FUNCTION__, __LINE__, tdata_psh_info->end_seq)); + + ASSERT(tdata_psh_info->next == NULL); + + if (tcpdata_info->tdata_psh_info_head == NULL) + tcpdata_info->tdata_psh_info_head = tdata_psh_info; + else { + ASSERT(tcpdata_info->tdata_psh_info_tail); + tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info; + } + tcpdata_info->tdata_psh_info_tail = tdata_psh_info; + + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +bool +dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i, free_slot = TCPACK_INFO_MAXNUM; + bool hold = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) { + goto exit; + } + + if (dhdp->tcpack_sup_ratio == 1) { + goto exit; + } + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
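+ * In TCPACK_SUP_HOLD mode the latest pure ACK of each stream is parked in
+ * tcpack_info_tbl, superseding the previously parked one (which is freed).
+ * Roughly every tcpack_sup_ratio-th ACK is let through immediately, and the
+ * per-slot timer (dhd_tcpack_send, armed for tcpack_sup_delay ms) flushes a
+ * parked ACK that would otherwise sit too long.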
*/ + if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) { + DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n", + __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET])); + goto exit; + } + + new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]); + new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet has TCP data, so just send */ + if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len); + + new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + DHD_TRACE(("%s %d: TCP ACK with zero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ + flags = dhd_os_tcpacklock(dhdp); + + tcpack_sup_mod = dhdp->tcpack_sup_module; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + hold = TRUE; + + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */ + uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; + uint32 old_ip_hdr_len, old_tcp_hdr_len; + uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ + + if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { + if (free_slot == TCPACK_INFO_MAXNUM) { + free_slot = i; + } + continue; + } + + if (PKTDATA(dhdp->osh, oldpkt) == NULL) { + DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n", + __FUNCTION__, __LINE__, i)); + hold = FALSE; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr; + old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN; + old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); + old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; + old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]); + + DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* If either of IP address or TCP port number does not match, skip. 
*/ + if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], + &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || + memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], + &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) { + continue; + } + + old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) { + tcpack_info_tbl[i].supp_cnt++; + if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) { + tcpack_info_tbl[i].pkt_in_q = NULL; + tcpack_info_tbl[i].pkt_ether_hdr = NULL; + tcpack_info_tbl[i].ifidx = 0; + tcpack_info_tbl[i].supp_cnt = 0; + hold = FALSE; + } else { + tcpack_info_tbl[i].pkt_in_q = pkt; + tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr; + tcpack_info_tbl[i].ifidx = ifidx; + } + PKTFREE(dhdp->osh, oldpkt, TRUE); + } else { + PKTFREE(dhdp->osh, pkt, TRUE); + } + dhd_os_tcpackunlock(dhdp, flags); + + if (!hold) { + del_timer_sync(&tcpack_info_tbl[i].timer); + } + goto exit; + } + + if (free_slot < TCPACK_INFO_MAXNUM) { + /* No TCPACK packet with the same IP addr and TCP port is found + * in tcp_ack_info_tbl. So add this packet to the table. + */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + free_slot)); + + tcpack_info_tbl[free_slot].pkt_in_q = pkt; + tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr; + tcpack_info_tbl[free_slot].ifidx = ifidx; + tcpack_info_tbl[free_slot].supp_cnt = 1; + mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer, + jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay)); + tcpack_sup_mod->tcpack_info_cnt++; + } else { + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return hold; +} +#endif /* DHDTCPACK_SUPPRESS */ diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.h b/drivers/net/wireless/bcmdhd/dhd_ip.h new file mode 100644 index 000000000000..a72976b07ccf --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_ip.h @@ -0,0 +1,85 @@ +/* + * Header file describing the common ip parser function. + * + * Provides type definitions and function prototypes used to parse ip packet. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_ip.h 537119 2015-02-25 04:24:14Z $ + */ + +#ifndef _dhd_ip_h_ +#define _dhd_ip_h_ + +#ifdef DHDTCPACK_SUPPRESS +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS */ + +typedef enum pkt_frag +{ + DHD_PKT_FRAG_NONE = 0, + DHD_PKT_FRAG_FIRST, + DHD_PKT_FRAG_CONT, + DHD_PKT_FRAG_LAST +} pkt_frag_t; + +extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p); + +#ifdef DHDTCPACK_SUPPRESS +#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN) +/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */ +#define TCPACKSZMAX (TCPACKSZMIN + 100) + +/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */ +#define TCPACK_INFO_MAXNUM 4 +#define TCPDATA_INFO_MAXNUM 4 +#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM) + +#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */ + +#define DEFAULT_TCPACK_SUPP_RATIO 3 +#ifndef CUSTOM_TCPACK_SUPP_RATIO +#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO +#endif /* CUSTOM_TCPACK_SUPP_RATIO */ + +#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */ +#ifndef CUSTOM_TCPACK_DELAY_TIME +#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME +#endif /* CUSTOM_TCPACK_DELAY_TIME */ + +extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on); +extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp); +extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx); +/* #define DHDTCPACK_SUP_DBG */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +extern counter_tbl_t tack_tbl; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ +#endif /* DHDTCPACK_SUPPRESS */ + +#endif /* _dhd_ip_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c new file mode 100644 index 000000000000..2786a6640181 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux.c @@ -0,0 +1,13676 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $ + */ + +#include +#include +#include +#ifdef SHOW_LOGTRACE +#include +#include +#endif /* SHOW_LOGTRACE */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef ENABLE_ADAPTIVE_SCHED +#include +#endif /* ENABLE_ADAPTIVE_SCHED */ + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef PCIE_FULL_DONGLE +#include +#endif +#include +#include +#include +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif +#ifdef WL_CFG80211 +#include +#endif +#ifdef PNO_SUPPORT +#include +#endif +#ifdef RTT_SUPPORT +#include +#endif + +#ifdef CONFIG_COMPAT +#include +#endif + +#ifdef DHD_WMF +#include +#endif /* DHD_WMF */ + +#ifdef DHD_L2_FILTER +#include +#include +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef DHD_DEBUG_PAGEALLOC +typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); +void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); +extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); +#endif /* DHD_DEBUG_PAGEALLOC */ + + +#if defined(DHD_LB) +/* Dynamic CPU selection for load balancing */ +#include +#include +#include +#include +#include + +#if !defined(DHD_LB_PRIMARY_CPUS) +#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ +#endif + +#if !defined(DHD_LB_SECONDARY_CPUS) +#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ +#endif + +#define HIST_BIN_SIZE 8 + +#if defined(DHD_LB_RXP) +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ + +#ifdef WLMEDIA_HTSF +#include +#include + +#define HTSF_MINLEN 200 /* min. packet length to timestamp */ +#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */ +#define TSMAX 1000 /* max no. of timing record kept */ +#define NUMBIN 34 + +static uint32 tsidx = 0; +static uint32 htsf_seqnum = 0; +uint32 tsfsync; +struct timeval tsync; +static uint32 tsport = 5010; + +typedef struct histo_ { + uint32 bin[NUMBIN]; +} histo_t; + +#if !ISPOWEROF2(DHD_SDALIGN) +#error DHD_SDALIGN is not a power of 2! 
+#endif + +static histo_t vi_d1, vi_d2, vi_d3, vi_d4; +#endif /* WLMEDIA_HTSF */ + +#ifdef STBLINUX +#ifdef quote_str +#undef quote_str +#endif /* quote_str */ +#ifdef to_str +#undef to_str +#endif /* quote_str */ +#define to_str(s) #s +#define quote_str(s) to_str(s) + +static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET); +#endif /* STBLINUX */ + + +#if defined(SOFTAP) +extern bool ap_cfg_running; +extern bool ap_fw_loaded; +#endif +extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction); + +#ifdef FIX_CPU_MIN_CLOCK +#include +#endif /* FIX_CPU_MIN_CLOCK */ +#ifdef SET_RANDOM_MAC_SOFTAP +#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL +#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 +#endif +static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; +#endif /* SET_RANDOM_MAC_SOFTAP */ +#ifdef ENABLE_ADAPTIVE_SCHED +#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ +#ifndef CUSTOM_CPUFREQ_THRESH +#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH +#endif /* CUSTOM_CPUFREQ_THRESH */ +#endif /* ENABLE_ADAPTIVE_SCHED */ + +/* enable HOSTIP cache update from the host side when an eth0:N is up */ +#define AOE_IP_ALIAS_SUPPORT 1 + +#ifdef BCM_FD_AGGR +#include +#include +#endif +#ifdef PROP_TXSTATUS +#include +#include +#endif + +#include + +/* Maximum STA per radio */ +#define DHD_MAX_STA 32 + + + +const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; +const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; +#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] + +#ifdef ARP_OFFLOAD_SUPPORT +void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx); +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inetaddr_notifier = { + .notifier_call = dhd_inetaddr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inetaddr_notifier_registered = FALSE; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +static int dhd_inet6addr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inet6addr_notifier = { + .notifier_call = dhd_inet6addr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inet6addr_notifier_registered = FALSE; +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +volatile bool dhd_mmc_suspend = FALSE; +DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#if defined(OOB_INTR_ONLY) +extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +MODULE_LICENSE("GPL and additional rights"); +#endif /* LinuxVer */ + +#include + +#ifdef BCM_FD_AGGR +#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE) +#else +#ifndef PROP_TXSTATUS +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) +#else +#define 
DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) +#endif +#endif /* BCM_FD_AGGR */ + +#ifdef PROP_TXSTATUS +extern bool dhd_wlfc_skip_fc(void); +extern void dhd_wlfc_plat_init(void *dhd); +extern void dhd_wlfc_plat_deinit(void *dhd); +#endif /* PROP_TXSTATUS */ +#ifdef USE_DYNAMIC_F2_BLKSIZE +extern uint sd_f2_blocksize; +extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) +const char * +print_tainted() +{ + return ""; +} +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ + +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +extern wl_iw_extra_params_t g_wl_iw_params; +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef CONFIG_PARTIALSUSPEND_SLP +#include +#define CONFIG_HAS_EARLYSUSPEND +#define DHD_USE_EARLYSUSPEND +#define register_early_suspend register_pre_suspend +#define unregister_early_suspend unregister_pre_suspend +#define early_suspend pre_suspend +#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50 +#else +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ +#endif /* CONFIG_PARTIALSUSPEND_SLP */ + +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); + +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); +#endif + + +#ifdef READ_MACADDR +extern int dhd_read_macaddr(struct dhd_info *dhd); +#else +static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; } +#endif +#ifdef WRITE_MACADDR +extern int dhd_write_macaddr(struct ether_addr *mac); +#else +static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; } +#endif + + + + + +#ifdef DHD_FW_COREDUMP +static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); +#endif /* DHD_FW_COREDUMP */ +#ifdef DHD_LOG_DUMP +static void dhd_log_dump_init(dhd_pub_t *dhd); +static void dhd_log_dump_deinit(dhd_pub_t *dhd); +static void dhd_log_dump(void *handle, void *event_info, u8 event); +void dhd_schedule_log_dump(dhd_pub_t *dhdp); +static int do_dhd_log_dump(dhd_pub_t *dhdp); +#endif /* DHD_LOG_DUMP */ + +static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); +static struct notifier_block dhd_reboot_notifier = { + .notifier_call = dhd_reboot_callback, + .priority = 1, +}; + +#ifdef BCMPCIE +static int is_reboot = 0; +#endif /* BCMPCIE */ + +typedef struct dhd_if_event { + struct list_head list; + wl_event_data_if_t event; + char name[IFNAMSIZ+1]; + uint8 mac[ETHER_ADDR_LEN]; +} dhd_if_event_t; + +/* Interface control information */ +typedef struct dhd_if { + struct dhd_info *info; /* back pointer to dhd_info */ + /* OS/stack specifics */ + struct net_device *net; + int idx; /* iface idx in dongle */ + uint subunit; /* subunit */ + uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ + bool set_macaddress; + bool set_multicast; + uint8 bssidx; /* bsscfg index for the interface */ + bool attached; /* Delayed attachment when unset */ + bool txflowcontrol; /* Per interface flow control indicator */ + char name[IFNAMSIZ+1]; /* linux interface name */ + char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */ + struct net_device_stats stats; +#ifdef DHD_WMF 
+ dhd_wmf_t wmf; /* per bsscfg wmf setting */ +#endif /* DHD_WMF */ +#ifdef PCIE_FULL_DONGLE + struct list_head sta_list; /* sll of associated stations */ +#if !defined(BCM_GMAC3) + spinlock_t sta_list_lock; /* lock for manipulating sll */ +#endif /* ! BCM_GMAC3 */ +#endif /* PCIE_FULL_DONGLE */ + uint32 ap_isolate; /* ap-isolation settings */ +#ifdef DHD_L2_FILTER + bool parp_enable; + bool parp_discard; + bool parp_allnode; + arp_table_t *phnd_arp_table; +/* for Per BSS modification */ + bool dhcp_unicast; + bool block_ping; + bool grat_arp; +#endif /* DHD_L2_FILTER */ +} dhd_if_t; + +#ifdef WLMEDIA_HTSF +typedef struct { + uint32 low; + uint32 high; +} tsf_t; + +typedef struct { + uint32 last_cycle; + uint32 last_sec; + uint32 last_tsf; + uint32 coef; /* scaling factor */ + uint32 coefdec1; /* first decimal */ + uint32 coefdec2; /* second decimal */ +} htsf_t; + +typedef struct { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} tstamp_t; + +static tstamp_t ts[TSMAX]; +static tstamp_t maxdelayts; +static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; + +#endif /* WLMEDIA_HTSF */ + +struct ipv6_work_info_t { + uint8 if_idx; + char ipv6_addr[16]; + unsigned long event; +}; + +#ifdef DHD_DEBUG +typedef struct dhd_dump { + uint8 *buf; + int bufsize; +} dhd_dump_t; +#endif /* DHD_DEBUG */ + +/* When Perimeter locks are deployed, any blocking calls must be preceeded + * with a PERIM UNLOCK and followed by a PERIM LOCK. + * Examples of blocking calls are: schedule_timeout(), down_interruptible(), + * wait_event_timeout(). + */ + +/* Local private structure (extension of pub) */ +typedef struct dhd_info { +#if defined(WL_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be first) */ +#endif /* defined(WL_WIRELESS_EXT) */ + dhd_pub_t pub; + dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */ + + void *adapter; /* adapter information, interrupt, fw path etc. 
*/ + char fw_path[PATH_MAX]; /* path to firmware image */ + char nv_path[PATH_MAX]; /* path to nvram vars file */ + + /* serialize dhd iovars */ + struct mutex dhd_iovar_mutex; + + struct semaphore proto_sem; +#ifdef PROP_TXSTATUS + spinlock_t wlfc_spinlock; + +#endif /* PROP_TXSTATUS */ +#ifdef WLMEDIA_HTSF + htsf_t htsf; +#endif + wait_queue_head_t ioctl_resp_wait; + wait_queue_head_t d3ack_wait; + wait_queue_head_t dhd_bus_busy_state_wait; + uint32 default_wd_interval; + + struct timer_list timer; + bool wd_timer_valid; +#ifdef DHD_PCIE_RUNTIMEPM + struct timer_list rpm_timer; + bool rpm_timer_valid; + tsk_ctl_t thr_rpm_ctl; +#endif /* DHD_PCIE_RUNTIMEPM */ + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + spinlock_t dhd_lock; + + struct semaphore sdsem; + tsk_ctl_t thr_dpc_ctl; + tsk_ctl_t thr_wdt_ctl; + + tsk_ctl_t thr_rxf_ctl; + spinlock_t rxf_lock; + bool rxthread_enabled; + + /* Wakelocks */ +#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + struct wake_lock wl_wifi; /* Wifi wakelock */ + struct wake_lock wl_rxwake; /* Wifi rx wakelock */ + struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ + struct wake_lock wl_wdwake; /* Wifi wd wakelock */ + struct wake_lock wl_evtwake; /* Wifi event wakelock */ +#ifdef BCMPCIE_OOB_HOST_WAKE + struct wake_lock wl_intrwake; /* Host wakeup wakelock */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + struct wake_lock wl_scanwake; /* Wifi scan wakelock */ +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + /* net_device interface lock, prevent race conditions among net_dev interface + * calls and wifi_on or wifi_off + */ + struct mutex dhd_net_if_mutex; + struct mutex dhd_suspend_mutex; +#endif + spinlock_t wakelock_spinlock; + spinlock_t wakelock_evt_spinlock; + uint32 wakelock_event_counter; + uint32 wakelock_counter; + int wakelock_wd_counter; + int wakelock_rx_timeout_enable; + int wakelock_ctrl_timeout_enable; + bool waive_wakelock; + uint32 wakelock_before_waive; + + /* Thread to issue ioctl for multicast */ + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + dhd_attach_states_t dhd_state; +#ifdef SHOW_LOGTRACE + dhd_event_log_t event_data; +#endif /* SHOW_LOGTRACE */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + u32 pend_ipaddr; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef BCM_FD_AGGR + void *rpc_th; + void *rpc_osh; + struct timer_list rpcth_timer; + bool rpcth_timer_active; + uint8 fdaggr; +#endif +#ifdef DHDTCPACK_SUPPRESS + spinlock_t tcpack_lock; +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef FIX_CPU_MIN_CLOCK + bool cpufreq_fix_status; + struct mutex cpufreq_fix; + struct pm_qos_request dhd_cpu_qos; +#ifdef FIX_BUS_MIN_CLOCK + struct pm_qos_request dhd_bus_qos; +#endif /* FIX_BUS_MIN_CLOCK */ +#endif /* FIX_CPU_MIN_CLOCK */ + void *dhd_deferred_wq; +#ifdef DEBUG_CPU_FREQ + struct notifier_block freq_trans; + int __percpu *new_freq; +#endif + unsigned int unit; + struct notifier_block pm_notifier; +#ifdef DHD_PSTA + uint32 psta_mode; /* PSTA or PSR */ +#endif /* DHD_PSTA */ +#ifdef DHD_DEBUG + dhd_dump_t *dump; + struct timer_list join_timer; + u32 join_timeout_val; + bool join_timer_active; + uint scan_time_count; + struct timer_list scan_timer; + bool scan_timer_active; 
+#endif +#if defined(DHD_LB) + /* CPU Load Balance dynamic CPU selection */ + + /* Variable that tracks the currect CPUs available for candidacy */ + cpumask_var_t cpumask_curr_avail; + + /* Primary and secondary CPU mask */ + cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ + cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ + + struct notifier_block cpu_notifier; + + /* Tasklet to handle Tx Completion packet freeing */ + struct tasklet_struct tx_compl_tasklet; + atomic_t tx_compl_cpu; + + + /* Tasklet to handle RxBuf Post during Rx completion */ + struct tasklet_struct rx_compl_tasklet; + atomic_t rx_compl_cpu; + + /* Napi struct for handling rx packet sendup. Packets are removed from + * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then + * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled + * to run to rx_napi_cpu. + */ + struct sk_buff_head rx_pend_queue ____cacheline_aligned; + struct sk_buff_head rx_napi_queue ____cacheline_aligned; + struct napi_struct rx_napi_struct ____cacheline_aligned; + atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ + struct net_device *rx_napi_netdev; /* netdev of primary interface */ + + struct work_struct rx_napi_dispatcher_work; + struct work_struct tx_compl_dispatcher_work; + struct work_struct rx_compl_dispatcher_work; + /* Number of times DPC Tasklet ran */ + uint32 dhd_dpc_cnt; + + /* Number of times NAPI processing got scheduled */ + uint32 napi_sched_cnt; + + /* Number of times NAPI processing ran on each available core */ + uint32 napi_percpu_run_cnt[NR_CPUS]; + + /* Number of times RX Completions got scheduled */ + uint32 rxc_sched_cnt; + /* Number of times RX Completion ran on each available core */ + uint32 rxc_percpu_run_cnt[NR_CPUS]; + + /* Number of times TX Completions got scheduled */ + uint32 txc_sched_cnt; + /* Number of times TX Completions ran on each available core */ + uint32 txc_percpu_run_cnt[NR_CPUS]; + + /* CPU status */ + /* Number of times each CPU came online */ + uint32 cpu_online_cnt[NR_CPUS]; + + /* Number of times each CPU went offline */ + uint32 cpu_offline_cnt[NR_CPUS]; + + /* + * Consumer Histogram - NAPI RX Packet processing + * ----------------------------------------------- + * On Each CPU, when the NAPI RX Packet processing call back was invoked + * how many packets were processed is captured in this data structure. + * Now its difficult to capture the "exact" number of packets processed. + * So considering the packet counter to be a 32 bit one, we have a + * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets + * processed is rounded off to the next power of 2 and put in the + * approriate "bin" the value in the bin gets incremented. 
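+ * (Following that rule, a run that processed 100 packets would be rounded up
+ * to 128 and counted in the 2^7 bin.)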
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times + * and the packet count processed is as follows (assume the bin counters are 0) + * iteration 1 - 10 (the bin counter 2^4 increments to 1) + * iteration 2 - 30 (the bin counter 2^5 increments to 1) + * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) + */ + uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE]; +#endif /* DHD_LB */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + + struct kobject dhd_kobj; +#ifdef SUPPORT_SENSORHUB + uint32 shub_enable; +#endif /* SUPPORT_SENSORHUB */ + + struct delayed_work dhd_memdump_work; +} dhd_info_t; + +#define DHDIF_FWDER(dhdif) FALSE + +/* Flag to indicate if we should download firmware on driver load */ +uint dhd_download_fw_on_driverload = TRUE; + +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = FALSE; + +/* Definitions to provide path to the firmware and nvram + * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" + */ +char firmware_path[MOD_PARAM_PATHLEN]; +char nvram_path[MOD_PARAM_PATHLEN]; + +/* backup buffer for firmware and nvram path */ +char fw_bak_path[MOD_PARAM_PATHLEN]; +char nv_bak_path[MOD_PARAM_PATHLEN]; + +/* information string to keep firmware, chio, cheip version info visiable from log */ +char info_string[MOD_PARAM_INFOLEN]; +module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); +int op_mode = 0; +int disable_proptx = 0; +module_param(op_mode, int, 0644); + +#if defined(DHD_LB_RXP) +static int dhd_napi_weight = 32; +module_param(dhd_napi_weight, int, 0644); +#endif /* DHD_LB_RXP */ + +extern int wl_control_wl_start(struct net_device *dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) +struct semaphore dhd_registration_sem; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + +/* deferred handlers */ +static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); +static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef WL_CFG80211 +extern void dhd_netdev_free(struct net_device *ndev); +#endif /* WL_CFG80211 */ + +/* Error bits */ +module_param(dhd_msg_level, int, 0); + +#ifdef ARP_OFFLOAD_SUPPORT +/* ARP offload enable */ +uint dhd_arp_enable = TRUE; +module_param(dhd_arp_enable, uint, 0); + +/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ + +#ifdef ENABLE_ARP_SNOOP_MODE +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP; +#else +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; +#endif /* ENABLE_ARP_SNOOP_MODE */ + +module_param(dhd_arp_mode, uint, 0); +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* Disable Prop tx */ +module_param(disable_proptx, int, 0644); +/* load firmware and/or nvram values from the filesystem */ +module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660); + +/* Watchdog interval */ + +/* extend watchdog expiration to 2 seconds when 
DPC is running */ +#define WATCHDOG_EXTEND_INTERVAL (2000) + +uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS; +module_param(dhd_watchdog_ms, uint, 0); + +#ifdef DHD_PCIE_RUNTIMEPM +uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS; +#endif /* DHD_PCIE_RUNTIMEPMT */ +#if defined(DHD_DEBUG) +/* Console poll interval */ +uint dhd_console_ms = 0; +module_param(dhd_console_ms, uint, 0644); +#endif /* defined(DHD_DEBUG) */ + + +uint dhd_slpauto = TRUE; +module_param(dhd_slpauto, uint, 0); + +#ifdef PKT_FILTER_SUPPORT +/* Global Pkt filter enable control */ +uint dhd_pkt_filter_enable = TRUE; +module_param(dhd_pkt_filter_enable, uint, 0); +#endif + +/* Pkt filter init setup */ +uint dhd_pkt_filter_init = 0; +module_param(dhd_pkt_filter_init, uint, 0); + +/* Pkt filter mode control */ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER +uint dhd_master_mode = FALSE; +#else +uint dhd_master_mode = TRUE; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ +module_param(dhd_master_mode, uint, 0); + +int dhd_watchdog_prio = 0; +module_param(dhd_watchdog_prio, int, 0); + +/* DPC thread priority */ +int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING; +module_param(dhd_dpc_prio, int, 0); + +/* RX frame thread priority */ +int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; +module_param(dhd_rxf_prio, int, 0); + +int passive_channel_skip = 0; +module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); + +#if !defined(BCMDHDUSB) +extern int dhd_dongle_ramsize; +module_param(dhd_dongle_ramsize, int, 0); +#endif /* BCMDHDUSB */ + +/* Keep track of number of instances */ +static int dhd_found = 0; +static int instance_base = 0; /* Starting instance number */ +module_param(instance_base, int, 0644); + +/* Functions to manage sysfs interface for dhd */ +static int dhd_sysfs_init(dhd_info_t *dhd); +static void dhd_sysfs_exit(dhd_info_t *dhd); + +#if defined(DHD_LB) + +static void +dhd_lb_set_default_cpus(dhd_info_t *dhd) +{ + /* Default CPU allocation for the jobs */ + atomic_set(&dhd->rx_napi_cpu, 1); + atomic_set(&dhd->rx_compl_cpu, 2); + atomic_set(&dhd->tx_compl_cpu, 2); +} + +static void +dhd_cpumasks_deinit(dhd_info_t *dhd) +{ + free_cpumask_var(dhd->cpumask_curr_avail); + free_cpumask_var(dhd->cpumask_primary); + free_cpumask_var(dhd->cpumask_primary_new); + free_cpumask_var(dhd->cpumask_secondary); + free_cpumask_var(dhd->cpumask_secondary_new); +} + +static int +dhd_cpumasks_init(dhd_info_t *dhd) +{ + int id; + uint32 cpus; + int ret = 0; + + if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { + DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); + cpumask_clear(dhd->cpumask_primary); + cpumask_clear(dhd->cpumask_secondary); + + cpus = DHD_LB_PRIMARY_CPUS; + for (id = 0; id < NR_CPUS; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_primary); + } + + cpus = DHD_LB_SECONDARY_CPUS; + for (id = 0; id < NR_CPUS; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_secondary); + } + + return ret; +fail: + dhd_cpumasks_deinit(dhd); + return ret; +} + +/* + * The CPU Candidacy Algorithm + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The available CPUs for selection are divided into two groups + * Primary Set - A CPU mask that carries the First Choice CPUs + * Secondary Set - A 
CPU mask that carries the Second Choice CPUs. + * + * There are two types of Job, that needs to be assigned to + * the CPUs, from one of the above mentioned CPU group. The Jobs are + * 1) Rx Packet Processing - napi_cpu + * 2) Completion Processiong (Tx, RX) - compl_cpu + * + * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes + * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy + * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu. + * If there are more processors free, it assigns one to compl_cpu. + * It also tries to ensure that both napi_cpu and compl_cpu are not on the same + * CPU, as much as possible. + * + * By design, both Tx and Rx completion jobs are run on the same CPU core, as it + * would allow Tx completion skb's to be released into a local free pool from + * which the rx buffer posts could have been serviced. it is important to note + * that a Tx packet may not have a large enough buffer for rx posting. + */ +void dhd_select_cpu_candidacy(dhd_info_t *dhd) +{ + uint32 primary_available_cpus; /* count of primary available cpus */ + uint32 secondary_available_cpus; /* count of secondary available cpus */ + uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ + uint32 compl_cpu = 0; /* cpu selected for completion jobs */ + + cpumask_clear(dhd->cpumask_primary_new); + cpumask_clear(dhd->cpumask_secondary_new); + + /* + * Now select from the primary mask. Even if a Job is + * already running on a CPU in secondary group, we still move + * to primary CPU. So no conditional checks. + */ + cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary, + dhd->cpumask_curr_avail); + + cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary, + dhd->cpumask_curr_avail); + + primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new); + + if (primary_available_cpus > 0) { + napi_cpu = cpumask_first(dhd->cpumask_primary_new); + + /* If no further CPU is available, + * cpumask_next returns >= nr_cpu_ids + */ + compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); + if (compl_cpu >= nr_cpu_ids) + compl_cpu = 0; + } + + DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu)); + + /* -- Now check for the CPUs from the secondary mask -- */ + secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new); + + DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n", + __FUNCTION__, secondary_available_cpus, nr_cpu_ids)); + + if (secondary_available_cpus > 0) { + /* At this point if napi_cpu is unassigned it means no CPU + * is online from Primary Group + */ + if (napi_cpu == 0) { + napi_cpu = cpumask_first(dhd->cpumask_secondary_new); + compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); + } else if (compl_cpu == 0) { + compl_cpu = cpumask_first(dhd->cpumask_secondary_new); + } + + /* If no CPU was available for completion, choose CPU 0 */ + if (compl_cpu >= nr_cpu_ids) + compl_cpu = 0; + } + if ((primary_available_cpus == 0) && + (secondary_available_cpus == 0)) { + /* No CPUs available from primary or secondary mask */ + napi_cpu = 0; + compl_cpu = 0; + } + + DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu)); + ASSERT(napi_cpu < nr_cpu_ids); + ASSERT(compl_cpu < nr_cpu_ids); + + atomic_set(&dhd->rx_napi_cpu, napi_cpu); + atomic_set(&dhd->tx_compl_cpu, compl_cpu); + atomic_set(&dhd->rx_compl_cpu, compl_cpu); + return; +} + +/* + * Function to handle CPU Hotplug 
notifications. + * One of the task it does is to trigger the CPU Candidacy algorithm + * for load balancing. + */ +int +dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + + dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); + + switch (action) + { + case CPU_ONLINE: + DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); + cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); + cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + default: + break; + } + + return NOTIFY_OK; +} + +#if defined(DHD_LB_STATS) +void dhd_lb_stats_init(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int i, j; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n", + __FUNCTION__)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); + DHD_LB_STATS_CLR(dhd->napi_sched_cnt); + DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); + DHD_LB_STATS_CLR(dhd->txc_sched_cnt); + + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); + DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); + } + + for (i = 0; i < NR_CPUS; i++) { + for (j = 0; j < HIST_BIN_SIZE; j++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]); + DHD_LB_STATS_CLR(dhd->txc_hist[i][j]); + DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]); + } + } + + return; +} + +static void dhd_lb_stats_dump_histo( + struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE]) +{ + int i, j; + uint32 per_cpu_total[NR_CPUS] = {0}; + uint32 total = 0; + + bcm_bprintf(strbuf, "CPU: \t\t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\nBin\n"); + + for (i = 0; i < HIST_BIN_SIZE; i++) { + bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1)); + for (j = 0; j < num_possible_cpus(); j++) { + bcm_bprintf(strbuf, "%d\t", hist[j][i]); + } + bcm_bprintf(strbuf, "\n"); + } + bcm_bprintf(strbuf, "Per CPU Total \t"); + total = 0; + for (i = 0; i < num_possible_cpus(); i++) { + for (j = 0; j < HIST_BIN_SIZE; j++) { + per_cpu_total[i] += (hist[i][j] * (1<<(j+1))); + } + bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]); + total += per_cpu_total[i]; + } + bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total); + + return; +} + +static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p) +{ + int i; + + bcm_bprintf(strbuf, "CPU: \t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\n"); + + bcm_bprintf(strbuf, "Val: \t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%u\t", *(p+i)); + bcm_bprintf(strbuf, "\n"); + return; +} + +void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_info_t *dhd; + + if (dhdp == NULL || strbuf == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n", + __FUNCTION__, dhdp, strbuf)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + bcm_bprintf(strbuf, "\ncpu_online_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt); + + bcm_bprintf(strbuf, "cpu_offline_cnt:\n"); + 
dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt); + + bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n", + dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt, + dhd->txc_sched_cnt); +#ifdef DHD_LB_RXP + bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt); + bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LB_RXC + bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist); +#endif /* DHD_LB_RXC */ + + +#ifdef DHD_LB_TXC + bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist); +#endif /* DHD_LB_TXC */ +} + +static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count) +{ + uint32 bin_power; + uint32 *p = NULL; + + bin_power = next_larger_power2(count); + + switch (bin_power) { + case 0: break; + case 1: /* Fall through intentionally */ + case 2: p = bin + 0; break; + case 4: p = bin + 1; break; + case 8: p = bin + 2; break; + case 16: p = bin + 3; break; + case 32: p = bin + 4; break; + case 64: p = bin + 5; break; + case 128: p = bin + 6; break; + default : p = bin + 7; break; + } + if (p) + *p = *p + 1; + return; +} + +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); +} + +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); +} + +#endif /* DHD_LB_STATS */ +#endif /* DHD_LB */ + + +#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) +int g_frameburst = 1; +#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ + +static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); + +/* DHD Perimiter lock only used in router with bypass forwarding. 
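+ * In this build the DHD_PERIM_* macros below therefore compile to no-ops and
+ * only mark the intended locking points.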
*/ +#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) +#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) +#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) + +#ifdef PCIE_FULL_DONGLE +#if defined(BCM_GMAC3) +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else /* ! BCM_GMAC3 */ +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ + spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ + spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, + struct list_head *snapshot_list); +static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#endif /* ! BCM_GMAC3 */ +#endif /* PCIE_FULL_DONGLE */ + +/* Control fw roaming */ +uint dhd_roam_disable = 0; + +#ifdef BCMDBGFS +extern int dhd_dbg_init(dhd_pub_t *dhdp); +extern void dhd_dbg_remove(void); +#endif + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +#ifdef BCMSDIO +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); + +#endif /* BCMSDIO */ + + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ + + + +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +module_param(allow_delay_fwdl, int, 0); + +extern char dhd_version[]; +extern char fw_version[]; + +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); + +#ifdef WLMEDIA_HTSF +void 
htsf_update(dhd_info_t *dhd, void *data); +tsf_t prev_tsf, cur_tsf; + +uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); +static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); +static void dhd_dump_latency(void); +static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_dump_htsfhisto(histo_t *his, char *s); +#endif /* WLMEDIA_HTSF */ + +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ + +static void dhd_dpc(ulong data); +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); +void dhd_os_wd_timer_extend(void *bus, bool extend); + +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, size_t pktlen, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if defined(CONFIG_PM_SLEEP) +static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + bool suspend = FALSE; + dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); + + BCM_REFERENCE(dhdinfo); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + suspend = TRUE; + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + suspend = FALSE; + break; + } + +#if defined(SUPPORT_P2P_GO_PS) +#ifdef PROP_TXSTATUS + if (suspend) { + DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); + dhd_wlfc_suspend(&dhdinfo->pub); + DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); + } else + dhd_wlfc_resume(&dhdinfo->pub); +#endif /* PROP_TXSTATUS */ +#endif /* defined(SUPPORT_P2P_GO_PS) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 39)) + dhd_mmc_suspend = suspend; + smp_mb(); +#endif + + return ret; +} + +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_pm_notifier_registered = FALSE; + +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* CONFIG_PM_SLEEP */ + +/* Request scheduling of the bus rx frame */ +static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb); +static void dhd_os_rxflock(dhd_pub_t *pub); +static void dhd_os_rxfunlock(dhd_pub_t *pub); + +/** priv_link is the link between netdev and the dhdif and dhd_info structs. */ +typedef struct dhd_dev_priv { + dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ + dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ + int ifidx; /* interface index */ +} dhd_dev_priv_t; + +#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) +#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) +#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) +#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) + +/** Clear the dhd net_device's private structure. 
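+ * Resets the cached dhd and ifp pointers to NULL and the interface index to
+ * DHD_BAD_IF.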
*/ +static inline void +dhd_dev_priv_clear(struct net_device * dev) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = (dhd_info_t *)NULL; + dev_priv->ifp = (dhd_if_t *)NULL; + dev_priv->ifidx = DHD_BAD_IF; +} + +/** Setup the dhd net_device's private structure. */ +static inline void +dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, + int ifidx) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = dhd; + dev_priv->ifp = ifp; + dev_priv->ifidx = ifidx; +} + +#ifdef PCIE_FULL_DONGLE + +/** Dummy objects are defined with state representing bad|down. + * Performance gains from reducing branch conditionals, instruction parallelism, + * dual issue, reducing load shadows, avail of larger pipelines. + * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer + * is accessed via the dhd_sta_t. + */ + +/* Dummy dhd_info object */ +dhd_info_t dhd_info_null = { +#if defined(BCM_GMAC3) + .fwdh = FWDER_NULL, +#endif + .pub = { + .info = &dhd_info_null, +#ifdef DHDTCPACK_SUPPRESS + .tcpack_sup_mode = TCPACK_SUP_REPLACE, +#endif /* DHDTCPACK_SUPPRESS */ + .up = FALSE, + .busstate = DHD_BUS_DOWN + } +}; +#define DHD_INFO_NULL (&dhd_info_null) +#define DHD_PUB_NULL (&dhd_info_null.pub) + +/* Dummy netdevice object */ +struct net_device dhd_net_dev_null = { + .reg_state = NETREG_UNREGISTERED +}; +#define DHD_NET_DEV_NULL (&dhd_net_dev_null) + +/* Dummy dhd_if object */ +dhd_if_t dhd_if_null = { +#if defined(BCM_GMAC3) + .fwdh = FWDER_NULL, +#endif +#ifdef WMF + .wmf = { .wmf_enable = TRUE }, +#endif + .info = DHD_INFO_NULL, + .net = DHD_NET_DEV_NULL, + .idx = DHD_BAD_IF +}; +#define DHD_IF_NULL (&dhd_if_null) + +#define DHD_STA_NULL ((dhd_sta_t *)NULL) + +/** Interface STA list management. */ + +/** Fetch the dhd_if object, given the interface index in the dhd. */ +static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx); + +/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ +static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); +static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); + +/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */ +static void dhd_if_del_sta_list(dhd_if_t * ifp); +static void dhd_if_flush_sta(dhd_if_t * ifp); + +/* Construct/Destruct a sta pool. */ +static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); +static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); + + +/* Return interface pointer */ +static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) +{ + ASSERT(ifidx < DHD_MAX_IFS); + + if (ifidx >= DHD_MAX_IFS) + return NULL; + + return dhdp->info->iflist[ifidx]; +} + +/** Reset a dhd_sta object and free into the dhd pool. */ +static void +dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) +{ + int prio; + + ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + /* + * Flush and free all packets in all flowring's queues belonging to sta. + * Packets in flow ring will be flushed later. 
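+ * Only the host-side flow queue is drained here, under the flowring lock,
+ * with the ring marked FLOW_RING_STATUS_STA_FREEING while the STA is freed.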
+ */ + for (prio = 0; prio < (int)NUMPRIO; prio++) { + uint16 flowid = sta->flowid[prio]; + + if (flowid != FLOWID_INVALID) { + unsigned long flags; + flow_queue_t * queue = dhd_flow_queue(dhdp, flowid); + flow_ring_node_t * flow_ring_node; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; + + if (!DHD_FLOW_QUEUE_EMPTY(queue)) { + void * pkt; + while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) { + PKTFREE(dhdp->osh, pkt, TRUE); + } + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + } + + sta->flowid[prio] = FLOWID_INVALID; + } + + id16_map_free(dhdp->staid_allocator, sta->idx); + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ + sta->ifidx = DHD_BAD_IF; + bzero(sta->ea.octet, ETHER_ADDR_LEN); + INIT_LIST_HEAD(&sta->list); + sta->idx = ID16_INVALID; /* implying free */ +} + +/** Allocate a dhd_sta object from the dhd pool. */ +static dhd_sta_t * +dhd_sta_alloc(dhd_pub_t * dhdp) +{ + uint16 idx; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + idx = id16_map_alloc(dhdp->staid_allocator); + if (idx == ID16_INVALID) { + DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); + sta = &sta_pool[idx]; + + ASSERT((sta->idx == ID16_INVALID) && + (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); + + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + + sta->idx = idx; /* implying allocated */ + + return sta; +} + +/** Delete all STAs in an interface's STA list. */ +static void +dhd_if_del_sta_list(dhd_if_t *ifp) +{ + dhd_sta_t *sta, *next; + unsigned long flags; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { +#if defined(BCM_GMAC3) + if (ifp->fwdh) { + /* Remove sta from WOFA forwarder. */ + fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return; +} + +/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */ +static void +dhd_if_flush_sta(dhd_if_t * ifp) +{ +#if defined(BCM_GMAC3) + + if (ifp && (ifp->fwdh != FWDER_NULL)) { + dhd_sta_t *sta, *next; + unsigned long flags; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + /* Remove any sta entry from WOFA forwarder. */ + fwder_flush(ifp->fwdh, (wofa_t)sta); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + } +#endif /* BCM_GMAC3 */ +} + +/** Construct a pool of dhd_sta_t objects to be used by interfaces. */ +static int +dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void * staid_allocator; + + ASSERT(dhdp != (dhd_pub_t *)NULL); + ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. 
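+ * The pool holds (max_sta + 1) entries so that valid ids run 1..max_sta;
+ * entry 0 is never handed out.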
*/ + staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); + if (staid_allocator == NULL) { + DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Pre allocate a pool of dhd_sta objects (one extra). */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ + sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); + if (sta_pool == NULL) { + DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); + id16_map_fini(dhdp->osh, staid_allocator); + return BCME_ERROR; + } + + dhdp->sta_pool = sta_pool; + dhdp->staid_allocator = staid_allocator; + + /* Initialize all sta(s) for the pre-allocated free pool. */ + bzero((uchar *)sta_pool, sta_pool_memsz); + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } + + return BCME_OK; +} + +/** Destruct the pool of dhd_sta_t objects. + * Caller must ensure that no STA objects are currently associated with an if. + */ +static void +dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) +{ + dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + + if (sta_pool) { + int idx; + int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + for (idx = 1; idx <= max_sta; idx++) { + ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); + ASSERT(sta_pool[idx].idx == ID16_INVALID); + } + MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); + dhdp->sta_pool = NULL; + } + + id16_map_fini(dhdp->osh, dhdp->staid_allocator); + dhdp->staid_allocator = NULL; +} + +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void +dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void *staid_allocator; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + staid_allocator = dhdp->staid_allocator; + + if (!sta_pool) { + DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); + return; + } + + if (!staid_allocator) { + DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); + return; + } + + /* clear free pool */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + bzero((uchar *)sta_pool, sta_pool_memsz); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + id16_map_clear(staid_allocator, max_sta, 1); + + /* Initialize all sta(s) for the pre-allocated free pool. */ + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } +} + +/** Find STA with MAC address ea in an interface's STA list. 
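+ * Returns DHD_STA_NULL if the interface does not exist or no entry matches
+ * the given MAC; the list walk is done under the interface's sta_list lock.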
*/ +dhd_sta_t * +dhd_find_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + return sta; + } + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return DHD_STA_NULL; +} + +/** Add STA into the interface's STA list. */ +dhd_sta_t * +dhd_add_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + sta = dhd_sta_alloc((dhd_pub_t *)pub); + if (sta == DHD_STA_NULL) { + DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); + + /* link the sta and the dhd interface */ + sta->ifp = ifp; + sta->ifidx = ifidx; + INIT_LIST_HEAD(&sta->list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_add_tail(&sta->list, &ifp->sta_list); + +#if defined(BCM_GMAC3) + if (ifp->fwdh) { + ASSERT(ISALIGNED(ea, 2)); + /* Add sta to WOFA forwarder. */ + fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return sta; +} + +/** Delete STA from the interface's STA list. */ +void +dhd_del_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { +#if defined(BCM_GMAC3) + if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */ + ASSERT(ISALIGNED(ea, 2)); + fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, + ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + return; +} + +/** Add STA if it doesn't exist. Not reentrant. 
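+ * The lookup and the add below are not covered by a single sta_list lock
+ * hold, so callers must serialise attempts to add the same MAC address.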
*/ +dhd_sta_t* +dhd_findadd_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + + sta = dhd_find_sta(pub, ifidx, ea); + + if (!sta) { + /* Add entry */ + sta = dhd_add_sta(pub, ifidx, ea); + } + + return sta; +} + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +#if !defined(BCM_GMAC3) +static struct list_head * +dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) +{ + unsigned long flags; + dhd_sta_t *sta, *snapshot; + + INIT_LIST_HEAD(snapshot_list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + /* allocate one and add to snapshot */ + snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); + if (snapshot == NULL) { + DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); + continue; + } + + memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); + + INIT_LIST_HEAD(&snapshot->list); + list_add_tail(&snapshot->list, snapshot_list); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return snapshot_list; +} + +static void +dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) +{ + dhd_sta_t *sta, *next; + + list_for_each_entry_safe(sta, next, snapshot_list, list) { + list_del(&sta->list); + MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); + } +} +#endif /* !BCM_GMAC3 */ +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else +static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } +static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} +static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } +static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} +static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} +dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +void dhd_del_sta(void *pub, int ifidx, void *ea) {} +#endif /* PCIE_FULL_DONGLE */ + + +#if defined(DHD_LB) + +#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) +/** + * dhd_tasklet_schedule - Function that runs in IPI context of the destination + * CPU and schedules a tasklet. + * @tasklet: opaque pointer to the tasklet + */ +static INLINE void +dhd_tasklet_schedule(void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); +} + +/** + * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU + * @tasklet: tasklet to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * smp_call_function_single with no wait and the tasklet_schedule function + * will be invoked to schedule the specified tasklet on the requested CPU. + */ +static void +dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) +{ + const int wait = 0; + smp_call_function_single(on_cpu, + dhd_tasklet_schedule, (void *)tasklet, wait); +} +#endif /* DHD_LB_TXC || DHD_LB_RXC */ + + +#if defined(DHD_LB_TXC) +/** + * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet + * on another cpu. 
The tx_compl_tasklet will take care of DMA unmapping and + * freeing the packets placed in the tx_compl workq + */ +void +dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->txc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->tx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + } else { + schedule_work(&dhd->tx_compl_dispatcher_work); + } +} + +static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->tx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + else + dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); + put_online_cpus(); +} + +#endif /* DHD_LB_TXC */ + + +#if defined(DHD_LB_RXC) +/** + * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet + * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers + * in the H2D RxBuffer Post common ring, by using the recycled pktids that were + * placed in the rx_compl workq. + * + * @dhdp: pointer to dhd_pub object + */ +void +dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->rxc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->rx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + } else { + schedule_work(&dhd->rx_compl_dispatcher_work); + } +} + +static void dhd_rx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->tx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + else + dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); + put_online_cpus(); +} + +#endif /* DHD_LB_RXC */ + + +#if defined(DHD_LB_RXP) +/** + * dhd_napi_poll - Load balance napi poll function to process received + * packets and send up the network stack using netif_receive_skb() + * + * @napi: napi object in which context this poll function is invoked + * @budget: number of packets to be processed. + * + * Fetch the dhd_info given the rx_napi_struct. Move all packets from the + * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock). + * Dequeue each packet from head of rx_process_queue, fetch the ifid from the + * packet tag and sendup. 
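+ * The rx_napi_queue lock is held only for the splice onto the local
+ * rx_process_queue; the sendup loop then runs without the lock.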
+ */ +static int +dhd_napi_poll(struct napi_struct *napi, int budget) +{ + int ifid; + const int pkt_count = 1; + const int chan = 0; + struct sk_buff * skb; + unsigned long flags; + struct dhd_info *dhd; + int processed = 0; + struct sk_buff_head rx_process_queue; + + dhd = container_of(napi, struct dhd_info, rx_napi_struct); + DHD_INFO(("%s napi_queue<%d> budget<%d>\n", + __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); + + __skb_queue_head_init(&rx_process_queue); + + /* extract the entire rx_napi_queue into local rx_process_queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) { + OSL_PREFETCH(skb->data); + + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + + DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n", + __FUNCTION__, skb, ifid)); + + dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan); + processed++; + } + + DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed); + + DHD_INFO(("%s processed %d\n", __FUNCTION__, processed)); + napi_complete(napi); + + return budget - 1; +} + +/** + * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi + * poll list. This function may be invoked via the smp_call_function_single + * from a remote CPU. + * + * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ) + * after the napi_struct is added to the softnet data's poll_list + * + * @info: pointer to a dhd_info struct + */ +static void +dhd_napi_schedule(void *info) +{ + dhd_info_t *dhd = (dhd_info_t *)info; + + DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n", + __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu))); + + /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */ + if (napi_schedule_prep(&dhd->rx_napi_struct)) { + __napi_schedule(&dhd->rx_napi_struct); + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); + } + + /* + * If the rx_napi_struct was already running, then we let it complete + * processing all its packets. The rx_napi_struct may only run on one + * core at a time, to avoid out-of-order handling. + */ +} + +/** + * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ + * action after placing the dhd's rx_process napi object in the the remote CPU's + * softnet data's poll_list. + * + * @dhd: dhd_info which has the rx_process napi object + * @on_cpu: desired remote CPU id + */ +static INLINE int +dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) +{ + int wait = 0; /* asynchronous IPI */ + + DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", + __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); + + if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) { + DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n", + __FUNCTION__, on_cpu)); + } + + DHD_LB_STATS_INCR(dhd->napi_sched_cnt); + + return 0; +} + +/* + * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on + * Why should we do this? + * The candidacy algorithm is run from the call back function + * registered to CPU hotplug notifier. This call back happens from Worker + * context. The dhd_napi_schedule_on is also from worker context. + * Note that both of this can run on two different CPUs at the same time. + * So we can possibly have a window where a given CPUn is being brought + * down from CPUm while we try to run a function on CPUn. 
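+ * (Concretely, the CPU targeted by smp_call_function_single() in
+ * dhd_napi_schedule_on() could be mid-teardown when the IPI is sent.)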
+ * To prevent this its better have the whole code to execute an SMP + * function under get_online_cpus. + * This function call ensures that hotplug mechanism does not kick-in + * until we are done dealing with online CPUs + * If the hotplug worker is already running, no worries because the + * candidacy algo would then reflect the same in dhd->rx_napi_cpu. + * + * The below mentioned code structure is proposed in + * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt + * for the question + * Q: I need to ensure that a particular cpu is not removed when there is some + * work specific to this cpu is in progress + * + * According to the documentation calling get_online_cpus is NOT required, if + * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can + * run from Work Queue context we have to call these functions + */ +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_napi_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->rx_napi_cpu); + if (!cpu_online(cpu)) + dhd_napi_schedule(dhd); + else + dhd_napi_schedule_on(dhd, cpu); + put_online_cpus(); +} + +/** + * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct + * to run on another CPU. The rx_napi_struct's poll function will retrieve all + * the packets enqueued into the rx_napi_queue and sendup. + * The producer's rx packet queue is appended to the rx_napi_queue before + * dispatching the rx_napi_struct. + */ +void +dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) +{ + unsigned long flags; + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + int on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, + skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); + + /* append the producer's queue of packets to the napi's rx process queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->rx_napi_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_napi_schedule(dhd); + } else { + schedule_work(&dhd->rx_napi_dispatcher_work); + } +} + +/** + * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue + */ +void +dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + dhd_info_t *dhd = dhdp->info; + + DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, + pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); + __skb_queue_tail(&dhd->rx_pend_queue, pkt); +} +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ + +static void dhd_memdump_work_handler(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, dhd_memdump_work.work); + + BCM_REFERENCE(dhd); +#ifdef BCMPCIE + dhd_prot_collect_memdump(&dhd->pub); +#endif +} + + +/** Returns dhd iflist index corresponding the the bssidx provided by apps */ +int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_if_t *ifp; + dhd_info_t *dhd = dhdp->info; + int i; + + ASSERT(bssidx < DHD_MAX_IFS); + ASSERT(dhdp); + + for (i = 0; i < DHD_MAX_IFS; 
i++) { + ifp = dhd->iflist[i]; + if (ifp && (ifp->bssidx == bssidx)) { + DHD_TRACE(("Index manipulated for %s from %d to %d\n", + ifp->name, bssidx, i)); + break; + } + } + return i; +} + +static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb) +{ + uint32 store_idx; + uint32 sent_idx; + + if (!skb) { + DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n")); + return BCME_ERROR; + } + + dhd_os_rxflock(dhdp); + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + if (dhdp->skbbuf[store_idx] != NULL) { + /* Make sure the previous packets are processed */ + dhd_os_rxfunlock(dhdp); +#ifdef RXF_DEQUEUE_ON_BUSY + DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx)); + return BCME_BUSY; +#else /* RXF_DEQUEUE_ON_BUSY */ + DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx)); + /* removed msleep here, should use wait_event_timeout if we + * want to give rx frame thread a chance to run + */ +#if defined(WAIT_DEQUEUE) + OSL_SLEEP(1); +#endif + return BCME_ERROR; +#endif /* RXF_DEQUEUE_ON_BUSY */ + } + DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n", + skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); + dhdp->skbbuf[store_idx] = skb; + dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); + dhd_os_rxfunlock(dhdp); + + return BCME_OK; +} + +static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) +{ + uint32 store_idx; + uint32 sent_idx; + void *skb; + + dhd_os_rxflock(dhdp); + + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + skb = dhdp->skbbuf[sent_idx]; + + if (skb == NULL) { + dhd_os_rxfunlock(dhdp); + DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n", + store_idx, sent_idx)); + return NULL; + } + + dhdp->skbbuf[sent_idx] = NULL; + dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); + + DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n", + skb, sent_idx)); + + dhd_os_rxfunlock(dhdp); + + return skb; +} + +int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + if (prepost) { /* pre process */ + dhd_read_macaddr(dhd); + } else { /* post process */ + dhd_write_macaddr(&dhd->pub.mac); + } + + return 0; +} + +#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) +static bool +_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode) +{ + bool _apply = FALSE; + /* In case of IBSS mode, apply arp pkt filter */ + if (op_mode & DHD_FLAG_IBSS_MODE) { + _apply = TRUE; + goto exit; + } + /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */ + if ((dhd->arp_version == 1) && + (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) { + _apply = TRUE; + goto exit; + } + +exit: + return _apply; +} +#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */ + +void dhd_set_packet_filter(dhd_pub_t *dhd) +{ +#ifdef PKT_FILTER_SUPPORT + int i; + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + if (dhd_pkt_filter_enable) { + for (i = 0; i < dhd->pktfilter_count; i++) { + dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); + } + } +#endif /* PKT_FILTER_SUPPORT */ +} + +void dhd_enable_packet_filter(int value, dhd_pub_t *dhd) +{ +#ifdef PKT_FILTER_SUPPORT + int i; + + DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value)); + + if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { + DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__)); + return; + } + /* 1 - Enable packet filter, only allow unicast packet to send up */ + /* 0 - Disable packet 
filter */ + if (dhd_pkt_filter_enable && (!value || + (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) + { + for (i = 0; i < dhd->pktfilter_count; i++) { +#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER + if (value && (i == DHD_ARP_FILTER_NUM) && + !_turn_on_arp_filter(dhd, dhd->op_mode)) { + DHD_TRACE(("Do not turn on ARP white list pkt filter:" + "val %d, cnt %d, op_mode 0x%x\n", + value, i, dhd->op_mode)); + continue; + } +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + value, dhd_master_mode); + } + } +#endif /* PKT_FILTER_SUPPORT */ +} + +static int dhd_set_suspend(int value, dhd_pub_t *dhd) +{ +#ifndef SUPPORT_PM2_ONLY + int power_mode = PM_MAX; +#endif /* SUPPORT_PM2_ONLY */ +#ifdef SUPPORT_SENSORHUB + uint32 shub_msreq; +#endif /* SUPPORT_SENSORHUB */ + /* wl_pkt_filter_enable_t enable_parm; */ + char iovbuf[32]; + int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */ +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + int bcn_timeout = 0; +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + int roam_time_thresh = 0; /* (ms) */ +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + uint roamvar = 1; +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + int bcn_li_bcn; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint nd_ra_filter = 0; + int ret = 0; +#endif /* DHD_USE_EARLYSUSPEND */ +#ifdef PASS_ALL_MCAST_PKTS + struct dhd_info *dhdinfo; + uint32 allmulti; + uint i; +#endif /* PASS_ALL_MCAST_PKTS */ +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ + if (!dhd) + return -ENODEV; + +#ifdef PASS_ALL_MCAST_PKTS + dhdinfo = dhd->info; +#endif /* PASS_ALL_MCAST_PKTS */ + + DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", + __FUNCTION__, value, dhd->in_suspend)); + + dhd_suspend_lock(dhd); + +#ifdef CUSTOM_SET_CPUCORE + DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value)); + /* set specific cpucore */ + dhd_set_cpucore(dhd, TRUE); +#endif /* CUSTOM_SET_CPUCORE */ + if (dhd->up) { + if (value && dhd->in_suspend) { +#ifdef PKT_FILTER_SUPPORT + dhd->early_suspended = 1; +#endif + /* Kernel suspended */ + DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__)); + +#ifdef SUPPORT_SENSORHUB + shub_msreq = 1; + if (dhd->info->shub_enable == 1) { + bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + +#ifndef SUPPORT_PM2_ONLY + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter, + * only allow unicast packet to send up + */ + dhd_enable_packet_filter(1, dhd); +#endif /* PKT_FILTER_SUPPORT */ + +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 0; + bcm_mkiovar("allmulti", (char *)&allmulti, 4, + iovbuf, sizeof(iovbuf)); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, i); + } +#endif /* PASS_ALL_MCAST_PKTS */ + + /* If DTIM skip is set up as default, force it to wake + * each third DTIM for better power savings. 
Note that + * one side effect is a chance to miss BC/MC packet. + */ +#ifdef WLTDLS + /* Do not set bcn_li_ditm on WFD mode */ + if (dhd->tdls_mode) { + bcn_li_dtim = 0; + } else +#endif /* WLTDLS */ + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd); + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), + TRUE, 0) < 0) + DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); + +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND; + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND; + bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + /* Disable firmware roaming during suspend */ + bcm_mkiovar("roam_off", (char *)&roamvar, 4, + iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 0; + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + if (FW_SUPPORTED(dhd, ndoe)) { + /* enable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 1; + bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = CUSTOM_INTR_WIDTH; + bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#endif /* DHD_USE_EARLYSUSPEND */ + } else { +#ifdef PKT_FILTER_SUPPORT + dhd->early_suspended = 0; +#endif + /* Kernel resumed */ + DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__)); + +#ifdef SUPPORT_SENSORHUB + shub_msreq = 0; + if (dhd->info->shub_enable == 1) { + bcm_mkiovar("shub_msreq", (char *)&shub_msreq, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Sensor Hub move/stop stop:" + "failed %d\n", __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + + +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = 0; + bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#ifndef SUPPORT_PM2_ONLY + power_mode = PM_FAST; + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhd); +#endif /* PKT_FILTER_SUPPORT */ +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 1; + bcm_mkiovar("allmulti", (char 
*)&allmulti, 4, + iovbuf, sizeof(iovbuf)); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, i); + } +#endif /* PASS_ALL_MCAST_PKTS */ + + /* restore pre-suspend setting for dtim_skip */ + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT; + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = 2000; + bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + roamvar = dhd_roam_disable; + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, + sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 1; + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + if (FW_SUPPORTED(dhd, ndoe)) { + /* disable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 0; + bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } +#endif /* DHD_USE_EARLYSUSPEND */ + } + } + dhd_suspend_unlock(dhd); + + return 0; +} + +static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) +{ + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_OS_WAKE_LOCK(dhdp); + DHD_PERIM_LOCK(dhdp); + + /* Set flag when early suspend was called */ + dhdp->in_suspend = val; + if ((force || !dhdp->suspend_disable_flag) && + dhd_support_sta_mode(dhdp)) + { + ret = dhd_set_suspend(val, dhdp); + } + + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +static void dhd_early_suspend(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 1, 0); +} + +static void dhd_late_resume(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 0, 0); +} +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +/* + * Generalized timeout mechanism. Uses spin sleep with exponential back-off until + * the sleep time reaches one jiffy, then switches over to task delay. 
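+ * (The spin delay doubles on each poll until it reaches one jiffy's worth of
+ * microseconds; beyond that point schedule_timeout(1) is used instead.)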
Usage: + * + * dhd_timeout_start(&tmo, usec); + * while (!dhd_timeout_expired(&tmo)) + * if (poll_something()) + * break; + * if (dhd_timeout_expired(&tmo)) + * fatal(); + */ + +void +dhd_timeout_start(dhd_timeout_t *tmo, uint usec) +{ + tmo->limit = usec; + tmo->increment = 0; + tmo->elapsed = 0; + tmo->tick = jiffies_to_usecs(1); +} + +int +dhd_timeout_expired(dhd_timeout_t *tmo) +{ + /* Does nothing the first call */ + if (tmo->increment == 0) { + tmo->increment = 1; + return 0; + } + + if (tmo->elapsed >= tmo->limit) + return 1; + + /* Add the delay that's about to take place */ + tmo->elapsed += tmo->increment; + + if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) { + OSL_DELAY(tmo->increment); + tmo->increment *= 2; + if (tmo->increment > tmo->tick) + tmo->increment = tmo->tick; + } else { + wait_queue_head_t delay_wait; + DECLARE_WAITQUEUE(wait, current); + init_waitqueue_head(&delay_wait); + add_wait_queue(&delay_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + (void)schedule_timeout(1); + remove_wait_queue(&delay_wait, &wait); + set_current_state(TASK_RUNNING); + } + + return 0; +} + +int +dhd_net2idx(dhd_info_t *dhd, struct net_device *net) +{ + int i = 0; + + if (!dhd) { + DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__)); + return DHD_BAD_IF; + } + + while (i < DHD_MAX_IFS) { + if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net)) + return i; + i++; + } + + return DHD_BAD_IF; +} + +struct net_device * dhd_idx2net(void *pub, int ifidx) +{ + struct dhd_pub *dhd_pub = (struct dhd_pub *)pub; + struct dhd_info *dhd_info; + + if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS) + return NULL; + dhd_info = dhd_pub->info; + if (dhd_info && dhd_info->iflist[ifidx]) + return dhd_info->iflist[ifidx]->net; + return NULL; +} + +int +dhd_ifname2idx(dhd_info_t *dhd, char *name) +{ + int i = DHD_MAX_IFS; + + ASSERT(dhd); + + if (name == NULL || *name == '\0') + return 0; + + while (--i > 0) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ)) + break; + + DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); + + return i; /* default - the primary interface */ +} + +char * +dhd_ifname(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + ASSERT(dhd); + + if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx]->net) + return dhd->iflist[ifidx]->net->name; + + return ""; +} + +uint8 * +dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx) +{ + int i; + dhd_info_t *dhd = (dhd_info_t *)dhdp; + + ASSERT(dhd); + for (i = 0; i < DHD_MAX_IFS; i++) + if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx) + return dhd->iflist[i]->mac_addr; + + return NULL; +} + + +static void +_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) +{ + struct net_device *dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *mclist; +#endif + uint32 allmulti, cnt; + + wl_ioctl_t ioc; + char *buf, *bufp; + uint buflen; + int ret; + + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx)); + return; + } + dev = dhd->iflist[ifidx]->net; + if (!dev) + return; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt = 
netdev_mc_count(dev); +#else + cnt = dev->mc_count; +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + + /* Determine initial value of allmulti flag */ + allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; + +#ifdef PASS_ALL_MCAST_PKTS +#ifdef PKT_FILTER_SUPPORT + if (!dhd->pub.early_suspended) +#endif /* PKT_FILTER_SUPPORT */ + allmulti = TRUE; +#endif /* PASS_ALL_MCAST_PKTS */ + + /* Send down the multicast list first. */ + + + buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); + if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + return; + } + + strncpy(bufp, "mcast_list", buflen - 1); + bufp[buflen - 1] = '\0'; + bufp += strlen("mcast_list") + 1; + + cnt = htol32(cnt); + memcpy(bufp, &cnt, sizeof(cnt)); + bufp += sizeof(cnt); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + netdev_for_each_mc_addr(ha, dev) { + if (!cnt) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + cnt--; + } +#else /* LINUX < 2.6.35 */ + for (mclist = dev->mc_list; (mclist && (cnt > 0)); + cnt--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + allmulti = cnt ? TRUE : allmulti; + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Now send the allmulti setting. This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + + buflen = sizeof("allmulti") + sizeof(allmulti); + if (!(buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx))); + return; + } + allmulti = htol32(allmulti); + + if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) { + DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n", + dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen)); + MFREE(dhd->pub.osh, buf, buflen); + return; + } + + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set allmulti %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ + + allmulti = (dev->flags & IFF_PROMISC) ? 
TRUE : FALSE; + + allmulti = htol32(allmulti); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_PROMISC; + ioc.buf = &allmulti; + ioc.len = sizeof(allmulti); + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set promisc %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } +} + +int +_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr) +{ + char buf[32]; + wl_ioctl_t ioc; + int ret; + + if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) { + DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx))); + return -1; + } + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = 32; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); + } else { + memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); + if (ifidx == 0) + memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); + } + + return ret; +} + +#ifdef SOFTAP +extern struct net_device *ap_net_dev; +extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */ +#endif + +#ifdef DHD_PSTA +/* Get psta/psr configuration configuration */ +int dhd_get_psta_mode(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return (int)dhd->psta_mode; +} +/* Set psta/psr configuration configuration */ +int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val) +{ + dhd_info_t *dhd = dhdp->info; + dhd->psta_mode = val; + return 0; +} +#endif /* DHD_PSTA */ + +static void +dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_event_t *if_event = event_info; + struct net_device *ndev; + int ifidx, bssidx; + int ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + struct wireless_dev *vwdev, *primary_wdev; + struct net_device *primary_ndev; +#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ + + if (event != DHD_WQ_WORK_IF_ADD) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + bssidx = if_event->event.bssidx; + DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx)); + + /* This path is for non-android case */ + /* The interface name in host and in event msg are same */ + /* if name in event msg is used to create dongle if list on host */ + ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name, + if_event->mac, bssidx, TRUE, if_event->name); + if (!ndev) { + DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__)); + goto done; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); + if (unlikely(!vwdev)) { + DHD_ERROR(("Could not allocate wireless device\n")); + goto done; + } + primary_ndev = dhd->pub.info->iflist[0]->net; + primary_wdev = ndev_to_wdev(primary_ndev); + vwdev->wiphy = primary_wdev->wiphy; + vwdev->iftype = if_event->event.role; + vwdev->netdev = ndev; + ndev->ieee80211_ptr = vwdev; + SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy)); + DHD_ERROR(("virtual interface(%s) is created\n", if_event->name)); +#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 
11, 0)) */ + + DHD_PERIM_UNLOCK(&dhd->pub); + ret = dhd_register_if(&dhd->pub, ifidx, TRUE); + DHD_PERIM_LOCK(&dhd->pub); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + goto done; + } +#ifdef PCIE_FULL_DONGLE + /* Turn on AP isolation in the firmware for interfaces operating in AP mode */ + if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) { + char iovbuf[WLC_IOCTL_SMLEN]; + uint32 var_int = 1; + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx); + + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + } + } +#endif /* PCIE_FULL_DONGLE */ + +done: + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx; + dhd_if_event_t *if_event = event_info; + + + if (event != DHD_WQ_WORK_IF_DEL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + DHD_TRACE(("Removing interface with idx %d\n", ifidx)); + + DHD_PERIM_UNLOCK(&dhd->pub); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + DHD_PERIM_LOCK(&dhd->pub); + + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_t *ifp = event_info; + + if (event != DHD_WQ_WORK_SET_MAC) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + +#ifdef SOFTAP + { + unsigned long flags; + bool in_ap = FALSE; + DHD_GENERAL_LOCK(&dhd->pub, flags); + in_ap = (ap_net_dev != NULL); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + if (in_ap) { + DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. 
\n", + ifp->net->name)); + goto done; + } + } +#endif /* SOFTAP */ + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__)); + ifp->set_macaddress = FALSE; + if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0) + DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__)); + else + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + +done: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_t *ifp = event_info; + int ifidx; + + if (event != DHD_WQ_WORK_SET_MCAST_LIST) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + +#ifdef SOFTAP + { + bool in_ap = FALSE; + unsigned long flags; + DHD_GENERAL_LOCK(&dhd->pub, flags); + in_ap = (ap_net_dev != NULL); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + if (in_ap) { + DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n", + ifp->net->name)); + ifp->set_multicast = FALSE; + goto done; + } + } +#endif /* SOFTAP */ + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + ifidx = ifp->idx; + + + _dhd_set_multicast_list(dhd, ifidx); + DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx)); + +done: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static int +dhd_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + struct sockaddr *sa = (struct sockaddr *)addr; + int ifidx; + dhd_if_t *dhdif; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return -1; + + dhdif = dhd->iflist[ifidx]; + + dhd_net_if_lock_local(dhd); + memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN); + dhdif->set_macaddress = TRUE; + dhd_net_if_unlock_local(dhd); + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC, + dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW); + return ret; +} + +static void +dhd_set_multicast_list(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return; + + dhd->iflist[ifidx]->set_multicast = TRUE; + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], + DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW); +} + +#ifdef PROP_TXSTATUS +int +dhd_os_wlfc_block(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + ASSERT(di != NULL); + spin_lock_bh(&di->wlfc_spinlock); + return 1; +} + +int +dhd_os_wlfc_unblock(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + + ASSERT(di != NULL); + spin_unlock_bh(&di->wlfc_spinlock); + return 1; +} + +#endif /* PROP_TXSTATUS */ + +#if defined(DHD_8021X_DUMP) +void +dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt) +{ + uint8 *dump_data; + uint16 protocol; + char *ifname; + + dump_data = PKTDATA(osh, pkt); + protocol = (dump_data[12] << 8) | dump_data[13]; + ifname = ndev ? 
ndev->name : "N/A"; + + if (protocol == ETHER_TYPE_802_1X) { + dhd_dump_eapol_4way_message(ifname, dump_data, TRUE); + } +} +#endif /* DHD_8021X_DUMP */ + +/* This routine do not support Packet chain feature, Currently tested for + * proxy arp feature + */ +int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) +{ + struct sk_buff *skb; + void *skbhead = NULL; + void *skbprev = NULL; + dhd_if_t *ifp; + ASSERT(!PKTISCHAINED(p)); + skb = PKTTONATIVE(dhdp->osh, p); + + ifp = dhdp->info->iflist[ifidx]; + skb->dev = ifp->net; +#if defined(BCM_GMAC3) + /* Forwarder capable interfaces use WOFA based forwarding */ + if (ifp->fwdh) { + struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p); + uint16 * da = (uint16 *)(eh->ether_dhost); + wofa_t wofa; + ASSERT(ISALIGNED(da, 2)); + + wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx); + if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */ + if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) { + return BCME_OK; + } + } + PKTFRMNATIVE(dhdp->osh, p); + PKTFREE(dhdp->osh, p, FALSE); + return BCME_OK; + } +#endif /* BCM_GMAC3 */ + + skb->protocol = eth_type_trans(skb, skb->dev); + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx(skb); + } else { + if (dhdp->info->rxthread_enabled) { + if (!skbhead) { + skbhead = skb; + } else { + PKTSETNEXT(dhdp->osh, skbprev, skb); + } + skbprev = skb; + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + } + + if (dhdp->info->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + return BCME_OK; +} + +int BCMFASTPATH +__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = BCME_OK; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh = NULL; +#ifdef DHD_L2_FILTER + dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); +#endif +#ifdef DHD_8021X_DUMP + struct net_device *ndev; +#endif /* DHD_8021X_DUMP */ + + /* Reject if down */ + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + /* free the packet here since the caller won't */ + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTFREE(dhdp->osh, pktbuf, TRUE); + return -EBUSY; + } +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_L2_FILTER + /* if dhcp_unicast is enabled, we need to convert the */ + /* broadcast DHCP ACK/REPLY packets to Unicast. 
*/ + if (ifp->dhcp_unicast) { + uint8* mac_addr; + uint8* ehptr = NULL; + int ret; + ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr); + if (ret == BCME_OK) { + /* if given mac address having valid entry in sta list + * copy the given mac address, and return with BCME_OK + */ + if (dhd_find_sta(dhdp, ifidx, mac_addr)) { + ehptr = PKTDATA(dhdp->osh, pktbuf); + bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + } + } + + if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } + + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } +#endif /* DHD_L2_FILTER */ + /* Update multicast statistic */ + if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + eh = (struct ether_header *)pktdata; + + if (ETHER_ISMULTI(eh->ether_dhost)) + dhdp->tx_multicast++; + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) + atomic_inc(&dhd->pend_8021x_cnt); +#ifdef DHD_DHCP_DUMP + if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { + uint16 dump_hex; + uint16 source_port; + uint16 dest_port; + uint16 udp_port_pos; + uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN]; + uint8 ip_header_len = (*ptr8 & 0x0f)<<2; + struct net_device *net; + char *ifname; + + net = dhd_idx2net(dhdp, ifidx); + ifname = net ? net->name : "N/A"; + udp_port_pos = ETHER_HDR_LEN + ip_header_len; + source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1]; + dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3]; + if (source_port == 0x0044 || dest_port == 0x0044) { + dump_hex = (pktdata[udp_port_pos+249] << 8) | + pktdata[udp_port_pos+250]; + if (dump_hex == 0x0101) { + DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname)); + } else if (dump_hex == 0x0102) { + DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname)); + } else if (dump_hex == 0x0103) { + DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname)); + } else if (dump_hex == 0x0105) { + DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname)); + } else { + DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex)); + } +#ifdef DHD_LOSSLESS_ROAMING + if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) { + DHD_ERROR(("/%d", dhdp->dequeue_prec_map)); + } +#endif /* DHD_LOSSLESS_ROAMING */ + DHD_ERROR(("\n")); + } else if (source_port == 0x0043 || dest_port == 0x0043) { + DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); + } + } +#endif /* DHD_DHCP_DUMP */ + } else { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + + /* Look into the packet and update the packet priority */ +#ifndef PKTPRIO_OVERRIDE + if (PKTPRIO(pktbuf) == 0) +#endif /* !PKTPRIO_OVERRIDE */ + { +#ifdef QOS_MAP_SET + pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE); +#else + pktsetprio(pktbuf, FALSE); +#endif /* QOS_MAP_SET */ + } + + +#ifdef PCIE_FULL_DONGLE + /* + * Lkup the per interface hash table, for a matching flowring. If one is not + * available, allocate a unique flowid and add a flowring entry. + * The found or newly created flowid is placed into the pktbuf's tag. 
+ */ + ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf); + if (ret != BCME_OK) { + PKTCFREE(dhd->pub.osh, pktbuf, TRUE); + return ret; + } +#endif + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_supported(dhdp)) { + /* store the interface ID */ + DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx); + + /* store destination MAC in the tag as well */ + DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost); + + /* decide which FIFO this packet belongs to */ + if (ETHER_ISMULTI(eh->ether_dhost)) + /* one additional queue index (highest AC + 1) is used for bc/mc queue */ + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT); + else + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); + } else +#endif /* PROP_TXSTATUS */ + { + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + } + + /* Use bus module to send data frame */ +#ifdef WLMEDIA_HTSF + dhd_htsf_addtxts(dhdp, pktbuf); +#endif +#if defined(DHD_8021X_DUMP) + ndev = dhd_idx2net(dhdp, ifidx); + dhd_tx_dump(ndev, dhdp->osh, pktbuf); +#endif +#ifdef PROP_TXSTATUS + { + if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata, + dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) { + /* non-proptxstatus way */ +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ + } + } +#else +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ +#endif /* PROP_TXSTATUS */ + + return ret; +} + +int BCMFASTPATH +dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = 0; + unsigned long flags; + + DHD_GENERAL_LOCK(dhdp, flags); + if (dhdp->busstate == DHD_BUS_DOWN || + dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhdp->busstate)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT; + DHD_GENERAL_UNLOCK(dhdp, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + ret = -EBUSY; + goto exit; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + ret = __dhd_sendpkt(dhdp, ifidx, pktbuf); + +#ifdef DHD_PCIE_RUNTIMEPM +exit: +#endif + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT; + DHD_GENERAL_UNLOCK(dhdp, flags); + return ret; +} + +int BCMFASTPATH +dhd_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + int ret; + uint datalen; + void *pktbuf; + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp = NULL; + int ifidx; + unsigned long flags; +#ifdef WLMEDIA_HTSF + uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz; +#else + uint8 htsfdlystat_sz = 0; +#endif +#ifdef DHD_WMF + struct ether_header *eh; + uint8 *iph; +#endif /* DHD_WMF */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) { + /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. 
*/ + /* stop the network queue temporarily until resume done */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + if (!dhdpcie_is_resume_done(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_GENERAL_LOCK(&dhd->pub, flags); +#ifdef PCIE_FULL_DONGLE + if (dhd->pub.busstate == DHD_BUS_SUSPEND) { + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } +#endif /* PCIE_FULL_DONGLE */ + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + + /* Reject if down */ + if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN || + dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", + __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); + netif_stop_queue(net); + /* Send Event when bus down detected during data session */ + if (dhd->pub.up && !dhd->pub.hang_was_sent) { + DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); + dhd->pub.hang_reason = HANG_REASON_BUS_DOWN; + net_os_send_hang_message(net); + } +#ifdef PCIE_FULL_DONGLE + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } + + ifp = DHD_DEV_IFP(net); + ifidx = DHD_DEV_IFIDX(net); + BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb); + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); + netif_stop_queue(net); +#ifdef PCIE_FULL_DONGLE + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + ASSERT(ifidx == dhd_net2idx(dhd, net)); + ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx]))); + + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + + /* re-align socket buffer if "skb->data" is odd address */ + if (((unsigned long)(skb->data)) & 0x1) { + unsigned char *data = skb->data; + uint32 length = skb->len; + PKTPUSH(dhd->pub.osh, skb, 1); + memmove(skb->data, data, length); + PKTSETLEN(dhd->pub.osh, skb, length); + } + + datalen = PKTLEN(dhd->pub.osh, skb); + + /* Make sure there's enough room for any header */ + if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + dhd->pub.tx_realloc++; + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); + + dev_kfree_skb(skb); + if ((skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + 
dhd_ifname(&dhd->pub, ifidx))); + ret = -ENOMEM; + goto done; + } + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + } + + /* Convert to packet */ + if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { + DHD_ERROR(("%s: PKTFRMNATIVE failed\n", + dhd_ifname(&dhd->pub, ifidx))); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto done; + } + +#if defined(WLMEDIA_HTSF) + if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf); + struct ether_header *eh = (struct ether_header *)pktdata; + + if (!ETHER_ISMULTI(eh->ether_dhost) && + (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) { + eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS); + } + } +#endif + +#ifdef DHD_WMF + eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf); + iph = (uint8 *)eh + ETHER_HDR_LEN; + + /* WMF processing for multicast packets + * Only IPv4 packets are handled + */ + if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) && + (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) || + ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) { +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) + void *sdu_clone; + bool ucast_convert = FALSE; +#ifdef DHD_UCAST_UPNP + uint32 dest_ip; + + dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip); +#endif /* DHD_UCAST_UPNP */ +#ifdef DHD_IGMP_UCQUERY + ucast_convert |= dhd->pub.wmf_ucast_igmp_query && + (IPV4_PROT(iph) == IP_PROT_IGMP) && + (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY); +#endif /* DHD_IGMP_UCQUERY */ + if (ucast_convert) { + dhd_sta_t *sta; +#ifdef PCIE_FULL_DONGLE + unsigned long flags; +#endif + struct list_head snapshot_list; + struct list_head *wmf_ucforward_list; + + ret = NETDEV_TX_OK; + + /* For non BCM_GMAC3 platform we need a snapshot sta_list to + * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue. + */ + wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list); + + /* Convert upnp/igmp query to unicast for each assoc STA */ + list_for_each_entry(sta, wmf_ucforward_list, list) { + if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) { + ret = WMF_NOP; + break; + } + dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1); + } + DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list); + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + if (ret == NETDEV_TX_OK) + PKTFREE(dhd->pub.osh, pktbuf, TRUE); + + return ret; + } else +#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */ + { + /* There will be no STA info if the packet is coming from LAN host + * Pass as NULL + */ + ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0); + switch (ret) { + case WMF_TAKEN: + case WMF_DROP: + /* Either taken by WMF or we should drop it. 
+ * Exiting send path + */ +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return NETDEV_TX_OK; + default: + /* Continue the transmit path */ + break; + } + } + } +#endif /* DHD_WMF */ +#ifdef DHD_PSTA + /* PSR related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub, + ifidx, &pktbuf, TRUE) < 0)) { + DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__, + dhd_ifname(&dhd->pub, ifidx))); + } +#endif /* DHD_PSTA */ + +#ifdef DHDTCPACK_SUPPRESS + if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) { + /* If this packet has been hold or got freed, just return */ + if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) { + ret = 0; + goto done; + } + } else { + /* If this packet has replaced another packet and got freed, just return */ + if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) { + ret = 0; + goto done; + } + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* segmented SKB support (Kernel-3.18.y) */ + if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) { + PKTSETLINK(skb, NULL); + } + + ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf); + +done: + if (ret) { + ifp->stats.tx_dropped++; + dhd->pub.tx_dropped++; + } else { + +#ifdef PROP_TXSTATUS + /* tx_packets counter can counted only when wlfc is disabled */ + if (!dhd_wlfc_is_supported(&dhd->pub)) +#endif + { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } + } + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + BUZZZ_LOG(START_XMIT_END, 0); + + /* Return ok: we always eat the packet */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return 0; +#else + return NETDEV_TX_OK; +#endif +} + + +void +dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) +{ + struct net_device *net; + dhd_info_t *dhd = dhdp->info; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(dhd); + +#ifdef DHD_LOSSLESS_ROAMING + /* block flowcontrol during roaming */ + if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) { + return; + } +#endif + + if (ifidx == ALL_INTERFACES) { + /* Flow control on all active interfaces */ + dhdp->txoff = state; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + net = dhd->iflist[i]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } + } else { + if (dhd->iflist[ifidx]) { + net = dhd->iflist[ifidx]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } +} + +#ifdef DHD_RX_DUMP +typedef struct { + uint16 type; + const char *str; +} PKTTYPE_INFO; + +static const PKTTYPE_INFO packet_type_info[] = +{ + { ETHER_TYPE_IP, "IP" }, + { ETHER_TYPE_ARP, "ARP" }, + { ETHER_TYPE_BRCM, "BRCM" }, + { ETHER_TYPE_802_1X, "802.1X" }, + { 0, ""} +}; + +static const char *_get_packet_type_str(uint16 type) +{ + int i; + int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1; + + for (i = 0; i < n; i++) { + if (packet_type_info[i].type == type) + return 
packet_type_info[i].str; + } + + return packet_type_info[n].str; +} +#endif /* DHD_RX_DUMP */ + + +#ifdef DHD_WMF +bool +dhd_is_rxthread_enabled(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + return dhd->rxthread_enabled; +} +#endif /* DHD_WMF */ + +/** Called when a frame is received by the dongle on interface 'ifidx' */ +void +dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + uchar *eth; + uint len; + void *data, *pnext = NULL; + int i; + dhd_if_t *ifp; + wl_event_msg_t event; + int tout_rx = 0; + int tout_ctrl = 0; + void *skbhead = NULL; + void *skbprev = NULL; +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) + char *dump_data; + uint16 protocol; + char *ifname; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { + struct ether_header *eh; + + pnext = PKTNEXT(dhdp->osh, pktbuf); + PKTSETNEXT(dhdp->osh, pktbuf, NULL); + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) { + DHD_ERROR(("%s: ifp is NULL. drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + + eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + + /* Dropping only data packets before registering net device to avoid kernel panic */ +#ifndef PROP_TXSTATUS_VSDB + if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) && + (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) { +#else + if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) && + (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) { +#endif /* PROP_TXSTATUS_VSDB */ + DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) { + /* WLFC may send header only packet when + there is an urgent message but no packet to + piggy-back on + */ + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } +#endif +#ifdef DHD_L2_FILTER + /* If block_ping is enabled drop the ping packet */ + if (ifp->block_ping) { + if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + continue; + } + } +#endif /* DHD_L2_FILTER */ +#ifdef DHD_WMF + /* WMF processing for multicast packets */ + if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) { + dhd_sta_t *sta; + int ret; + + sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost); + ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1); + switch (ret) { + case WMF_TAKEN: + /* The packet is taken by WMF. Continue to next iteration */ + continue; + case WMF_DROP: + /* Packet DROP decision by WMF. 
Toss it */ + DHD_ERROR(("%s: WMF decides to drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + default: + /* Continue the transmit path */ + break; + } + } +#endif /* DHD_WMF */ + +#ifdef DHDTCPACK_SUPPRESS + dhd_tcpdata_info_get(dhdp, pktbuf); +#endif + skb = PKTTONATIVE(dhdp->osh, pktbuf); + + ASSERT(ifp); + skb->dev = ifp->net; + +#ifdef DHD_PSTA + if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) { + DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__, + dhd_ifname(dhdp, ifidx))); + } +#endif /* DHD_PSTA */ + +#ifdef PCIE_FULL_DONGLE + if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) && + (!ifp->ap_isolate)) { + eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + if (ETHER_ISUCAST(eh->ether_dhost)) { + if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) { + dhd_sendpkt(dhdp, ifidx, pktbuf); + continue; + } + } else { + void *npktbuf = PKTDUP(dhdp->osh, pktbuf); + if (npktbuf) + dhd_sendpkt(dhdp, ifidx, npktbuf); + } + } +#endif /* PCIE_FULL_DONGLE */ + + /* Get the protocol, maintain skb around eth_type_trans() + * The main reason for this hack is for the limitation of + * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' + * to perform skb_pull inside vs ETH_HLEN. Since to avoid + * coping of the packet coming from the network stack to add + * BDC, Hardware header etc, during network interface registration + * we set the 'net->hard_header_len' to ETH_HLEN + extra space required + * for BDC, Hardware header etc. and not just the ETH_HLEN + */ + eth = skb->data; + len = skb->len; + +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) + dump_data = skb->data; + protocol = (dump_data[12] << 8) | dump_data[13]; + ifname = skb->dev ? 
skb->dev->name : "N/A"; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ +#ifdef DHD_8021X_DUMP + if (protocol == ETHER_TYPE_802_1X) { + dhd_dump_eapol_4way_message(ifname, dump_data, FALSE); + } +#endif /* DHD_8021X_DUMP */ +#ifdef DHD_DHCP_DUMP + if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) { + uint16 dump_hex; + uint16 source_port; + uint16 dest_port; + uint16 udp_port_pos; + uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN]; + uint8 ip_header_len = (*ptr8 & 0x0f)<<2; + + udp_port_pos = ETHER_HDR_LEN + ip_header_len; + source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1]; + dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3]; + if (source_port == 0x0044 || dest_port == 0x0044) { + dump_hex = (dump_data[udp_port_pos+249] << 8) | + dump_data[udp_port_pos+250]; + if (dump_hex == 0x0101) { + DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname)); + } else if (dump_hex == 0x0102) { + DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname)); + } else if (dump_hex == 0x0103) { + DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname)); + } else if (dump_hex == 0x0105) { + DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname)); + } else { + DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex)); + } + } else if (source_port == 0x0043 || dest_port == 0x0043) { + DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); + } + } +#endif /* DHD_DHCP_DUMP */ +#if defined(DHD_RX_DUMP) + DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol))); + if (protocol != ETHER_TYPE_BRCM) { + if (dump_data[0] == 0xFF) { + DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__)); + + if ((dump_data[12] == 8) && + (dump_data[13] == 6)) { + DHD_ERROR(("%s: ARP %d\n", + __FUNCTION__, dump_data[0x15])); + } + } else if (dump_data[0] & 1) { + DHD_ERROR(("%s: MULTICAST: " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(dump_data))); + } +#ifdef DHD_RX_FULL_DUMP + { + int k; + for (k = 0; k < skb->len; k++) { + DHD_ERROR(("%02X ", dump_data[k])); + if ((k & 15) == 15) + DHD_ERROR(("\n")); + } + DHD_ERROR(("\n")); + } +#endif /* DHD_RX_FULL_DUMP */ + } +#endif /* DHD_RX_DUMP */ + + skb->protocol = eth_type_trans(skb, skb->dev); + + if (skb->pkt_type == PACKET_MULTICAST) { + dhd->pub.rx_multicast++; + ifp->stats.multicast++; + } + + skb->data = eth; + skb->len = len; + +#ifdef WLMEDIA_HTSF + dhd_htsf_addrxts(dhdp, pktbuf); +#endif + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + /* Process special event packets and then discard them */ + memset(&event, 0, sizeof(event)); + if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { + dhd_wl_host_event(dhd, &ifidx, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + skb_mac_header(skb), +#else + skb->mac.raw, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + len - 2, + &event, + &data); + + wl_event_to_host_order(&event); + if (!tout_ctrl) + tout_ctrl = DHD_PACKET_TIMEOUT_MS; + +#if defined(PNO_SUPPORT) + if (event.event_type == WLC_E_PFN_NET_FOUND) { + /* enforce custom wake lock to garantee that Kernel not suspended */ + tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS; + } +#endif /* PNO_SUPPORT */ + +#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + continue; +#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */ + } else { + tout_rx = DHD_PACKET_TIMEOUT_MS; + +#ifdef PROP_TXSTATUS + dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb)); 
+#endif /* PROP_TXSTATUS */ + } + + ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; + + if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) { + dhdp->dstats.rx_bytes += skb->len; + dhdp->rx_packets++; /* Local count */ + ifp->stats.rx_bytes += skb->len; + ifp->stats.rx_packets++; + } + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#if defined(DHD_LB) && defined(DHD_LB_RXP) + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + } else { + if (dhd->rxthread_enabled) { + if (!skbhead) + skbhead = skb; + else + PKTSETNEXT(dhdp->osh, skbprev, skb); + skbprev = skb; + } else { + + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + +#if defined(DHD_LB) && defined(DHD_LB_RXP) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_receive_skb(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx_ni(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else + ulong flags; + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ +#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ + } + } + } + + if (dhd->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); + DHD_OS_WAKE_LOCK_TIMEOUT(dhdp); +} + +void +dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx) +{ + /* Linux version has nothing to do */ + return; +} + +void +dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + + dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); + + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + + if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) + atomic_dec(&dhd->pend_8021x_cnt); + +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) { + dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))]; + uint datalen = PKTLEN(dhd->pub.osh, txp); + if (ifp != NULL) { + if (success) { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } else { + ifp->stats.tx_dropped++; + } + } + } +#endif +} + +static struct net_device_stats * +dhd_get_stats(struct net_device *net) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__)); + + memset(&net->stats, 0, sizeof(net->stats)); + return &net->stats; + } + + ifp = dhd->iflist[ifidx]; + ASSERT(dhd && ifp); + + if (dhd->pub.up) { + /* Use the protocol to get dongle stats */ + 
dhd_prot_dstats(&dhd->pub); + } + return &ifp->stats; +} + +static int +dhd_watchdog_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_watchdog_prio > 0) { + struct sched_param param; + param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)? + dhd_watchdog_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + + while (1) { + if (down_interruptible (&tsk->sema) == 0) { + unsigned long flags; + unsigned long jiffies_at_start = jiffies; + unsigned long time_lapse; + + DHD_OS_WD_WAKE_LOCK(&dhd->pub); + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + + if (dhd->pub.dongle_reset == FALSE) { + DHD_TIMER(("%s:\n", __FUNCTION__)); + dhd_bus_watchdog(&dhd->pub); + + DHD_GENERAL_LOCK(&dhd->pub, flags); + /* Count the tick for reference */ + dhd->pub.tickcnt++; +#ifdef DHD_L2_FILTER + dhd_l2_filter_watchdog(&dhd->pub); +#endif /* DHD_L2_FILTER */ + time_lapse = jiffies - jiffies_at_start; + + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) { + mod_timer(&dhd->timer, + jiffies + + msecs_to_jiffies(dhd_watchdog_ms) - + min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse)); + } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + } + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + } else { + break; + } + } + + complete_and_exit(&tsk->completed, 0); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void dhd_watchdog(struct timer_list *t) +#else +static void dhd_watchdog(ulong data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + dhd_info_t *dhd = from_timer(dhd, t, timer); +#else + dhd_info_t *dhd = (dhd_info_t *)data; +#endif + unsigned long flags; + + if (dhd->pub.dongle_reset) { + return; + } + + if (dhd->pub.busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__)); + return; + } + + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + up(&dhd->thr_wdt_ctl.sema); + return; + } + + DHD_OS_WD_WAKE_LOCK(&dhd->pub); + /* Call the bus module watchdog */ + dhd_bus_watchdog(&dhd->pub); + DHD_GENERAL_LOCK(&dhd->pub, flags); + /* Count the tick for reference */ + dhd->pub.tickcnt++; + +#ifdef DHD_L2_FILTER + dhd_l2_filter_watchdog(&dhd->pub); +#endif /* DHD_L2_FILTER */ + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); +} + +#ifdef DHD_PCIE_RUNTIMEPM +static int +dhd_rpm_state_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + + while (1) { + if (down_interruptible (&tsk->sema) == 0) { + unsigned long flags; + unsigned long jiffies_at_start = jiffies; + unsigned long time_lapse; + + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + + if (dhd->pub.dongle_reset == FALSE) { + DHD_TIMER(("%s:\n", __FUNCTION__)); + if (dhd->pub.up) { + dhd_runtimepm_state(&dhd->pub); + } + + DHD_GENERAL_LOCK(&dhd->pub, flags); + time_lapse = jiffies - jiffies_at_start; + + /* Reschedule the watchdog */ + if (dhd->rpm_timer_valid) { + mod_timer(&dhd->rpm_timer, + jiffies + + msecs_to_jiffies(dhd_runtimepm_ms) - + min(msecs_to_jiffies(dhd_runtimepm_ms), + time_lapse)); + } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + } + } else { + break; + } + } + + complete_and_exit(&tsk->completed, 0); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void 
dhd_runtimepm(struct timer_list *t) +#else +static void dhd_runtimepm(ulong data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + dhd_info_t *dhd = from_timer(dhd, t, rpm_timer);; +#else + dhd_info_t *dhd = (dhd_info_t *)data; +#endif + + if (dhd->pub.dongle_reset) { + return; + } + + if (dhd->thr_rpm_ctl.thr_pid >= 0) { + up(&dhd->thr_rpm_ctl.sema); + return; + } +} + +void dhd_runtime_pm_disable(dhd_pub_t *dhdp) +{ + dhd_os_runtimepm_timer(dhdp, 0); + dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0)); + DHD_ERROR(("DHD Runtime PM Disabled \n")); +} + +void dhd_runtime_pm_enable(dhd_pub_t *dhdp) +{ + dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); + DHD_ERROR(("DHD Runtime PM Enabled \n")); +} + +#endif /* DHD_PCIE_RUNTIMEPM */ + + +#ifdef ENABLE_ADAPTIVE_SCHED +static void +dhd_sched_policy(int prio) +{ + struct sched_param param; + if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) { + param.sched_priority = 0; + setScheduler(current, SCHED_NORMAL, ¶m); + } else { + if (get_scheduler_policy(current) != SCHED_FIFO) { + param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + } +} +#endif /* ENABLE_ADAPTIVE_SCHED */ +#ifdef DEBUG_CPU_FREQ +static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) +{ + dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans); + struct cpufreq_freqs *freq = data; + if (dhd) { + if (!dhd->new_freq) + goto exit; + if (val == CPUFREQ_POSTCHANGE) { + DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n", + freq->new, freq->cpu)); + *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new; + } + } +exit: + return 0; +} +#endif /* DEBUG_CPU_FREQ */ +static int +dhd_dpc_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_dpc_prio > 0) + { + struct sched_param param; + param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + +#ifdef CUSTOM_DPC_CPUCORE + set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE)); +#endif +#ifdef CUSTOM_SET_CPUCORE + dhd->pub.current_dpc = current; +#endif /* CUSTOM_SET_CPUCORE */ + /* Run until signal received */ + while (1) { + if (!binary_sema_down(tsk)) { +#ifdef ENABLE_ADAPTIVE_SCHED + dhd_sched_policy(dhd_dpc_prio); +#endif /* ENABLE_ADAPTIVE_SCHED */ + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { +#ifdef DEBUG_DPC_THREAD_WATCHDOG + int resched_cnt = 0; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + dhd_os_wd_timer_extend(&dhd->pub, TRUE); + while (dhd_bus_dpc(dhd->pub.bus)) { + /* process all data */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + resched_cnt++; + if (resched_cnt > MAX_RESCHED_CNT) { + DHD_INFO(("%s Calling msleep to" + "let other processes run. 
\n", + __FUNCTION__)); + dhd->pub.dhd_bug_on = true; + resched_cnt = 0; + OSL_SLEEP(1); + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + dhd_os_wd_timer_extend(&dhd->pub, FALSE); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + } else { + if (dhd->pub.up) + dhd_bus_stop(dhd->pub.bus, TRUE); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + } + } else { + break; + } + } + complete_and_exit(&tsk->completed, 0); +} + +static int +dhd_rxf_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; +#if defined(WAIT_DEQUEUE) +#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */ + ulong watchdogTime = OSL_SYSUPTIME(); /* msec */ +#endif + dhd_pub_t *pub = &dhd->pub; + + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_rxf_prio > 0) + { + struct sched_param param; + param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + + DAEMONIZE("dhd_rxf"); + /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */ + + /* signal: thread has started */ + complete(&tsk->completed); +#ifdef CUSTOM_SET_CPUCORE + dhd->pub.current_rxf = current; +#endif /* CUSTOM_SET_CPUCORE */ + /* Run until signal received */ + while (1) { + if (down_interruptible(&tsk->sema) == 0) { + void *skb; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) + ulong flags; +#endif +#ifdef ENABLE_ADAPTIVE_SCHED + dhd_sched_policy(dhd_rxf_prio); +#endif /* ENABLE_ADAPTIVE_SCHED */ + + SMP_RD_BARRIER_DEPENDS(); + + if (tsk->terminated) { + break; + } + skb = dhd_rxf_dequeue(pub); + + if (skb == NULL) { + continue; + } + while (skb) { + void *skbnext = PKTNEXT(pub->osh, skb); + PKTSETNEXT(pub->osh, skb, NULL); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); + +#endif + skb = skbnext; + } +#if defined(WAIT_DEQUEUE) + if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) { + OSL_SLEEP(1); + watchdogTime = OSL_SYSUPTIME(); + } +#endif + + DHD_OS_WAKE_UNLOCK(pub); + } else { + break; + } + } + complete_and_exit(&tsk->completed, 0); +} + +#ifdef BCMPCIE +void dhd_dpc_enable(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + if (!dhdp || !dhdp->info) + return; + dhd = dhdp->info; + +#ifdef DHD_LB +#ifdef DHD_LB_RXP + __skb_queue_head_init(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXC + if (atomic_read(&dhd->tx_compl_tasklet.count) == 1) + tasklet_enable(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + if (atomic_read(&dhd->rx_compl_tasklet.count) == 1) + tasklet_enable(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ + if (atomic_read(&dhd->tasklet.count) == 1) + tasklet_enable(&dhd->tasklet); +} +#endif /* BCMPCIE */ + + +#ifdef BCMPCIE +void +dhd_dpc_kill(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + if (!dhdp) { + return; + } + + dhd = dhdp->info; + + if (!dhd) { + return; + } + + if (dhd->thr_dpc_ctl.thr_pid < 0) { + tasklet_disable(&dhd->tasklet); + tasklet_kill(&dhd->tasklet); + DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__)); + } +#if defined(DHD_LB) +#ifdef DHD_LB_RXP + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + /* Kill the Load Balancing Tasklets */ +#if defined(DHD_LB_TXC) + tasklet_disable(&dhd->tx_compl_tasklet); + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + 
tasklet_disable(&dhd->rx_compl_tasklet); + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ +} +#endif /* BCMPCIE */ + +static void +dhd_dpc(ulong data) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)data; + + /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c] + * down below , wake lock is set, + * the tasklet is initialized in dhd_attach() + */ + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + if (dhd_bus_dpc(dhd->pub.bus)) { + DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt); + tasklet_schedule(&dhd->tasklet); + } + } else { + dhd_bus_stop(dhd->pub.bus, TRUE); + } +} + +void +dhd_sched_dpc(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + DHD_OS_WAKE_LOCK(dhdp); + /* If the semaphore does not get up, + * wake unlock should be done here + */ + if (!binary_sema_up(&dhd->thr_dpc_ctl)) { + DHD_OS_WAKE_UNLOCK(dhdp); + } + return; + } else { + tasklet_schedule(&dhd->tasklet); + } +} + +static void +dhd_sched_rxf(dhd_pub_t *dhdp, void *skb) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; +#ifdef RXF_DEQUEUE_ON_BUSY + int ret = BCME_OK; + int retry = 2; +#endif /* RXF_DEQUEUE_ON_BUSY */ + + DHD_OS_WAKE_LOCK(dhdp); + + DHD_TRACE(("dhd_sched_rxf: Enter\n")); +#ifdef RXF_DEQUEUE_ON_BUSY + do { + ret = dhd_rxf_enqueue(dhdp, skb); + if (ret == BCME_OK || ret == BCME_ERROR) + break; + else + OSL_SLEEP(50); /* waiting for dequeueing */ + } while (retry-- > 0); + + if (retry <= 0 && ret == BCME_BUSY) { + void *skbp = skb; + + while (skbp) { + void *skbnext = PKTNEXT(dhdp->osh, skbp); + PKTSETNEXT(dhdp->osh, skbp, NULL); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx_ni(skbp); + skbp = skbnext; + } + DHD_ERROR(("send skb to kernel backlog without rxf_thread\n")); + } else { + if (dhd->thr_rxf_ctl.thr_pid >= 0) { + up(&dhd->thr_rxf_ctl.sema); + } + } +#else /* RXF_DEQUEUE_ON_BUSY */ + do { + if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK) + break; + } while (1); + if (dhd->thr_rxf_ctl.thr_pid >= 0) { + up(&dhd->thr_rxf_ctl.sema); + } + return; +#endif /* RXF_DEQUEUE_ON_BUSY */ +} + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef TOE +/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ +static int +dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = FALSE; + + strncpy(buf, "toe_ol", sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + /* Check for older dongle image that doesn't support toe_ol */ + if (ret == -EIO) { + DHD_ERROR(("%s: toe not supported by device\n", + dhd_ifname(&dhd->pub, ifidx))); + return -EOPNOTSUPP; + } + + DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + memcpy(toe_ol, buf, sizeof(uint32)); + return 0; +} + +/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ +static int +dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int toe, ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = TRUE; + + /* Set toe_ol as requested */ + 
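/*
 * Illustrative note, not part of this patch: the iovar "set" buffer built below
 * is the NUL-terminated iovar name followed immediately by its value:
 *
 *	offset 0..6  : "toe_ol\0"
 *	offset 7..10 : uint32 toe_ol value (CPU byte order)
 *
 * which is why the value is copied to &buf[sizeof("toe_ol")] -- sizeof() on the
 * string literal already includes the terminating NUL.  The global "toe" enable
 * written a few lines further down uses the same name-then-value layout.
 */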
+ strncpy(buf, "toe_ol", sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; + memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32)); + + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", + dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + /* Enable toe globally only if any components are enabled. */ + + toe = (toe_ol != 0); + + strcpy(buf, "toe"); + memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32)); + + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + return 0; +} +#endif /* TOE */ + +#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE) +void dhd_set_scb_probe(dhd_pub_t *dhd) +{ + int ret = 0; + wl_scb_probe_t scb_probe; + char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)]; + + memset(&scb_probe, 0, sizeof(wl_scb_probe_t)); + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + return; + } + + bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf)); + + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__)); + } + + memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t)); + + scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE; + + bcm_mkiovar("scb_probe", (char *)&scb_probe, + sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__)); + return; + } +} +#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) +static void +dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + + snprintf(info->driver, sizeof(info->driver), "wl"); + snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version); +} + +struct ethtool_ops dhd_ethtool_ops = { + .get_drvinfo = dhd_ethtool_get_drvinfo +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) +static int +dhd_ethtool(dhd_info_t *dhd, void *uaddr) +{ + struct ethtool_drvinfo info; + char drvname[sizeof(info.driver)]; + uint32 cmd; +#ifdef TOE + struct ethtool_value edata; + uint32 toe_cmpnt, csum_dir; + int ret; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* all ethtool calls start with a cmd word */ + if (copy_from_user(&cmd, uaddr, sizeof (uint32))) + return -EFAULT; + + switch (cmd) { + case ETHTOOL_GDRVINFO: + /* Copy out any request driver name */ + if (copy_from_user(&info, uaddr, sizeof(info))) + return -EFAULT; + strncpy(drvname, info.driver, sizeof(info.driver)); + drvname[sizeof(info.driver)-1] = '\0'; + + /* clear struct for return */ + memset(&info, 0, sizeof(info)); + info.cmd = cmd; + + /* if dhd requested, identify ourselves */ + if (strcmp(drvname, "?dhd") == 0) { + snprintf(info.driver, sizeof(info.driver), "dhd"); + strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1); + info.version[sizeof(info.version) - 1] = '\0'; + } + + /* otherwise, require dongle to be up */ + else if (!dhd->pub.up) { + DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); + return -ENODEV; + } + + /* finally, report dongle driver type */ + else if (dhd->pub.iswl) + snprintf(info.driver, sizeof(info.driver), "wl"); + else + snprintf(info.driver, sizeof(info.driver), "xx"); + + snprintf(info.version, 
sizeof(info.version), "%lu", dhd->pub.drv_version); + if (copy_to_user(uaddr, &info, sizeof(info))) + return -EFAULT; + DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__, + (int)sizeof(drvname), drvname, info.driver)); + break; + +#ifdef TOE + /* Get toe offload components from dongle */ + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + edata.cmd = cmd; + edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; + + if (copy_to_user(uaddr, &edata, sizeof(edata))) + return -EFAULT; + break; + + /* Set toe offload components in dongle */ + case ETHTOOL_SRXCSUM: + case ETHTOOL_STXCSUM: + if (copy_from_user(&edata, uaddr, sizeof(edata))) + return -EFAULT; + + /* Read the current settings, update and write back */ + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + if (edata.data != 0) + toe_cmpnt |= csum_dir; + else + toe_cmpnt &= ~csum_dir; + + if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) + return ret; + + /* If setting TX checksum mode, tell Linux the new mode */ + if (cmd == ETHTOOL_STXCSUM) { + if (edata.data) + dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; + } + + break; +#endif /* TOE */ + + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + +static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) +{ + dhd_info_t *dhd; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return FALSE; + } + + if (!dhdp->up) + return FALSE; + + dhd = (dhd_info_t *)dhdp->info; +#if !defined(BCMPCIE) + if (dhd->thr_dpc_ctl.thr_pid < 0) { + DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__)); + return FALSE; + } +#endif + + if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) || + ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) { +#ifdef BCMPCIE + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n", + __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, + dhdp->d3ackcnt_timeout, error, dhdp->busstate)); +#else + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); +#endif /* BCMPCIE */ + if (dhdp->hang_reason == 0) { + if (dhdp->dongle_trap_occured) { + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; +#ifdef BCMPCIE + } else if (dhdp->d3ackcnt_timeout) { + dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT; +#endif /* BCMPCIE */ + } else { + dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT; + } + } + net_os_send_hang_message(net); + return TRUE; + } + return FALSE; +} + +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf) +{ + int bcmerror = BCME_OK; + int buflen = 0; + struct net_device *net; + + net = dhd_idx2net(pub, ifidx); + if (!net) { + bcmerror = BCME_BADARG; + goto done; + } + + if (data_buf) + buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN); + + /* check for local dhd ioctl and handle it */ + if (ioc->driver == DHD_IOCTL_MAGIC) { + bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen); + if (bcmerror) + pub->bcmerror = bcmerror; + goto done; + } + + /* send to dongle (must be up, and wl). 
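(Illustrative aside, not part of this patch: the dispatch in dhd_ioctl_process()
 is roughly

	ioc->driver == DHD_IOCTL_MAGIC          -> handled locally by dhd_ioctl()
	bus DOWN or LOAD and allow_delay_fwdl   -> dhd_bus_start() first, then continue
	otherwise                               -> dhd_wl_ioctl() forwards the request to the dongle

 with dhd_check_hang() inspecting the result on the way out at the "done" label.)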
*/ + if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) { + if (allow_delay_fwdl) { + int ret = dhd_bus_start(pub); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } else { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } + + if (!pub->iswl) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + /* + * Flush the TX queue if required for proper message serialization: + * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to + * prevent M4 encryption and + * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to + * prevent disassoc frame being sent before WPS-DONE frame. + */ + if (ioc->cmd == WLC_SET_KEY || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("wsec_key", data_buf, 9) == 0) || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("bsscfg:wsec_key", data_buf, 15) == 0) || + ioc->cmd == WLC_DISASSOC) + dhd_wait_pend8021x(net); + +#ifdef WLMEDIA_HTSF + if (data_buf) { + /* short cut wl ioctl calls here */ + if (strcmp("htsf", data_buf) == 0) { + dhd_ioctl_htsf_get(dhd, 0); + return BCME_OK; + } + + if (strcmp("htsflate", data_buf) == 0) { + if (ioc->set) { + memset(ts, 0, sizeof(tstamp_t)*TSMAX); + memset(&maxdelayts, 0, sizeof(tstamp_t)); + maxdelay = 0; + tspktcnt = 0; + maxdelaypktno = 0; + memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); + } else { + dhd_dump_latency(); + } + return BCME_OK; + } + if (strcmp("htsfclear", data_buf) == 0) { + memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); + htsf_seqnum = 0; + return BCME_OK; + } + if (strcmp("htsfhis", data_buf) == 0) { + dhd_dump_htsfhisto(&vi_d1, "H to D"); + dhd_dump_htsfhisto(&vi_d2, "D to D"); + dhd_dump_htsfhisto(&vi_d3, "D to H"); + dhd_dump_htsfhisto(&vi_d4, "H to H"); + return BCME_OK; + } + if (strcmp("tsport", data_buf) == 0) { + if (ioc->set) { + memcpy(&tsport, data_buf + 7, 4); + } else { + DHD_ERROR(("current timestamp port: %d \n", tsport)); + } + return BCME_OK; + } + } +#endif /* WLMEDIA_HTSF */ + + if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) && + data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) { +#ifdef BCM_FD_AGGR + bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); +#else + bcmerror = BCME_UNSUPPORTED; +#endif + goto done; + } + +#ifdef DHD_DEBUG + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) { + if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) { + /* Print IOVAR Information */ + DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n", + __FUNCTION__, (char *)data_buf, ioc->set)); + if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { + prhex(NULL, data_buf + strlen(data_buf) + 1, + buflen - strlen(data_buf) - 1); + } + } else { + /* Print IOCTL Information */ + DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n", + __FUNCTION__, ioc->cmd, ioc->set)); + if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { + prhex(NULL, data_buf, buflen); + } + } + } +#endif /* DHD_DEBUG */ + + bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); + +done: + dhd_check_hang(net, pub, bcmerror); + + return bcmerror; +} + +static int +dhd_ioctl_entry(struct net_device *net, struct ifreq 
*ifr, int cmd) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_ioctl_t ioc; + int ifidx; + int ret; + void *local_buf = NULL; + u16 buflen = 0; + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + /* Interface up check for built-in type */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) { + DHD_TRACE(("%s: Interface is down \n", __FUNCTION__)); + ret = BCME_NOTUP; + goto exit; + } + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->pub.hang_was_sent) { + DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); + ret = BCME_DONGLE_DOWN; + goto exit; + } + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + +#if defined(WL_WIRELESS_EXT) + /* linux wireless extensions */ + if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { + /* may recurse, do NOT lock */ + ret = wl_iw_ioctl(net, ifr, cmd); + goto exit; + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) + if (cmd == SIOCETHTOOL) { + ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); + goto exit; + } +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(net, ifr, cmd); + dhd_check_hang(net, &dhd->pub, ret); + goto exit; + } + + if (cmd != SIOCDEVPRIVATE) { + ret = -EOPNOTSUPP; + goto exit; + } + + memset(&ioc, 0, sizeof(ioc)); + +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + compat_wl_ioctl_t compat_ioc; + if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) { + ret = BCME_BADADDR; + goto done; + } + ioc.cmd = compat_ioc.cmd; + ioc.buf = compat_ptr(compat_ioc.buf); + ioc.len = compat_ioc.len; + ioc.set = compat_ioc.set; + ioc.used = compat_ioc.used; + ioc.needed = compat_ioc.needed; + /* To differentiate between wl and dhd read 4 more byes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t), + sizeof(uint)) != 0)) { + ret = BCME_BADADDR; + goto done; + } + } else +#endif /* CONFIG_COMPAT */ + { + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + ret = BCME_BADADDR; + goto done; + } + + /* To differentiate between wl and dhd read 4 more byes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + ret = BCME_BADADDR; + goto done; + } + } + + if (!capable(CAP_NET_ADMIN)) { + ret = BCME_EPERM; + goto done; + } + + if (ioc.len > 0) { + buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); + if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) { + ret = BCME_NOMEM; + goto done; + } + + DHD_PERIM_UNLOCK(&dhd->pub); + if (copy_from_user(local_buf, ioc.buf, buflen)) { + DHD_PERIM_LOCK(&dhd->pub); + ret = BCME_BADADDR; + goto done; + } + DHD_PERIM_LOCK(&dhd->pub); + + *(char *)(local_buf + buflen) = '\0'; + } + + ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); + + if (!ret && buflen && local_buf && ioc.buf) { + DHD_PERIM_UNLOCK(&dhd->pub); + if (copy_to_user(ioc.buf, local_buf, buflen)) + ret = -EFAULT; + DHD_PERIM_LOCK(&dhd->pub); + } + +done: + if (local_buf) + MFREE(dhd->pub.osh, local_buf, buflen+1); + +exit: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return OSL_ERROR(ret); +} + + +#ifdef FIX_CPU_MIN_CLOCK +static int 
dhd_init_cpufreq_fix(dhd_info_t *dhd) +{ + if (dhd) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->cpufreq_fix); +#endif + dhd->cpufreq_fix_status = FALSE; + } + return 0; +} + +static void dhd_fix_cpu_freq(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhd->cpufreq_fix); +#endif + if (dhd && !dhd->cpufreq_fix_status) { + pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = TRUE; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif +} + +static void dhd_rollback_cpu_freq(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhd ->cpufreq_fix); +#endif + if (dhd && dhd->cpufreq_fix_status != TRUE) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif + return; + } + + pm_qos_remove_request(&dhd->dhd_cpu_qos); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_remove_request(&dhd->dhd_bus_qos); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = FALSE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif +} +#endif /* FIX_CPU_MIN_CLOCK */ + +static int +dhd_stop(struct net_device *net) +{ + int ifidx = 0; + dhd_info_t *dhd = DHD_DEV_INFO(net); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net)); + dhd->pub.rxcnt_timeout = 0; + dhd->pub.txcnt_timeout = 0; + +#ifdef BCMPCIE + dhd->pub.d3ackcnt_timeout = 0; +#endif /* BCMPCIE */ + + if (dhd->pub.up == 0) { + goto exit; + } + + dhd_if_flush_sta(DHD_DEV_IFP(net)); + + /* Disable Runtime PM before interface down */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) + dhd_rollback_cpu_freq(dhd); +#endif /* FIX_CPU_MIN_CLOCK */ + + ifidx = dhd_net2idx(dhd, net); + BCM_REFERENCE(ifidx); + + /* Set state and stop OS transmissions */ + netif_stop_queue(net); + dhd->pub.up = 0; + +#ifdef WL_CFG80211 + if (ifidx == 0) { + dhd_if_t *ifp; + wl_cfg80211_down(NULL); + + ifp = dhd->iflist[0]; + ASSERT(ifp && ifp->net); + /* + * For CFG80211: Clean up all the left over virtual interfaces + * when the primary Interface is brought down. 
[ifconfig wlan0 down] + */ + if (!dhd_download_fw_on_driverload) { + if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && + (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { + int i; + +#ifdef WL_CFG80211_P2P_DEV_IF + wl_cfg80211_del_p2p_wdev(); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + dhd_net_if_lock_local(dhd); + for (i = 1; i < DHD_MAX_IFS; i++) + dhd_remove_if(&dhd->pub, i, FALSE); + + if (ifp && ifp->net) { + dhd_if_del_sta_list(ifp); + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + dhd_net_if_unlock_local(dhd); + } + cancel_work_sync(dhd->dhd_deferred_wq); +#if defined(DHD_LB) && defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB && DHD_LB_RXP */ + } + +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + if (ifp->net == dhd->rx_napi_netdev) { + DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + skb_queue_purge(&dhd->rx_napi_queue); + napi_disable(&dhd->rx_napi_struct); + netif_napi_del(&dhd->rx_napi_struct); + dhd->rx_napi_netdev = NULL; + } +#endif /* DHD_LB && DHD_LB_RXP */ + + } +#endif /* WL_CFG80211 */ + +#ifdef PROP_TXSTATUS + dhd_wlfc_cleanup(&dhd->pub, NULL, 0); +#endif + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + OLD_MOD_DEC_USE_COUNT; +exit: +#if defined(WL_CFG80211) + if (ifidx == 0 && !dhd_download_fw_on_driverload) + wl_android_wifi_off(net, TRUE); +#endif + dhd->pub.hang_was_sent = 0; + + /* Clear country spec for for built-in type driver */ + if (!dhd_download_fw_on_driverload) { + dhd->pub.dhd_cspec.country_abbrev[0] = 0x00; + dhd->pub.dhd_cspec.rev = 0; + dhd->pub.dhd_cspec.ccode[0] = 0x00; + } + +#ifdef BCMDBGFS + dhd_dbg_remove(); +#endif + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + /* Destroy wakelock */ + if (!dhd_download_fw_on_driverload && + (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_DESTROY(dhd); + dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + + return 0; +} + +#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME) +extern bool g_first_broadcast_scan; +#endif + +#ifdef WL11U +static int dhd_interworking_enable(dhd_pub_t *dhd) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + uint32 enable = true; + int ret = BCME_OK; + + bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret)); + } + + if (ret == BCME_OK) { + /* basic capabilities for HS20 REL2 */ + uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF; + bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret)); + } + } + + return ret; +} +#endif /* WL11u */ + +static int +dhd_open(struct net_device *net) +{ + dhd_info_t *dhd = 
DHD_DEV_INFO(net); +#ifdef TOE + uint32 toe_ol; +#endif +#ifdef BCM_FD_AGGR + char iovbuf[WLC_IOCTL_SMLEN]; + dbus_config_t config; + uint32 agglimit = 0; + uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */ +#endif /* BCM_FD_AGGR */ + int ifidx; + int32 ret = 0; + + if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) { + DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); + return -1; + } + + /* Init wakelock */ + if (!dhd_download_fw_on_driverload && + !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + +#ifdef PREVENT_REOPEN_DURING_HANG + /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */ + if (dhd->pub.hang_was_sent == 1) { + DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); + /* Force to bring down WLAN interface in case dhd_stop() is not called + * from the upper layer when HANG event is triggered. + */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) { + DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__)); + dhd_stop(net); + } else { + return -1; + } + } +#endif /* PREVENT_REOPEN_DURING_HANG */ + + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + dhd->pub.dongle_trap_occured = 0; + dhd->pub.hang_was_sent = 0; + dhd->pub.hang_reason = 0; +#ifdef DHD_LOSSLESS_ROAMING + dhd->pub.dequeue_prec_map = ALLPRIO; +#endif +#if !defined(WL_CFG80211) + /* + * Force start if ifconfig_up gets called before START command + * We keep WEXT's wl_control_wl_start to provide backward compatibility + * This should be removed in the future + */ + ret = wl_control_wl_start(net); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + +#endif + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if (ifidx < 0) { + DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (ifidx == 0) { + atomic_set(&dhd->pend_8021x_cnt, 0); +#if defined(WL_CFG80211) + if (!dhd_download_fw_on_driverload) { + DHD_ERROR(("\n%s\n", dhd_version)); +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + g_first_broadcast_scan = TRUE; +#endif + ret = wl_android_wifi_on(net); + if (ret != 0) { + DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n", + __FUNCTION__, ret)); + ret = -1; + goto exit; + } + } +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) { + dhd_init_cpufreq_fix(dhd); + dhd_fix_cpu_freq(dhd); + } +#endif /* FIX_CPU_MIN_CLOCK */ +#endif + + if (dhd->pub.busstate != DHD_BUS_DATA) { + + /* try to bring up bus */ + DHD_PERIM_UNLOCK(&dhd->pub); + ret = dhd_bus_start(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + if (ret) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + + } + +#ifdef BCM_FD_AGGR + config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT; + + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4, + iovbuf, sizeof(iovbuf)); + + if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) { + agglimit = *(uint32 *)iovbuf; + config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT; + config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK; + DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d 
bytes_limit %d\n", + agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize)); + if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) { + DHD_ERROR(("set tx/rx queue size and buffersize failed\n")); + } + } else { + DHD_ERROR(("get rpc_dngl_agglimit failed\n")); + rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC; + } + + /* Set aggregation for TX */ + bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK, + rpc_agg & BCM_RPC_TP_HOST_AGG_MASK); + + /* Set aggregation for RX */ + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf)); + if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) { + dhd->pub.info->fdaggr = 0; + if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK) + dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED; + if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK) + dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED; + } else { + DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret)); + } +#endif /* BCM_FD_AGGR */ + + /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */ + memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + +#ifdef TOE + /* Get current TOE mode from dongle */ + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) { + dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; + } else { + dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; + } +#endif /* TOE */ + +#if defined(WL_CFG80211) + if (unlikely(wl_cfg80211_up(NULL))) { + DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); + ret = -1; + goto exit; + } + if (!dhd_download_fw_on_driverload) { +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef DHD_LB + DHD_LB_STATS_INIT(&dhd->pub); +#ifdef DHD_LB_RXP + __skb_queue_head_init(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#endif /* DHD_LB */ + } + +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) +#if defined(SET_RPS_CPUS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#else + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); +#endif +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + if (dhd->rx_napi_netdev == NULL) { + dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; + memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); + netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, + dhd_napi_poll, dhd_napi_weight); + DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + napi_enable(&dhd->rx_napi_struct); + DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); + skb_queue_head_init(&dhd->rx_napi_queue); + } +#endif /* DHD_LB && DHD_LB_RXP */ +#if defined(NUM_SCB_MAX_PROBE) + dhd_set_scb_probe(&dhd->pub); +#endif /* NUM_SCB_MAX_PROBE */ +#endif /* WL_CFG80211 */ + } + + /* Allow transmit calls */ + netif_start_queue(net); + dhd->pub.up = 1; + + OLD_MOD_INC_USE_COUNT; + +#ifdef BCMDBGFS + dhd_dbg_init(&dhd->pub); +#endif + +exit: + if (ret) { + dhd_stop(net); + } + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + + return 
ret; +} + +int dhd_do_driver_init(struct net_device *net) +{ + dhd_info_t *dhd = NULL; + + if (!net) { + DHD_ERROR(("Primary Interface not initialized \n")); + return -EINVAL; + } + + + /* && defined(OEM_ANDROID) && defined(BCMSDIO) */ + dhd = DHD_DEV_INFO(net); + + /* If driver is already initialized, do nothing + */ + if (dhd->pub.busstate == DHD_BUS_DATA) { + DHD_TRACE(("Driver already Inititalized. Nothing to do")); + return 0; + } + + if (dhd_open(net) < 0) { + DHD_ERROR(("Driver Init Failed \n")); + return -1; + } + + return 0; +} + +int +dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ + +#ifdef WL_CFG80211 + if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) + return BCME_OK; +#endif + + /* handle IF event caused by wl commands, SoftAP, WEXT and + * anything else. This has to be done asynchronously otherwise + * DPC will be blocked (and iovars will timeout as DPC has no chance + * to read the response back) + */ + if (ifevent->ifidx > 0) { + dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strncpy(if_event->name, name, IFNAMSIZ); + if_event->name[IFNAMSIZ - 1] = '\0'; + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, + DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW); + } + + return BCME_OK; +} + +int +dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ + dhd_if_event_t *if_event; + +#ifdef WL_CFG80211 + if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) + return BCME_OK; +#endif /* WL_CFG80211 */ + + /* handle IF event caused by wl commands, SoftAP, WEXT and + * anything else + */ + if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strncpy(if_event->name, name, IFNAMSIZ); + if_event->name[IFNAMSIZ - 1] = '\0'; + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL, + dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW); + + return BCME_OK; +} + +/* unregister and free the existing net_device interface (if any) in iflist and + * allocate a new one. the slot is reused. this function does NOT register the + * new interface to linux kernel. 
dhd_register_if does the job + */ +struct net_device* +dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + + ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS)); + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { + if (ifp->net != NULL) { + DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name)); + + dhd_dev_priv_clear(ifp->net); /* clear net_device private */ + + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_stop_queue(ifp->net); + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); + } + ifp->net = NULL; + } + } else { + ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t)); + if (ifp == NULL) { + DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t))); + return NULL; + } + } + + memset(ifp, 0, sizeof(dhd_if_t)); + ifp->info = dhdinfo; + ifp->idx = ifidx; + ifp->bssidx = bssidx; + if (mac != NULL) + memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN); + + /* Allocate etherdev, including space for private structure */ + ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE); + if (ifp->net == NULL) { + DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo))); + goto fail; + } + + /* Setup the dhd interface's netdevice private structure. */ + dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx); + + if (name && name[0]) { + strncpy(ifp->net->name, name, IFNAMSIZ); + ifp->net->name[IFNAMSIZ - 1] = '\0'; + } + + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + ifp->net->needs_free_netdev = true; +#ifdef WL_CFG80211 + if (ifidx == 0) + ifp->net->priv_destructor = free_netdev; + else + ifp->net->priv_destructor = dhd_netdev_free; +#else + ifp->net->priv_destructor = free_netdev; +#endif /* WL_CFG80211 */ +#else +#ifdef WL_CFG80211 + if (ifidx == 0) + ifp->net->destructor = free_netdev; + else + ifp->net->destructor = dhd_netdev_free; +#else + ifp->net->destructor = free_netdev; +#endif /* WL_CFG80211 */ +#endif + strncpy(ifp->name, ifp->net->name, IFNAMSIZ); + ifp->name[IFNAMSIZ - 1] = '\0'; + dhdinfo->iflist[ifidx] = ifp; + +/* initialize the dongle provided if name */ + if (dngl_name) + strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ); + else + strncpy(ifp->dngl_name, name, IFNAMSIZ); + +#ifdef PCIE_FULL_DONGLE + /* Initialize STA info list */ + INIT_LIST_HEAD(&ifp->sta_list); + DHD_IF_STA_LIST_LOCK_INIT(ifp); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_L2_FILTER + ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh); + ifp->parp_allnode = TRUE; +#endif + return ifp->net; + +fail: + + if (ifp != NULL) { + if (ifp->net != NULL) { + dhd_dev_priv_clear(ifp->net); + free_netdev(ifp->net); + ifp->net = NULL; + } + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + ifp = NULL; + } + + dhdinfo->iflist[ifidx] = NULL; + return NULL; +} + +/* unregister and free the the net_device interface associated with the indexed + * slot, also free the slot memory and set the slot pointer to NULL + */ +int +dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { + if (ifp->net != NULL) { + DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx)); + + /* in unregister_netdev case, the 
interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_tx_disable(ifp->net); + + + +#if defined(SET_RPS_CPUS) + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ +#if defined(SET_RPS_CPUS) +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ +#endif + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); + } + ifp->net = NULL; + dhdinfo->iflist[ifidx] = NULL; + } +#ifdef DHD_WMF + dhd_wmf_cleanup(dhdpub, ifidx); +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdpub->tickcnt); + deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ + + dhd_if_del_sta_list(ifp); + + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + + } + + return BCME_OK; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +static struct net_device_ops dhd_ops_pri = { + .ndo_open = dhd_open, + .ndo_stop = dhd_stop, + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif +}; + +static struct net_device_ops dhd_ops_virt = { + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ + +#ifdef DEBUGGER +extern void debugger_init(void *bus_handle); +#endif + + +#ifdef SHOW_LOGTRACE +static char *logstrs_path = "/root/logstrs.bin"; +static char *st_str_file_path = "/root/rtecdc.bin"; +static char *map_file_path = "/root/rtecdc.map"; +static char *rom_st_str_file_path = "/root/roml.bin"; +static char *rom_map_file_path = "/root/roml.map"; + +#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */ +#define READ_NUM_BYTES 1000 /* read map file each time this No. 
of bytes */ +#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */ +static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */ +static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */ +static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */ +static char *ram_file_str = "rtecdc"; +static char *rom_file_str = "roml"; +#define RAMSTART_BIT 0x01 +#define RDSTART_BIT 0x02 +#define RDEND_BIT 0x04 +#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT) + +module_param(logstrs_path, charp, S_IRUGO); +module_param(st_str_file_path, charp, S_IRUGO); +module_param(map_file_path, charp, S_IRUGO); +module_param(rom_st_str_file_path, charp, S_IRUGO); +module_param(rom_map_file_path, charp, S_IRUGO); + +static void +dhd_init_logstrs_array(dhd_event_log_t *temp) +{ + struct file *filep = NULL; + struct kstat stat; + mm_segment_t fs; + char *raw_fmts = NULL; + int logstrs_size = 0; + + logstr_header_t *hdr = NULL; + uint32 *lognums = NULL; + char *logstrs = NULL; + int ram_index = 0; + char **fmts; + int num_fmts = 0; + uint32 i = 0; + int error = 0; + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(logstrs_path, O_RDONLY, 0); + + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + error = vfs_stat(logstrs_path, &stat); + if (error) { + DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + logstrs_size = (int) stat.size; + + raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__)); + goto fail; + } + if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) { + DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path)); + goto fail; + } + + /* Remember header from the logstrs.bin file */ + hdr = (logstr_header_t *) (raw_fmts + logstrs_size - + sizeof(logstr_header_t)); + + if (hdr->log_magic == LOGSTRS_MAGIC) { + /* + * logstrs.bin start with header. + */ + num_fmts = hdr->rom_logstrs_offset / sizeof(uint32); + ram_index = (hdr->ram_lognums_offset - + hdr->rom_lognums_offset) / sizeof(uint32); + lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset]; + logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset]; + } else { + /* + * Legacy logstrs.bin format without header. + */ + num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32); + if (num_fmts == 0) { + /* Legacy ROM/RAM logstrs.bin format: + * - ROM 'lognums' section + * - RAM 'lognums' section + * - ROM 'logstrs' section. + * - RAM 'logstrs' section. + * + * 'lognums' is an array of indexes for the strings in the + * 'logstrs' section. The first uint32 is 0 (index of first + * string in ROM 'logstrs' section). + * + * The 4324b5 is the only ROM that uses this legacy format. Use the + * fixed number of ROM fmtnums to find the start of the RAM + * 'lognums' section. Use the fixed first ROM string ("Con\n") to + * find the ROM 'logstrs' section. + */ + #define NUM_4324B5_ROM_FMTS 186 + #define FIRST_4324B5_ROM_LOGSTR "Con\n" + ram_index = NUM_4324B5_ROM_FMTS; + lognums = (uint32 *) raw_fmts; + num_fmts = ram_index; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) { + num_fmts++; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + } + } else { + /* Legacy RAM-only logstrs.bin format: + * - RAM 'lognums' section + * - RAM 'logstrs' section. 
+ * + * 'lognums' is an array of indexes for the strings in the + * 'logstrs' section. The first uint32 is an index to the + * start of 'logstrs'. Therefore, if this index is divided + * by 'sizeof(uint32)' it provides the number of logstr + * entries. + */ + ram_index = 0; + lognums = (uint32 *) raw_fmts; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + } + } + fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL); + if (fmts == NULL) { + DHD_ERROR(("Failed to allocate fmts memory")); + goto fail; + } + + for (i = 0; i < num_fmts; i++) { + /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base + * (they are 0-indexed relative to 'rom_logstrs_offset'). + * + * RAM lognums are already indexed to point to the correct RAM logstrs (they + * are 0-indexed relative to the start of the logstrs.bin file). + */ + if (i == ram_index) { + logstrs = raw_fmts; + } + fmts[i] = &logstrs[lognums[i]]; + } + temp->fmts = fmts; + temp->raw_fmts = raw_fmts; + temp->num_fmts = num_fmts; + filp_close(filep, NULL); + set_fs(fs); + return; +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + set_fs(fs); + temp->fmts = NULL; + return; +} + +static int +dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end) +{ + struct file *filep = NULL; + mm_segment_t fs; + char *raw_fmts = NULL; + uint32 read_size = READ_NUM_BYTES; + int error = 0; + char * cptr = NULL; + char c; + uint8 count = 0; + + *ramstart = 0; + *rodata_start = 0; + *rodata_end = 0; + + if (fname == NULL) { + DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__)); + return BCME_ERROR; + } + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(fname, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname)); + goto fail; + } + + /* Allocate 1 byte more than read_size to terminate it with NULL */ + raw_fmts = kmalloc(read_size + 1, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + /* read ram start, rodata_start and rodata_end values from map file */ + + while (count != ALL_MAP_VAL) + { + error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos)); + if (error < 0) { + DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__, + map_file_path, error)); + goto fail; + } + + if (error < read_size) { + /* + * since we reset file pos back to earlier pos by + * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF. + * So if ret value is less than read_size, reached EOF don't read further + */ + break; + } + /* End raw_fmts with NULL as strstr expects NULL terminated strings */ + raw_fmts[read_size] = '\0'; + + /* Get ramstart address */ + if ((cptr = strstr(raw_fmts, ramstart_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c text_start", ramstart, &c); + count |= RAMSTART_BIT; + } + + /* Get ram rodata start address */ + if ((cptr = strstr(raw_fmts, rodata_start_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c rodata_start", rodata_start, &c); + count |= RDSTART_BIT; + } + + /* Get ram rodata end address */ + if ((cptr = strstr(raw_fmts, rodata_end_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c rodata_end", rodata_end, &c); + count |= RDEND_BIT; + } + memset(raw_fmts, 0, read_size); + /* + * go back to predefined NUM of bytes so that we won't miss + * the string and addr even if it comes as splited in next read. 
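Illustrative example of why the rewind is needed (not part of this patch): if
one READ_NUM_BYTES chunk ends with "...0001a0" and the next chunk starts with
"00 T text_start", neither chunk contains the whole "0001a000 T text_start"
token, so strstr() would never match it.  Stepping the file position back by
GO_BACK_FILE_POS_NUM_BYTES re-presents the tail of the previous chunk, so any
address-plus-label pair shorter than that overlap is guaranteed to show up
unsplit in some later chunk.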
+ */ + filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES; + } + + DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + *ramstart, *rodata_start, *rodata_end)); + + DHD_ERROR(("readmap over \n")); + +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + + set_fs(fs); + if (count == ALL_MAP_VAL) { + return BCME_OK; + } + DHD_ERROR(("readmap error 0X%x \n", count)); + return BCME_ERROR; +} + +static void +dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file) +{ + struct file *filep = NULL; + mm_segment_t fs; + char *raw_fmts = NULL; + uint32 logstrs_size = 0; + + int error = 0; + uint32 ramstart = 0; + uint32 rodata_start = 0; + uint32 rodata_end = 0; + uint32 logfilebase = 0; + + error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end); + if (error == BCME_ERROR) { + DHD_ERROR(("readmap Error!! \n")); + /* don't do event log parsing in actual case */ + temp->raw_sstr = NULL; + return; + } + DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + ramstart, rodata_start, rodata_end)); + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(str_file, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file)); + goto fail; + } + + /* Full file size is huge. Just read required part */ + logstrs_size = rodata_end - rodata_start; + + raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + logfilebase = rodata_start - ramstart; + + error = generic_file_llseek(filep, logfilebase, SEEK_SET); + if (error < 0) { + DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos)); + if (error != logstrs_size) { + DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = raw_fmts; + temp->ramstart = ramstart; + temp->rodata_start = rodata_start; + temp->rodata_end = rodata_end; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = raw_fmts; + temp->rom_ramstart = ramstart; + temp->rom_rodata_start = rodata_start; + temp->rom_rodata_end = rodata_end; + } + + filp_close(filep, NULL); + set_fs(fs); + + return; +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + set_fs(fs); + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + return; +} + +#endif /* SHOW_LOGTRACE */ + + +dhd_pub_t * +dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) +{ + dhd_info_t *dhd = NULL; + struct net_device *net = NULL; + char if_name[IFNAMSIZ] = {'\0'}; + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + wifi_adapter_info_t *adapter = NULL; + + dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef STBLINUX + DHD_ERROR(("%s\n", driver_target)); +#endif /* STBLINUX */ + /* will implement get_ids for DBUS later */ +#if defined(BCMSDIO) + dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num); +#endif + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); + + /* Allocate primary dhd_info */ + dhd = wifi_platform_prealloc(adapter, 
DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t)); + if (dhd == NULL) { + dhd = MALLOC(osh, sizeof(dhd_info_t)); + if (dhd == NULL) { + DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); + goto fail; + } + } + memset(dhd, 0, sizeof(dhd_info_t)); + dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC; + + dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */ + + dhd->pub.osh = osh; + dhd->adapter = adapter; + +#ifdef GET_CUSTOM_MAC_ENABLE + wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet); +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef CUSTOM_FORCE_NODFS_FLAG + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, + dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec, + dhd->pub.dhd_cflags); +#endif /* CUSTOM_COUNTRY_CODE */ + dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; + dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; + + /* Initialize thread based operation and lock */ + sema_init(&dhd->sdsem, 1); + + /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name. + * This is indeed a hack but we have to make it work properly before we have a better + * solution + */ + dhd_update_fw_nv_path(dhd); + + /* Link to info module */ + dhd->pub.info = dhd; + + + /* Link to bus module */ + dhd->pub.bus = bus; + dhd->pub.hdrlen = bus_hdrlen; + + /* Set network interface name if it was provided as module parameter */ + if (iface_name[0]) { + int len; + char ch; + strncpy(if_name, iface_name, IFNAMSIZ); + if_name[IFNAMSIZ - 1] = 0; + len = strlen(if_name); + ch = if_name[len - 1]; + if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) + strcat(if_name, "%d"); + } + + /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */ + net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL); + if (net == NULL) { + goto fail; + } + + + dhd_state |= DHD_ATTACH_STATE_ADD_IF; +#ifdef DHD_L2_FILTER + /* initialize the l2_filter_cnt */ + dhd->pub.l2_filter_cnt = 0; +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + + mutex_init(&dhd->dhd_iovar_mutex); + sema_init(&dhd->proto_sem, 1); + +#ifdef PROP_TXSTATUS + spin_lock_init(&dhd->wlfc_spinlock); + + dhd->pub.skip_fc = dhd_wlfc_skip_fc; + dhd->pub.plat_init = dhd_wlfc_plat_init; + dhd->pub.plat_deinit = dhd_wlfc_plat_deinit; + +#ifdef DHD_WLFC_THREAD + init_waitqueue_head(&dhd->pub.wlfc_wqhead); + dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread"); + if (IS_ERR(dhd->pub.wlfc_thread)) { + DHD_ERROR(("create wlfc thread failed\n")); + goto fail; + } else { + wake_up_process(dhd->pub.wlfc_thread); + } +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + + /* Initialize other structure content */ + init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->d3ack_wait); + init_waitqueue_head(&dhd->ctrl_wait); + init_waitqueue_head(&dhd->dhd_bus_busy_state_wait); + dhd->pub.dhd_bus_busy_state = 0; + + /* Initialize the spinlocks */ + spin_lock_init(&dhd->sdlock); + spin_lock_init(&dhd->txqlock); + spin_lock_init(&dhd->dhd_lock); + spin_lock_init(&dhd->rxf_lock); +#if defined(RXFRAME_THREAD) + dhd->rxthread_enabled = TRUE; +#endif /* defined(RXFRAME_THREAD) */ + +#ifdef DHDTCPACK_SUPPRESS + spin_lock_init(&dhd->tcpack_lock); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Initialize Wakelock stuff */ + 
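/*
 * Illustrative sketch, not part of this patch: the watchdog and runtime-PM
 * timers set up a little further down in dhd_attach() use the split timer API.
 * On v4.15+ kernels the callback receives the timer itself and recovers the
 * owning structure with from_timer() (a container_of() wrapper), and the third
 * argument of timer_setup() is a flags word (0, TIMER_DEFERRABLE, ...) rather
 * than a ->data value:
 *
 *	// hypothetical callback; the driver's real ones are dhd_watchdog()
 *	// and dhd_runtimepm()
 *	static void example_timer_fn(struct timer_list *t)
 *	{
 *		dhd_info_t *dhd = from_timer(dhd, t, timer);
 *		// ... do the work, then mod_timer() to re-arm if needed ...
 *	}
 *
 *	timer_setup(&dhd->timer, example_timer_fn, 0);
 *	mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
 *
 * Pre-4.15 kernels keep the init_timer()/timer.data form shown in the #else
 * branches.
 */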
spin_lock_init(&dhd->wakelock_spinlock); + spin_lock_init(&dhd->wakelock_evt_spinlock); + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->wakelock_wd_counter = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); +#endif /* CONFIG_HAS_WAKELOCK */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->dhd_net_if_mutex); + mutex_init(&dhd->dhd_suspend_mutex); +#endif + dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + + /* Attach and link in the protocol */ + if (dhd_prot_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_prot_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; + +#ifdef WL_CFG80211 + /* Attach and link in the cfg80211 */ + if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) { + DHD_ERROR(("wl_cfg80211_attach failed\n")); + goto fail; + } + + dhd_monitor_init(&dhd->pub); + dhd_state |= DHD_ATTACH_STATE_CFG80211; +#endif +#ifdef DHD_LOG_DUMP + dhd_log_dump_init(&dhd->pub); +#endif /* DHD_LOG_DUMP */ +#if defined(WL_WIRELESS_EXT) + /* Attach and link in the iw */ + if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) { + if (wl_iw_attach(net, (void *)&dhd->pub) != 0) { + DHD_ERROR(("wl_iw_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef SHOW_LOGTRACE + dhd_init_logstrs_array(&dhd->event_data); + dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path); + dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path); +#endif /* SHOW_LOGTRACE */ + + if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { + DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA)); + goto fail; + } + + + + /* Set up the watchdog timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&dhd->timer, dhd_watchdog, dhd_watchdog_ms); +#else + init_timer(&dhd->timer); + dhd->timer.data = (ulong)dhd; + dhd->timer.function = dhd_watchdog; + dhd->default_wd_interval = dhd_watchdog_ms; +#endif + + if (dhd_watchdog_prio >= 0) { + /* Initialize watchdog thread */ + PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread"); + if (dhd->thr_wdt_ctl.thr_pid < 0) { + goto fail; + } + + } else { + dhd->thr_wdt_ctl.thr_pid = -1; + } + +#ifdef DHD_PCIE_RUNTIMEPM + /* Setup up the runtime PM Idlecount timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&dhd->rpm_timer, dhd_runtimepm, 0); +#else + init_timer(&dhd->rpm_timer); + dhd->rpm_timer.data = (ulong)dhd; + dhd->rpm_timer.function = dhd_runtimepm; +#endif + + dhd->rpm_timer_valid = FALSE; + + dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID; + PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread"); + if (dhd->thr_rpm_ctl.thr_pid < 0) { + goto fail; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + +#ifdef DEBUGGER + debugger_init((void *) bus); +#endif + + /* Set up the bottom half handler */ + if (dhd_dpc_prio >= 0) { + /* Initialize DPC thread */ + PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc"); + if (dhd->thr_dpc_ctl.thr_pid < 0) { + goto fail; + } + } else { + /* use tasklet for dpc */ + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->thr_dpc_ctl.thr_pid = -1; + } + + if (dhd->rxthread_enabled) { + bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND); + /* Initialize RXF thread */ + PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf"); + if (dhd->thr_rxf_ctl.thr_pid < 0) { + goto fail; + } + } + + dhd_state |= 
DHD_ATTACH_STATE_THREADS_CREATED; + +#if defined(CONFIG_PM_SLEEP) + if (!dhd_pm_notifier_registered) { + dhd_pm_notifier_registered = TRUE; + dhd->pm_notifier.notifier_call = dhd_pm_callback; + dhd->pm_notifier.priority = 10; + register_pm_notifier(&dhd->pm_notifier); + } + +#endif /* CONFIG_PM_SLEEP */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20; + dhd->early_suspend.suspend = dhd_early_suspend; + dhd->early_suspend.resume = dhd_late_resume; + register_early_suspend(&dhd->early_suspend); + dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd); +#ifdef DEBUG_CPU_FREQ + dhd->new_freq = alloc_percpu(int); + dhd->freq_trans.notifier_call = dhd_cpufreq_notifier; + cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); +#endif +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMSDIO + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX); +#elif defined(BCMPCIE) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); +#else + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* BCMSDIO */ +#endif /* DHDTCPACK_SUPPRESS */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + + dhd_state |= DHD_ATTACH_STATE_DONE; + dhd->dhd_state = dhd_state; + + dhd_found++; +#ifdef DHD_DEBUG_PAGEALLOC + register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(DHD_LB) + DHD_ERROR(("DHD LOAD BALANCING Enabled\n")); + + dhd_lb_set_default_cpus(dhd); + + /* Initialize the CPU Masks */ + if (dhd_cpumasks_init(dhd) == 0) { + + /* Now we have the current CPU maps, run through candidacy */ + dhd_select_cpu_candidacy(dhd); + + /* + * If we are able to initialize CPU masks, lets register to the + * CPU Hotplug framework to change the CPU for each job dynamically + * using candidacy algorithm. 
+ */ + dhd->cpu_notifier.notifier_call = dhd_cpu_callback; + register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */ + } else { + /* + * We are unable to initialize CPU masks, so candidacy algorithm + * won't run, but still Load Balancing will be honoured based + * on the CPUs allocated for a given job statically during init + */ + dhd->cpu_notifier.notifier_call = NULL; + DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n", + __FUNCTION__)); + } + + + DHD_LB_STATS_INIT(&dhd->pub); + + /* Initialize the Load Balancing Tasklets and Napi object */ +#if defined(DHD_LB_TXC) + tasklet_init(&dhd->tx_compl_tasklet, + dhd_lb_tx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + tasklet_init(&dhd->rx_compl_tasklet, + dhd_lb_rx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_RXC */ + +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + skb_queue_head_init(&dhd->rx_napi_queue); + + /* Initialize the work that dispatches NAPI job to a given core */ + INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn); + DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__)); +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ + + INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler); + + (void)dhd_sysfs_init(dhd); + + return &dhd->pub; + +fail: + if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) { + DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n", + __FUNCTION__, dhd_state, &dhd->pub)); + dhd->dhd_state = dhd_state; + dhd_detach(&dhd->pub); + dhd_free(&dhd->pub); + } + + return NULL; +} + +#include + +void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs) +{ + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + + schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs)); +} + +int dhd_get_fw_mode(dhd_info_t *dhdinfo) +{ + if (strstr(dhdinfo->fw_path, "_apsta") != NULL) + return DHD_FLAG_HOSTAP_MODE; + if (strstr(dhdinfo->fw_path, "_p2p") != NULL) + return DHD_FLAG_P2P_MODE; + if (strstr(dhdinfo->fw_path, "_ibss") != NULL) + return DHD_FLAG_IBSS_MODE; + if (strstr(dhdinfo->fw_path, "_mfg") != NULL) + return DHD_FLAG_MFG_MODE; + + return DHD_FLAG_STA_MODE; +} + +bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) +{ + int fw_len; + int nv_len; + const char *fw = NULL; + const char *nv = NULL; + wifi_adapter_info_t *adapter = dhdinfo->adapter; + + + /* Update firmware and nvram path. The path may be from adapter info or module parameter + * The path from adapter info is used for initialization only (as it won't change). + * + * The firmware_path/nvram_path module parameter may be changed by the system at run + * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private + * command may change dhdinfo->fw_path. As such we need to clear the path info in + * module parameter after it is copied. 
We won't update the path until the module parameter + * is changed again (first character is not '\0') + */ + + /* set default firmware and nvram path for built-in type driver */ + if (!dhd_download_fw_on_driverload) { +#ifdef CONFIG_BCMDHD_FW_PATH + fw = CONFIG_BCMDHD_FW_PATH; +#endif /* CONFIG_BCMDHD_FW_PATH */ +#ifdef CONFIG_BCMDHD_NVRAM_PATH + nv = CONFIG_BCMDHD_NVRAM_PATH; +#endif /* CONFIG_BCMDHD_NVRAM_PATH */ + } + + /* check if we need to initialize the path */ + if (dhdinfo->fw_path[0] == '\0') { + if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0') + fw = adapter->fw_path; + + } + if (dhdinfo->nv_path[0] == '\0') { + if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0') + nv = adapter->nv_path; + } + + /* Use module parameter if it is valid, EVEN IF the path has not been initialized + * + * TODO: need a solution for multi-chip, can't use the same firmware for all chips + */ + if (firmware_path[0] != '\0') + fw = firmware_path; + if (nvram_path[0] != '\0') + nv = nvram_path; + + if (fw && fw[0] != '\0') { + fw_len = strlen(fw); + if (fw_len >= sizeof(dhdinfo->fw_path)) { + DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n")); + return FALSE; + } + strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path)); + if (dhdinfo->fw_path[fw_len-1] == '\n') + dhdinfo->fw_path[fw_len-1] = '\0'; + } + if (nv && nv[0] != '\0') { + nv_len = strlen(nv); + if (nv_len >= sizeof(dhdinfo->nv_path)) { + DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n")); + return FALSE; + } + strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path)); + if (dhdinfo->nv_path[nv_len-1] == '\n') + dhdinfo->nv_path[nv_len-1] = '\0'; + } + + /* clear the path in module parameter */ + if (dhd_download_fw_on_driverload) { + firmware_path[0] = '\0'; + nvram_path[0] = '\0'; + } +#ifndef BCMEMBEDIMAGE + /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */ + if (dhdinfo->fw_path[0] == '\0') { + DHD_ERROR(("firmware path not found\n")); + return FALSE; + } + if (dhdinfo->nv_path[0] == '\0') { + DHD_ERROR(("nvram path not found\n")); + return FALSE; + } +#endif /* BCMEMBEDIMAGE */ + + return TRUE; +} + +#ifdef CUSTOMER_HW4_DEBUG +bool dhd_validate_chipid(dhd_pub_t *dhdp) +{ + uint chipid = dhd_bus_chip_id(dhdp); + uint config_chipid; + +#ifdef BCM4359_CHIP + config_chipid = BCM4359_CHIP_ID; +#elif defined(BCM4358_CHIP) + config_chipid = BCM4358_CHIP_ID; +#elif defined(BCM4354_CHIP) + config_chipid = BCM4354_CHIP_ID; +#elif defined(BCM4356_CHIP) + config_chipid = BCM4356_CHIP_ID; +#elif defined(BCM4339_CHIP) + config_chipid = BCM4339_CHIP_ID; +#elif defined(BCM43349_CHIP) + config_chipid = BCM43349_CHIP_ID; +#elif defined(BCM4335_CHIP) + config_chipid = BCM4335_CHIP_ID; +#elif defined(BCM43241_CHIP) + config_chipid = BCM4324_CHIP_ID; +#elif defined(BCM4330_CHIP) + config_chipid = BCM4330_CHIP_ID; +#elif defined(BCM43430_CHIP) + config_chipid = BCM43430_CHIP_ID; +#elif defined(BCM4334W_CHIP) + config_chipid = BCM43342_CHIP_ID; +#elif defined(BCM43455_CHIP) + config_chipid = BCM4345_CHIP_ID; +#else + DHD_ERROR(("%s: Unknown chip id, if you use new chipset," + " please add CONFIG_BCMXXXX into the Kernel and" + " BCMXXXX_CHIP definition into the DHD driver\n", + __FUNCTION__)); + config_chipid = 0; + + return FALSE; +#endif /* BCM4354_CHIP */ + +#if defined(BCM4359_CHIP) + if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) { + return TRUE; + } +#endif /* BCM4359_CHIP */ + + return config_chipid == chipid; +} +#endif /* CUSTOMER_HW4_DEBUG */ + +int 
+dhd_bus_start(dhd_pub_t *dhdp) +{ + int ret = -1; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + unsigned long flags; + + ASSERT(dhd); + + DHD_TRACE(("Enter %s:\n", __FUNCTION__)); + + DHD_PERIM_LOCK(dhdp); + + /* try to download image and nvram to the dongle */ + if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) { + /* Indicate FW Download has not yet done */ + dhd->pub.is_fw_download_done = FALSE; + DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path)); + ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, + dhd->fw_path, dhd->nv_path); + if (ret < 0) { + DHD_ERROR(("%s: failed to download firmware %s\n", + __FUNCTION__, dhd->fw_path)); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } + /* Indicate FW Download has succeeded */ + dhd->pub.is_fw_download_done = TRUE; + } + if (dhd->pub.busstate != DHD_BUS_LOAD) { + DHD_PERIM_UNLOCK(dhdp); + return -ENETDOWN; + } + + dhd_os_sdlock(dhdp); + + /* Start the watchdog timer */ + dhd->pub.tickcnt = 0; + dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + DHD_ENABLE_RUNTIME_PM(&dhd->pub); + + /* Bring up the bus */ + if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { + + DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); + dhd_os_sdunlock(dhdp); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } +#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE) +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhd_os_sdunlock(dhdp); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + /* Host registration for OOB interrupt */ + if (dhd_bus_oob_intr_register(dhdp)) { + /* deactivate timer and wait for the handler to finish */ +#if !defined(BCMPCIE_OOB_HOST_WAKE) + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + + dhd_os_sdunlock(dhdp); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); + return -ENODEV; + } + +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhd_os_sdlock(dhdp); + dhd_bus_oob_intr_set(dhdp, TRUE); +#else + /* Enable oob at firmware */ + dhd_enable_oob_intr(dhd->pub.bus, TRUE); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#endif +#ifdef PCIE_FULL_DONGLE + { + /* max_h2d_rings includes H2D common rings */ + uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus); + + DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__, + max_h2d_rings)); + if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) { + dhd_os_sdunlock(dhdp); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } + } +#endif /* PCIE_FULL_DONGLE */ + + /* Do protocol initialization necessary for IOCTL/IOVAR */ +#ifdef PCIE_FULL_DONGLE + dhd_os_sdunlock(dhdp); +#endif /* PCIE_FULL_DONGLE */ + ret = dhd_prot_init(&dhd->pub); + if (unlikely(ret) != BCME_OK) { + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#ifdef PCIE_FULL_DONGLE + dhd_os_sdlock(dhdp); +#endif /* PCIE_FULL_DONGLE */ + + /* If bus is not ready, can't come up */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + dhd_os_sdunlock(dhdp); + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return -ENODEV; + } + + dhd_os_sdunlock(dhdp); + + /* Bus is ready, query any dongle information */ 
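+	/* Descriptive note: dhd_sync_with_dongle() queries the dongle now that the
+	 * bus is up; if it fails, the watchdog timer armed above is deactivated
+	 * before the error is returned to the caller.
+	 */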
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__)); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->pend_ipaddr) { +#ifdef AOE_IP_ALIAS_SUPPORT + aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0); +#endif /* AOE_IP_ALIAS_SUPPORT */ + dhd->pend_ipaddr = 0; + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + DHD_PERIM_UNLOCK(dhdp); + return 0; +} +#ifdef WLTDLS +int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + uint32 tdls = tdls_on; + int ret = 0; + uint32 tdls_auto_op = 0; + uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING; + int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH; + int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW; + BCM_REFERENCE(mac); + if (!FW_SUPPORTED(dhd, tdls)) + return BCME_ERROR; + + if (dhd->tdls_enable == tdls_on) + goto auto_mode; + bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret)); + goto exit; + } + dhd->tdls_enable = tdls_on; +auto_mode: + + tdls_auto_op = auto_on; + bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret)); + goto exit; + } + + if (tdls_auto_op) { + bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time, + sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret)); + goto exit; + } + bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret)); + goto exit; + } + bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret)); + goto exit; + } + } + +exit: + return ret; +} +int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + if (dhd) + ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac); + else + ret = BCME_ERROR; + return ret; +} +int +dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = 0; + bool auto_on = false; + uint32 mode = wfd_mode; + +#ifdef ENABLE_TDLS_AUTO_MODE + if (wfd_mode) { + auto_on = false; + } else { + auto_on = true; + } +#else + auto_on = false; +#endif /* ENABLE_TDLS_AUTO_MODE */ + ret = _dhd_tdls_enable(dhd, false, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("Disable tdls_auto_op failed. 
%d\n", ret)); + return ret; + } + + + bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode), + iovbuf, sizeof(iovbuf)); + if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) && + (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret)); + return ret; + } + + ret = _dhd_tdls_enable(dhd, true, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret)); + return ret; + } + + dhd->tdls_mode = mode; + return ret; +} +#ifdef PCIE_FULL_DONGLE +void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub; + tdls_peer_node_t *cur = dhdp->peer_tbl.node; + tdls_peer_node_t *new = NULL, *prev = NULL; + dhd_if_t *dhdif; + uint8 sa[ETHER_ADDR_LEN]; + int ifidx = dhd_net2idx(dhd, dev); + + if (ifidx == DHD_BAD_IF) + return; + + dhdif = dhd->iflist[ifidx]; + memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN); + + if (connect) { + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: TDLS Peer exist already %d\n", + __FUNCTION__, __LINE__)); + return; + } + cur = cur->next; + } + + new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t)); + if (new == NULL) { + DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__)); + return; + } + memcpy(new->addr, da, ETHER_ADDR_LEN); + new->next = dhdp->peer_tbl.node; + dhdp->peer_tbl.node = new; + dhdp->peer_tbl.tdls_peer_count++; + + } else { + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + dhd_flow_rings_delete_for_peer(dhdp, ifidx, da); + if (prev) + prev->next = cur->next; + else + dhdp->peer_tbl.node = cur->next; + MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t)); + dhdp->peer_tbl.tdls_peer_count--; + return; + } + prev = cur; + cur = cur->next; + } + DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__)); + } +} +#endif /* PCIE_FULL_DONGLE */ +#endif + +bool dhd_is_concurrent_mode(dhd_pub_t *dhd) +{ + if (!dhd) + return FALSE; + + if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE) + return TRUE; + else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) == + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) + return TRUE; + else + return FALSE; +} +#if !defined(AP) && defined(WLP2P) +/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware + * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA + * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware + * would still be named as fw_bcmdhd_apsta. 
+ */ +uint32 +dhd_get_concurrent_capabilites(dhd_pub_t *dhd) +{ + int32 ret = 0; + char buf[WLC_IOCTL_SMLEN]; + bool mchan_supported = FALSE; + /* if dhd->op_mode is already set for HOSTAP and Manufacturing + * test mode, that means we only will use the mode as it is + */ + if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)) + return 0; + if (FW_SUPPORTED(dhd, vsdb)) { + mchan_supported = TRUE; + } + if (!FW_SUPPORTED(dhd, p2p)) { + DHD_TRACE(("Chip does not support p2p\n")); + return 0; + } else { + /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); + return 0; + } else { + if (buf[0] == 1) { + /* By default, chip supports single chan concurrency, + * now lets check for mchan + */ + ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE; + if (mchan_supported) + ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE; + if (FW_SUPPORTED(dhd, rsdb)) { + ret |= DHD_FLAG_RSDB_MODE; + } + if (FW_SUPPORTED(dhd, mp2p)) { + ret |= DHD_FLAG_MP2P_MODE; + } +#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) + return ret; +#else + return 0; +#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */ + } + } + } + return 0; +} +#endif + +#ifdef SUPPORT_AP_POWERSAVE +#define RXCHAIN_PWRSAVE_PPS 10 +#define RXCHAIN_PWRSAVE_QUIET_TIME 10 +#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0 +int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable) +{ + char iovbuf[128]; + int32 pps = RXCHAIN_PWRSAVE_PPS; + int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME; + int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK; + + if (enable) { + bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to enable AP power save")); + } + bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to set pps")); + } + bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time, + 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to set quiet time")); + } + bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check, + 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to set stas assoc check")); + } + } else { + bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to disable AP power save")); + } + } + + return 0; +} +#endif /* SUPPORT_AP_POWERSAVE */ + + +int +dhd_preinit_ioctls(dhd_pub_t *dhd) +{ + int ret = 0; + char eventmask[WL_EVENTING_MASK_LEN]; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + uint32 buf_key_b4_m4 = 1; + uint8 msglen; + eventmsgs_ext_t *eventmask_msg = NULL; + char* iov_buf = NULL; + int ret2 = 0; +#if defined(CUSTOM_AMPDU_BA_WSIZE) + uint32 ampdu_ba_wsize = 0; +#endif +#if defined(CUSTOM_AMPDU_MPDU) + int32 ampdu_mpdu = 0; +#endif +#if defined(CUSTOM_AMPDU_RELEASE) + int32 ampdu_release = 0; +#endif +#if 
defined(CUSTOM_AMSDU_AGGSF) + int32 amsdu_aggsf = 0; +#endif +#ifdef SUPPORT_SENSORHUB + int32 shub_enable = 0; +#endif /* SUPPORT_SENSORHUB */ +#if defined(BCMSDIO) +#ifdef PROP_TXSTATUS + int wlfc_enable = TRUE; +#ifndef DISABLE_11N + uint32 hostreorder = 1; +#endif /* DISABLE_11N */ +#endif /* PROP_TXSTATUS */ +#endif +#ifdef PCIE_FULL_DONGLE + uint32 wl_ap_isolate; +#endif /* PCIE_FULL_DONGLE */ + +#if defined(BCMSDIO) + /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */ + uint32 frameburst = 0; +#else + uint32 frameburst = 1; +#endif /* BCMSDIO */ + +#ifdef DHD_ENABLE_LPC + uint32 lpc = 1; +#endif /* DHD_ENABLE_LPC */ + uint power_mode = PM_FAST; +#if defined(BCMSDIO) + uint32 dongle_align = DHD_SDALIGN; + uint32 glom = CUSTOM_GLOM_SETTING; +#endif /* defined(BCMSDIO) */ +#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL) + uint32 credall = 1; +#endif +#if defined(VSDB) || defined(ROAM_ENABLE) + uint bcn_timeout = CUSTOM_BCN_TIMEOUT; +#else + uint bcn_timeout = 4; +#endif /* VSDB || ROAM_ENABLE */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + uint32 bcn_li_bcn = 1; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint retry_max = CUSTOM_ASSOC_RETRY_MAX; +#if defined(ARP_OFFLOAD_SUPPORT) + int arpoe = 1; +#endif + int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME; + int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME; + int scan_passive_time = DHD_SCAN_PASSIVE_TIME; + char buf[WLC_IOCTL_SMLEN]; + char *ptr; + uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ +#ifdef ROAM_ENABLE + uint roamvar = 0; + int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL}; + int roam_scan_period[2] = {10, WLC_BAND_ALL}; + int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL}; +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC + int roam_fullscan_period = 60; +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ + int roam_fullscan_period = 120; +#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#else +#ifdef DISABLE_BUILTIN_ROAM + uint roamvar = 1; +#endif /* DISABLE_BUILTIN_ROAM */ +#endif /* ROAM_ENABLE */ + +#if defined(SOFTAP) + uint dtim = 1; +#endif +#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) + uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */ + struct ether_addr p2p_ea; +#endif +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ +#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) + uint32 apsta = 1; /* Enable APSTA mode */ +#elif defined(SOFTAP_AND_GC) + uint32 apsta = 0; + int ap_mode = 1; +#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */ +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#ifdef DISABLE_11N + uint32 nmode = 0; +#endif /* DISABLE_11N */ + +#ifdef USE_WL_TXBF + uint32 txbf = 1; +#endif /* USE_WL_TXBF */ +#if defined(PROP_TXSTATUS) +#ifdef USE_WFA_CERT_CONF + uint32 proptx = 0; +#endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ +#ifdef CUSTOM_PSPRETEND_THR + uint32 pspretend_thr = CUSTOM_PSPRETEND_THR; +#endif + uint32 rsdb_mode = 0; +#ifdef ENABLE_TEMP_THROTTLING + wl_temp_control_t temp_control; +#endif /* ENABLE_TEMP_THROTTLING */ +#ifdef DISABLE_PRUNED_SCAN + uint32 scan_features = 0; +#endif /* DISABLE_PRUNED_SCAN */ +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = TRUE; +#endif /* PKT_FILTER_SUPPORT */ +#ifdef WLTDLS + dhd->tdls_enable = FALSE; + 
dhd_tdls_set_mode(dhd, false); +#endif /* WLTDLS */ + dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM; + DHD_TRACE(("Enter %s\n", __FUNCTION__)); + dhd->op_mode = 0; +#ifdef CUSTOMER_HW4_DEBUG + if (!dhd_validate_chipid(dhd)) { + DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n", + __FUNCTION__, dhd_bus_chip_id(dhd))); +#ifndef SUPPORT_MULTIPLE_CHIPS + ret = BCME_BADARG; + goto done; +#endif /* !SUPPORT_MULTIPLE_CHIPS */ + } +#endif /* CUSTOMER_HW4_DEBUG */ + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || + (op_mode == DHD_FLAG_MFG_MODE)) { +#ifdef DHD_PCIE_RUNTIMEPM + /* Disable RuntimePM in mfg mode */ + DHD_DISABLE_RUNTIME_PM(dhd); + DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__)); +#endif /* DHD_PCIE_RUNTIME_PM */ + /* Check and adjust IOCTL response timeout for Manufactring firmware */ + dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT); + DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n", + __FUNCTION__)); + } else { + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__)); + } +#ifdef GET_CUSTOM_MAC_ENABLE + ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet); + if (!ret) { + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + ret = BCME_NOTUP; + goto done; + } + memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN); + } else { +#endif /* GET_CUSTOM_MAC_ENABLE */ + /* Get the default device MAC address directly from firmware */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret)); + ret = BCME_NOTUP; + goto done; + } + /* Update public MAC address after reading from Firmware */ + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); + +#ifdef GET_CUSTOM_MAC_ENABLE + } +#endif /* GET_CUSTOM_MAC_ENABLE */ + + /* get a capabilities from firmware */ + { + uint32 cap_buf_size = sizeof(dhd->fw_capabilities); + memset(dhd->fw_capabilities, 0, cap_buf_size); + bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities, + (cap_buf_size - 1), FALSE, 0)) < 0) + { + DHD_ERROR(("%s: Get Capability failed (error=%d)\n", + __FUNCTION__, ret)); + return 0; + } + + memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1)); + dhd->fw_capabilities[0] = ' '; + dhd->fw_capabilities[cap_buf_size - 2] = ' '; + dhd->fw_capabilities[cap_buf_size - 1] = '\0'; + } + + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) || + (op_mode == DHD_FLAG_HOSTAP_MODE)) { +#ifdef SET_RANDOM_MAC_SOFTAP + uint rand_mac; +#endif /* SET_RANDOM_MAC_SOFTAP */ + dhd->op_mode = DHD_FLAG_HOSTAP_MODE; +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif +#ifdef SET_RANDOM_MAC_SOFTAP + SRANDOM32((uint)jiffies); + rand_mac = RANDOM32(); + iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */ + iovbuf[1] = (unsigned char)(vendor_oui >> 8); + iovbuf[2] = (unsigned char)vendor_oui; + iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0; + iovbuf[4] = (unsigned 
char)(rand_mac >> 8); + iovbuf[5] = (unsigned char)(rand_mac >> 16); + + bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + } else + memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); +#endif /* SET_RANDOM_MAC_SOFTAP */ +#if !defined(AP) && defined(WL_CFG80211) + /* Turn off MPC in AP mode */ + bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret)); + } +#endif +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ +#ifdef SUPPORT_AP_POWERSAVE + dhd_set_ap_powersave(dhd, 0, TRUE); +#endif /* SUPPORT_AP_POWERSAVE */ +#ifdef SOFTAP_UAPSD_OFF + bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", + __FUNCTION__, ret)); + } +#endif /* SOFTAP_UAPSD_OFF */ + } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || + (op_mode == DHD_FLAG_MFG_MODE)) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif /* PKT_FILTER_SUPPORT */ + dhd->op_mode = DHD_FLAG_MFG_MODE; +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + if (FW_SUPPORTED(dhd, rsdb)) { + rsdb_mode = 0; + bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n", + __FUNCTION__, ret)); + } + } + } else { + uint32 concurrent_mode = 0; + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) || + (op_mode == DHD_FLAG_P2P_MODE)) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif + dhd->op_mode = DHD_FLAG_P2P_MODE; + } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) || + (op_mode == DHD_FLAG_IBSS_MODE)) { + dhd->op_mode = DHD_FLAG_IBSS_MODE; + } else + dhd->op_mode = DHD_FLAG_STA_MODE; +#if !defined(AP) && defined(WLP2P) + if (dhd->op_mode != DHD_FLAG_IBSS_MODE && + (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 1; +#endif + dhd->op_mode |= concurrent_mode; + } + + /* Check if we are enabling p2p */ + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret)); + } + +#if defined(SOFTAP_AND_GC) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, + (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) { + DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret)); + } +#endif + memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(&p2p_ea); + bcm_mkiovar("p2p_da_override", (char *)&p2p_ea, + ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + 
DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret)); + } else { + DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n")); + } + } +#else + (void)concurrent_mode; +#endif + } + +#ifdef RSDB_MODE_FROM_FILE + (void)dhd_rsdb_mode_from_file(dhd); +#endif /* RSDB_MODE_FROM_FILE */ + +#ifdef DISABLE_PRUNED_SCAN + if (FW_SUPPORTED(dhd, rsdb)) { + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("scan_features", (char *)&scan_features, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, + iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s get scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } else { + memcpy(&scan_features, iovbuf, 4); + scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM; + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("scan_features", (char *)&scan_features, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } + } + } +#endif /* DISABLE_PRUNED_SCAN */ + + DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n", + dhd->op_mode, MAC2STRDBG(dhd->mac.octet))); + #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) + dhd->info->rxthread_enabled = FALSE; + else + dhd->info->rxthread_enabled = TRUE; + #endif + /* Set Country code */ + if (dhd->dhd_cspec.ccode[0] != 0) { + bcm_mkiovar("country", (char *)&dhd->dhd_cspec, + sizeof(wl_country_t), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); + } + + + /* Set Listen Interval */ + bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); + +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) { + DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar)); + } +#endif /* USE_WFA_CERT_CONF */ + /* Disable built-in roaming to allowed ext supplicant to take care of roaming */ + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ +#if defined(ROAM_ENABLE) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger, + sizeof(roam_trigger), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period, + sizeof(roam_scan_period), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret)); + if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta, + sizeof(roam_delta), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret)); + bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret)); +#endif /* ROAM_ENABLE */ + +#ifdef CUSTOM_EVENT_PM_WAKE + bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, 
iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef WLTDLS +#ifdef ENABLE_TDLS_AUTO_MODE + /* by default TDLS on and auto mode on */ + _dhd_tdls_enable(dhd, true, true, NULL); +#else + /* by default TDLS on and auto mode off */ + _dhd_tdls_enable(dhd, true, false, NULL); +#endif /* ENABLE_TDLS_AUTO_MODE */ +#endif /* WLTDLS */ + +#ifdef DHD_ENABLE_LPC + /* Set lpc 1 */ + bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc)); + + bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret)); + } + } +#endif /* DHD_ENABLE_LPC */ + + /* Set PowerSave mode */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); + +#if defined(BCMSDIO) + /* Match Host and Dongle rx alignment */ + bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + +#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL) + /* enable credall to reduce the chance of no bus credit happened. */ + bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) { + DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom)); + } +#endif /* USE_WFA_CERT_CONF */ + if (glom != DEFAULT_GLOM_VALUE) { + DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom)); + bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + } +#endif /* defined(BCMSDIO) */ + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + /* Setup assoc_retry_max count to reconnect target AP in dongle */ + bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#if defined(AP) && !defined(WLP2P) + /* Turn off MPC in AP mode */ + bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* defined(AP) && !defined(WLP2P) */ + +#ifdef MIMO_ANT_SETTING + dhd_sel_ant_from_file(dhd); +#endif /* MIMO_ANT_SETTING */ + +#if defined(SOFTAP) + if (ap_fw_loaded == TRUE) { + dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); + } +#endif + +#if defined(KEEP_ALIVE) + { + /* Set Keep Alive : be sure to use FW with -keepalive */ + int res; + +#if defined(SOFTAP) + if (ap_fw_loaded == FALSE) +#endif + if (!(dhd->op_mode & + (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) 
{ + if ((res = dhd_keep_alive_onoff(dhd)) < 0) + DHD_ERROR(("%s set keepalive failed %d\n", + __FUNCTION__, res)); + } + } +#endif /* defined(KEEP_ALIVE) */ + +#ifdef USE_WL_TXBF + bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret)); + } +#endif /* USE_WL_TXBF */ + +#ifdef USE_WFA_CERT_CONF +#ifdef USE_WL_FRAMEBURST + if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) { + DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst)); + } +#endif /* USE_WL_FRAMEBURST */ +#ifdef DISABLE_FRAMEBURST_VSDB + g_frameburst = frameburst; +#endif /* DISABLE_FRAMEBURST_VSDB */ +#endif /* USE_WFA_CERT_CONF */ +#ifdef DISABLE_WL_FRAMEBURST_SOFTAP + /* Disable Framebursting for SoftAP */ + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + frameburst = 0; + } +#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */ + /* Set frameburst to value */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst, + sizeof(frameburst), TRUE, 0)) < 0) { + DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret)); + } +#if defined(CUSTOM_AMPDU_BA_WSIZE) + /* Set ampdu ba wsize to 64 or 16 */ +#ifdef CUSTOM_AMPDU_BA_WSIZE + ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE; +#endif + if (ampdu_ba_wsize != 0) { + bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n", + __FUNCTION__, ampdu_ba_wsize, ret)); + } + } +#endif + + iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (iov_buf == NULL) { + DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN)); + ret = BCME_NOMEM; + goto done; + } +#ifdef ENABLE_TEMP_THROTTLING + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + memset(&temp_control, 0, sizeof(temp_control)); + temp_control.enable = 1; + temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT; + bcm_mkiovar("temp_throttle_control", (char *)&temp_control, + sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s Set temp_throttle_control to %d failed \n", + __FUNCTION__, ret)); + } + } +#endif /* ENABLE_TEMP_THROTTLING */ +#if defined(CUSTOM_AMPDU_MPDU) + ampdu_mpdu = CUSTOM_AMPDU_MPDU; + if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) { + bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n", + __FUNCTION__, CUSTOM_AMPDU_MPDU, ret)); + } + } +#endif /* CUSTOM_AMPDU_MPDU */ + +#if defined(CUSTOM_AMPDU_RELEASE) + ampdu_release = CUSTOM_AMPDU_RELEASE; + if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) { + bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set ampdu_release to %d failed %d\n", + __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret)); + } + } +#endif /* CUSTOM_AMPDU_RELEASE */ + +#if defined(CUSTOM_AMSDU_AGGSF) + amsdu_aggsf = CUSTOM_AMSDU_AGGSF; + if (amsdu_aggsf != 0) { + bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n", + __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret)); + } + } +#endif /* CUSTOM_AMSDU_AGGSF */ + +#ifdef CUSTOM_PSPRETEND_THR + /* Turn off MPC in AP mode */ + bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n", + __FUNCTION__, ret)); + } +#endif + + bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret)); + } + + /* Read event_msgs mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + + /* Setup event_msgs */ + setbit(eventmask, WLC_E_SET_SSID); + setbit(eventmask, WLC_E_PRUNE); + setbit(eventmask, WLC_E_AUTH); + setbit(eventmask, WLC_E_AUTH_IND); + setbit(eventmask, WLC_E_ASSOC); + setbit(eventmask, WLC_E_REASSOC); + setbit(eventmask, WLC_E_REASSOC_IND); + if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE)) + setbit(eventmask, WLC_E_DEAUTH); + setbit(eventmask, WLC_E_DEAUTH_IND); + setbit(eventmask, WLC_E_DISASSOC_IND); + setbit(eventmask, WLC_E_DISASSOC); + setbit(eventmask, WLC_E_JOIN); + setbit(eventmask, WLC_E_START); + setbit(eventmask, WLC_E_ASSOC_IND); + setbit(eventmask, WLC_E_PSK_SUP); + setbit(eventmask, WLC_E_LINK); + setbit(eventmask, WLC_E_MIC_ERROR); + setbit(eventmask, WLC_E_ASSOC_REQ_IE); + setbit(eventmask, WLC_E_ASSOC_RESP_IE); +#ifndef WL_CFG80211 + setbit(eventmask, WLC_E_PMKID_CACHE); + setbit(eventmask, WLC_E_TXFAIL); +#endif + setbit(eventmask, WLC_E_JOIN_START); + setbit(eventmask, WLC_E_SCAN_COMPLETE); +#ifdef DHD_DEBUG + setbit(eventmask, WLC_E_SCAN_CONFIRM_IND); +#endif +#ifdef WLMEDIA_HTSF + setbit(eventmask, WLC_E_HTSFSYNC); +#endif /* WLMEDIA_HTSF */ +#ifdef PNO_SUPPORT + setbit(eventmask, WLC_E_PFN_NET_FOUND); + setbit(eventmask, WLC_E_PFN_BEST_BATCHING); + setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND); + setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST); +#endif /* PNO_SUPPORT */ + /* enable dongle roaming event */ + setbit(eventmask, WLC_E_ROAM); + setbit(eventmask, WLC_E_BSSID); +#ifdef WLTDLS + setbit(eventmask, WLC_E_TDLS_PEER_EVENT); +#endif /* WLTDLS */ +#ifdef WL_CFG80211 + setbit(eventmask, WLC_E_ESCAN_RESULT); + setbit(eventmask, WLC_E_AP_STARTED); + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + setbit(eventmask, WLC_E_ACTION_FRAME_RX); + setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); + } +#endif /* WL_CFG80211 */ + +#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) + if (dhd_logtrace_from_file(dhd)) { + setbit(eventmask, WLC_E_TRACE); + } else { + clrbit(eventmask, WLC_E_TRACE); + } +#elif defined(SHOW_LOGTRACE) + setbit(eventmask, WLC_E_TRACE); +#else + clrbit(eventmask, WLC_E_TRACE); +#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */ + + setbit(eventmask, WLC_E_CSA_COMPLETE_IND); +#ifdef DHD_LOSSLESS_ROAMING + setbit(eventmask, WLC_E_ROAM_PREP); +#endif +#ifdef CUSTOM_EVENT_PM_WAKE + setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT); +#endif /* CUSTOM_EVENT_PM_WAKE */ +#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) + 
dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */ + + /* Write updated Event mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + + /* make up event mask ext message iovar for event larger than 128 */ + msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE; + eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL); + if (eventmask_msg == NULL) { + DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen)); + ret = BCME_NOMEM; + goto done; + } + bzero(eventmask_msg, msglen); + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + + /* Read event_msgs_ext mask */ + bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN); + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0); + if (ret2 == 0) { /* event_msgs_ext must be supported */ + bcopy(iov_buf, eventmask_msg, msglen); +#ifdef GSCAN_SUPPORT + setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT); + setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE); + setbit(eventmask_msg->mask, WLC_E_PFN_SWC); +#endif /* GSCAN_SUPPORT */ +#ifdef BT_WIFI_HANDOVER + setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ); +#endif /* BT_WIFI_HANDOVER */ + + /* Write updated Event mask */ + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->command = EVENTMSGS_SET_MASK; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, + msglen, iov_buf, WLC_IOCTL_SMLEN); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) { + DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret)); + goto done; + } + } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) { + /* Skip for BCME_UNSUPPORTED or BCME_VERSION */ + DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n", + __FUNCTION__, ret2)); + } else { + DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2)); + ret = ret2; + goto done; + } + + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, + sizeof(scan_assoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, + sizeof(scan_unassoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time, + sizeof(scan_passive_time), TRUE, 0); + +#ifdef ARP_OFFLOAD_SUPPORT + /* Set and enable ARP offload feature for STA only */ +#if defined(SOFTAP) + if (arpoe && !ap_fw_loaded) { +#else + if (arpoe) { +#endif + dhd_arp_offload_enable(dhd, TRUE); + dhd_arp_offload_set(dhd, dhd_arp_mode); + } else { + dhd_arp_offload_enable(dhd, FALSE); + dhd_arp_offload_set(dhd, 0); + } + dhd_arp_enable = arpoe; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#ifdef PKT_FILTER_SUPPORT + /* Setup default defintions for pktfilter , enable in suspend */ + dhd->pktfilter_count = 6; + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; + /* apply APP pktfilter */ + dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806"; + + /* Setup filter to allow only unicast */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00"; + + /* Add filter to 
pass multicastDNS packet and NOT filter out as Broadcast */ + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd->pktfilter_count = 4; + /* Setup filter to block broadcast and NAT Keepalive packets */ + /* discard all broadcast packets */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ + +#if defined(SOFTAP) + if (ap_fw_loaded) { + dhd_enable_packet_filter(0, dhd); + } +#endif /* defined(SOFTAP) */ + dhd_set_packet_filter(dhd); +#endif /* PKT_FILTER_SUPPORT */ +#ifdef DISABLE_11N + bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret)); +#endif /* DISABLE_11N */ + +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + /* Print fw version info */ + DHD_ERROR(("Firmware version = %s\n", buf)); + strncpy(fw_version, buf, FW_VER_STR_LEN); +#if defined(BCMSDIO) + dhd_set_version_info(dhd, buf); +#endif /* defined(BCMSDIO) */ +#ifdef WRITE_WLANINFO + sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path); +#endif /* WRITE_WLANINFO */ + } + +#if defined(BCMSDIO) + dhd_txglom_enable(dhd, TRUE); +#endif /* defined(BCMSDIO) */ + +#if defined(BCMSDIO) +#ifdef PROP_TXSTATUS + if (disable_proptx || +#ifdef PROP_TXSTATUS_VSDB + /* enable WLFC only if the firmware is VSDB when it is in STA mode */ + (dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) || +#endif /* PROP_TXSTATUS_VSDB */ + FALSE) { + wlfc_enable = FALSE; + } + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) { + DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx)); + wlfc_enable = proptx; + } +#endif /* USE_WFA_CERT_CONF */ + +#ifndef DISABLE_11N + bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf)); + if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, + sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n", + __FUNCTION__, ret2, hostreorder)); + + bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, + iovbuf, sizeof(iovbuf)); + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + DHD_ERROR(("%s wl ampdu_hostreorder. 
ret --> %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + } + if (ret2 != BCME_OK) + hostreorder = 0; + } +#endif /* DISABLE_11N */ + + + if (wlfc_enable) + dhd_wlfc_init(dhd); +#ifndef DISABLE_11N + else if (hostreorder) + dhd_wlfc_hostreorder_init(dhd); +#endif /* DISABLE_11N */ + +#endif /* PROP_TXSTATUS */ +#endif /* BCMSDIO || BCMBUS */ +#ifdef PCIE_FULL_DONGLE + /* For FD we need all the packets at DHD to handle intra-BSS forwarding */ + if (FW_SUPPORTED(dhd, ap)) { + wl_ap_isolate = AP_ISOLATE_SENDUP_ALL; + bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + } +#endif /* PCIE_FULL_DONGLE */ +#ifdef PNO_SUPPORT + if (!dhd->pno_state) { + dhd_pno_init(dhd); + } +#endif +#ifdef WL11U + dhd_interworking_enable(dhd); +#endif /* WL11U */ + +#ifdef SUPPORT_SENSORHUB + bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf)); + if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s failed to get shub hub enable information %d\n", + __FUNCTION__, ret)); + dhd->info->shub_enable = 0; + } else { + memcpy(&shub_enable, iovbuf, sizeof(uint32)); + dhd->info->shub_enable = shub_enable; + DHD_ERROR(("%s: checking sensorhub enable %d\n", + __FUNCTION__, dhd->info->shub_enable)); + } +#endif /* SUPPORT_SENSORHUB */ +done: + + if (eventmask_msg) + kfree(eventmask_msg); + if (iov_buf) + kfree(iov_buf); + + return ret; +} + + +int +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set) +{ + char buf[strlen(name) + 1 + cmd_len]; + int len = sizeof(buf); + wl_ioctl_t ioc; + int ret; + + len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = set? 
WLC_SET_VAR : WLC_GET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + if (!set && ret >= 0) + memcpy(cmd_buf, buf, cmd_len); + + return ret; +} + +int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx) +{ + struct dhd_info *dhd = dhdp->info; + struct net_device *dev = NULL; + + ASSERT(dhd && dhd->iflist[ifidx]); + dev = dhd->iflist[ifidx]->net; + ASSERT(dev); + + if (netif_running(dev)) { + DHD_ERROR(("%s: Must be down to change its MTU", dev->name)); + return BCME_NOTDOWN; + } + +#define DHD_MIN_MTU 1500 +#define DHD_MAX_MTU 1752 + + if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) { + DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu)); + return BCME_BADARG; + } + + dev->mtu = new_mtu; + return 0; +} + +#ifdef ARP_OFFLOAD_SUPPORT +/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */ +void +aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx) +{ + u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */ + int i; + int ret; + + bzero(ipv4_buf, sizeof(ipv4_buf)); + + /* display what we've got */ + ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx); + DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__)); +#ifdef AOE_DBG + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif + /* now we saved hoste_ip table, clr it in the dongle AOE */ + dhd_aoe_hostip_clr(dhd_pub, idx); + + if (ret) { + DHD_ERROR(("%s failed\n", __FUNCTION__)); + return; + } + + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (add && (ipv4_buf[i] == 0)) { + ipv4_buf[i] = ipa; + add = FALSE; /* added ipa to local table */ + DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n", + __FUNCTION__, i)); + } else if (ipv4_buf[i] == ipa) { + ipv4_buf[i] = 0; + DHD_ARPOE(("%s: removed IP:%x from temp table %d\n", + __FUNCTION__, ipa, i)); + } + + if (ipv4_buf[i] != 0) { + /* add back host_ip entries from our local cache */ + dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx); + DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n", + __FUNCTION__, ipv4_buf[i], i)); + } + } +#ifdef AOE_DBG + /* see the resulting hostip table */ + dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx); + DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__)); + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif +} + +/* + * Notification mechanism from kernel to our driver. This function is called by the Linux kernel + * whenever there is an event related to an IP address. 
+ * ptr : kernel provided pointer to IP address that has changed + */ +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + int idx; + + if (!dhd_arp_enable) + return NOTIFY_DONE; + if (!ifa || !(ifa->ifa_dev->dev)) + return NOTIFY_DONE; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + /* Filter notifications meant for non Broadcom devices */ + if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) && + (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) { +#if defined(WL_ENABLE_P2P_IF) + if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops)) +#endif /* WL_ENABLE_P2P_IF */ + return NOTIFY_DONE; + } +#endif /* LINUX_VERSION_CODE */ + + dhd = DHD_DEV_INFO(ifa->ifa_dev->dev); + if (!dhd) + return NOTIFY_DONE; + + dhd_pub = &dhd->pub; + + if (dhd_pub->arp_version == 1) { + idx = 0; + } else { + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev) + break; + } + if (idx < DHD_MAX_IFS) { + DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net, + dhd->iflist[idx]->name, dhd->iflist[idx]->idx)); + } else { + DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label)); + idx = 0; + } + } + + switch (event) { + case NETDEV_UP: + DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__)); + if (dhd->pend_ipaddr) { + DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n", + __FUNCTION__, dhd->pend_ipaddr)); + } + dhd->pend_ipaddr = ifa->ifa_address; + break; + } + +#ifdef AOE_IP_ALIAS_SUPPORT + DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n", + __FUNCTION__)); + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx); +#endif /* AOE_IP_ALIAS_SUPPORT */ + break; + + case NETDEV_DOWN: + DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + dhd->pend_ipaddr = 0; +#ifdef AOE_IP_ALIAS_SUPPORT + DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n", + __FUNCTION__)); + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx); +#else + dhd_aoe_hostip_clr(&dhd->pub, idx); + dhd_aoe_arp_clr(&dhd->pub, idx); +#endif /* AOE_IP_ALIAS_SUPPORT */ + break; + + default: + DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n", + __func__, ifa->ifa_label, event)); + break; + } + return NOTIFY_DONE; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +/* Neighbor Discovery Offload: defered handler */ +static void +dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event) +{ + struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data; + dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub; + int ret; + + if (event != DHD_WQ_WORK_IPV6_NDO) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!ndo_work) { + DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__)); + return; + } + + if (!pub) { + DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__)); + return; + } + + if (ndo_work->if_idx) { + DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx)); + return; + } + + switch (ndo_work->event) { + case NETDEV_UP: + DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__)); + ret = dhd_ndo_enable(pub, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret)); + } 
+ + ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx); + if (ret < 0) { + DHD_ERROR(("%s: Adding host ip for NDO failed %d\n", + __FUNCTION__, ret)); + } + break; + case NETDEV_DOWN: + DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__)); + ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx); + if (ret < 0) { + DHD_ERROR(("%s: Removing host ip for NDO failed %d\n", + __FUNCTION__, ret)); + goto done; + } + + ret = dhd_ndo_enable(pub, FALSE); + if (ret < 0) { + DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret)); + goto done; + } + break; + default: + DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__)); + break; + } +done: + /* free ndo_work. alloced while scheduling the work */ + kfree(ndo_work); + + return; +} + +/* + * Neighbor Discovery Offload: Called when an interface + * is assigned with ipv6 address. + * Handles only primary interface + */ +static int dhd_inet6addr_notifier_call(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + struct inet6_ifaddr *inet6_ifa = ptr; + struct in6_addr *ipv6_addr = &inet6_ifa->addr; + struct ipv6_work_info_t *ndo_info; + int idx = 0; /* REVISIT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + /* Filter notifications meant for non Broadcom devices */ + if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) { + return NOTIFY_DONE; + } +#endif /* LINUX_VERSION_CODE */ + + dhd = DHD_DEV_INFO(inet6_ifa->idev->dev); + if (!dhd) + return NOTIFY_DONE; + + if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev) + return NOTIFY_DONE; + dhd_pub = &dhd->pub; + + if (!FW_SUPPORTED(dhd_pub, ndoe)) + return NOTIFY_DONE; + + ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC); + if (!ndo_info) { + DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__)); + return NOTIFY_DONE; + } + + ndo_info->event = event; + ndo_info->if_idx = idx; + memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN); + + /* defer the work to thread as it may block kernel */ + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO, + dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW); + return NOTIFY_DONE; +} +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +int +dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + dhd_if_t *ifp; + struct net_device *net = NULL; + int err = 0; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 }; + + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + ASSERT(dhd && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; + net = ifp->net; + ASSERT(net && (ifp->idx == ifidx)); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->get_stats = dhd_get_stats; + net->do_ioctl = dhd_ioctl_entry; + net->hard_start_xmit = dhd_start_xmit; + net->set_mac_address = dhd_set_mac_address; + net->set_multicast_list = dhd_set_multicast_list; + net->open = net->stop = NULL; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &dhd_ops_virt; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + + /* Ok, link into the network layer... 
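+ * The virtual-interface netdev_ops were assigned above (the primary
+ * interface switches to dhd_ops_pri below); the MAC address, ethtool and
+ * wireless handlers are then filled in before register_netdev() (or
+ * register_netdevice(), when the caller already holds rtnl) is called.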
*/ + if (ifidx == 0) { + /* + * device functions for the primary interface only + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = dhd_open; + net->stop = dhd_stop; +#else + net->netdev_ops = &dhd_ops_pri; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + if (!ETHER_ISNULLADDR(dhd->pub.mac.octet)) + memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + } else { + /* + * We have to use the primary MAC for virtual interfaces + */ + memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN); + /* + * Android sets the locally administered bit to indicate that this is a + * portable hotspot. This will not work in simultaneous AP/STA mode, + * nor with P2P. Need to set the Donlge's MAC address, and then use that. + */ + if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr, + ETHER_ADDR_LEN)) { + DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n", + __func__, net->name)); + temp_addr[0] |= 0x02; + } + } + + net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + net->ethtool_ops = &dhd_ethtool_ops; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if defined(WL_WIRELESS_EXT) +#if WIRELESS_EXT < 19 + net->get_wireless_stats = dhd_get_wireless_stats; +#endif /* WIRELESS_EXT < 19 */ +#if WIRELESS_EXT > 12 + net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ +#endif /* defined(WL_WIRELESS_EXT) */ + + dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); + + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + if (ifidx == 0) + printf("%s\n", dhd_version); + + if (need_rtnl_lock) + err = register_netdev(net); + else + err = register_netdevice(net); + + if (err != 0) { + DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err)); + goto fail; + } + + + + printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name, +#if defined(CUSTOMER_HW4_DEBUG) + MAC2STRDBG(dhd->pub.mac.octet)); +#else + MAC2STRDBG(net->dev_addr)); +#endif /* CUSTOMER_HW4_DEBUG */ + +#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211) + wl_iw_iscan_set_scan_broadcast_prep(net, 1); +#endif + +#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(2, 6, 27)))) + if (ifidx == 0) { +#ifdef BCMLXSDMMC + up(&dhd_registration_sem); +#endif /* BCMLXSDMMC */ + if (!dhd_download_fw_on_driverload) { +#ifdef WL_CFG80211 + wl_terminate_event_handler(); +#endif /* WL_CFG80211 */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB && DHD_LB_RXP */ +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ + dhd_net_bus_devreset(net, TRUE); +#ifdef BCMLXSDMMC + dhd_net_bus_suspend(net); +#endif /* BCMLXSDMMC */ + wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY); + } + } +#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */ + return 0; + +fail: +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + return err; +} + +void +dhd_bus_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) { + + /* + * In case of Android cfg80211 driver, the bus is down in dhd_stop, + * calling stop again will cuase SD read/write errors. 
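+ * So the protocol and bus modules are only stopped below when the bus
+ * state still shows the bus is up.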
+ */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + /* Stop the bus module */ + dhd_bus_stop(dhd->pub.bus, TRUE); + } + +#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE) + dhd_bus_oob_intr_unregister(dhdp); +#endif + } + } +} + + +void dhd_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + unsigned long flags; + int timer_valid = FALSE; + struct net_device *dev; + + if (!dhdp) + return; + + dhd = (dhd_info_t *)dhdp->info; + if (!dhd) + return; + + dev = dhd->iflist[0]->net; + + if (dev) { + rtnl_lock(); + if (dev->flags & IFF_UP) { + /* If IFF_UP is still up, it indicates that + * "ifconfig wlan0 down" hasn't been called. + * So invoke dev_close explicitly here to + * bring down the interface. + */ + DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n")); + dev_close(dev); + } + rtnl_unlock(); + } + + DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); + + dhd->pub.up = 0; + if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { + /* Give sufficient time for threads to start running in case + * dhd_attach() has failed + */ + OSL_SLEEP(100); + } + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef PROP_TXSTATUS +#ifdef DHD_WLFC_THREAD + if (dhd->pub.wlfc_thread) { + kthread_stop(dhd->pub.wlfc_thread); + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); + } + dhd->pub.wlfc_thread = NULL; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + + if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { + + dhd_bus_detach(dhdp); +#ifdef BCMPCIE + if (is_reboot == SYS_RESTART) { + extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata; + if (dhd_wifi_platdata && !dhdp->dongle_reset) { + dhdpcie_bus_clock_stop(dhdp->bus); + wifi_platform_set_power(dhd_wifi_platdata->adapters, + FALSE, WIFI_TURNOFF_DELAY); + } + } +#endif /* BCMPCIE */ +#ifndef PCIE_FULL_DONGLE + if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) { + if (dhd->early_suspend.suspend) + unregister_early_suspend(&dhd->early_suspend); + } +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#if defined(WL_WIRELESS_EXT) + if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) { + /* Detatch and unlink in the iw */ + wl_iw_detach(); + } +#endif /* defined(WL_WIRELESS_EXT) */ + + /* delete all interfaces, start with virtual */ + if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { + int i = 1; + dhd_if_t *ifp; + + /* Cleanup virtual interfaces */ + dhd_net_if_lock_local(dhd); + for (i = 1; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) + dhd_remove_if(&dhd->pub, i, TRUE); + } + dhd_net_if_unlock_local(dhd); + + /* delete primary interface 0 */ + ifp = dhd->iflist[0]; + ASSERT(ifp); + ASSERT(ifp->net); + if (ifp && ifp->net) { + + + + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if 
(ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { +#ifdef SET_RPS_CPUS + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ + netif_tx_disable(ifp->net); + unregister_netdev(ifp->net); + } + ifp->net = NULL; +#ifdef DHD_WMF + dhd_wmf_cleanup(dhdp, 0); +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdp->tickcnt); + deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ + + dhd_if_del_sta_list(ifp); + + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + dhd->iflist[0] = NULL; + } + } + + /* Clear the watchdog timer */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + timer_valid = dhd->wd_timer_valid; + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + if (timer_valid) + del_timer_sync(&dhd->timer); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + + if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) { +#ifdef DHD_PCIE_RUNTIMEPM + if (dhd->thr_rpm_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_rpm_ctl); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_wdt_ctl); + } + + if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_rxf_ctl); + } + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_dpc_ctl); + } else { + tasklet_kill(&dhd->tasklet); +#ifdef DHD_LB_RXP + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + } + } + +#if defined(DHD_LB) + /* Kill the Load Balancing Tasklets */ +#if defined(DHD_LB_TXC) + tasklet_disable(&dhd->tx_compl_tasklet); + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + tasklet_disable(&dhd->rx_compl_tasklet); + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ + if (dhd->cpu_notifier.notifier_call != NULL) + unregister_cpu_notifier(&dhd->cpu_notifier); + dhd_cpumasks_deinit(dhd); +#endif /* DHD_LB */ + +#ifdef DHD_LOG_DUMP + dhd_log_dump_deinit(&dhd->pub); +#endif /* DHD_LOG_DUMP */ +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { + wl_cfg80211_detach(NULL); + dhd_monitor_uninit(); + } +#endif + /* free deferred work queue */ + dhd_deferred_work_deinit(dhd->dhd_deferred_wq); + dhd->dhd_deferred_wq = NULL; + +#ifdef SHOW_LOGTRACE + if (dhd->event_data.fmts) + kfree(dhd->event_data.fmts); + if (dhd->event_data.raw_fmts) + kfree(dhd->event_data.raw_fmts); + if (dhd->event_data.raw_sstr) + kfree(dhd->event_data.raw_sstr); +#endif /* SHOW_LOGTRACE */ + +#ifdef PNO_SUPPORT + if (dhdp->pno_state) + dhd_pno_deinit(dhdp); +#endif +#if defined(CONFIG_PM_SLEEP) + if (dhd_pm_notifier_registered) { + unregister_pm_notifier(&dhd->pm_notifier); + dhd_pm_notifier_registered = FALSE; + } +#endif /* CONFIG_PM_SLEEP */ + +#ifdef DEBUG_CPU_FREQ + if (dhd->new_freq) + free_percpu(dhd->new_freq); + dhd->new_freq = NULL; + cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); +#endif + if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { + DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_wd_counter = 0; + wake_lock_destroy(&dhd->wl_wdwake); +#endif /* CONFIG_HAS_WAKELOCK */ + DHD_OS_WAKE_LOCK_DESTROY(dhd); + } + + + +#ifdef DHDTCPACK_SUPPRESS + /* This will free all MEM allocated for TCPACK SUPPRESS */ + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_deinit(dhdp); + 
if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif + + + dhd_sysfs_exit(dhd); + dhd->pub.is_fw_download_done = FALSE; +} + + +void +dhd_free(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + int i; + for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { + if (dhdp->reorder_bufs[i]) { + reorder_info_t *ptr; + uint32 buf_size = sizeof(struct reorder_info); + + ptr = dhdp->reorder_bufs[i]; + + buf_size += ((ptr->max_idx + 1) * sizeof(void*)); + DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n", + i, ptr->max_idx, buf_size)); + + MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size); + dhdp->reorder_bufs[i] = NULL; + } + } + + dhd_sta_pool_fini(dhdp, DHD_MAX_STA); + + dhd = (dhd_info_t *)dhdp->info; + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } +#ifdef CACHE_FW_IMAGES + if (dhdp->cached_fw) { + MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize); + dhdp->cached_fw = NULL; + } + + if (dhdp->cached_nvram) { + MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE); + dhdp->cached_nvram = NULL; + } +#endif + /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */ + if (dhd && + dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE)) + MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); + dhd = NULL; + } +} + +void +dhd_clear(dhd_pub_t *dhdp) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + int i; +#ifdef DHDTCPACK_SUPPRESS + /* Clean up timer/data structure for any remaining/pending packet or timer. 
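+ * dhd_tcpack_info_tbl_clean() runs before the reorder buffers below are freed.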
*/ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { + if (dhdp->reorder_bufs[i]) { + reorder_info_t *ptr; + uint32 buf_size = sizeof(struct reorder_info); + + ptr = dhdp->reorder_bufs[i]; + + buf_size += ((ptr->max_idx + 1) * sizeof(void*)); + DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n", + i, ptr->max_idx, buf_size)); + + MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size); + dhdp->reorder_bufs[i] = NULL; + } + } + + dhd_sta_pool_clear(dhdp, DHD_MAX_STA); + + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } + } +} + +static void +dhd_module_cleanup(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_bus_unregister(); + + wl_android_exit(); + + dhd_wifi_platform_unregister_drv(); +} + +static void __exit +dhd_module_exit(void) +{ + dhd_buzzz_detach(); + dhd_module_cleanup(); + unregister_reboot_notifier(&dhd_reboot_notifier); +} + +static int __init +dhd_module_init(void) +{ + int err; + int retry = POWERUP_MAX_RETRY; + + DHD_ERROR(("%s in\n", __FUNCTION__)); + + dhd_buzzz_attach(); + + DHD_PERIM_RADIO_INIT(); + + + if (firmware_path[0] != '\0') { + strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN); + fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + + if (nvram_path[0] != '\0') { + strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN); + nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + + do { + err = dhd_wifi_platform_register_drv(); + if (!err) { + register_reboot_notifier(&dhd_reboot_notifier); + break; + } + else { + DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n", + __FUNCTION__, retry)); + strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN); + firmware_path[MOD_PARAM_PATHLEN-1] = '\0'; + strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN); + nvram_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + } while (retry--); + + if (err) { + DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__)); + } else { + if (!dhd_download_fw_on_driverload) { + dhd_driver_init_done = TRUE; + } + } + + DHD_ERROR(("%s out\n", __FUNCTION__)); + + return err; +} + +static int +dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused) +{ + DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code)); + if (code == SYS_RESTART) { +#ifdef BCMPCIE + is_reboot = code; +#endif /* BCMPCIE */ + } + return NOTIFY_DONE; +} + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) +#if defined(CONFIG_DEFERRED_INITCALLS) +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ + defined(CONFIG_ARCH_MSM8996) +deferred_module_init_sync(dhd_module_init); +#else +deferred_module_init(dhd_module_init); +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || + * CONFIG_ARCH_MSM8996 + */ +#elif defined(USE_LATE_INITCALL_SYNC) +late_initcall_sync(dhd_module_init); +#else +late_initcall(dhd_module_init); +#endif /* USE_LATE_INITCALL_SYNC */ +#else +module_init(dhd_module_init); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + +module_exit(dhd_module_exit); + +/* + * OS specific functions required to implement DHD driver in OS independent way + */ +int +dhd_os_proto_block(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + DHD_PERIM_UNLOCK(pub); + + 
down(&dhd->proto_sem); + + DHD_PERIM_LOCK(pub); + return 1; + } + + return 0; +} + +int +dhd_os_proto_unblock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + up(&dhd->proto_sem); + return 1; + } + + return 0; +} + +void +dhd_os_dhdiovar_lock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_lock(&dhd->dhd_iovar_mutex); + } +} + +void +dhd_os_dhdiovar_unlock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_unlock(&dhd->dhd_iovar_mutex); + } +} + +unsigned int +dhd_os_get_ioctl_resp_timeout(void) +{ + return ((unsigned int)dhd_ioctl_timeout_msec); +} + +void +dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) +{ + dhd_ioctl_timeout_msec = (int)timeout_msec; +} + +int +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); +#else + timeout = dhd_ioctl_timeout_msec * HZ / 1000; +#endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_ioctl_resp_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->ioctl_resp_wait); + return 0; +} + +int +dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); +#else + timeout = dhd_ioctl_timeout_msec * HZ / 1000; +#endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_d3ack_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->d3ack_wait); + return 0; +} + +int +dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Wait for bus usage contexts to gracefully exit within some timeout value + * Set time out to little higher than dhd_ioctl_timeout_msec, + * so that IOCTL timeout should not get affected. 
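+ * (DHD_BUS_BUSY_TIMEOUT used below is expected to be somewhat larger than
+ * dhd_ioctl_timeout_msec for this reason)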
+ */ + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout); + + return timeout; +} + +int INLINE +dhd_os_busbusy_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + /* Call wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + wake_up(&dhd->dhd_bus_busy_state_wait); + return 0; +} + +void +dhd_os_wd_timer_extend(void *bus, bool extend) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + + if (extend) + dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL); + else + dhd_os_wd_timer(bus, dhd->default_wd_interval); +} + + +void +dhd_os_wd_timer(void *bus, uint wdtick) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__)); + return; + } + + DHD_OS_WD_WAKE_LOCK(pub); + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the wd until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN) { + DHD_GENERAL_UNLOCK(pub, flags); + if (!wdtick) + DHD_OS_WD_WAKE_UNLOCK(pub); + return; + } + + /* Totally stop the timer */ + if (!wdtick && dhd->wd_timer_valid == TRUE) { + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->timer); + DHD_OS_WD_WAKE_UNLOCK(pub); + return; + } + + if (wdtick) { + DHD_OS_WD_WAKE_LOCK(pub); + dhd_watchdog_ms = (uint)wdtick; + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + dhd->wd_timer_valid = TRUE; + } + DHD_GENERAL_UNLOCK(pub, flags); + DHD_OS_WD_WAKE_UNLOCK(pub); +} + +#ifdef DHD_PCIE_RUNTIMEPM +void +dhd_os_runtimepm_timer(void *bus, uint tick) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the RPM until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN || + pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(pub, flags); + return; + } + + /* If tick is non-zero, the request is to start the timer */ + if (tick) { + /* Start the timer only if its not already running */ + if (dhd->rpm_timer_valid == FALSE) { + mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms)); + dhd->rpm_timer_valid = TRUE; + } + } else { + /* tick is zero, we have to stop the timer */ + /* Stop the timer only if its running, otherwise we don't have to do anything */ + if (dhd->rpm_timer_valid == TRUE) { + dhd->rpm_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->rpm_timer); + /* we have already released the lock, so just go to exit */ + goto exit; + } + } + + DHD_GENERAL_UNLOCK(pub, flags); +exit: + return; + +} + +#endif /* DHD_PCIE_RUNTIMEPM */ + +void * +dhd_os_open_image(char *filename) +{ + struct file *fp; + int size; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? 
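+ * Typical firmware/NVRAM download flow using these helpers (sketch only,
+ * names illustrative):
+ *   void *img = dhd_os_open_image(path);
+ *   while (img && (len = dhd_os_get_image_block(buf, sizeof(buf), img)) > 0)
+ *           ... push the block to the dongle ...
+ *   dhd_os_close_image(img);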
+ */ + if (IS_ERR(fp)) { + fp = NULL; + goto err; + } + + if (!S_ISREG(file_inode(fp)->i_mode)) { + DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename)); + fp = NULL; + goto err; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); + fp = NULL; + goto err; + } + + DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); + +err: + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + int size; + + if (!image) + return 0; + + size = i_size_read(file_inode(fp)); + rdlen = kernel_read(fp, buf, MIN(len, size), &fp->f_pos); + + if (len >= size && size != rdlen) { + return -EIO; + } + + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +dhd_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd_dpc_prio >= 0) + down(&dhd->sdsem); + else + spin_lock_bh(&dhd->sdlock); +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd_dpc_prio >= 0) + up(&dhd->sdsem); + else + spin_unlock_bh(&dhd->sdlock); +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->txqlock); +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->txqlock); +} + +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +static void +dhd_os_rxflock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->rxf_lock); + +} + +static void +dhd_os_rxfunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->rxf_lock); +} + +#ifdef DHDTCPACK_SUPPRESS +unsigned long +dhd_os_tcpacklock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + unsigned long flags = 0; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef BCMSDIO + spin_lock_bh(&dhd->tcpack_lock); +#else + spin_lock_irqsave(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } + + return flags; +} + +void +dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd; + +#ifdef BCMSDIO + BCM_REFERENCE(flags); +#endif /* BCMSDIO */ + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef BCMSDIO + spin_lock_bh(&dhd->tcpack_lock); +#else + spin_unlock_irqrestore(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } +} +#endif /* DHDTCPACK_SUPPRESS */ + +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail) +{ + uint8* buf; + gfp_t flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; + + buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size); + if (buf == NULL && kmalloc_if_fail) + buf = kmalloc(size, flags); + + return buf; +} + +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size) +{ +} + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics * +dhd_get_wireless_stats(struct net_device *dev) +{ + int res = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!dhd->pub.up) { + return NULL; + } + + res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); + + if (res == 0) + return &dhd->iw.wstats; + else + return NULL; +} +#endif /* defined(WL_WIRELESS_EXT) */ + +static int +dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, size_t pktlen, + wl_event_msg_t *event, void **data) +{ + int bcmerror = 0; + ASSERT(dhd != NULL); + +#ifdef SHOW_LOGTRACE + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, pktlen, event, data, &dhd->event_data); +#else + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, pktlen, event, data, NULL); +#endif /* SHOW_LOGTRACE */ + + if (bcmerror != BCME_OK) + return (bcmerror); + +#if defined(WL_WIRELESS_EXT) + if (event->bsscfgidx == 0) { + /* + * Wireless ext is on primary interface only + */ + + ASSERT(dhd->iflist[*ifidx] != NULL); + ASSERT(dhd->iflist[*ifidx]->net != NULL); + + if (dhd->iflist[*ifidx]->net) { + wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); + } + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef WL_CFG80211 + ASSERT(dhd->iflist[*ifidx] != NULL); + ASSERT(dhd->iflist[*ifidx]->net != NULL); + if (dhd->iflist[*ifidx]->net) + wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data); +#endif /* defined(WL_CFG80211) */ + + return (bcmerror); +} + +/* send up locally generated event */ +void +dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +{ + switch (ntoh32(event->event_type)) { + + default: + break; + } +} + +#ifdef LOG_INTO_TCPDUMP +void +dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) +{ + struct sk_buff *p, *skb; + uint32 pktlen; + int len; + dhd_if_t *ifp; + dhd_info_t *dhd; + uchar *skb_data; + int ifidx = 0; + struct ether_header eth; + + pktlen = sizeof(eth) + data_len; + dhd = dhdp->info; + + if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { + ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); + + bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN); + bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN); + ETHER_TOGGLE_LOCALADDR(&eth.ether_shost); + eth.ether_type = hton16(ETHER_TYPE_BRCM); + + bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth)); + bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len); + skb = PKTTONATIVE(dhdp->osh, p); + skb_data = skb->data; + len = skb->len; + + ifidx = dhd_ifname2idx(dhd, "wlan0"); + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->data = skb_data; + skb->len = len; + + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + /* Send the packet */ + if (in_interrupt()) { + netif_rx(skb); + } else { + netif_rx_ni(skb); + } + } + else { + /* Could not allocate a sk_buf */ + DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__)); + } +} +#endif /* LOG_INTO_TCPDUMP */ + +void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) +{ +#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(2, 6, 27)) + int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT); +#else + int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + + dhd_os_sdunlock(dhd); + wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); + dhd_os_sdlock(dhd); +#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + return; +} + +void dhd_wait_event_wakeup(dhd_pub_t *dhd) +{ +#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + if (waitqueue_active(&dhdinfo->ctrl_wait)) + wake_up(&dhdinfo->ctrl_wait); +#endif + return; +} + +#if defined(BCMSDIO) || defined(BCMPCIE) +int +dhd_net_bus_devreset(struct net_device *dev, uint8 flag) +{ + int ret; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (flag == TRUE) { + /* Issue wl down command before resetting the chip */ + if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); + } +#ifdef PROP_TXSTATUS + if (dhd->pub.wlfc_enabled) + dhd_wlfc_deinit(&dhd->pub); +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) + dhd_pno_deinit(&dhd->pub); +#endif + } + +#ifdef BCMSDIO + if (!flag) { + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path); + } +#endif /* BCMSDIO */ + + ret = dhd_bus_devreset(&dhd->pub, flag); + if (ret) { + DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + return ret; + } + + return ret; +} + +#ifdef BCMSDIO +int +dhd_net_bus_suspend(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_suspend(&dhd->pub); +} + +int +dhd_net_bus_resume(struct net_device *dev, uint8 stage) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_resume(&dhd->pub, stage); +} + +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMPCIE */ + +int net_os_set_suspend_disable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + ret = dhd->pub.suspend_disable_flag; + dhd->pub.suspend_disable_flag = val; + } + return ret; +} + +int net_os_set_suspend(struct net_device *dev, int val, int force) +{ + int ret = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) { +#ifdef CONFIG_MACH_UNIVERSAL7420 +#endif /* CONFIG_MACH_UNIVERSAL7420 */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + ret = dhd_set_suspend(val, &dhd->pub); +#else + ret = dhd_suspend_resume_helper(dhd, val, force); +#endif +#ifdef WL_CFG80211 + wl_cfg80211_update_power_mode(dev); +#endif + } + return ret; +} + +int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) + dhd->pub.suspend_bcn_li_dtim = val; + + return 0; +} + +#ifdef PKT_FILTER_SUPPORT +int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) +{ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + return 0; +#else + dhd_info_t *dhd = DHD_DEV_INFO(dev); + char *filterp = NULL; + int filter_id = 0; + int ret = 0; + + DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) + return ret; + if (num >= dhd->pub.pktfilter_count) + return -EINVAL; + switch (num) { + case DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + filter_id = 101; + break; + case DHD_MULTICAST4_FILTER_NUM: + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + 
filter_id = 102; + break; + case DHD_MULTICAST6_FILTER_NUM: + filterp = "103 0 0 0 0xFFFF 0x3333"; + filter_id = 103; + break; + case DHD_MDNS_FILTER_NUM: + filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + filter_id = 104; + break; + default: + return -EINVAL; + } + + /* Add filter */ + if (add_remove) { + dhd->pub.pktfilter[num] = filterp; + dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]); + } else { /* Delete filter */ + if (dhd->pub.pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(&dhd->pub, filter_id); + dhd->pub.pktfilter[num] = NULL; + } + } + return ret; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ +} + +int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) + +{ + int ret = 0; + + /* Packet filtering is set only if we still in early-suspend and + * we need either to turn it ON or turn it OFF + * We can always turn it OFF in case of early-suspend, but we turn it + * back ON only if suspend_disable_flag was not set + */ + if (dhdp && dhdp->up) { + if (dhdp->in_suspend) { + if (!val || (val && !dhdp->suspend_disable_flag)) + dhd_enable_packet_filter(val, dhdp); + } + } + return ret; +} + +/* function to enable/disable packet for Network device */ +int net_os_enable_packet_filter(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val)); + return dhd_os_enable_packet_filter(&dhd->pub, val); +} +#endif /* PKT_FILTER_SUPPORT */ + +int +dhd_dev_init_ioctl(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret; + + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) + goto done; + +done: + return ret; +} + +int +dhd_dev_get_feature_set(struct net_device *dev) +{ + dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhd = (&ptr->pub); + int feature_set = 0; + +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ + if (!dhd) + return feature_set; + + if (FW_SUPPORTED(dhd, sta)) + feature_set |= WIFI_FEATURE_INFRA; + if (FW_SUPPORTED(dhd, dualband)) + feature_set |= WIFI_FEATURE_INFRA_5G; + if (FW_SUPPORTED(dhd, p2p)) + feature_set |= WIFI_FEATURE_P2P; + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) + feature_set |= WIFI_FEATURE_SOFT_AP; + if (FW_SUPPORTED(dhd, tdls)) + feature_set |= WIFI_FEATURE_TDLS; + if (FW_SUPPORTED(dhd, vsdb)) + feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL; + if (FW_SUPPORTED(dhd, nan)) { + feature_set |= WIFI_FEATURE_NAN; + /* NAN is essentail for d2d rtt */ + if (FW_SUPPORTED(dhd, rttd2d)) + feature_set |= WIFI_FEATURE_D2D_RTT; + } +#ifdef RTT_SUPPORT + feature_set |= WIFI_FEATURE_D2AP_RTT; +#endif /* RTT_SUPPORT */ +#ifdef LINKSTAT_SUPPORT + feature_set |= WIFI_FEATURE_LINKSTAT; +#endif /* LINKSTAT_SUPPORT */ + /* Supports STA + STA always */ + feature_set |= WIFI_FEATURE_ADDITIONAL_STA; +#ifdef PNO_SUPPORT + if (dhd_is_pno_supported(dhd)) { + feature_set |= WIFI_FEATURE_PNO; + feature_set |= WIFI_FEATURE_BATCH_SCAN; +#ifdef GSCAN_SUPPORT + feature_set |= WIFI_FEATURE_GSCAN; +#endif /* GSCAN_SUPPORT */ + } +#endif /* PNO_SUPPORT */ +#ifdef WL11U + feature_set |= WIFI_FEATURE_HOTSPOT; +#endif /* WL11U */ + return feature_set; +} + + +int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num) +{ + int feature_set_full, mem_needed; + int *ret; + + *num = 0; + mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS; + ret = (int *) kmalloc(mem_needed, GFP_KERNEL); + if (!ret) { + DHD_ERROR(("%s: failed to 
allocate %d bytes\n", __FUNCTION__, + mem_needed)); + return ret; + } + + feature_set_full = dhd_dev_get_feature_set(dev); + + ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_PNO) | + (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | + (feature_set_full & WIFI_FEATURE_GSCAN) | + (feature_set_full & WIFI_FEATURE_HOTSPOT) | + (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) | + (feature_set_full & WIFI_FEATURE_EPR); + + ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + /* Not yet verified NAN with P2P */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + (feature_set_full & WIFI_FEATURE_P2P) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_EPR); + + ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) | + (feature_set_full & WIFI_FEATURE_EPR); + *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS; + + return ret; +} +#ifdef CUSTOM_FORCE_NODFS_FLAG +int +dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (nodfs) + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + else + dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; + return 0; +} +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef PNO_SUPPORT +/* Linux wrapper to call common dhd_pno_stop_for_ssid */ +int +dhd_dev_pno_stop_for_ssid(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + return (dhd_pno_stop_for_ssid(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_set_for_ssid */ +int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr, + pno_repeat, pno_freq_expo_max, channel_list, nchan)); +} + +/* Linux wrapper to call common dhd_pno_enable */ +int +dhd_dev_pno_enable(struct net_device *dev, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + return (dhd_pno_enable(&dhd->pub, enable)); +} + +/* Linux wrapper to call common dhd_pno_set_for_hotlist */ +int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params)); +} +/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */ +int +dhd_dev_pno_stop_for_batch(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_stop_for_batch(&dhd->pub)); +} +/* Linux wrapper to call common dhd_dev_pno_set_for_batch */ +int +dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); +} +/* Linux wrapper to call common dhd_dev_pno_get_for_batch */ +int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, 
int bufsize) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); +} +/* Linux wrapper to call common dhd_pno_set_mac_oui */ +int +dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_mac_oui(&dhd->pub, oui)); +} +#endif /* PNO_SUPPORT */ + +#if defined(PNO_SUPPORT) +#ifdef GSCAN_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); +} + +/* Linux wrapper to call common dhd_pno_get_gscan */ +void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); +} + +/* Linux wrapper to call common dhd_wait_batch_results_complete */ +void +dhd_dev_wait_batch_results_complete(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_wait_batch_results_complete(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_lock_batch_results */ +void +dhd_dev_pno_lock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_lock_batch_results(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_unlock_batch_results */ +void +dhd_dev_pno_unlock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_unlock_batch_results(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_initiate_gscan_request */ +int +dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush)); +} + +/* Linux wrapper to call common dhd_pno_enable_full_scan_result */ +int +dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); +} + +/* Linux wrapper to call common dhd_handle_swc_evt */ +void * +dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes)); +} + +/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ +void * +dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type)); +} + +/* Linux wrapper to call common dhd_process_full_gscan_result */ +void * +dhd_dev_process_full_gscan_result(struct net_device *dev, +const void *data, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes)); +} + +void +dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type); + + return; +} + +int +dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev) +{ + dhd_info_t *dhd = 
*(dhd_info_t **)netdev_priv(dev); + + return (dhd_gscan_batch_cache_cleanup(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_retreive_batch_scan_results */ +int +dhd_dev_retrieve_batch_scan(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_retreive_batch_scan_results(&dhd->pub)); +} +#endif /* GSCAN_SUPPORT */ +#endif +#ifdef RTT_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_set_cfg(&dhd->pub, buf)); +} +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); +} +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); +} +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); +} + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_capability(&dhd->pub, capa)); +} + +#endif /* RTT_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) +{ + dhd_info_t *dhd; + struct net_device *dev; + + dhd = (dhd_info_t *)dhd_info; + dev = dhd->iflist[0]->net; + + if (dev) { +#if defined(WL_WIRELESS_EXT) + wl_iw_send_priv_event(dev, "HANG"); +#endif +#if defined(WL_CFG80211) + wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif + } +} + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +extern dhd_pub_t *link_recovery; +void dhd_host_recover_link(void) +{ + DHD_ERROR(("****** %s ******\n", __FUNCTION__)); + link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_bus_set_linkdown(link_recovery, TRUE); + dhd_os_send_hang_message(link_recovery); +} +EXPORT_SYMBOL(dhd_host_recover_link); +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + +int dhd_os_send_hang_message(dhd_pub_t *dhdp) +{ + int ret = 0; + if (dhdp) { + if (!dhdp->hang_was_sent) { + dhdp->hang_was_sent = 1; + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp, + DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH); + } + } + return ret; +} + +int net_os_send_hang_message(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + /* Report FW problem when enabled */ + if (dhd->pub.hang_report) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + ret = dhd_os_send_hang_message(&dhd->pub); +#else + ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif + } else { + DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", + __FUNCTION__)); + /* Enforce bus down to stop any future traffic */ + dhd->pub.busstate = DHD_BUS_DOWN; + } + } + return ret; +} + +int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + int reason; + + dhd = DHD_DEV_INFO(dev); + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return 0; + } + + reason = bcm_strtoul(string_num, NULL, 0); + DHD_INFO(("%s: Enter, 
reason=0x%x\n", __FUNCTION__, reason)); + + if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + reason = 0; + } + + dhdp->hang_reason = reason; + + return net_os_send_hang_message(dev); +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ + + +int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return wifi_platform_set_power(dhd->adapter, on, delay_msec); +} + +bool dhd_force_country_change(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd && dhd->pub.up) + return dhd->pub.force_country_change; + return FALSE; +} +void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, country_iso_code, cspec, + dhd->pub.dhd_cflags); +#else + get_customized_country_code(dhd->adapter, country_iso_code, cspec); +#endif /* CUSTOM_COUNTRY_CODE */ +} +void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (dhd && dhd->pub.up) { + memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); +#ifdef WL_CFG80211 + wl_update_wiphybands(NULL, notify); +#endif + } +} + +void dhd_bus_band_set(struct net_device *dev, uint band) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (dhd && dhd->pub.up) { +#ifdef WL_CFG80211 + wl_update_wiphybands(NULL, true); +#endif + } +} + +int dhd_net_set_fw_path(struct net_device *dev, char *fw) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!fw || fw[0] == '\0') + return -EINVAL; + + strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1); + dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0'; + +#if defined(SOFTAP) + if (strstr(fw, "apsta") != NULL) { + DHD_INFO(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + DHD_INFO(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } +#endif + return 0; +} + +void dhd_net_if_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_lock_local(dhd); +} + +void dhd_net_if_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_unlock_local(dhd); +} + +static void dhd_net_if_lock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_lock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_net_if_unlock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_unlock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_suspend_lock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_lock(&dhd->dhd_suspend_mutex); +#endif +} + +static void dhd_suspend_unlock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_unlock(&dhd->dhd_suspend_mutex); +#endif +} + +unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags = 0; + + if (dhd) + spin_lock_irqsave(&dhd->dhd_lock, flags); + + return flags; +} + +void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) + spin_unlock_irqrestore(&dhd->dhd_lock, flags); +} + +/* Linux specific multipurpose spinlock API */ +void * +dhd_os_spin_lock_init(osl_t *osh) +{ + /* 
Adding 4 bytes since the sizeof(spinlock_t) could be 0 */ + /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */ + /* and this results in kernel asserts in internal builds */ + spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4); + if (lock) + spin_lock_init(lock); + return ((void *)lock); +} +void +dhd_os_spin_lock_deinit(osl_t *osh, void *lock) +{ + if (lock) + MFREE(osh, lock, sizeof(spinlock_t) + 4); +} +unsigned long +dhd_os_spin_lock(void *lock) +{ + unsigned long flags = 0; + + if (lock) + spin_lock_irqsave((spinlock_t *)lock, flags); + + return flags; +} +void +dhd_os_spin_unlock(void *lock, unsigned long flags) +{ + if (lock) + spin_unlock_irqrestore((spinlock_t *)lock, flags); +} + +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); +} + +#define MAX_WAIT_FOR_8021X_TX 100 + +int +dhd_wait_pend8021x(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int timeout = msecs_to_jiffies(10); + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); + + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + DHD_PERIM_UNLOCK(&dhd->pub); + schedule_timeout(timeout); + DHD_PERIM_LOCK(&dhd->pub); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); + } + if (ntimes == 0) + { + atomic_set(&dhd->pend_8021x_cnt, 0); + DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__)); + } + return pend; +} + +#ifdef DHD_DEBUG +static void +dhd_convert_memdump_type_to_str(uint32 type, char *buf) +{ + char *type_str = NULL; + + switch (type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + type_str = "resumed_on_timeout"; + break; + case DUMP_TYPE_D3_ACK_TIMEOUT: + type_str = "D3_ACK_timeout"; + break; + case DUMP_TYPE_DONGLE_TRAP: + type_str = "Dongle_Trap"; + break; + case DUMP_TYPE_MEMORY_CORRUPTION: + type_str = "Memory_Corruption"; + break; + case DUMP_TYPE_PKTID_AUDIT_FAILURE: + type_str = "PKTID_AUDIT_Fail"; + break; + case DUMP_TYPE_SCAN_TIMEOUT: + type_str = "SCAN_timeout"; + break; + case DUMP_TYPE_SCAN_BUSY: + type_str = "SCAN_Busy"; + break; + case DUMP_TYPE_BY_SYSDUMP: + type_str = "BY_SYSDUMP"; + break; + case DUMP_TYPE_BY_LIVELOCK: + type_str = "BY_LIVELOCK"; + break; + case DUMP_TYPE_AP_LINKUP_FAILURE: + type_str = "BY_AP_LINK_FAILURE"; + break; + default: + type_str = "Unknown_type"; + break; + } + + strncpy(buf, type_str, strlen(type_str)); + buf[strlen(type_str)] = 0; +} + +int +write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) +{ + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + char memdump_path[128]; + char memdump_type[32]; + struct timeval curtime; + uint32 file_mode; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Init file name */ + memset(memdump_path, 0, sizeof(memdump_path)); + memset(memdump_type, 0, sizeof(memdump_type)); + do_gettimeofday(&curtime); + dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type); +#ifdef CUSTOMER_HW4_DEBUG + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + DHD_COMMON_DUMP_PATH "mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#elif defined(CUSTOMER_HW2) + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + "/data/misc/wifi/mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#else + snprintf(memdump_path, sizeof(memdump_path), 
"%s_%s_%ld.%ld", + "/installmedia/mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are + * calling BUG_ON immediately after collecting the socram dump. + * So the file write operation should directly write the contents into the + * file instead of caching it. O_TRUNC flag ensures that file will be re-written + * instead of appending. + */ + file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC; +#endif /* CUSTOMER_HW4_DEBUG */ + + /* print SOCRAM dump file path */ + DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path)); + + /* open file to write */ + fp = filp_open(memdump_path, file_mode, 0644); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + printf("%s: open file error, err = %d\n", __FUNCTION__, ret); + goto exit; + } + + /* Write buf to file */ + fp->f_op->write(fp, buf, size, &pos); + +exit: + /* close file before return */ + if (!ret) + filp_close(fp, current->files); + + /* restore previous address limit */ + set_fs(old_fs); + + /* free buf before return */ +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd, buf, size); +#else + MFREE(dhd->osh, buf, size); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + + return ret; +} +#endif /* DHD_DEBUG */ + +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? + dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wakelock_rx_timeout_enable) + wake_lock_timeout(&dhd->wl_rxwake, + msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); + if (dhd->wakelock_ctrl_timeout_enable) + wake_lock_timeout(&dhd->wl_ctrlwake, + msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); +#endif + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int net_os_wake_lock_timeout(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_rx_timeout_enable) + dhd->wakelock_rx_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_ctrl_timeout_enable) + dhd->wakelock_ctrl_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + if (wake_lock_active(&dhd->wl_ctrlwake)) + wake_unlock(&dhd->wl_ctrlwake); +#endif + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int 
net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val); + return ret; +} + +int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val); + return ret; +} + + +#if defined(DHD_TRACE_WAKE_LOCK) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#include +#else +#include +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +/* Define 2^5 = 32 bucket size hash table */ +DEFINE_HASHTABLE(wklock_history, 5); +#else +/* Define 2^5 = 32 bucket size hash table */ +struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT }; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + +int trace_wklock_onoff = 1; + +typedef enum dhd_wklock_type { + DHD_WAKE_LOCK, + DHD_WAKE_UNLOCK, + DHD_WAIVE_LOCK, + DHD_RESTORE_LOCK +} dhd_wklock_t; + +struct wk_trace_record { + unsigned long addr; /* Address of the instruction */ + dhd_wklock_t lock_type; /* lock_type */ + unsigned long long counter; /* counter information */ + struct hlist_node wklock_node; /* hash node */ +}; + + +static struct wk_trace_record *find_wklock_entry(unsigned long addr) +{ + struct wk_trace_record *wklock_info; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr) +#else + struct hlist_node *entry; + int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history))); + hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + if (wklock_info->addr == addr) { + return wklock_info; + } + } + return NULL; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#define HASH_ADD(hashtable, node, key) \ + do { \ + hash_add(hashtable, node, key); \ + } while (0); +#else +#define HASH_ADD(hashtable, node, key) \ + do { \ + int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \ + hlist_add_head(node, &hashtable[index]); \ + } while (0); +#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */ + +#define STORE_WKLOCK_RECORD(wklock_type) \ + do { \ + struct wk_trace_record *wklock_info = NULL; \ + unsigned long func_addr = (unsigned long)__builtin_return_address(0); \ + wklock_info = find_wklock_entry(func_addr); \ + if (wklock_info) { \ + if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + } else { \ + wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \ + if (!wklock_info) {\ + printk("Can't allocate wk_trace_record \n"); \ + } else { \ + wklock_info->addr = func_addr; \ + wklock_info->lock_type = wklock_type; \ + if (wklock_type == DHD_WAIVE_LOCK || \ + wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \ + } \ + } \ + } while (0); + +static inline void dhd_wk_lock_rec_dump(void) +{ + int bkt; + struct wk_trace_record *wklock_info; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each(wklock_history, bkt, wklock_info, wklock_node) +#else + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); + for (bkt = 0; bkt < max_index; bkt++) + 
hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + switch (wklock_info->lock_type) { + case DHD_WAKE_LOCK: + DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAKE_UNLOCK: + DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAIVE_LOCK: + DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_RESTORE_LOCK: + DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + } + } +} + +static void dhd_wk_lock_trace_init(struct dhd_info *dhd) +{ + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + int i; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_init(wklock_history); +#else + for (i = 0; i < ARRAY_SIZE(wklock_history); i++) + INIT_HLIST_HEAD(&wklock_history[i]); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) +{ + int bkt; + struct wk_trace_record *wklock_info; + struct hlist_node *tmp; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) +#else + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry_safe(wklock_info, entry, tmp, + &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_del(&wklock_info->wklock_node); +#else + hlist_del_init(&wklock_info->wklock_node); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + kfree(wklock_info); + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + unsigned long flags; + + DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n")); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd_wk_lock_rec_dump(); + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter)); +} +#else +#define STORE_WKLOCK_RECORD(wklock_type) +#endif /* ! 
DHD_TRACE_WAKE_LOCK */ + +int dhd_os_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd->wakelock_counter++; + ret = dhd->wakelock_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + + return ret; +} + +int dhd_event_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); + if (dhd->wakelock_event_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } + dhd->wakelock_event_counter++; + ret = dhd->wakelock_event_counter; + spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); + } + + return ret; +} + +int net_os_wake_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; +} + +int dhd_os_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + dhd_os_wake_lock_timeout(pub); + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + if (dhd->wakelock_counter > 0) { + dhd->wakelock_counter--; +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif + } + ret = dhd->wakelock_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_event_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); + + if (dhd->wakelock_event_counter > 0) { + dhd->wakelock_event_counter--; + if (dhd->wakelock_event_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif + } + ret = dhd->wakelock_event_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); + } + return ret; +} + +int dhd_os_check_wakelock(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) + dhd_info_t *dhd; + + if (!pub) + return 0; + dhd = (dhd_info_t *)(pub->info); +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ + if (dhd && (wake_lock_active(&dhd->wl_wifi) || + (wake_lock_active(&dhd->wl_wdwake)))) + return 1; +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) + return 1; +#endif + return 
0; +} + +int +dhd_os_check_wakelock_all(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + int l1, l2, l3, l4, l7; + int l5 = 0, l6 = 0; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) + dhd_info_t *dhd; + + if (!pub) { + return 0; + } + dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; + } +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + c = dhd->wakelock_counter; + l1 = wake_lock_active(&dhd->wl_wifi); + l2 = wake_lock_active(&dhd->wl_wdwake); + l3 = wake_lock_active(&dhd->wl_rxwake); + l4 = wake_lock_active(&dhd->wl_ctrlwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + l5 = wake_lock_active(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + l6 = wake_lock_active(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + l7 = wake_lock_active(&dhd->wl_evtwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7); + + /* Indicate to the Host to avoid going to suspend if internal locks are up */ + if (dhd && lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " + "ctl-%d intr-%d scan-%d evt-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7)); + return 1; + } +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { + return 1; + } +#endif /* CONFIG_HAS_WAKELOCK */ + return 0; +} + +int net_os_wake_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} + +int dhd_os_wd_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#ifdef CONFIG_HAS_WAKELOCK + /* if wakelock_wd_counter was never used : lock it at once */ + if (!dhd->wakelock_wd_counter) + wake_lock(&dhd->wl_wdwake); +#endif + dhd->wakelock_wd_counter++; + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wd_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter) { + dhd->wakelock_wd_counter = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wdwake); +#endif + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +void +dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_intrwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_intrwake)) { + wake_unlock(&dhd->wl_intrwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_USE_SCAN_WAKELOCK +void +dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void 
+dhd_os_scan_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_scanwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_scanwake)) { + wake_unlock(&dhd->wl_scanwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* DHD_USE_SCAN_WAKELOCK */ + +/* waive wakelocks for operations such as IOVARs in suspend function, must be closed + * by a paired function call to dhd_wakelock_restore. returns current wakelock counter + */ +int dhd_os_wake_lock_waive(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (dhd->waive_wakelock == FALSE) { +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + /* record current lock status */ + dhd->wakelock_before_waive = dhd->wakelock_counter; + dhd->waive_wakelock = TRUE; + } + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wake_lock_restore(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (!dhd) + return 0; + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (!dhd->waive_wakelock) + goto exit; + + dhd->waive_wakelock = FALSE; + /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore, + * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases + * the lock in between, do the same by calling wake_unlock or pm_relax + */ +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + + if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(&dhd->pub); +#endif + } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(&dhd->pub); +#endif + } + dhd->wakelock_before_waive = 0; +exit: + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + return ret; +} + +void dhd_os_wake_lock_init(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); + dhd->wakelock_event_counter = 0; + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake"); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_init(dhd); +#endif 
/* DHD_TRACE_WAKE_LOCK */ +} + +void dhd_os_wake_lock_destroy(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_event_counter = 0; + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + wake_lock_destroy(&dhd->wl_wifi); + wake_lock_destroy(&dhd->wl_rxwake); + wake_lock_destroy(&dhd->wl_ctrlwake); + wake_lock_destroy(&dhd->wl_evtwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_destroy(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_destroy(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_deinit(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +} + +bool dhd_os_check_if_up(dhd_pub_t *pub) +{ + if (!pub) + return FALSE; + return pub->up; +} + +#if defined(BCMSDIO) +/* function to collect firmware, chip id and chip version info */ +void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) +{ + int i; + + i = snprintf(info_string, sizeof(info_string), + " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw); + + if (!dhdp) + return; + + i = snprintf(&info_string[i], sizeof(info_string) - i, + "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp), + dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp)); +} +#endif /* defined(BCMSDIO) */ +int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) +{ + int ifidx; + int ret = 0; + dhd_info_t *dhd = NULL; + + if (!net || !DEV_PRIV(net)) { + DHD_ERROR(("%s invalid parameter\n", __FUNCTION__)); + return -EINVAL; + } + + dhd = DHD_DEV_INFO(net); + if (!dhd) + return -EINVAL; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); + dhd_check_hang(net, &dhd->pub, ret); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return ret; +} + +bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) +{ + struct net_device *net; + + net = dhd_idx2net(dhdp, ifidx); + if (!net) { + DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + return dhd_check_hang(net, dhdp, ret); +} + +/* Return instance */ +int dhd_get_instance(dhd_pub_t *dhdp) +{ + return dhdp->info->unit; +} + + +#ifdef PROP_TXSTATUS + +void dhd_wlfc_plat_init(void *dhd) +{ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + return; +} + +void dhd_wlfc_plat_deinit(void *dhd) +{ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + return; +} + +bool dhd_wlfc_skip_fc(void) +{ +#ifdef SKIP_WLFC_ON_CONCURRENT +#ifdef WL_CFG80211 + + /* enable flow control in vsdb mode */ + return !(wl_cfg80211_is_concurrent_mode()); +#else + return TRUE; /* skip flow control */ +#endif /* WL_CFG80211 */ + +#else + return FALSE; +#endif /* SKIP_WLFC_ON_CONCURRENT */ +} +#endif /* PROP_TXSTATUS */ + +#ifdef BCMDBGFS +#include + +typedef struct dhd_dbgfs { + struct dentry *debugfs_dir; + struct dentry *debugfs_mem; + dhd_pub_t *dhdp; + uint32 size; +} dhd_dbgfs_t; + +dhd_dbgfs_t g_dbgfs; + +extern uint32 dhd_readregl(void *bp, uint32 addr); +extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); + +static int 
+dhd_dbg_state_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +dhd_dbg_state_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + ssize_t rval; + uint32 tmp; + loff_t pos = *ppos; + size_t ret; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */ + tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); + + ret = copy_to_user(ubuf, &tmp, 4); + if (ret == count) + return -EFAULT; + + count -= ret; + *ppos = pos + count; + rval = count; + + return rval; +} + + +static ssize_t +dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t ret; + uint32 buf; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + ret = copy_from_user(&buf, ubuf, sizeof(uint32)); + if (ret == count) + return -EFAULT; + + /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ + dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); + + return count; +} + + +loff_t +dhd_debugfs_lseek(struct file *file, loff_t off, int whence) +{ + loff_t pos = -1; + + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = g_dbgfs.size - off; + } + return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos); +} + +static const struct file_operations dhd_dbg_state_ops = { + .read = dhd_dbg_state_read, + .write = dhd_debugfs_write, + .open = dhd_dbg_state_open, + .llseek = dhd_debugfs_lseek +}; + +static void dhd_dbg_create(void) +{ + if (g_dbgfs.debugfs_dir) { + g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, + NULL, &dhd_dbg_state_ops); + } +} + +void dhd_dbg_init(dhd_pub_t *dhdp) +{ + g_dbgfs.dhdp = dhdp; + g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ + + g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); + if (IS_ERR(g_dbgfs.debugfs_dir)) { + g_dbgfs.debugfs_dir = NULL; + return; + } + + dhd_dbg_create(); + + return; +} + +void dhd_dbg_remove(void) +{ + debugfs_remove(g_dbgfs.debugfs_mem); + debugfs_remove(g_dbgfs.debugfs_dir); + + bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); +} +#endif /* BCMDBGFS */ + +#ifdef WLMEDIA_HTSF + +static +void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct sk_buff *skb; + uint32 htsf = 0; + uint16 dport = 0, oldmagic = 0xACAC; + char *p1; + htsfts_t ts; + + /* timestamp packet */ + + p1 = (char*) PKTDATA(dhdp->osh, pktbuf); + + if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) { +/* memcpy(&proto, p1+26, 4); */ + memcpy(&dport, p1+40, 2); +/* proto = ((ntoh32(proto))>> 16) & 0xFF; */ + dport = ntoh16(dport); + } + + /* timestamp only if icmp or udb iperf with port 5555 */ +/* if (proto == 17 && dport == tsport) { */ + if (dport >= tsport && dport <= tsport + 20) { + + skb = (struct sk_buff *) pktbuf; + + htsf = dhd_get_htsf(dhd, 0); + memset(skb->data + 44, 0, 2); /* clear checksum */ + memcpy(skb->data+82, &oldmagic, 2); + memcpy(skb->data+84, &htsf, 4); + + memset(&ts, 0, sizeof(htsfts_t)); + ts.magic = HTSFMAGIC; + ts.prio = PKTPRIO(pktbuf); + ts.seqnum = htsf_seqnum++; + ts.c10 = get_cycles(); + ts.t10 = htsf; + ts.endmagic = 
HTSFENDMAGIC; + + memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts)); + } +} + +static void dhd_dump_htsfhisto(histo_t *his, char *s) +{ + int pktcnt = 0, curval = 0, i; + for (i = 0; i < (NUMBIN-2); i++) { + curval += 500; + printf("%d ", his->bin[i]); + pktcnt += his->bin[i]; + } + printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt, + his->bin[NUMBIN-1], s); +} + +static +void sorttobin(int value, histo_t *histo) +{ + int i, binval = 0; + + if (value < 0) { + histo->bin[NUMBIN-1]++; + return; + } + if (value > histo->bin[NUMBIN-2]) /* store the max value */ + histo->bin[NUMBIN-2] = value; + + for (i = 0; i < (NUMBIN-2); i++) { + binval += 500; /* 500m s bins */ + if (value <= binval) { + histo->bin[i]++; + return; + } + } + histo->bin[NUMBIN-3]++; +} + +static +void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + char *p1; + uint16 old_magic; + int d1, d2, d3, end2end; + htsfts_t *htsf_ts; + uint32 htsf; + + skb = PKTTONATIVE(dhdp->osh, pktbuf); + p1 = (char*)PKTDATA(dhdp->osh, pktbuf); + + if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) { + memcpy(&old_magic, p1+78, 2); + htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4); + } else { + return; + } + if (htsf_ts->magic == HTSFMAGIC) { + htsf_ts->tE0 = dhd_get_htsf(dhd, 0); + htsf_ts->cE0 = get_cycles(); + } + + if (old_magic == 0xACAC) { + + tspktcnt++; + htsf = dhd_get_htsf(dhd, 0); + memcpy(skb->data+92, &htsf, sizeof(uint32)); + + memcpy(&ts[tsidx].t1, skb->data+80, 16); + + d1 = ts[tsidx].t2 - ts[tsidx].t1; + d2 = ts[tsidx].t3 - ts[tsidx].t2; + d3 = ts[tsidx].t4 - ts[tsidx].t3; + end2end = ts[tsidx].t4 - ts[tsidx].t1; + + sorttobin(d1, &vi_d1); + sorttobin(d2, &vi_d2); + sorttobin(d3, &vi_d3); + sorttobin(end2end, &vi_d4); + + if (end2end > 0 && end2end > maxdelay) { + maxdelay = end2end; + maxdelaypktno = tspktcnt; + memcpy(&maxdelayts, &ts[tsidx], 16); + } + if (++tsidx >= TSMAX) + tsidx = 0; + } +} + +uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx) +{ + uint32 htsf = 0, cur_cycle, delta, delta_us; + uint32 factor, baseval, baseval2; + cycles_t t; + + t = get_cycles(); + cur_cycle = t; + + if (cur_cycle > dhd->htsf.last_cycle) { + delta = cur_cycle - dhd->htsf.last_cycle; + } else { + delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle); + } + + delta = delta >> 4; + + if (dhd->htsf.coef) { + /* times ten to get the first digit */ + factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1); + baseval = (delta*10)/factor; + baseval2 = (delta*10)/(factor+1); + delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10); + htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY; + } else { + DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n")); + } + + return htsf; +} + +static void dhd_dump_latency(void) +{ + int i, max = 0; + int d1, d2, d3, d4, d5; + + printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n"); + for (i = 0; i < TSMAX; i++) { + d1 = ts[i].t2 - ts[i].t1; + d2 = ts[i].t3 - ts[i].t2; + d3 = ts[i].t4 - ts[i].t3; + d4 = ts[i].t4 - ts[i].t1; + d5 = ts[max].t4-ts[max].t1; + if (d4 > d5 && d4 > 0) { + max = i; + } + printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n", + ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4, + d1, d2, d3, d4, i); + } + + printf("current idx = %d \n", tsidx); + + printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt); + printf("%08X %08X %08X %08X \t%d %d %d %d\n", + maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4, + maxdelayts.t2 - maxdelayts.t1, + maxdelayts.t3 - maxdelayts.t2, + 
maxdelayts.t4 - maxdelayts.t3, + maxdelayts.t4 - maxdelayts.t1); +} + + +static int +dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx) +{ + wl_ioctl_t ioc; + char buf[32]; + int ret; + uint32 s1, s2; + + struct tsf { + uint32 low; + uint32 high; + } tsf_buf; + + memset(&ioc, 0, sizeof(ioc)); + memset(&tsf_buf, 0, sizeof(tsf_buf)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = FALSE; + + strncpy(buf, "tsf", sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; + s1 = dhd_get_htsf(dhd, 0); + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + if (ret == -EIO) { + DHD_ERROR(("%s: tsf is not supported by device\n", + dhd_ifname(&dhd->pub, ifidx))); + return -EOPNOTSUPP; + } + return ret; + } + s2 = dhd_get_htsf(dhd, 0); + + memcpy(&tsf_buf, buf, sizeof(tsf_buf)); + printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ", + tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1, + dhd->htsf.coefdec2, s2-tsf_buf.low); + printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle); + return 0; +} + +void htsf_update(dhd_info_t *dhd, void *data) +{ + static ulong cur_cycle = 0, prev_cycle = 0; + uint32 htsf, tsf_delta = 0; + uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp; + ulong b, a; + cycles_t t; + + /* cycles_t in inlcude/mips/timex.h */ + + t = get_cycles(); + + prev_cycle = cur_cycle; + cur_cycle = t; + + if (cur_cycle > prev_cycle) + cyc_delta = cur_cycle - prev_cycle; + else { + b = cur_cycle; + a = prev_cycle; + cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle); + } + + if (data == NULL) + printf(" tsf update ata point er is null \n"); + + memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t)); + memcpy(&cur_tsf, data, sizeof(tsf_t)); + + if (cur_tsf.low == 0) { + DHD_INFO((" ---- 0 TSF, do not update, return\n")); + return; + } + + if (cur_tsf.low > prev_tsf.low) + tsf_delta = (cur_tsf.low - prev_tsf.low); + else { + DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n", + cur_tsf.low, prev_tsf.low)); + if (cur_tsf.high > prev_tsf.high) { + tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low); + DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta)); + } else { + return; /* do not update */ + } + } + + if (tsf_delta) { + hfactor = cyc_delta / tsf_delta; + tmp = (cyc_delta - (hfactor * tsf_delta))*10; + dec1 = tmp/tsf_delta; + dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta; + tmp = (tmp - (dec1*tsf_delta))*10; + dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta; + + if (dec3 > 4) { + if (dec2 == 9) { + dec2 = 0; + if (dec1 == 9) { + dec1 = 0; + hfactor++; + } else { + dec1++; + } + } else { + dec2++; + } + } + } + + if (hfactor) { + htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low; + dhd->htsf.coef = hfactor; + dhd->htsf.last_cycle = cur_cycle; + dhd->htsf.last_tsf = cur_tsf.low; + dhd->htsf.coefdec1 = dec1; + dhd->htsf.coefdec2 = dec2; + } else { + htsf = prev_tsf.low; + } +} + +#endif /* WLMEDIA_HTSF */ + +#ifdef CUSTOM_SET_CPUCORE +void dhd_set_cpucore(dhd_pub_t *dhd, int set) +{ + int e_dpc = 0, e_rxf = 0, retry_set = 0; + + if (!(dhd->chan_isvht80)) { + DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80)); + return; + } + + if (DPC_CPUCORE) { + do { + if (set == TRUE) { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(DPC_CPUCORE)); + } else { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: dpc(%d) 
invalid cpu!\n", __FUNCTION__, e_dpc)); + return; + } + if (e_dpc < 0) + OSL_SLEEP(1); + } while (e_dpc < 0); + } + if (RXF_CPUCORE) { + do { + if (set == TRUE) { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(RXF_CPUCORE)); + } else { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf)); + return; + } + if (e_rxf < 0) + OSL_SLEEP(1); + } while (e_rxf < 0); + } +#ifdef DHD_OF_SUPPORT + interrupt_set_cpucore(set); +#endif /* DHD_OF_SUPPORT */ + DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set)); + + return; +} +#endif /* CUSTOM_SET_CPUCORE */ + +/* Get interface specific ap_isolate configuration */ +int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->ap_isolate; +} + +/* Set interface specific ap_isolate configuration */ +int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ifp->ap_isolate = val; + + return 0; +} + +#ifdef DHD_FW_COREDUMP + + +#ifdef CUSTOMER_HW4_DEBUG +#ifdef PLATFORM_SLP +#define MEMDUMPINFO "/opt/etc/.memdump.info" +#else +#define MEMDUMPINFO "/data/.memdump.info" +#endif /* PLATFORM_SLP */ +#elif defined(CUSTOMER_HW2) +#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" +#else +#define MEMDUMPINFO "/installmedia/.memdump.info" +#endif /* CUSTOMER_HW4_DEBUG */ + +void dhd_get_memdump_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + uint32 mem_val = DUMP_MEMFILE_MAX; + int ret = 0; + char *filepath = MEMDUMPINFO; + loff_t pos=0; + + /* Read memdump info from the file */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } else { + ret = kernel_read(fp, (char *)&mem_val, 4, &pos); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val)); + filp_close(fp, NULL); + } + +done: +#ifdef CUSTOMER_HW4_DEBUG + dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED; +#else + dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? 
mem_val : DUMP_MEMFILE_BUGON; +#endif /* CUSTOMER_HW4_DEBUG */ +} + + +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) +{ + dhd_dump_t *dump = NULL; + dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t)); + if (dump == NULL) { + DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__)); + return; + } + dump->buf = buf; + dump->bufsize = size; + +#if defined(CONFIG_ARM64) + DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__, + (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); +#elif defined(__ARM_ARCH_7A__) + DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__, + (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); +#endif /* __ARM_ARCH_7A__ */ + if (dhdp->memdump_enabled == DUMP_MEMONLY) { + BUG_ON(1); + } + +#ifdef DHD_LOG_DUMP + if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) { + dhd_schedule_log_dump(dhdp); + } +#endif /* DHD_LOG_DUMP */ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, + DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH); +} +static void +dhd_mem_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_dump_t *dump = event_info; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (!dump) { + DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); + return; + } + + if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) { + DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); + } + + if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && +#ifdef DHD_LOG_DUMP + dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && +#endif + TRUE) { + BUG_ON(1); + } + MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); +} +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_LOG_DUMP +static void +dhd_log_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (do_dhd_log_dump(&dhd->pub)) { + DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__)); + return; + } +} + +void dhd_schedule_log_dump(dhd_pub_t *dhdp) +{ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP, + dhd_log_dump, DHD_WORK_PRIORITY_HIGH); +} + +static int +do_dhd_log_dump(dhd_pub_t *dhdp) +{ + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + char dump_path[128]; + char common_info[1024]; + struct timeval curtime; + uint32 file_mode; + unsigned long flags = 0; + + if (!dhdp) { + return -1; + } + + /* Building the additional information like DHD, F/W version */ + memset(common_info, 0, sizeof(common_info)); + snprintf(common_info, sizeof(common_info), + "---------- Common information ----------\n" + "DHD version: %s\n" + "F/W version: %s\n" + "----------------------------------------\n", + dhd_version, fw_version); + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Init file name */ + memset(dump_path, 0, sizeof(dump_path)); + do_gettimeofday(&curtime); + snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld", + DHD_COMMON_DUMP_PATH "debug_dump", + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; + + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); + fp = filp_open(dump_path, file_mode, 0644); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + ret = -1; + goto exit; + } + + fp->f_op->write(fp, common_info, 
strlen(common_info), &pos); + if (dhdp->dld_buf.wraparound) { + fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos); + } else { + fp->f_op->write(fp, dhdp->dld_buf.buffer, + (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos); + } + + /* re-init dhd_log_dump_buf structure */ + spin_lock_irqsave(&dhdp->dld_buf.lock, flags); + dhdp->dld_buf.wraparound = 0; + dhdp->dld_buf.present = dhdp->dld_buf.front; + dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE); + spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags); +exit: + if (!ret) { + filp_close(fp, NULL); + } + set_fs(old_fs); + + return ret; +} +#endif /* DHD_LOG_DUMP */ + +#ifdef BCMASSERT_LOG +#ifdef CUSTOMER_HW4_DEBUG +#ifdef PLATFORM_SLP +#define ASSERTINFO "/opt/etc/.assert.info" +#else +#define ASSERTINFO "/data/.assert.info" +#endif /* PLATFORM_SLP */ +#elif defined(CUSTOMER_HW2) +#define ASSERTINFO "/data/misc/wifi/.assert.info" +#else +#define ASSERTINFO "/installmedia/.assert.info" +#endif /* CUSTOMER_HW4_DEBUG */ +void dhd_get_assert_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + char *filepath = ASSERTINFO; + loff_t pos=0; + + /* + * Read assert info from the file + * 0: Trigger Kernel crash by panic() + * 1: Print out the logs and don't trigger Kernel panic. (default) + * 2: Trigger Kernel crash by BUG() + * File doesn't exist: Keep default value (1). + */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + } else { + int mem_val = 0; + int ret = kernel_read(fp, (char *)&mem_val, 4, &pos); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + } else { + mem_val = bcm_atoi((char *)&mem_val); + DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + g_assert_type = mem_val; + } + filp_close(fp, NULL); + } +} +#endif /* BCMASSERT_LOG */ + + +#ifdef DHD_WMF +/* Returns interface specific WMF configuration */ +dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + return &ifp->wmf; +} +#endif /* DHD_WMF */ + + +#if defined(DHD_L2_FILTER) +bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac) +{ + return dhd_find_sta(dhdp, bssidx, mac) ? 
TRUE : FALSE; +} +#endif + +#ifdef DHD_L2_FILTER +arp_table_t* +dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(bssidx < DHD_MAX_IFS); + + ifp = dhd->iflist[bssidx]; + return ifp->phnd_arp_table; +} + +int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + return ifp->parp_enable; + else + return FALSE; +} + +/* Set interface specific proxy arp configuration */ +int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + if (!ifp) + return BCME_ERROR; + + /* At present all 3 variables are being + * handled at once + */ + ifp->parp_enable = val; + ifp->parp_discard = val; + ifp->parp_allnode = !val; + + /* Flush ARP entries when disabled */ + if (val == FALSE) { + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL, + FALSE, dhdp->tickcnt); + } + return BCME_OK; +} + +bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + return ifp->parp_discard; +} + +bool +dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->parp_allnode; +} + +int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->dhcp_unicast; +} + +int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->dhcp_unicast = val; + return BCME_OK; +} + +int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->block_ping; +} + +int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->block_ping = val; + + return BCME_OK; +} + +int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->grat_arp; +} + +int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->grat_arp = val; + + return BCME_OK; +} +#endif /* DHD_L2_FILTER */ + + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + char * RPS_CPU_SETBUF; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + if (ifidx == PRIMARY_INF) { + if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) { + DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS; + } else { + DHD_INFO(("%s : set for BSS.\n", __FUNCTION__)); + 
RPS_CPU_SETBUF = RPS_CPUS_MASK; + } + } else if (ifidx == VIRTUAL_INF) { + DHD_INFO(("%s : set for P2P.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P; + } else { + DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + ifp = dhd->iflist[ifidx]; + if (ifp) { + if (enable) { + DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF)); + custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF)); + } else { + custom_rps_map_clear(ifp->net->_rx); + } + } else { + DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__)); + return -ENODEV; + } + return BCME_OK; +} + +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) +{ + struct rps_map *old_map, *map; + cpumask_var_t mask; + int err, cpu, i; + static DEFINE_SPINLOCK(rps_map_lock); + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); + if (err) { + free_cpumask_var(mask); + DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__)); + return err; + } + + map = kzalloc(max_t(unsigned int, + RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), + GFP_KERNEL); + if (!map) { + free_cpumask_var(mask); + DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + i = 0; + for_each_cpu(cpu, mask) { + map->cpus[i++] = cpu; + } + + if (i) { + map->len = i; + } else { + kfree(map); + map = NULL; + free_cpumask_var(mask); + DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__)); + return -1; + } + + spin_lock(&rps_map_lock); + old_map = rcu_dereference_protected(queue->rps_map, + lockdep_is_held(&rps_map_lock)); + rcu_assign_pointer(queue->rps_map, map); + spin_unlock(&rps_map_lock); + + if (map) { + static_key_slow_inc(&rps_needed); + } + if (old_map) { + kfree_rcu(old_map, rcu); + static_key_slow_dec(&rps_needed); + } + free_cpumask_var(mask); + + DHD_INFO(("%s : Done. 
mapping cpu nummber : %d\n", __FUNCTION__, map->len)); + return map->len; +} + +void custom_rps_map_clear(struct netdev_rx_queue *queue) +{ + struct rps_map *map; + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + map = rcu_dereference_protected(queue->rps_map, 1); + if (map) { + RCU_INIT_POINTER(queue->rps_map, NULL); + kfree_rcu(map, rcu); + DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__)); + } +} +#endif + + + +#ifdef DHD_DEBUG_PAGEALLOC + +void +dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n", + __FUNCTION__, addr_corrupt, (uint32)len)); + + DHD_OS_WAKE_LOCK(dhdp); + prhex("Page Corruption:", addr_corrupt, len); + dhd_dump_to_kernelog(dhdp); +#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + /* Load the dongle side dump to host memory and then BUG_ON() */ + dhdp->memdump_enabled = DUMP_MEMONLY; + dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); +} +EXPORT_SYMBOL(dhd_page_corrupt_cb); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#ifdef DHD_PKTID_AUDIT_ENABLED +void +dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp) +{ + DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(dhdp); + dhd_dump_to_kernelog(dhdp); +#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + /* Load the dongle side dump to host memory and then BUG_ON() */ + dhdp->memdump_enabled = DUMP_MEMFILE_BUGON; + dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); +} +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* ---------------------------------------------------------------------------- + * Infrastructure code for sysfs interface support for DHD + * + * What is sysfs interface? + * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt + * + * Why sysfs interface? + * This is the Linux standard way of changing/configuring Run Time parameters + * for a driver. We can use this interface to control "linux" specific driver + * parameters. + * + * ----------------------------------------------------------------------------- + */ + +#include +#include + +#if defined(DHD_TRACE_WAKE_LOCK) + +/* Function to show the history buffer */ +static ssize_t +show_wklock_trace(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + dhd_info_t *dhd = (dhd_info_t *)dev; + + buf[ret] = '\n'; + buf[ret+1] = 0; + + dhd_wk_lock_stats_dump(&dhd->pub); + return ret+1; +} + +/* Function to enable/disable wakelock trace */ +static ssize_t +wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + unsigned long flags; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = bcm_strtoul(buf, NULL, 10); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + trace_wklock_onoff = onoff; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + if (trace_wklock_onoff) { + printk("ENABLE WAKLOCK TRACE\n"); + } else { + printk("DISABLE WAKELOCK TRACE\n"); + } + + return (ssize_t)(onoff+1); +} +#endif /* DHD_TRACE_WAKE_LOCK */ + +/* + * Generic Attribute Structure for DHD. 
+ * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
+ * to instantiate an object of type dhd_attr, populate it with
+ * the required show/store functions (e.g. dhd_attr_cpumask_primary)
+ * and add the object to the default_attrs[] array, which gets registered
+ * to the kobject of dhd (named bcm-dhd).
+ * (A minimal illustrative sketch of such an attribute appears further
+ * below, inside dhd_sysfs_init().)
+ */
+
+struct dhd_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct dhd_info *, char *);
+	ssize_t (*store)(struct dhd_info *, const char *, size_t count);
+};
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+	&dhd_attr_wklock.attr,
+#endif
+	NULL
+};
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function, the "attr" attribute specifies to which
+ * node under "bcm-dhd" the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+	int ret;
+
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+
+/*
+ * bcm-dhd kobject store function, the "attr" attribute specifies to which
+ * node under "bcm-dhd" the store function is called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+	const char *buf, size_t count)
+{
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+	int ret;
+
+	if (d_attr->store)
+		ret = d_attr->store(dhd, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+	.show = dhd_show,
+	.store = dhd_store,
+};
+
+static struct kobj_type dhd_ktype = {
+	.sysfs_ops = &dhd_sysfs_ops,
+	.default_attrs = default_attrs,
+};
+
+/* Create a kobject and attach to sysfs interface */
+static int dhd_sysfs_init(dhd_info_t *dhd)
+{
+	int ret = -1;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/* Initialize the kobject */
+	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
+	if (ret) {
+		kobject_put(&dhd->dhd_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/*
+	 * We are always responsible for sending the uevent that the kobject
+	 * was added to the system.
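+	 *
+	 * As a minimal illustrative sketch (assuming a hypothetical read-only
+	 * node "foo" with a show_foo() handler; neither is part of this driver),
+	 * a new attribute would follow the same pattern as dhd_attr_wklock:
+	 *
+	 *	static ssize_t show_foo(struct dhd_info *dhd, char *buf)
+	 *	{
+	 *		return scnprintf(buf, PAGE_SIZE, "%d\n", dhd->pub.up ? 1 : 0);
+	 *	}
+	 *	static struct dhd_attr dhd_attr_foo =
+	 *		__ATTR(foo, 0440, show_foo, NULL);
+	 *
+	 * with &dhd_attr_foo.attr appended to default_attrs[] so that dhd_show()
+	 * dispatches to it.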
+ */ + kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD); + + return ret; +} + +/* Done with the kobject and detach the sysfs interface */ +static void dhd_sysfs_exit(dhd_info_t *dhd) +{ + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); + return; + } + + /* Releae the kobject */ + kobject_put(&dhd->dhd_kobj); +} + +#ifdef DHD_LOG_DUMP +void +dhd_log_dump_init(dhd_pub_t *dhd) +{ + spin_lock_init(&dhd->dld_buf.lock); +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd, + DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE); +#else + dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + + if (!dhd->dld_buf.buffer) { + dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL); + DHD_ERROR(("Try to allocate memory using kmalloc().\n")); + + if (!dhd->dld_buf.buffer) { + DHD_ERROR(("Failed to allocate memory for dld_buf.\n")); + return; + } + } + + dhd->dld_buf.wraparound = 0; + dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE; + dhd->dld_buf.present = dhd->dld_buf.buffer; + dhd->dld_buf.front = dhd->dld_buf.buffer; + dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + dhd->dld_enable = 1; +} + +void +dhd_log_dump_deinit(dhd_pub_t *dhd) +{ + dhd->dld_enable = 0; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd, + dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE); +#else + kfree(dhd->dld_buf.buffer); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ +} + +void +dhd_log_dump_print(const char *fmt, ...) +{ + int len = 0; + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; + va_list args; + dhd_pub_t *dhd = NULL; + unsigned long flags = 0; + + if (wl_get_bcm_cfg80211_ptr()) { + dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub); + } + + if (!dhd || dhd->dld_enable != 1) { + return; + } + + va_start(args, fmt); + + len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); + if (len < 0) { + return; + } + + /* make a critical section to eliminate race conditions */ + spin_lock_irqsave(&dhd->dld_buf.lock, flags); + if (dhd->dld_buf.remain < len) { + dhd->dld_buf.wraparound = 1; + dhd->dld_buf.present = dhd->dld_buf.front; + dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + } + + strncpy(dhd->dld_buf.present, tmp_buf, len); + dhd->dld_buf.remain -= len; + dhd->dld_buf.present += len; + spin_unlock_irqrestore(&dhd->dld_buf.lock, flags); + + /* double check invalid memory operation */ + ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max); + va_end(args); +} + +char* +dhd_log_dump_get_timestamp(void) +{ + static char buf[16]; + u64 ts_nsec; + unsigned long rem_nsec; + + ts_nsec = local_clock(); + rem_nsec = do_div(ts_nsec, 1000000000); + snprintf(buf, sizeof(buf), "%5lu.%06lu", + (unsigned long)ts_nsec, rem_nsec / 1000); + + return buf; +} + +#endif /* DHD_LOG_DUMP */ + +/* ---------------------------- End of sysfs implementation ------------------------------------- */ diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.h b/drivers/net/wireless/bcmdhd/dhd_linux.h new file mode 100644 index 000000000000..dd3397723f73 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux.h @@ -0,0 +1,127 @@ +/* + * DHD Linux header file (dhd_linux exports for cfg80211 and other components) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * 
agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux.h 591285 2015-10-07 11:56:29Z $ + */ + +/* wifi platform functions for power, interrupt and pre-alloc, either + * from Android-like platform device data, or Broadcom wifi platform + * device data. + * + */ +#ifndef __DHD_LINUX_H__ +#define __DHD_LINUX_H__ + +#include +#include +#include +#include +#include +#ifdef DHD_WMF +#include +#endif +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +#endif /* defined(WL_WIRELESS_EXT) */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ + +#if defined(CONFIG_WIFI_CONTROL_FUNC) +#include +#endif + +#if !defined(CONFIG_WIFI_CONTROL_FUNC) +#define WLAN_PLAT_NODFS_FLAG 0x01 +struct wifi_platform_data { + int (*set_power)(int val); + int (*set_reset)(int val); + int (*set_carddetect)(int val); + void *(*mem_prealloc)(int section, unsigned long size); + int (*get_mac_addr)(unsigned char *buf); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) || defined(CUSTOM_COUNTRY_CODE) + void *(*get_country_code)(char *ccode, u32 flags); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) || defined (CUSTOM_COUNTRY_CODE) */ + void *(*get_country_code)(char *ccode); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) */ + }; +#endif /* CONFIG_WIFI_CONTROL_FUNC */ +#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */ + +typedef struct wifi_adapter_info { + const char *name; + uint irq_num; + uint intr_flags; + const char *fw_path; + const char *nv_path; + void *wifi_plat_data; /* wifi ctrl func, for backward compatibility */ + uint bus_type; + uint bus_num; + uint slot_num; +} wifi_adapter_info_t; + +typedef struct bcmdhd_wifi_platdata { + uint num_adapters; + wifi_adapter_info_t *adapters; +} bcmdhd_wifi_platdata_t; + +/** Per STA params. 
A list of dhd_sta objects are managed in dhd_if */ +typedef struct dhd_sta { + cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ + uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */ + void * ifp; /* associated dhd_if */ + struct ether_addr ea; /* stations ethernet mac address */ + struct list_head list; /* link into dhd_if::sta_list */ + int idx; /* index of self in dhd_pub::sta_pool[] */ + int ifidx; /* index of interface in dhd */ +} dhd_sta_t; +typedef dhd_sta_t dhd_sta_pool_t; + +int dhd_wifi_platform_register_drv(void); +void dhd_wifi_platform_unregister_drv(void); +wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, + uint32 slot_num); +int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec); +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present); +int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr); +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf); +#ifdef CUSTOM_COUNTRY_CODE +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, + u32 flags); +#else +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode); +#endif /* CUSTOM_COUNTRY_CODE */ +void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size); +void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter); + +int dhd_get_fw_mode(struct dhd_info *dhdinfo); +bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo); + +#ifdef DHD_WMF +dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx); +#endif /* DHD_WMF */ +#endif /* __DHD_LINUX_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c new file mode 100644 index 000000000000..f7696d80a09a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c @@ -0,0 +1,823 @@ +/* + * Linux platform device for DHD WLAN adapter + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_platdev.c 591285 2015-10-07 11:56:29Z $ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_WIFI_CONTROL_FUNC) +#include +#endif +#ifdef CONFIG_DTS +#include +#include +#endif /* CONFIG_DTS */ + + +#define WIFI_PLAT_NAME "bcmdhd_wlan" +#define WIFI_PLAT_NAME2 "bcm4329_wlan" +#define WIFI_PLAT_EXT "bcmdhd_wifi_platform" + +#ifdef CONFIG_DTS +struct regulator *wifi_regulator = NULL; +#endif /* CONFIG_DTS */ + +bool cfg_multichip = FALSE; +bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL; +static int wifi_plat_dev_probe_ret = 0; +static bool is_power_on = FALSE; +#if !defined(CONFIG_DTS) +#if defined(DHD_OF_SUPPORT) +static bool dts_enabled = TRUE; +extern struct resource dhd_wlan_resources; +extern struct wifi_platform_data dhd_wlan_control; +#else +static bool dts_enabled = FALSE; +struct resource dhd_wlan_resources = {0}; +struct wifi_platform_data dhd_wlan_control = {0}; +#endif /* CONFIG_OF && !defined(CONFIG_ARCH_MSM) */ +#endif /* !defind(CONFIG_DTS) */ + +static int dhd_wifi_platform_load(void); + +extern void* wl_cfg80211_get_dhdp(void); + +#ifdef ENABLE_4335BT_WAR +extern int bcm_bt_lock(int cookie); +extern void bcm_bt_unlock(int cookie); +static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */ +#endif /* ENABLE_4335BT_WAR */ + +wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num) +{ + int i; + + if (dhd_wifi_platdata == NULL) + return NULL; + + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i]; + if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) && + (adapter->bus_num == -1 || adapter->bus_num == bus_num) && + (adapter->slot_num == -1 || adapter->slot_num == slot_num)) { + DHD_TRACE(("found adapter info '%s'\n", adapter->name)); + return adapter; + } + } + return NULL; +} + +void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size) +{ + void *alloc_ptr = NULL; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + if (plat_data->mem_prealloc) { + alloc_ptr = plat_data->mem_prealloc(section, size); + if (alloc_ptr) { + DHD_INFO(("success alloc section %d\n", section)); + if (size != 0L) + bzero(alloc_ptr, size); + return alloc_ptr; + } + } + + DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section)); + return NULL; +} + +void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter) +{ + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + return plat_data->mem_prealloc; +} + +int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr) +{ + if (adapter == NULL) + return -1; + if (irq_flags_ptr) + *irq_flags_ptr = adapter->intr_flags; + return adapter->irq_num; +} + +int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec) +{ + int err = 0; +#ifdef CONFIG_DTS + if (on) { + // err = regulator_enable(wifi_regulator); + is_power_on = TRUE; + } + else { + // err = regulator_disable(wifi_regulator); + is_power_on = FALSE; + } + if (err < 0) + DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__)); +#else + struct wifi_platform_data *plat_data; + + if (!adapter || 
!adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s = %d\n", __FUNCTION__, on)); + if (plat_data->set_power) { +#ifdef ENABLE_4335BT_WAR + if (on) { + printk("WiFi: trying to acquire BT lock\n"); + if (bcm_bt_lock(lock_cookie_wifi) != 0) + printk("** WiFi: timeout in acquiring bt lock**\n"); + printk("%s: btlock acquired\n", __FUNCTION__); + } + else { + /* For a exceptional case, release btlock */ + bcm_bt_unlock(lock_cookie_wifi); + } +#endif /* ENABLE_4335BT_WAR */ + + err = plat_data->set_power(on); + } + + if (msec && !err) + OSL_SLEEP(msec); + + if (on && !err) + is_power_on = TRUE; + else + is_power_on = FALSE; + +#endif /* CONFIG_DTS */ + + return err; +} + +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present) +{ + int err = 0; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present)); + if (plat_data->set_carddetect) { + err = plat_data->set_carddetect(device_present); + } + return err; + +} + +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf) +{ + struct wifi_platform_data *plat_data; + + DHD_ERROR(("%s\n", __FUNCTION__)); + if (!buf || !adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + if (plat_data->get_mac_addr) { + return plat_data->get_mac_addr(buf); + } + return -EOPNOTSUPP; +} +#ifdef CUSTOM_COUNTRY_CODE +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags) +#else +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) +#endif /* CUSTOM_COUNTRY_CODE */ +{ + /* get_country_code was added after 2.6.39 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct wifi_platform_data *plat_data; + + if (!ccode || !adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + + DHD_TRACE(("%s\n", __FUNCTION__)); + if (plat_data->get_country_code) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) + return plat_data->get_country_code(ccode, WLAN_PLAT_NODFS_FLAG); +#else +#ifdef CUSTOM_COUNTRY_CODE + return plat_data->get_country_code(ccode, flags); +#else + return plat_data->get_country_code(ccode); +#endif /* CUSTOM_COUNTRY_CODE */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) */ + } +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ + + return NULL; +} + +static int wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + struct resource *resource; + wifi_adapter_info_t *adapter; +#ifdef CONFIG_DTS + int irq, gpio; +#endif /* CONFIG_DTS */ + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data); + + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq"); + if (resource == NULL) + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq"); + if (resource) { + adapter->irq_num = resource->start; + adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; + } + +#ifdef CONFIG_DTS + wifi_regulator = regulator_get(&pdev->dev, "wlreg_on"); + if (wifi_regulator == NULL) { + DHD_ERROR(("%s 
regulator is null\n", __FUNCTION__)); + return -1; + } + + /* This is to get the irq for the OOB */ + gpio = of_get_gpio(pdev->dev.of_node, 0); + + if (gpio < 0) { + DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__)); + return -1; + } + irq = gpio_to_irq(gpio); + if (irq < 0) { + DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__)); + return -1; + } + adapter->irq_num = irq; + + /* need to change the flags according to our requirement */ + adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | + IORESOURCE_IRQ_SHAREABLE; +#endif /* CONFIG_DTS */ + + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + return wifi_plat_dev_probe_ret; +} + +static int wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + wifi_adapter_info_t *adapter; + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { +#ifdef BCMPCIE + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); +#else + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); +#endif /* BCMPCIE */ + } + +#ifdef CONFIG_DTS + regulator_put(wifi_regulator); +#endif /* CONFIG_DTS */ + return 0; +} + +static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + bcmsdh_oob_intr_set(0); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +static int wifi_plat_dev_drv_resume(struct platform_device *pdev) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + if (dhd_os_check_if_up(wl_cfg80211_get_dhdp())) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +#ifdef CONFIG_DTS +static const struct of_device_id wifi_device_dt_match[] = { + { .compatible = "android,bcmdhd_wlan", }, + {}, +}; +#endif /* CONFIG_DTS */ +static struct platform_driver wifi_platform_dev_driver = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, + .driver = { + .name = WIFI_PLAT_NAME, +#ifdef CONFIG_DTS + .of_match_table = wifi_device_dt_match, +#endif /* CONFIG_DTS */ + } +}; + +static struct platform_driver wifi_platform_dev_driver_legacy = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, + .driver = { + .name = WIFI_PLAT_NAME2, + } +}; + +static int wifi_platdev_match(struct device *dev, void *data) +{ + char *name = (char*)data; + struct platform_device *pdev = to_platform_device(dev); + + if (strcmp(pdev->name, name) == 0) { + DHD_ERROR(("found wifi platform device %s\n", name)); + return TRUE; + } + + return FALSE; +} + +static int wifi_ctrlfunc_register_drv(void) +{ + int err = 0; + struct device *dev1, *dev2; + wifi_adapter_info_t *adapter; + + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match); + +#if !defined(CONFIG_DTS) + if (!dts_enabled) { + if (dev1 == NULL && dev2 == 
NULL) { + DHD_ERROR(("no wifi platform data, skip\n")); + return -ENXIO; + } + } +#endif /* !defined(CONFIG_DTS) */ + + /* multi-chip support not enabled, build one adapter information for + * DHD (either SDIO, USB or PCIe) + */ + adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL); + if (adapter == NULL) { + DHD_ERROR(("%s:adapter alloc failed", __FUNCTION__)); + return ENOMEM; + } + adapter->name = "DHD generic adapter"; + adapter->bus_type = -1; + adapter->bus_num = -1; + adapter->slot_num = -1; + adapter->irq_num = -1; + is_power_on = FALSE; + wifi_plat_dev_probe_ret = 0; + dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL); + dhd_wifi_platdata->num_adapters = 1; + dhd_wifi_platdata->adapters = adapter; + + if (dev1) { + err = platform_driver_register(&wifi_platform_dev_driver); + if (err) { + DHD_ERROR(("%s: failed to register wifi ctrl func driver\n", + __FUNCTION__)); + return err; + } + } + if (dev2) { + err = platform_driver_register(&wifi_platform_dev_driver_legacy); + if (err) { + DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n", + __FUNCTION__)); + return err; + } + } + +#if !defined(CONFIG_DTS) + if (dts_enabled) { + struct resource *resource; + adapter->wifi_plat_data = (void *)&dhd_wlan_control; + resource = &dhd_wlan_resources; + adapter->irq_num = resource->start; + adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + } +#endif /* !defined(CONFIG_DTS) */ + + +#ifdef CONFIG_DTS + wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver); +#endif /* CONFIG_DTS */ + + /* return probe function's return value if registeration succeeded */ + return wifi_plat_dev_probe_ret; +} + +void wifi_ctrlfunc_unregister_drv(void) +{ + +#ifdef CONFIG_DTS + DHD_ERROR(("unregister wifi platform drivers\n")); + platform_driver_unregister(&wifi_platform_dev_driver); +#else + struct device *dev1, *dev2; + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match); + if (!dts_enabled) + if (dev1 == NULL && dev2 == NULL) + return; + + DHD_ERROR(("unregister wifi platform drivers\n")); + if (dev1) + platform_driver_unregister(&wifi_platform_dev_driver); + if (dev2) + platform_driver_unregister(&wifi_platform_dev_driver_legacy); + if (dts_enabled) { + wifi_adapter_info_t *adapter; + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } + } +#endif /* !defined(CONFIG_DTS) */ + + kfree(dhd_wifi_platdata->adapters); + dhd_wifi_platdata->adapters = NULL; + dhd_wifi_platdata->num_adapters = 0; + kfree(dhd_wifi_platdata); + dhd_wifi_platdata = NULL; +} + +static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data); + + return dhd_wifi_platform_load(); +} + +static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + int i; + wifi_adapter_info_t *adapter; + ASSERT(dhd_wifi_platdata != NULL); + + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } + return 0; +} + +static struct platform_driver dhd_wifi_platform_dev_driver = { + .probe = 
bcmdhd_wifi_plat_dev_drv_probe, + .remove = bcmdhd_wifi_plat_dev_drv_remove, + .driver = { + .name = WIFI_PLAT_EXT, + } +}; + +int dhd_wifi_platform_register_drv(void) +{ + int err = 0; + struct device *dev; + + /* register Broadcom wifi platform data driver if multi-chip is enabled, + * otherwise use Android style wifi platform data (aka wifi control function) + * if it exists + * + * to support multi-chip DHD, Broadcom wifi platform data device must + * be added in kernel early boot (e.g. board config file). + */ + if (cfg_multichip) { + dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match); + if (dev == NULL) { + DHD_ERROR(("bcmdhd wifi platform data device not found!!\n")); + return -ENXIO; + } + err = platform_driver_register(&dhd_wifi_platform_dev_driver); + } else { + err = wifi_ctrlfunc_register_drv(); + + /* no wifi ctrl func either, load bus directly and ignore this error */ + if (err) { + if (err == -ENXIO) { + /* wifi ctrl function does not exist */ + err = dhd_wifi_platform_load(); + } else { + /* unregister driver due to initialization failure */ + wifi_ctrlfunc_unregister_drv(); + } + } + } + + return err; +} + +#ifdef BCMPCIE +static int dhd_wifi_platform_load_pcie(void) +{ + int err = 0; + int i; + wifi_adapter_info_t *adapter; + + BCM_REFERENCE(i); + BCM_REFERENCE(adapter); + + if (dhd_wifi_platdata == NULL) { + err = dhd_bus_register(); + } else { + if (dhd_download_fw_on_driverload) { + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + int retry = POWERUP_MAX_RETRY; + adapter = &dhd_wifi_platdata->adapters[i]; + + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, + adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + + do { + err = wifi_platform_set_power(adapter, + TRUE, WIFI_TURNON_DELAY); + if (err) { + DHD_ERROR(("failed to power up %s," + " %d retry left\n", + adapter->name, retry)); + /* WL_REG_ON state unknown, Power off forcely */ + wifi_platform_set_power(adapter, + FALSE, WIFI_TURNOFF_DELAY); + continue; + } else { + err = wifi_platform_bus_enumerate(adapter, TRUE); + if (err) { + DHD_ERROR(("failed to enumerate bus %s, " + "%d retry left\n", + adapter->name, retry)); + wifi_platform_set_power(adapter, FALSE, + WIFI_TURNOFF_DELAY); + } else { + break; + } + } + } while (retry--); + + if (!retry) { + DHD_ERROR(("failed to power up %s, max retry reached**\n", + adapter->name)); + return -ENODEV; + } + } + } + + err = dhd_bus_register(); + + if (err) { + DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__)); + if (dhd_download_fw_on_driverload) { + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, + FALSE, WIFI_TURNOFF_DELAY); + } + } + } + } + + return err; +} +#else +static int dhd_wifi_platform_load_pcie(void) +{ + return 0; +} +#endif /* BCMPCIE */ + + +void dhd_wifi_platform_unregister_drv(void) +{ + if (cfg_multichip) + platform_driver_unregister(&dhd_wifi_platform_dev_driver); + else + wifi_ctrlfunc_unregister_drv(); +} + +extern int dhd_watchdog_prio; +extern int dhd_dpc_prio; +extern uint dhd_deferred_tx; +#if defined(BCMLXSDMMC) +extern struct semaphore dhd_registration_sem; +#endif + +#ifdef BCMSDIO 
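/*
 * For reference only: the control-function path above (wifi_ctrlfunc_register_drv)
 * expects a platform device named "bcmdhd_wlan" (or the legacy "bcm4329_wlan")
 * to have been registered from board/early-boot code. A minimal sketch of such
 * a registration is shown below; the IRQ number and the power/carddetect
 * callbacks are hypothetical and are normally supplied by the platform:
 *
 *   static struct resource brcm_wlan_res[] = {
 *           {
 *                   .name  = "bcmdhd_wlan_irq",
 *                   .start = 123,    // example OOB interrupt number
 *                   .end   = 123,
 *                   .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
 *           },
 *   };
 *   static struct wifi_platform_data brcm_wlan_control = {
 *           .set_power      = brcm_wlan_power,       // platform specific
 *           .set_carddetect = brcm_wlan_carddetect,  // platform specific
 *   };
 *   static struct platform_device brcm_wlan_device = {
 *           .name          = "bcmdhd_wlan",
 *           .id            = -1,
 *           .resource      = brcm_wlan_res,
 *           .num_resources = ARRAY_SIZE(brcm_wlan_res),
 *           .dev           = { .platform_data = &brcm_wlan_control },
 *   };
 *
 *   // registered from board init with platform_device_register(&brcm_wlan_device)
 */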
+static int dhd_wifi_platform_load_sdio(void) +{ + int i; + int err = 0; + wifi_adapter_info_t *adapter; + + BCM_REFERENCE(i); + BCM_REFERENCE(adapter); + /* Sanity check on the module parameters + * - Both watchdog and DPC as tasklets are ok + * - If both watchdog and DPC are threads, TX must be deferred + */ + if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) && + !(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx)) + return -EINVAL; + +#if defined(BCMLXSDMMC) + if (dhd_wifi_platdata == NULL) { + DHD_ERROR(("DHD wifi platform data is required for Android build\n")); + return -EINVAL; + } + + sema_init(&dhd_registration_sem, 0); + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + bool chip_up = FALSE; + int retry = POWERUP_MAX_RETRY; + struct semaphore dhd_chipup_sem; + + adapter = &dhd_wifi_platdata->adapters[i]; + + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + + do { + sema_init(&dhd_chipup_sem, 0); + err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem); + if (err) { + DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n", + __FUNCTION__, err)); + return err; + } + err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY); + if (err) { + /* WL_REG_ON state unknown, Power off forcely */ + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + continue; + } else { + wifi_platform_bus_enumerate(adapter, TRUE); + err = 0; + } + + if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) { + dhd_bus_unreg_sdio_notify(); + chip_up = TRUE; + break; + } + + DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry)); + dhd_bus_unreg_sdio_notify(); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } while (retry--); + + if (!chip_up) { + DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name)); + return -ENODEV; + } + + } + + err = dhd_bus_register(); + + if (err) { + DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__)); + goto fail; + } + + + /* + * Wait till MMC sdio_register_driver callback called and made driver attach. 
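 * (If the callback does not signal dhd_registration_sem within
 * DHD_REGISTRATION_TIMEOUT, the code below unregisters the bus and fails
 * the load.)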
+ * It's needed to make sync up exit from dhd insmod and + * Kernel MMC sdio device callback registration + */ + err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)); + if (err) { + DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__)); + dhd_bus_unregister(); + goto fail; + } + + return err; + +fail: + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } +#else + + /* x86 bring-up PC needs no power-up operations */ + err = dhd_bus_register(); + +#endif + + return err; +} +#else /* BCMSDIO */ +static int dhd_wifi_platform_load_sdio(void) +{ + return 0; +} +#endif /* BCMSDIO */ + +static int dhd_wifi_platform_load_usb(void) +{ + return 0; +} + +static int dhd_wifi_platform_load() +{ + int err = 0; + + wl_android_init(); + + if ((err = dhd_wifi_platform_load_usb())) + goto end; + else if ((err = dhd_wifi_platform_load_sdio())) + goto end; + else + err = dhd_wifi_platform_load_pcie(); + +end: + if (err) + wl_android_exit(); + else + wl_android_post_init(); + + return err; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c new file mode 100644 index 000000000000..66eb8940ba3f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c @@ -0,0 +1,51 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $ + */ +#include +#include +#include +#include +#include + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = sched_setscheduler(p, policy, param); +#endif /* LinuxVer */ + return rc; +} + +int get_scheduler_policy(struct task_struct *p) +{ + int rc = SCHED_NORMAL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = p->policy; +#endif /* LinuxVer */ + return rc; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c new file mode 100644 index 000000000000..d2513cc4ab0d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c @@ -0,0 +1,320 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct dhd_deferred_event_t { + u8 event; /* holds the event */ + void *event_data; /* Holds event specific data */ + event_handler_t event_handler; +}; +#define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t) + +struct dhd_deferred_wq { + struct work_struct deferred_work; /* should be the first member */ + + /* + * work events may occur simultaneously. + * Can hold upto 64 low priority events and 4 high priority events + */ +#define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t)) +#define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t)) + struct kfifo *prio_fifo; + struct kfifo *work_fifo; + u8 *prio_fifo_buf; + u8 *work_fifo_buf; + spinlock_t work_lock; + void *dhd_info; /* review: does it require */ +}; + +static inline struct kfifo* +dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) +{ + struct kfifo *fifo; + gfp_t flags = CAN_SLEEP()? 
GFP_KERNEL : GFP_ATOMIC; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) + fifo = kfifo_init(buf, size, flags, lock); +#else + fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); + if (!fifo) { + return NULL; + } + kfifo_init(fifo, buf, size); +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ + return fifo; +} + +static inline void +dhd_kfifo_free(struct kfifo *fifo) +{ + kfifo_free(fifo); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) + /* FC11 releases the fifo memory */ + kfree(fifo); +#endif +} + +/* deferred work functions */ +static void dhd_deferred_work_handler(struct work_struct *data); + +void* +dhd_deferred_work_init(void *dhd_info) +{ + struct dhd_deferred_wq *work = NULL; + u8* buf; + unsigned long fifo_size = 0; + gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; + + if (!dhd_info) { + DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__)); + goto return_null; + } + + work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), + flags); + + if (!work) { + DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__)); + goto return_null; + } + + INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler); + + /* initialize event fifo */ + spin_lock_init(&work->work_lock); + + /* allocate buffer to hold prio events */ + fifo_size = DHD_PRIO_WORK_FIFO_SIZE; + fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); + buf = (u8*)kzalloc(fifo_size, flags); + if (!buf) { + DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__)); + goto return_null; + } + + /* Initialize prio event fifo */ + work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); + if (!work->prio_fifo) { + kfree(buf); + goto return_null; + } + + /* allocate buffer to hold work events */ + fifo_size = DHD_WORK_FIFO_SIZE; + fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); + buf = (u8*)kzalloc(fifo_size, flags); + if (!buf) { + DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__)); + goto return_null; + } + + /* Initialize event fifo */ + work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); + if (!work->work_fifo) { + kfree(buf); + goto return_null; + } + + work->dhd_info = dhd_info; + DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__)); + return work; + +return_null: + + if (work) + dhd_deferred_work_deinit(work); + + return NULL; +} + +void +dhd_deferred_work_deinit(void *work) +{ + struct dhd_deferred_wq *deferred_work = work; + + + if (!deferred_work) { + DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__)); + return; + } + + /* cancel the deferred work handling */ + cancel_work_sync((struct work_struct *)deferred_work); + + /* + * free work event fifo. 
+ * kfifo_free frees locally allocated fifo buffer + */ + if (deferred_work->prio_fifo) + dhd_kfifo_free(deferred_work->prio_fifo); + + if (deferred_work->work_fifo) + dhd_kfifo_free(deferred_work->work_fifo); + + kfree(deferred_work); +} + +/* + * Prepares event to be queued + * Schedules the event + */ +int +dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t event_handler, u8 priority) +{ + struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq; + struct dhd_deferred_event_t deferred_event; + int status; + + if (!deferred_wq) { + DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); + ASSERT(0); + return DHD_WQ_STS_UNINITIALIZED; + } + + if (!event || (event >= DHD_MAX_WQ_EVENTS)) { + DHD_ERROR(("%s: Unknown event \n", __FUNCTION__)); + return DHD_WQ_STS_UNKNOWN_EVENT; + } + + /* + * default element size is 1, which can be changed + * using kfifo_esize(). Older kernel(FC11) doesn't support + * changing element size. For compatibility changing + * element size is not prefered + */ + ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); + ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); + + deferred_event.event = event; + deferred_event.event_data = event_data; + deferred_event.event_handler = event_handler; + + if (priority == DHD_WORK_PRIORITY_HIGH) { + status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } else { + status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + + if (!status) { + return DHD_WQ_STS_SCHED_FAILED; + } + schedule_work((struct work_struct *)deferred_wq); + return DHD_WQ_STS_OK; +} + +static int +dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event) +{ + int status = 0; + + if (!deferred_wq) { + DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); + return DHD_WQ_STS_UNINITIALIZED; + } + + /* + * default element size is 1 byte, which can be changed + * using kfifo_esize(). Older kernel(FC11) doesn't support + * changing element size. For compatibility changing + * element size is not prefered + */ + ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); + ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); + + /* first read priorit event fifo */ + status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + + if (!status) { + /* priority fifo is empty. 
Now read low prio work fifo */ + status = kfifo_out_spinlocked(deferred_wq->work_fifo, event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + + return status; +} + +/* + * Called when work is scheduled + */ +static void +dhd_deferred_work_handler(struct work_struct *work) +{ + struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; + struct dhd_deferred_event_t work_event; + int status; + + if (!deferred_work) { + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); + return; + } + + do { + status = dhd_get_scheduled_work(deferred_work, &work_event); + DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status)); + if (!status) { + DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status)); + break; + } + + if (work_event.event > DHD_MAX_WQ_EVENTS) { + DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event)); + break; + } + + if (work_event.event_handler) { + work_event.event_handler(deferred_work->dhd_info, + work_event.event_data, work_event.event); + } else { + DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event)); + } + } while (1); + return; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h new file mode 100644 index 000000000000..e6197b26f211 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h @@ -0,0 +1,69 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_wq.h 597512 2015-11-05 11:37:36Z $ + */ +#ifndef _dhd_linux_wq_h_ +#define _dhd_linux_wq_h_ +/* + * Work event definitions + */ +enum _wq_event { + DHD_WQ_WORK_IF_ADD = 1, + DHD_WQ_WORK_IF_DEL, + DHD_WQ_WORK_SET_MAC, + DHD_WQ_WORK_SET_MCAST_LIST, + DHD_WQ_WORK_IPV6_NDO, + DHD_WQ_WORK_HANG_MSG, + DHD_WQ_WORK_SOC_RAM_DUMP, + DHD_WQ_WORK_DHD_LOG_DUMP, + + DHD_MAX_WQ_EVENTS +}; + +/* + * Work event priority + */ +#define DHD_WORK_PRIORITY_LOW 0 +#define DHD_WORK_PRIORITY_HIGH 1 + +/* + * Error definitions + */ +#define DHD_WQ_STS_OK 0 +#define DHD_WQ_STS_FAILED -1 /* General failure */ +#define DHD_WQ_STS_UNINITIALIZED -2 +#define DHD_WQ_STS_SCHED_FAILED -3 +#define DHD_WQ_STS_UNKNOWN_EVENT -4 + +typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); + +void *dhd_deferred_work_init(void *dhd); +void dhd_deferred_work_deinit(void *workq); +int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t evt_handler, u8 priority); +#endif /* _dhd_linux_wq_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c new file mode 100644 index 000000000000..3bd9a45d7906 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c @@ -0,0 +1,6413 @@ +/** + * @file definition of host message ring functionality + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $ + */ + + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + + +#include + +#include +#include +#include + +#if defined(DHD_LB) +#include +#include +#define DHD_LB_WORKQ_SZ (8192) +#define DHD_LB_WORKQ_SYNC (16) +#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2) +#endif /* DHD_LB */ + + +/** + * Host configures a soft doorbell for d2h rings, by specifying a 32bit host + * address where a value must be written. Host may also interrupt coalescing + * on this soft doorbell. + * Use Case: Hosts with network processors, may register with the dongle the + * network processor's thread wakeup register and a value corresponding to the + * core/thread context. Dongle will issue a write transaction + * to the PCIE RC which will need to be routed to the mapped register space, by + * the host. 
+ */ +/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */ + +/* Dependency Check */ +#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF" +#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */ + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ + +#define DEFAULT_RX_BUFFERS_TO_POST 256 +#define RXBUFPOST_THRESHOLD 32 +#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */ + +#define DHD_STOP_QUEUE_THRESHOLD 200 +#define DHD_START_QUEUE_THRESHOLD 100 + +#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */ +#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) +#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE) + +/* flags for ioctl pending status */ +#define MSGBUF_IOCTL_ACK_PENDING (1<<0) +#define MSGBUF_IOCTL_RESP_PENDING (1<<1) + +#define DMA_ALIGN_LEN 4 + +#define DMA_D2H_SCRATCH_BUF_LEN 8 +#define DMA_XFER_LEN_LIMIT 0x400000 + +#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 + +#define DHD_FLOWRING_MAX_EVENTBUF_POST 8 +#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 + +#define DHD_PROT_FUNCS 37 + +/* Length of buffer in host for bus throughput measurement */ +#define DHD_BUS_TPUT_BUF_LEN 2048 + +#define TXP_FLUSH_NITEMS + +/* optimization to write "n" tx items at a time to ring */ +#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 + +#define RING_NAME_MAX_LENGTH 24 + + +struct msgbuf_ring; /* ring context for common and flow rings */ + +/** + * PCIE D2H DMA Complete Sync Modes + * + * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into + * Host system memory. A WAR using one of 3 approaches is needed: + * 1. Dongle places a modulo-253 seqnum in last word of each D2H message + * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum + * writes in the last word of each work item. Each work item has a seqnum + * number = sequence num % 253. + * + * 3. Read Barrier: Dongle does a host memory read access prior to posting an + * interrupt, ensuring that D2H data transfer indeed completed. + * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing + * ring contents before the indices. + * + * Host does not sync for DMA to complete with option #3 or #4, and a noop sync + * callback (see dhd_prot_d2h_sync_none) may be bound. + * + * Dongle advertizes host side sync mechanism requirements. + */ +#define PCIE_D2H_SYNC + +#if defined(PCIE_D2H_SYNC) +#define PCIE_D2H_SYNC_WAIT_TRIES (512UL) +#define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL) +#define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */ + +/** + * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. + * + * On success: return cmn_msg_hdr_t::msg_type + * On failure: return 0 (invalid msg_type) + */ +typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +#endif /* PCIE_D2H_SYNC */ + + +/* + * +---------------------------------------------------------------------------- + * + * RingIds and FlowId are not equivalent as ringids include D2H rings whereas + * flowids do not. + * + * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes + * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings + * + * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where, + * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, + * BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings. 
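 *
 * (Worked example with these values: the conversion macros defined after this
 * comment give DHD_FLOWID_TO_RINGID(2) = 5 + (2 - 2) = 5, and
 * DHD_H2D_RING_OFFSET(5) = DHD_RINGID_TO_FLOWID(5) = 2 + (5 - 5) = 2, matching
 * the first TxPost flowring in the table below.)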
+ * + * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated + * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated + * + * D2H Control Complete RingId = 2 + * D2H Transmit Complete RingId = 3 + * D2H Receive Complete RingId = 4 + * + * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring) + * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring) + * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring) + * + * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are + * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS. + * + * Example: when a system supports 4 bc/mc and 128 uc flowrings, with + * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_H2D_COMMON_MSGRINGS = 5, and the + * FlowId values would be in the range [2..133] and the corresponding + * RingId values would be in the range [5..136]. + * + * The flowId allocator, may chose to, allocate Flowids: + * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS)) + * X# of uc flowids in consecutive ranges (per station Id), where X is the + * packet's access category (e.g. 4 uc flowids per station). + * + * CAUTION: + * When DMA indices array feature is used, RingId=5, corresponding to the 0th + * FLOWRING, will actually use the FlowId as index into the H2D DMA index, + * since the FlowId truly represents the index in the H2D DMA indices array. + * + * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS, + * will represent the index in the D2H DMA indices array. + * + * +---------------------------------------------------------------------------- + */ + +/* First TxPost Flowring Id */ +#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS + +/* Determine whether a ringid belongs to a TxPost flowring */ +#define DHD_IS_FLOWRING(ringid) \ + ((ringid) >= BCMPCIE_COMMON_MSGRINGS) + +/* Convert a H2D TxPost FlowId to a MsgBuf RingId */ +#define DHD_FLOWID_TO_RINGID(flowid) \ + (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)) + +/* Convert a MsgBuf RingId to a H2D TxPost FlowId */ +#define DHD_RINGID_TO_FLOWID(ringid) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS)) + +/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array + * This may be used for the H2D DMA WR index array or H2D DMA RD index array or + * any array of H2D rings. + */ +#define DHD_H2D_RING_OFFSET(ringid) \ + ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid)) + +/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array + * This may be used for the D2H DMA WR index array or D2H DMA RD index array or + * any array of D2H rings. + */ +#define DHD_D2H_RING_OFFSET(ringid) \ + ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Convert a D2H DMA Indices Offset to a RingId */ +#define DHD_D2H_RINGID(offset) \ + ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS) + + +#define DHD_DMAH_NULL ((void*)NULL) + +/* + * Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able + * buffer does not occupy the entire cacheline, and another object is placed + * following the DMA-able buffer, data corruption may occur if the DMA-able + * buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency + * is not available. 
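 * For example, with 64-byte cachelines a 100-byte D2H buffer would share its
 * last cacheline with whatever object is allocated right after it; cache
 * maintenance on that shared line around the DMA can then corrupt either the
 * DMA'd data or the neighbouring object. Padding the allocation by DHD_DMA_PAD
 * (defined below) keeps the buffer's tail cacheline to itself.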
+ */ +#if defined(L1_CACHE_BYTES) +#define DHD_DMA_PAD (L1_CACHE_BYTES) +#else +#define DHD_DMA_PAD (128) +#endif + +/* Used in loopback tests */ +typedef struct dhd_dmaxfer { + dhd_dma_buf_t srcmem; + dhd_dma_buf_t dstmem; + uint32 srcdelay; + uint32 destdelay; + uint32 len; + bool in_progress; +} dhd_dmaxfer_t; + +/** + * msgbuf_ring : This object manages the host side ring that includes a DMA-able + * buffer, the WR and RD indices, ring parameters such as max number of items + * an length of each items, and other miscellaneous runtime state. + * A msgbuf_ring may be used to represent a H2D or D2H common ring or a + * H2D TxPost ring as specified in the PCIE FullDongle Spec. + * Ring parameters are conveyed to the dongle, which maintains its own peer end + * ring state. Depending on whether the DMA Indices feature is supported, the + * host will update the WR/RD index in the DMA indices array in host memory or + * directly in dongle memory. + */ +typedef struct msgbuf_ring { + bool inited; + uint16 idx; /* ring id */ + uint16 rd; /* read index */ + uint16 curr_rd; /* read index for debug */ + uint16 wr; /* write index */ + uint16 max_items; /* maximum number of items in ring */ + uint16 item_len; /* length of each item in the ring */ + sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */ + dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */ + uint32 seqnum; /* next expected item's sequence number */ +#ifdef TXP_FLUSH_NITEMS + void *start_addr; + /* # of messages on ring not yet announced to dongle */ + uint16 pend_items_count; +#endif /* TXP_FLUSH_NITEMS */ + uchar name[RING_NAME_MAX_LENGTH]; +} msgbuf_ring_t; + +#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va) +#define DHD_RING_END_VA(ring) \ + ((uint8 *)(DHD_RING_BGN_VA((ring))) + \ + (((ring)->max_items - 1) * (ring)->item_len)) + + + +/** DHD protocol handle. Is an opaque type to other DHD software layers. 
*/ +typedef struct dhd_prot { + osl_t *osh; /* OSL handle */ + uint16 rxbufpost; + uint16 max_rxbufpost; + uint16 max_eventbufpost; + uint16 max_ioctlrespbufpost; + uint16 cur_event_bufs_posted; + uint16 cur_ioctlresp_bufs_posted; + + /* Flow control mechanism based on active transmits pending */ + uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */ + uint16 max_tx_count; + uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */ + + /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */ + msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */ + msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */ + msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */ + msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */ + msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */ + + msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */ + dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */ + uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */ + + uint32 rx_dataoffset; + + dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */ + + /* ioctl related resources */ + uint8 ioctl_state; + int16 ioctl_status; /* status returned from dongle */ + uint16 ioctl_resplen; + dhd_ioctl_recieved_status_t ioctl_received; + uint curr_ioctl_cmd; + dhd_dma_buf_t retbuf; /* For holding ioctl response */ + dhd_dma_buf_t ioctbuf; /* For holding ioctl request */ + + dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */ + + /* DMA-able arrays for holding WR and RD indices */ + uint32 rw_index_sz; /* Size of a RD or WR index in dongle */ + dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */ + dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */ + dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */ + dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */ + + dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */ + + dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */ + uint32 flowring_num; + +#if defined(PCIE_D2H_SYNC) + d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ + ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ + ulong d2h_sync_wait_tot; /* total wait loops */ +#endif /* PCIE_D2H_SYNC */ + + dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */ + + uint16 ioctl_seq_no; + uint16 data_seq_no; + uint16 ioctl_trans_id; + void *pktid_map_handle; /* a pktid maps to a packet and its metadata */ + bool metadata_dbg; + void *pktid_map_handle_ioctl; + + /* Applications/utilities can read tx and rx metadata using IOVARs */ + uint16 rx_metadata_offset; + uint16 tx_metadata_offset; + + +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + /* Host's soft doorbell configuration */ + bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS]; +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +#if defined(DHD_LB) + /* Work Queues to be used by the producer and the consumer, and threshold + * when the WRITE index must be synced to consumer's workq + */ +#if defined(DHD_LB_TXC) + uint32 tx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t tx_compl_prod, tx_compl_cons; +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + uint32 rx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t rx_compl_prod, rx_compl_cons; +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ +} dhd_prot_t; + +/* Convert a dmaaddr_t to a 
base_addr with htol operations */ +static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa); + +/* APIs for managing a DMA-able buffer */ +static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len); +static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); + +/* msgbuf ring management */ +static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, + const char *name, uint16 max_items, uint16 len_item, uint16 ringid); +static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */ +static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd); + +/* Fetch and Release a flowring msgbuf_ring from flowring pool */ +static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, + uint16 flowid); +/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */ + +/* Producer: Allocate space in a msgbuf ring */ +static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 *alloced, bool exactly_nitems); +static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, + uint16 *alloced, bool exactly_nitems); + +/* Consumer: Determine the location where the next message may be consumed */ +static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 *available_len); + +/* Producer (WR index update) or Consumer (RD index update) indication */ +static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring, + void *p, uint16 len); +static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */ +static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz); + +/* Set/Get a RD or WR index in the array of indices */ +/* See also: dhd_prot_dma_indx_init() */ +static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, + uint16 ringid); +static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid); + +/* Locate a packet given a pktid */ +static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, + bool free_pktid); +/* Locate a packet given a PktId and free it. 
*/ +static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send); + +static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf); +static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, + void *buf, int ifidx); + +/* Post buffers for Rx, control ioctl response and events */ +static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post); +static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid); +static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid); + +static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt); + +/* D2H Message handling */ +static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len); + +/* D2H Message handlers */ +static void dhd_prot_noop(dhd_pub_t *dhd, void *msg); +static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg); + +/* Loopback test with dongle */ +static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); +static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, + uint destdelay, dhd_dmaxfer_t *dma); +static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg); + +/* Flowring management communication with dongle */ +static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg); + +/* Configure a soft doorbell per D2H ring */ +static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd); +static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg); + +typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); + +/** callback functions for messages generated by the dongle */ +#define MSG_TYPE_INVALID 0 + +static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { + dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */ + dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ + dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ + NULL, + dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ + NULL, + dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ + NULL, + dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ + NULL, + dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ + NULL, + dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ + NULL, + dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ + NULL, + dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ + NULL, + dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */ + NULL, + dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ + NULL, /* 
MSG_TYPE_FLOW_RING_RESUME */ + NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ + NULL, /* MSG_TYPE_INFO_BUF_POST */ + NULL, /* MSG_TYPE_INFO_BUF_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CREATE */ + NULL, /* MSG_TYPE_D2H_RING_CREATE */ + NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG */ + NULL, /* MSG_TYPE_D2H_RING_CONFIG */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */ + dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */ + NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */ + NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */ +}; + + +#ifdef DHD_RX_CHAINING + +#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ + (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ + !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ + ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ + ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ + (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \ + dhd_l2_filter_chainable((dhd), (evh), (ifidx))) + +static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); +static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); +static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); + +#define DHD_PKT_CTF_MAX_CHAIN_LEN 64 + +#endif /* DHD_RX_CHAINING */ + +static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); + +#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */ + +/** + * D2H DMA to completion callback handlers. Based on the mode advertised by the + * dongle through the PCIE shared region, the appropriate callback will be + * registered in the proto layer to be invoked prior to precessing any message + * from a D2H DMA ring. If the dongle uses a read barrier or another mode that + * does not require host participation, then a noop callback handler will be + * bound that simply returns the msg_type. + */ +static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 tries, uchar *msg, int msglen); +static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd); + +void dhd_prot_collect_memdump(dhd_pub_t *dhd) +{ + DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; + dhd_os_send_hang_message(dhd); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +} + +/** + * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has + * not completed, a livelock condition occurs. Host will avert this livelock by + * dropping this message and moving to the next. 
This dropped message can lead
+ * to a packet leak, or even something disastrous in case the dropped
+ * message happens to be a control response.
+ * Here we will log this condition. One may choose to reboot the dongle.
+ *
+ */
+static void
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
+ uchar *msg, int msglen)
+{
+ uint32 seqnum = ring->seqnum;
+
+ DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>"
+ " dma_buf va<%p> msg<%p> curr_rd<%d>\n",
+ dhd, ring->name, seqnum, seqnum % D2H_EPOCH_MODULO, tries,
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
+ ring->dma_buf.va, msg, ring->curr_rd));
+ prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
+ dhd_dump_to_kernelog(dhd);
+
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+ dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+}
+
+/**
+ * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
+ * mode. Sequence number is always in the last word of a message.
+ */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen)
+{
+ uint32 tries;
+ uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+ int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+ volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
+ dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+
+ ASSERT(msglen == ring->item_len);
+
+ BCM_REFERENCE(delay);
+ /*
+ * For retries we have to make some sort of stepper algorithm.
+ * We see that every time when the Dongle comes out of the D3
+ * Cold state, the first D2H mem2mem DMA takes more time to
+ * complete, leading to livelock issues.
+ *
+ * Case 1 - Apart from the Host CPU some other bus master is
+ * accessing the DDR port, probably a page close to the ring,
+ * so PCIE does not get a chance to update the memory.
+ * Solution - Increase the number of tries.
+ *
+ * Case 2 - The 50usec delay given by the Host CPU is not
+ * sufficient for the PCIe RC to start its work.
+ * In this case the breathing time of 50usec given by
+ * the Host CPU is not sufficient.
+ * Solution: Increase the delay in a stepper fashion.
+ * This is done to ensure that there is no
+ * unwanted extra delay introduced in normal conditions.
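+ *
+ * (Editorial note) Rough worst-case bound before the livelock handler runs,
+ * assuming the OSL_DELAY() path in the loop below is compiled in: each step
+ * performs PCIE_D2H_SYNC_WAIT_TRIES polls with a per-poll delay of
+ * (PCIE_D2H_SYNC_DELAY * step) microseconds, so the total busy-wait is about
+ *
+ *   PCIE_D2H_SYNC_WAIT_TRIES * PCIE_D2H_SYNC_DELAY *
+ *       (1 + 2 + ... + PCIE_D2H_SYNC_NUM_OF_STEPS) microseconds
+ *
+ * plus the cache-invalidate and cpu-relax overhead of every poll.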
+ */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + uint32 msg_seqnum = *marker; + if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) + /* For ARM there is no pause in cpu_relax, so add extra delay */ + OSL_DELAY(delay * step); +#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for number of steps */ + + dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += total_tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM + * mode. The xorcsum is placed in the last word of a message. Dongle will also + * place a seqnum in the epoch field of the cmn_msg_hdr. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 prot_checksum = 0; /* computed checksum */ + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. 
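+ *
+ * (Editorial note on the completion check below) Assuming bcm_compute_xor32()
+ * is a plain 32-bit XOR fold over num_words words, which is its usual
+ * definition, the dongle lays out each work item so that the XOR of all of
+ * its 32-bit words is zero once the DMA has fully landed; conceptually:
+ *
+ *   uint32 x = 0;
+ *   for (i = 0; i < num_words; i++)
+ *           x ^= ((volatile uint32 *)msg)[i];
+ *   done when (x == 0) and (msg->epoch == ring_seqnum)
+ *
+ * A partially written work item leaves a non-zero fold, so the poll retries.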
+ */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words); + if (prot_checksum == 0U) { /* checksum is OK */ + if (msg->epoch == ring_seqnum) { + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) + /* For ARM there is no pause in cpu_relax, so add extra delay */ + OSL_DELAY(delay * step); +#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for number of steps */ + + dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += total_tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_none - Dongle ensure that the DMA will complete and host + * need to try to sync. This noop sync handler will be bound when the dongle + * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what + * dongle advertizes. + */ +static void +dhd_prot_d2h_sync_init(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + prot->d2h_sync_wait_max = 0UL; + prot->d2h_sync_wait_tot = 0UL; + + prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) { + prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; + } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) { + prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; + } else { + prot->d2h_sync_cb = dhd_prot_d2h_sync_none; + } +} + +#endif /* PCIE_D2H_SYNC */ + +int INLINE +dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason) +{ + /* To synchronize with the previous memory operations call wmb() */ + OSL_SMP_WMB(); + dhd->prot->ioctl_received = reason; + /* Call another wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + dhd_os_ioctl_resp_wake(dhd); + return 0; +} + +/** + * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum + */ +static void +dhd_prot_h2d_sync_init(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL; +} + +/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */ + + +/* + * +---------------------------------------------------------------------------+ + * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the + * virtual and physical address, the buffer lenght and the DMA handler. + * A secdma handler is also included in the dhd_dma_buf object. 
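+ *
+ * Typical life cycle, as an editorial sketch built only from the helpers
+ * declared above (the 2048-byte length is just an example value):
+ *
+ *   dhd_dma_buf_t buf;
+ *   memset(&buf, 0, sizeof(buf));
+ *   if (dhd_dma_buf_alloc(dhd, &buf, 2048) != BCME_OK)
+ *           return BCME_NOMEM;
+ *   (program the dongle with buf.pa, access the contents through buf.va)
+ *   dhd_dma_buf_free(dhd, &buf);
+ *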
+ * +---------------------------------------------------------------------------+ + */ + +static INLINE void +dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa) +{ + base_addr->low_addr = htol32(PHYSADDRLO(pa)); + base_addr->high_addr = htol32(PHYSADDRHI(pa)); +} + + +/** + * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer. + */ +static int +dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + uint32 base, end; /* dongle uses 32bit ptr arithmetic */ + + ASSERT(dma_buf); + base = PHYSADDRLO(dma_buf->pa); + ASSERT(base); + ASSERT(ISALIGNED(base, DMA_ALIGN_LEN)); + ASSERT(dma_buf->len != 0); + + /* test 32bit offset arithmetic over dma buffer for loss of carry-over */ + end = (base + dma_buf->len); /* end address */ + + if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */ + DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n", + __FUNCTION__, base, dma_buf->len)); + return BCME_ERROR; + } + + return BCME_OK; +} + +/** + * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer. + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. + */ +static int +dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len) +{ + uint32 dma_pad = 0; + osl_t *osh = dhd->osh; + + ASSERT(dma_buf != NULL); + ASSERT(dma_buf->va == NULL); + ASSERT(dma_buf->len == 0); + + /* Pad the buffer length by one extra cacheline size. + * Required for D2H direction. + */ + dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; + dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad, + DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah); + + if (dma_buf->va == NULL) { + DHD_ERROR(("%s: buf_len %d, no memory available\n", + __FUNCTION__, buf_len)); + return BCME_NOMEM; + } + + dma_buf->len = buf_len; /* not including padded len */ + + if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */ + dhd_dma_buf_free(dhd, dma_buf); + return BCME_ERROR; + } + + dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */ + + return BCME_OK; +} + +/** + * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer. + */ +static void +dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + if ((dma_buf == NULL) || (dma_buf->va == NULL)) { + return; + } + + (void)dhd_dma_buf_audit(dhd, dma_buf); + + /* Zero out the entire buffer and cache flush */ + memset((void*)dma_buf->va, 0, dma_buf->len); + OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len); +} + +/** + * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using + * dhd_dma_buf_alloc(). + */ +static void +dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + osl_t *osh = dhd->osh; + + ASSERT(dma_buf); + + if (dma_buf->va == NULL) { + return; /* Allow for free invocation, when alloc failed */ + } + + /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */ + (void)dhd_dma_buf_audit(dhd, dma_buf); + + /* dma buffer may have been padded at allocation */ + DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced, + dma_buf->pa, dma_buf->dmah); + + memset(dma_buf, 0, sizeof(dhd_dma_buf_t)); +} + +/** + * dhd_dma_buf_init - Initialize a dhd_dma_buf with speicifed values. + * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0. 
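+ *
+ * (Editorial sketch) dhd_dma_buf_init() is for wrapping memory the caller
+ * already owns, for instance a slice of a larger contiguous allocation:
+ *
+ *   dhd_dma_buf_init(dhd, &sub_buf, va, len, pa, dmah, secdma);
+ *
+ * where sub_buf is the dhd_dma_buf_t being populated and va/pa/len/dmah/
+ * secdma describe the existing memory; the names here are illustrative only.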
+ */ +void +dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma) +{ + dhd_dma_buf_t *dma_buf; + ASSERT(dhd_dma_buf); + dma_buf = (dhd_dma_buf_t *)dhd_dma_buf; + dma_buf->va = va; + dma_buf->len = len; + dma_buf->pa = pa; + dma_buf->dmah = dmah; + dma_buf->secdma = secdma; + + /* Audit user defined configuration */ + (void)dhd_dma_buf_audit(dhd, dma_buf); +} + +/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */ + +/* + * +---------------------------------------------------------------------------+ + * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping. + * Main purpose is to save memory on the dongle, has other purposes as well. + * The packet id map, also includes storage for some packet parameters that + * may be saved. A native packet pointer along with the parameters may be saved + * and a unique 32bit pkt id will be returned. Later, the saved packet pointer + * and the metadata may be retrieved using the previously allocated packet id. + * +---------------------------------------------------------------------------+ + */ +#define DHD_PCIE_PKTID +#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */ + +/* On Router, the pktptr serves as a pktid. */ + + +#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID) +#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC" +#endif + +/* Enum for marking the buffer color based on usage */ +typedef enum dhd_pkttype { + PKTTYPE_DATA_TX = 0, + PKTTYPE_DATA_RX, + PKTTYPE_IOCTL_RX, + PKTTYPE_EVENT_RX, + /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */ + PKTTYPE_NO_CHECK +} dhd_pkttype_t; + +#define DHD_PKTID_INVALID (0U) +#define DHD_IOCTL_REQ_PKTID (0xFFFE) +#define DHD_FAKE_PKTID (0xFACE) + +#define DHD_PKTID_FREE_LOCKER (FALSE) +#define DHD_PKTID_RSV_LOCKER (TRUE) + +typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ + +/* Construct a packet id mapping table, returning an opaque map handle */ +static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index); + +/* Destroy a packet id mapping table, freeing all packets active in the table */ +static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map); + +#define PKTID_MAP_HANDLE (0) +#define PKTID_MAP_HANDLE_IOCTL (1) + +#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index)) +#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map)) + +#if defined(DHD_PCIE_PKTID) + + +/* Determine number of pktids that are available */ +static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle); + +/* Allocate a unique pktid against which a pkt and some metadata is saved */ +static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt); +static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); +static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + void *pkt, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); + +/* Return an allocated pktid, retrieving previously saved pkt and metadata */ +static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah, + void **secdma, dhd_pkttype_t pkttype, bool 
rsv_locker); + +/* + * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees + * + * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator + * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation + * + * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined, + * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected. + */ +#if defined(DHD_PKTID_AUDIT_ENABLED) +#define USE_DHD_PKTID_AUDIT_LOCK 1 +/* Audit the pktidmap allocator */ +/* #define DHD_PKTID_AUDIT_MAP */ + +/* Audit the pktid during production/consumption of workitems */ +#define DHD_PKTID_AUDIT_RING + +#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING) +#error "May only enabled audit of MAP or RING, at a time." +#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */ + +#define DHD_DUPLICATE_ALLOC 1 +#define DHD_DUPLICATE_FREE 2 +#define DHD_TEST_IS_ALLOC 3 +#define DHD_TEST_IS_FREE 4 + +#ifdef USE_DHD_PKTID_AUDIT_LOCK +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0) +#define DHD_PKTID_AUDIT_LOCK(lock) 0 +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0) +#endif /* !USE_DHD_PKTID_AUDIT_LOCK */ + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* #define USE_DHD_PKTID_LOCK 1 */ + +#ifdef USE_DHD_PKTID_LOCK +#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) \ + do { \ + BCM_REFERENCE(osh); \ + BCM_REFERENCE(lock); \ + } while (0) +#define DHD_PKTID_LOCK(lock) 0 +#define DHD_PKTID_UNLOCK(lock, flags) \ + do { \ + BCM_REFERENCE(lock); \ + BCM_REFERENCE(flags); \ + } while (0) +#endif /* !USE_DHD_PKTID_LOCK */ + +/* Packet metadata saved in packet id mapper */ + +/* The Locker can be 3 states + * LOCKER_IS_FREE - Locker is free and can be allocated + * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the + * locker (buffer address, len, phy addr etc) are populated + * with valid values + * LOCKER_IS_RSVD - The locker is reserved for future use, but the values + * in the locker are not valid. Especially pkt should be + * NULL in this state. When the user wants to re-use the + * locker dhd_pktid_map_free can be called with a flag + * to reserve the pktid for future use, which will clear + * the contents of the locker. 
When the user calls + * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY + */ +typedef enum dhd_locker_state { + LOCKER_IS_FREE, + LOCKER_IS_BUSY, + LOCKER_IS_RSVD +} dhd_locker_state_t; + +typedef struct dhd_pktid_item { + dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */ + uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */ + dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ + uint16 len; /* length of mapped packet's buffer */ + void *pkt; /* opaque native pointer to a packet */ + dmaaddr_t pa; /* physical address of mapped packet's buffer */ + void *dmah; /* handle to OS specific DMA map */ + void *secdma; +} dhd_pktid_item_t; + +typedef struct dhd_pktid_map { + uint32 items; /* total items in map */ + uint32 avail; /* total available items */ + int failures; /* lockers unavailable count */ + /* Spinlock to protect dhd_pktid_map in process/tasklet context */ + void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + void *pktid_audit_lock; + struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */ + dhd_pktid_item_t lockers[0]; /* metadata storage */ +} dhd_pktid_map_t; + +/* + * PktId (Locker) #0 is never allocated and is considered invalid. + * + * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a + * depleted pktid pool and must not be used by the caller. + * + * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. + */ + +#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) +#define DHD_PKIDMAP_ITEMS(items) (items) +#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ + (DHD_PKTID_ITEM_SZ * ((items) + 1))) + +#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map)) + +/* Convert a packet to a pktid, and save pkt pointer in busy locker */ +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt)) + +/* Reuse a previously reserved locker to save packet params */ +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert a packet to a pktid, and save packet params in locker */ +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert pktid to a packet, and free the locker */ +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) + +/* Convert the pktid to a packet, empty locker, but keep it reserved */ +#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) + +#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) + +#if defined(DHD_PKTID_AUDIT_ENABLED) + +static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + 
const int test_for, const char *errmsg); + +/** +* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. +*/ +static int +dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ +#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " + + const uint32 max_pktid_items = (MAX_PKTID_ITEMS); + struct bcm_mwbmap *handle; + uint32 flags; + bool ignore_audit; + + if (pktid_map == (dhd_pktid_map_t *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg)); + return BCME_OK; + } + + flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); + + handle = pktid_map->pktid_audit; + if (handle == (struct bcm_mwbmap *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg)); + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + } + + /* Exclude special pktids from audit */ + ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID); + if (ignore_audit) { + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + } + + if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); + /* lock is released in "error" */ + goto error; + } + + /* Perform audit */ + switch (test_for) { + case DHD_DUPLICATE_ALLOC: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n", + errmsg, pktid)); + goto error; + } + bcm_mwbmap_force(handle, pktid); + break; + + case DHD_DUPLICATE_FREE: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n", + errmsg, pktid)); + goto error; + } + bcm_mwbmap_free(handle, pktid); + break; + + case DHD_TEST_IS_ALLOC: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n", + errmsg, pktid)); + goto error; + } + break; + + case DHD_TEST_IS_FREE: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free", + errmsg, pktid)); + goto error; + } + break; + + default: + goto error; + } + + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + +error: + + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + /* May insert any trap mechanism here ! */ + dhd_pktid_audit_fail_cb(dhd); + + return BCME_ERROR; +} + +#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ + dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */ + + +/** + * +---------------------------------------------------------------------------+ + * Packet to Packet Id mapper using a paradigm. + * + * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS]. + * + * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique + * packet id is returned. This unique packet id may be used to retrieve the + * previously saved packet metadata, using dhd_pktid_map_free(). On invocation + * of dhd_pktid_map_free(), the unique packet id is essentially freed. A + * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. + * + * Implementation Note: + * Convert this into a abstraction and place into bcmutils ! + * Locker abstraction should treat contents as opaque storage, and a + * callback should be registered to handle busy lockers on destructor. 
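+ *
+ * Round-trip usage, as an editorial sketch; the macros are the ones defined
+ * above, while the direction flag and the error handling are illustrative:
+ *
+ *   uint32 pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len,
+ *           dir, dmah, secdma, PKTTYPE_DATA_TX);
+ *   if (pktid == DHD_PKTID_INVALID)
+ *           (pool depleted: undo the DMA map and do not post the work item)
+ *   (later, when the dongle returns a completion carrying pktid)
+ *   pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
+ *           PKTTYPE_DATA_TX);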
+ * + * +---------------------------------------------------------------------------+ + */ + +/** Allocate and initialize a mapper of num_items */ + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_items; +#ifdef DHD_USE_STATIC_PKTIDMAP + uint32 section; +#endif /* DHD_USE_STATIC_PKTIDMAP */ + osh = dhd->osh; + + ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS)); + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); + +#ifdef DHD_USE_STATIC_PKTIDMAP + if (index == PKTID_MAP_HANDLE) { + section = DHD_PREALLOC_PKTID_MAP; + } else { + section = DHD_PREALLOC_PKTID_MAP_IOCTL; + } + + map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz); +#else + map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ + + if (map == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", + __FUNCTION__, __LINE__, dhd_pktid_map_sz)); + goto error; + } + + bzero(map, dhd_pktid_map_sz); + + /* Initialize the lock that protects this structure */ + map->pktid_lock = DHD_PKTID_LOCK_INIT(osh); + if (map->pktid_lock == NULL) { + DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__)); + goto error; + } + + map->items = num_items; + map->avail = num_items; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ + map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); + if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { + DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); + goto error; + } else { + DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", + __FUNCTION__, __LINE__, map_items + 1)); + } + + map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */ + map->keys[nkey] = nkey; /* populate with unique keys */ + map->lockers[nkey].state = LOCKER_IS_FREE; + map->lockers[nkey].pkt = NULL; /* bzero: redundant */ + map->lockers[nkey].len = 0; + } + + /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */ + map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; + map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ + map->lockers[DHD_PKTID_INVALID].len = 0; + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */ + bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + return (dhd_pktid_map_handle_t *)map; /* opaque handle */ + +error: + + if (map) { + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + if (map->pktid_lock) + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + + MFREE(osh, map, dhd_pktid_map_sz); + } + + return (dhd_pktid_map_handle_t *)NULL; +} + +/** + * Retrieve all allocated keys and free all . + * Freeing implies: unmapping the buffers and freeing the native packet + * This could have been a callback registered with the pktid mapper. 
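+ *
+ * For illustration only, such a destructor callback (hypothetical, not
+ * implemented by this change) could be shaped as:
+ *
+ *   typedef void (*dhd_pktid_free_cb_t)(dhd_pub_t *dhd, void *pkt,
+ *           dmaaddr_t pa, uint32 len, dhd_pkttype_t pkttype);
+ *
+ * registered at dhd_pktid_map_init() time and invoked below for every locker
+ * still found in the LOCKER_IS_BUSY state.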
+ */ + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + dhd_pktid_item_t *locker; + uint32 map_items; + uint32 flags; + + if (handle == NULL) { + return; + } + + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + osh = dhd->osh; + + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + + nkey = 1; /* skip reserved KEY #0, and start from 1 */ + locker = &map->lockers[nkey]; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + + for (; nkey <= map_items; nkey++, locker++) { + + if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ + + locker->state = LOCKER_IS_FREE; /* force open the locker */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + { /* This could be a callback registered with dhd_pktid_map */ + DMA_UNMAP(osh, locker->pa, locker->len, + locker->dir, 0, DHD_DMAH_NULL); + dhd_prot_packet_free(dhd, (ulong*)locker->pkt, + locker->pkttype, TRUE); + } + } +#if defined(DHD_PKTID_AUDIT_ENABLED) + else { + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + locker->pkt = NULL; /* clear saved pkt */ + locker->len = 0; + } + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + +#ifdef DHD_USE_STATIC_PKTIDMAP + DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); +#else + MFREE(osh, handle, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ +} + +#ifdef IOCTLRESP_USE_CONSTMEM +/** Called in detach scenario. Releasing IOCTL buffers. 
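+ * Unlike dhd_pktid_map_fini(), the lockers here hold ioctl response DMA
+ * buffers (IOCTLRESP_USE_CONSTMEM) rather than native packets, so each busy
+ * locker is handed back through free_ioctl_return_buffer() instead of
+ * DMA_UNMAP() plus dhd_prot_packet_free().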
*/ +static void +dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + dhd_pktid_item_t *locker; + uint32 map_items; + uint32 flags; + osl_t *osh = dhd->osh; + + if (handle == NULL) { + return; + } + + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + + nkey = 1; /* skip reserved KEY #0, and start from 1 */ + locker = &map->lockers[nkey]; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + + for (; nkey <= map_items; nkey++, locker++) { + + if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ + + locker->state = LOCKER_IS_FREE; /* force open the locker */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + { + dhd_dma_buf_t retbuf; + retbuf.va = locker->pkt; + retbuf.len = locker->len; + retbuf.pa = locker->pa; + retbuf.dmah = locker->dmah; + retbuf.secdma = locker->secdma; + + /* This could be a callback registered with dhd_pktid_map */ + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + free_ioctl_return_buffer(dhd, &retbuf); + flags = DHD_PKTID_LOCK(map->pktid_lock); + } + } +#if defined(DHD_PKTID_AUDIT_ENABLED) + else { + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + locker->pkt = NULL; /* clear saved pkt */ + locker->len = 0; + } + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + +#ifdef DHD_USE_STATIC_PKTIDMAP + DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); +#else + MFREE(osh, handle, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +/** Get the pktid free count */ +static INLINE uint32 BCMFASTPATH +dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 flags; + uint32 avail; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + flags = DHD_PKTID_LOCK(map->pktid_lock); + avail = map->avail; + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return avail; +} + +/** + * Allocate locker, save pkt contents, and return the locker's numbered key. + * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility. + * Caller must treat a returned value DHD_PKTID_INVALID as a failure case, + * implying a depleted pool of pktids. + */ + +static INLINE uint32 +__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt) +{ + uint32 nkey; + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + if (map->avail <= 0) { /* no more pktids to allocate */ + map->failures++; + DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); + return DHD_PKTID_INVALID; /* failed alloc request */ + } + + ASSERT(map->avail <= map->items); + nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ + locker = &map->lockers[nkey]; /* save packet metadata in locker */ + map->avail--; + locker->pkt = pkt; /* pkt is saved, other params not yet saved. 
*/ + locker->len = 0; + locker->state = LOCKER_IS_BUSY; /* reserve this locker */ + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + ASSERT(nkey != DHD_PKTID_INVALID); + return nkey; /* return locker's numbered key */ +} + + +/** + * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not + * yet populated. Invoke the pktid save api to populate the packet parameters + * into the locker. + * Wrapper that takes the required lock when called directly. + */ +static INLINE uint32 +dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt) +{ + dhd_pktid_map_t *map; + uint32 flags; + uint32 ret; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + ret = __dhd_pktid_map_reserve(dhd, handle, pkt); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return ret; +} + +static INLINE void +__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items))); + + locker = &map->lockers[nkey]; + + ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) || + ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL))); + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + /* store contents in locker */ + locker->dir = dir; + locker->pa = pa; + locker->len = (uint16)len; /* 16bit len */ + locker->dmah = dmah; /* 16bit len */ + locker->secdma = secdma; + locker->pkttype = pkttype; + locker->pkt = pkt; + locker->state = LOCKER_IS_BUSY; /* make this locker busy */ +} + +/** + * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding + * to a previously reserved unique numbered key. + * Wrapper that takes the required lock when called directly. + */ +static INLINE void +dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + dhd_pktid_map_t *map; + uint32 flags; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len, + dir, dmah, secdma, pkttype); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +} + +/** + * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet + * contents into the corresponding locker. Return the numbered key. 
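+ *
+ * (Editorial note) This is __dhd_pktid_map_reserve() followed by
+ * __dhd_pktid_map_save() under a single lock hold. The split reserve/save
+ * pair is for callers that need the pktid before the buffer is DMA-mapped,
+ * roughly (sketch, error handling omitted):
+ *
+ *   nkey = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt);
+ *   (DMA-map the packet buffer to obtain pa and dmah)
+ *   DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len,
+ *           dir, dmah, secdma, PKTTYPE_DATA_TX);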
+ */ +static uint32 BCMFASTPATH +dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + uint32 nkey; + uint32 flags; + dhd_pktid_map_t *map; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + flags = DHD_PKTID_LOCK(map->pktid_lock); + + nkey = __dhd_pktid_map_reserve(dhd, handle, pkt); + if (nkey != DHD_PKTID_INVALID) { + __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, + len, dir, dmah, secdma, pkttype); +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */ +#endif /* DHD_PKTID_AUDIT_MAP */ + } + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return nkey; +} + +/** + * dhd_pktid_map_free - Given a numbered key, return the locker contents. + * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility. + * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid + * value. Only a previously allocated pktid may be freed. + */ +static void * BCMFASTPATH +dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, + dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, + dhd_pkttype_t pkttype, bool rsv_locker) +{ + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + void * pkt; + uint32 flags; + + ASSERT(handle != NULL); + + map = (dhd_pktid_map_t *)handle; + + flags = DHD_PKTID_LOCK(map->pktid_lock); + + ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items))); + + locker = &map->lockers[nkey]; + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */ + DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n", + __FUNCTION__, __LINE__, nkey)); + ASSERT(locker->state != LOCKER_IS_FREE); + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + return NULL; + } + + /* Check for the colour of the buffer i.e The buffer posted for TX, + * should be freed for TX completion. Similarly the buffer posted for + * IOCTL should be freed for IOCT completion etc. + */ + if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) { + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n", + __FUNCTION__, __LINE__, nkey)); + ASSERT(locker->pkttype == pkttype); + + return NULL; + } + + if (rsv_locker == DHD_PKTID_FREE_LOCKER) { + map->avail++; + map->keys[map->avail] = nkey; /* make this numbered key available */ + locker->state = LOCKER_IS_FREE; /* open and free Locker */ + } else { + /* pktid will be reused, but the locker does not have a valid pkt */ + locker->state = LOCKER_IS_RSVD; + } + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_MAP */ + + *pa = locker->pa; /* return contents of locker */ + *len = (uint32)locker->len; + *dmah = locker->dmah; + *secdma = locker->secdma; + + pkt = locker->pkt; + locker->pkt = NULL; /* Clear pkt */ + locker->len = 0; + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + return pkt; +} + +#else /* ! 
DHD_PCIE_PKTID */ + + +typedef struct pktlist { + PKT_LIST *tx_pkt_list; /* list for tx packets */ + PKT_LIST *rx_pkt_list; /* list for rx packets */ + PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */ +} pktlists_t; + +/* + * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail + * of a one to one mapping 32bit pktptr and a 32bit pktid. + * + * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail. + * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by + * a lock. + * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined. + */ +#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32)) +#define DHD_PKTPTR32(pktid32) ((void *)(pktid32)) + + +static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype); +static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype); + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = NULL; + + if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(pktlists_t))); + goto error_done; + } + + if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + PKTLIST_INIT(handle->tx_pkt_list); + PKTLIST_INIT(handle->rx_pkt_list); + PKTLIST_INIT(handle->ctrl_pkt_list); + + return (dhd_pktid_map_handle_t *) handle; + +error: + if (handle->ctrl_pkt_list) { + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } + +error_done: + return (dhd_pktid_map_handle_t *)NULL; +} + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = (pktlists_t *) map; + + ASSERT(handle != NULL); + if (handle == (pktlists_t *)NULL) { + return; + } + + if (handle->ctrl_pkt_list) { + PKTLIST_FINI(handle->ctrl_pkt_list); + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + PKTLIST_FINI(handle->rx_pkt_list); + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + PKTLIST_FINI(handle->tx_pkt_list); + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } +} + +/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */ +static INLINE uint32 +dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, 
uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + ASSERT(pktptr32 != NULL); + DHD_PKT_SET_DMA_LEN(pktptr32, dma_len); + DHD_PKT_SET_DMAH(pktptr32, dmah); + DHD_PKT_SET_PA(pktptr32, pa); + DHD_PKT_SET_SECDMA(pktptr32, secdma); + + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_ENQ(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_ENQ(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32); + } + + return DHD_PKTID32(pktptr32); +} + +/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */ +static INLINE void * +dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + void *pktptr32; + + ASSERT(pktid32 != 0U); + pktptr32 = DHD_PKTPTR32(pktid32); + *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32); + *dmah = DHD_PKT_GET_DMAH(pktptr32); + *pa = DHD_PKT_GET_PA(pktptr32); + *secdma = DHD_PKT_GET_SECDMA(pktptr32); + + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32); + } + + return pktptr32; +} + +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt) + +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \ + dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&secdma, (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_AVAIL(map) (~0) + +#endif /* ! DHD_PCIE_PKTID */ + +/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */ + + +/** + * The PCIE FD protocol layer is constructed in two phases: + * Phase 1. dhd_prot_attach() + * Phase 2. dhd_prot_init() + * + * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields. + * All Common rings are allose attached (msgbuf_ring_t objects are allocated + * with DMA-able buffers). + * All dhd_dma_buf_t objects are also allocated here. + * + * As dhd_prot_attach is invoked prior to the pcie_shared object is read, any + * initialization of objects that requires information advertized by the dongle + * may not be performed here. + * E.g. the number of TxPost flowrings is not know at this point, neither do + * we know shich form of D2H DMA sync mechanism is advertized by the dongle, or + * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H + * rings (common + flow). + * + * dhd_prot_init() is invoked after the bus layer has fetched the information + * advertized by the dongle in the pcie_shared_t. 
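+ *
+ * Bring-up order, roughly (editorial sketch; the middle steps are
+ * paraphrased, not literal function names):
+ *
+ *   dhd_prot_attach(dhd);   (common rings, retbuf/ioctbuf, scratch buffers)
+ *   (download firmware, read pcie_shared_t: max flowrings, D2H sync mode,
+ *    DMA-index support advertized by the dongle)
+ *   dhd_prot_init(dhd);     (pktid maps, ring init, sync mode, flowring pool)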
+ */ +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + osl_t *osh = dhd->osh; + dhd_prot_t *prot; + + /* Allocate prot structure */ + if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, + sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(prot, 0, sizeof(*prot)); + + prot->osh = osh; + dhd->prot = prot; + + /* DMAing ring completes supported? FALSE by default */ + dhd->dma_d2h_ring_upd_support = FALSE; + dhd->dma_h2d_ring_upd_support = FALSE; + + /* Common Ring Allocations */ + + /* Ring 0: H2D Control Submission */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl", + H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 1: H2D Receive Buffer Post */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp", + H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 2: D2H Control Completion */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl", + D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 3: D2H Transmit Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl", + D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* Ring 4: D2H Receive Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", + D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* + * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able + * buffers for flowrings will be instantiated, in dhd_prot_init() . 
+ * See dhd_prot_flowrings_pool_attach() + */ + /* ioctl response buffer */ + if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) { + goto fail; + } + + /* IOCTL request buffer */ + if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) { + goto fail; + } + + /* Scratch buffer for dma rx offset */ + if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) { + goto fail; + } + + /* scratch buffer bus throughput measurement */ + if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) { + goto fail; + } + +#ifdef DHD_RX_CHAINING + dhd_rxchain_reset(&prot->rxchain); +#endif + +#if defined(DHD_LB) + + /* Initialize the work queues to be used by the Load Balancing logic */ +#if defined(DHD_LB_TXC) + { + void *buffer; + buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); + bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons, + buffer, DHD_LB_WORKQ_SZ); + prot->tx_compl_prod_sync = 0; + DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n", + __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); + } +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + { + void *buffer; + buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ); + bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons, + buffer, DHD_LB_WORKQ_SZ); + prot->rx_compl_prod_sync = 0; + DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n", + __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); + } +#endif /* DHD_LB_RXC */ + +#endif /* DHD_LB */ + + return BCME_OK; + +fail: + +#ifndef CONFIG_DHD_USE_STATIC_BUF + if (prot != NULL) { + dhd_prot_detach(dhd); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + return BCME_NOMEM; +} /* dhd_prot_attach */ + + +/** + * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has + * completed it's initialization of the pcie_shared structure, we may now fetch + * the dongle advertized features and adjust the protocol layer accordingly. + * + * dhd_prot_init() may be invoked again after a dhd_prot_reset(). 
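 *
 * Editorial illustration (not part of the original patch): the soft-reboot
 * sequence this enables, assuming the bus layer re-reads pcie_shared_t from
 * the dongle in between; both calls exist in this file, the ordering is the
 * point of the sketch.
 *
 *	dhd_prot_reset(dhd);	// drop runtime state, keep attach-time objects
 *	// ... dongle restarted, pcie_shared_t fetched again by the bus layer ...
 *	dhd_prot_init(dhd);	// re-apply dongle-advertised configuration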
+ */ +int +dhd_prot_init(dhd_pub_t *dhd) +{ + sh_addr_t base_addr; + dhd_prot_t *prot = dhd->prot; + + /* PKTID handle INIT */ + if (prot->pktid_map_handle != NULL) { + DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (prot->pktid_map_handle_ioctl != NULL) { + DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE); + if (prot->pktid_map_handle == NULL) { + DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__)); + ASSERT(0); + return BCME_NOMEM; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL); + if (prot->pktid_map_handle_ioctl == NULL) { + DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__)); + ASSERT(0); + return BCME_NOMEM; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + /* Max pkts in ring */ + prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM; + + DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count)); + + /* Read max rx packets supported by dongle */ + dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); + if (prot->max_rxbufpost == 0) { + /* This would happen if the dongle firmware is not */ + /* using the latest shared structure template */ + prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; + } + DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); + + /* Initialize. bzero() would blow away the dma pointers. */ + prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; + prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; + + prot->cur_ioctlresp_bufs_posted = 0; + prot->active_tx_count = 0; + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + + prot->dmaxfer.srcmem.va = NULL; + prot->dmaxfer.dstmem.va = NULL; + prot->dmaxfer.in_progress = FALSE; + + prot->metadata_dbg = FALSE; + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; + + prot->ioctl_trans_id = 0; + + /* Register the interrupt function upfront */ + /* remove corerev checks in data path */ + prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); + + /* Initialize Common MsgBuf Rings */ + + dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); + +#if defined(PCIE_D2H_SYNC) + dhd_prot_d2h_sync_init(dhd); +#endif /* PCIE_D2H_SYNC */ + + dhd_prot_h2d_sync_init(dhd); + + /* init the scratch buffer */ + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_SCRATCH_BUF, 0); + dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len, + sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0); + + /* If supported by the host, indicate the memory block + * for completion writes / submission reads to shared space + */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + dhd_base_addr_htolpa(&base_addr, 
prot->d2h_dma_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_INDX_WR_BUF, 0); + dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_DMA_INDX_RD_BUF, 0); + } + + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_DMA_INDX_WR_BUF, 0); + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_INDX_RD_BUF, 0); + } + + /* + * If the DMA-able buffers for flowring needs to come from a specific + * contiguous memory region, then setup prot->flowrings_dma_buf here. + * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from + * this contiguous memory region, for each of the flowrings. + */ + + /* Pre-allocate pool of msgbuf_ring for flowrings */ + if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) { + return BCME_ERROR; + } + + /* Host should configure soft doorbells if needed ... here */ + + /* Post to dongle host configured soft doorbells */ + dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd); + + /* Post buffers for packet reception and ioctl/event responses */ + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + return BCME_OK; +} /* dhd_prot_init */ + + +/** + * dhd_prot_detach - PCIE FD protocol layer destructor. + * Unlink, frees allocated protocol memory (including dhd_prot) + */ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + + /* Stop the protocol module */ + if (prot) { + + /* free up all DMA-able buffers allocated during prot attach/init */ + + dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf); + dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */ + dhd_dma_buf_free(dhd, &prot->ioctbuf); + dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf); + + /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */ + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); + + /* Common MsgBuf Rings */ + dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln); + + /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */ + dhd_prot_flowrings_pool_detach(dhd); + + DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle); + +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +#if defined(DHD_LB) +#if defined(DHD_LB_TXC) + if (prot->tx_compl_prod.buffer) { + MFREE(dhd->osh, prot->tx_compl_prod.buffer, + sizeof(void*) * DHD_LB_WORKQ_SZ); + } +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + if (prot->rx_compl_prod.buffer) { + MFREE(dhd->osh, prot->rx_compl_prod.buffer, + sizeof(void*) * DHD_LB_WORKQ_SZ); + } +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ + + dhd->prot = NULL; + } +} /* dhd_prot_detach */ + + +/** + * dhd_prot_reset - Reset the protocol layer without freeing any objects. 
This + * may be invoked to soft reboot the dongle, without having to detach and attach + * the entire protocol layer. + * + * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through + * a dhd_prot_attach() phase. + */ +void +dhd_prot_reset(dhd_pub_t *dhd) +{ + struct dhd_prot *prot = dhd->prot; + + DHD_TRACE(("%s\n", __FUNCTION__)); + + if (prot == NULL) { + return; + } + + dhd_prot_flowrings_pool_reset(dhd); + + dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln); + + dhd_dma_buf_reset(dhd, &prot->retbuf); + dhd_dma_buf_reset(dhd, &prot->ioctbuf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); + dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf); + dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf); + + + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->cur_ioctlresp_bufs_posted = 0; + + prot->active_tx_count = 0; + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + prot->ioctl_trans_id = 0; + + /* dhd_flow_rings_init is located at dhd_bus_start, + * so when stopping bus, flowrings shall be deleted + */ + if (dhd->flow_rings_inited) { + dhd_flow_rings_deinit(dhd); + } + + if (prot->pktid_map_handle) { + DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle); + prot->pktid_map_handle = NULL; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (prot->pktid_map_handle_ioctl) { + DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); + prot->pktid_map_handle_ioctl = NULL; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ +} /* dhd_prot_reset */ + + +void +dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) +{ + dhd_prot_t *prot = dhd->prot; + prot->rx_dataoffset = rx_offset; +} + +/** + * Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). 
+ */ +int +dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + + + +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__)); + goto done; + } + DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, + revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); + + dhd_process_cid_mac(dhd, TRUE); + + ret = dhd_preinit_ioctls(dhd); + + if (!ret) { + dhd_process_cid_mac(dhd, FALSE); + } + + /* Always assumes wl for now */ + dhd->iswl = TRUE; +done: + return ret; +} /* dhd_sync_with_dongle */ + +#if defined(DHD_LB) + +/* DHD load balancing: deferral of work to another online CPU */ + +/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */ +extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp); +extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp); +extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); + +extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); + +/** + * dhd_lb_dispatch - load balance by dispatch work to other CPU cores + * Note: rx_compl_tasklet is dispatched explicitly. + */ +static INLINE void +dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx) +{ + switch (ring_idx) { + +#if defined(DHD_LB_TXC) + case BCMPCIE_D2H_MSGRING_TX_COMPLETE: + bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */ + dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */ + break; +#endif /* DHD_LB_TXC */ + + case BCMPCIE_D2H_MSGRING_RX_COMPLETE: + { +#if defined(DHD_LB_RXC) + dhd_prot_t *prot = dhdp->prot; + /* Schedule the takslet only if we have to */ + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + /* flush WR index */ + bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod); + dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */ + } +#endif /* DHD_LB_RXC */ +#if defined(DHD_LB_RXP) + dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */ +#endif /* DHD_LB_RXP */ + break; + } + default: + break; + } +} + + +#if defined(DHD_LB_TXC) +/** + * DHD load balanced tx completion tasklet handler, that will perform the + * freeing of packets on the selected CPU. Packet pointers are delivered to + * this tasklet via the tx complete workq. 
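 *
 * Editorial illustration (not part of the original patch): the producer/consumer
 * handoff used here, with names taken from this file; index handling and error
 * paths are elided.
 *
 *	// producer side (tx status processing):
 *	idx  = bcm_ring_prod(WORKQ_RING(&prot->tx_compl_prod), DHD_LB_WORKQ_SZ);
 *	elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, idx);
 *	*elem = pkt;
 *	bcm_workq_prod_sync(&prot->tx_compl_prod);	// publish WR index
 *
 *	// consumer side (this tasklet, typically on another CPU):
 *	idx = bcm_ring_cons(WORKQ_RING(&prot->tx_compl_cons), DHD_LB_WORKQ_SZ);
 *	pkt = *WORKQ_ELEMENT(void *, &prot->tx_compl_cons, idx);
 *	bcm_workq_cons_sync(&prot->tx_compl_cons);	// publish RD index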
+ */ +void +dhd_lb_tx_compl_handler(unsigned long data) +{ + int elem_ix; + void *pkt, **elem; + dmaaddr_t pa; + uint32 pa_len; + dhd_pub_t *dhd = (dhd_pub_t *)data; + dhd_prot_t *prot = dhd->prot; + bcm_workq_t *workq = &prot->tx_compl_cons; + uint32 count = 0; + + DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd); + + while (1) { + elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + if (elem_ix == BCM_RING_EMPTY) { + break; + } + + elem = WORKQ_ELEMENT(void *, workq, elem_ix); + pkt = *elem; + + DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt)); + + OSL_PREFETCH(PKTTAG(pkt)); + OSL_PREFETCH(pkt); + + pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt)); + pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt)); + + DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0); + +#if defined(BCMPCIE) + dhd_txcomplete(dhd, pkt, true); +#endif + + PKTFREE(dhd->osh, pkt, TRUE); + count++; + } + + /* smp_wmb(); */ + bcm_workq_cons_sync(workq); + DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count); +} +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) +void +dhd_lb_rx_compl_handler(unsigned long data) +{ + dhd_pub_t *dhd = (dhd_pub_t *)data; + bcm_workq_t *workq = &dhd->prot->rx_compl_cons; + + DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd); + + dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */ + bcm_workq_cons_sync(workq); +} +#endif /* DHD_LB_RXC */ + +#endif /* DHD_LB */ + +#define DHD_DBG_SHOW_METADATA 0 + +#if DHD_DBG_SHOW_METADATA +static void BCMFASTPATH +dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len) +{ + uint8 tlv_t; + uint8 tlv_l; + uint8 *tlv_v = (uint8 *)ptr; + + if (len <= BCMPCIE_D2H_METADATA_HDRLEN) + return; + + len -= BCMPCIE_D2H_METADATA_HDRLEN; + tlv_v += BCMPCIE_D2H_METADATA_HDRLEN; + + while (len > TLV_HDR_LEN) { + tlv_t = tlv_v[TLV_TAG_OFF]; + tlv_l = tlv_v[TLV_LEN_OFF]; + + len -= TLV_HDR_LEN; + tlv_v += TLV_HDR_LEN; + if (len < tlv_l) + break; + if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) + break; + + switch (tlv_t) { + case WLFC_CTL_TYPE_TXSTATUS: { + uint32 txs; + memcpy(&txs, tlv_v, sizeof(uint32)); + if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) { + printf("METADATA TX_STATUS: %08x\n", txs); + } else { + wl_txstatus_additional_info_t tx_add_info; + memcpy(&tx_add_info, tlv_v + sizeof(uint32), + sizeof(wl_txstatus_additional_info_t)); + printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]" + " rate = %08x tries = %d - %d\n", txs, + tx_add_info.seq, tx_add_info.entry_ts, + tx_add_info.enq_ts, tx_add_info.last_ts, + tx_add_info.rspec, tx_add_info.rts_cnt, + tx_add_info.tx_cnt); + } + } break; + + case WLFC_CTL_TYPE_RSSI: { + if (tlv_l == 1) + printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v); + else + printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n", + (*(tlv_v + 3) << 8) | *(tlv_v + 2), + (int8)(*tlv_v), *(tlv_v + 1)); + } break; + + case WLFC_CTL_TYPE_FIFO_CREDITBACK: + bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_TX_ENTRY_STAMP: + bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_RX_STAMP: { + struct { + uint32 rspec; + uint32 bus_time; + uint32 wlan_time; + } rx_tmstamp; + memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp)); + printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n", + rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec); + } break; + + case WLFC_CTL_TYPE_TRANS_ID: + bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_COMP_TXSTATUS: + bcm_print_bytes("METADATA COMP_TXSTATUS", 
tlv_v, tlv_l); + break; + + default: + bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); + break; + } + + len -= tlv_l; + tlv_v += tlv_l; + } +} +#endif /* DHD_DBG_SHOW_METADATA */ + +static INLINE void BCMFASTPATH +dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) +{ + if (pkt) { + if (pkttype == PKTTYPE_IOCTL_RX || + pkttype == PKTTYPE_EVENT_RX) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, pkt, send); +#else + PKTFREE(dhd->osh, pkt, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTFREE(dhd->osh, pkt, send); + } + } +} + +static INLINE void * BCMFASTPATH +dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) +{ + void *PKTBUF; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + +#ifdef DHD_PCIE_PKTID + if (free_pktid) { + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, pkttype); + } else { + PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, pkttype); + } +#else + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa, + len, dmah, secdma, pkttype); +#endif /* DHD_PCIE_PKTID */ + + if (PKTBUF) { + { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah, + secdma, 0); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } + } + } + + return PKTBUF; +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static INLINE void BCMFASTPATH +dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf) +{ + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); + + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static void BCMFASTPATH +dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) +{ + dhd_prot_t *prot = dhd->prot; + int16 fillbufs; + uint16 cnt = 256; + int retcount = 0; + + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + while (fillbufs >= RX_BUF_BURST) { + cnt--; + if (cnt == 0) { + /* find a better way to reschedule rx buf post if space not available */ + DHD_ERROR(("h2d rx post ring not available to post host buffers \n")); + DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost)); + break; + } + + /* Post in a burst of 32 buffers at a time */ + fillbufs = MIN(fillbufs, RX_BUF_BURST); + + /* Post buffers */ + retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid); + + if (retcount >= 0) { + prot->rxbufpost += (uint16)retcount; +#ifdef DHD_LB_RXC + /* dhd_prot_rxbuf_post returns the number of buffers posted */ + DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount); +#endif /* DHD_LB_RXC */ + /* how many more to post */ + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + } else { + /* Make sure we don't run loop any further */ + fillbufs = 0; + } + } +} + +/** Post 'count' no of rx buffers to dongle */ +static int BCMFASTPATH +dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) +{ + void *p; + uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + uint8 *rxbuf_post_tmp; + host_rxbuf_post_t *rxbuf_post; + void *msg_start; + dmaaddr_t pa; + uint32 pktlen; + uint8 i = 0; + uint16 alloced = 0; + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) + 
dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE); + + DHD_GENERAL_UNLOCK(dhd, flags); + + if (msg_start == NULL) { + DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + return -1; + } + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + rxbuf_post_tmp = (uint8*)msg_start; + + /* loop through each allocated message in the rxbuf post msgbuf_ring */ + for (i = 0; i < alloced; i++) { + rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; + /* Create a rx buffer */ + if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { + DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + + pktlen = PKTLEN(dhd->osh, p); + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + } + + if (PHYSADDRISZERO(pa)) { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + + PKTFREE(dhd->osh, p, FALSE); + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } + + PKTPULL(dhd->osh, p, prot->rx_metadata_offset); + pktlen = PKTLEN(dhd->osh, p); + + /* Common msg header */ + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + +#if defined(DHD_LB_RXC) + if (use_rsv_pktid == TRUE) { + bcm_workq_t *workq = &prot->rx_compl_cons; + int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_EMPTY) { + DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__)); + pktid = DHD_PKTID_INVALID; + goto alloc_pkt_id; + } else { + uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + pktid = *elem; + } + + /* Now populate the previous locker with valid information */ + if (pktid != DHD_PKTID_INVALID) { + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid, + pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma, + PKTTYPE_DATA_RX); + } + } else +#endif /* DHD_LB_RXC */ + { +#if defined(DHD_LB_RXC) +alloc_pkt_id: +#endif +#if defined(DHD_PCIE_PKTID) + /* get the lock before calling DHD_NATIVE_TO_PKTID */ + DHD_GENERAL_LOCK(dhd, flags); +#endif + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa, + pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); + +#if defined(DHD_PCIE_PKTID) + /* free lock */ + DHD_GENERAL_UNLOCK(dhd, flags); + + if (pktid == DHD_PKTID_INVALID) { + + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + + PKTFREE(dhd->osh, p, FALSE); + DHD_ERROR(("Pktid pool depleted.\n")); + break; + } +#endif /* DHD_PCIE_PKTID */ + } + + rxbuf_post->data_buf_len = htol16((uint16)pktlen); + rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->data_buf_addr.low_addr = + htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); + + if (prot->rx_metadata_offset) { + 
rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; + rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + } else { + rxbuf_post->metadata_buf_len = 0; + rxbuf_post->metadata_buf_addr.high_addr = 0; + rxbuf_post->metadata_buf_addr.low_addr = 0; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + + /* Move rxbuf_post_tmp to next item */ + rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) { + ring->wr = ring->max_items - (alloced - i); + } else { + ring->wr -= (alloced - i); + } + + alloced = i; + } + + /* Update ring's WR index and ring doorbell to dongle */ + if (alloced > 0) { + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + return alloced; +} /* dhd_prot_rxbuf_post */ + +#ifdef IOCTLRESP_USE_CONSTMEM +static int +alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + int err; + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + + if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) { + DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err)); + ASSERT(0); + return BCME_NOMEM; + } + + return BCME_OK; +} + +static void +free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + /* retbuf (declared on stack) not fully populated ... */ + if (retbuf->va) { + uint32 dma_pad; + dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; + retbuf->len = IOCT_RETBUF_SIZE; + retbuf->_alloced = retbuf->len + dma_pad; + /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle. + * Need to reassign before free to pass the check in dhd_dma_buf_audit(). + */ + retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL); + } + + dhd_dma_buf_free(dhd, retbuf); + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static int +dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) +{ + void *p; + uint16 pktsz; + ioctl_resp_evt_buf_post_msg_t *rxbuf_post; + dmaaddr_t pa; + uint32 pktlen; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + unsigned long flags; + dhd_dma_buf_t retbuf; + void *dmah = NULL; + uint32 pktid; + void *map_handle; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return -1; + } + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + if (event_buf) { + /* Allocate packet for event buffer post */ + pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + /* Allocate packet for ctrl/ioctl buffer post */ + pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { + DHD_ERROR(("Could not allocate IOCTL response buffer\n")); + return -1; + } + ASSERT(retbuf.len == IOCT_RETBUF_SIZE); + p = retbuf.va; + pktlen = retbuf.len; + pa = retbuf.pa; + dmah = retbuf.dmah; + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", + __FUNCTION__, __LINE__, event_buf ? 
+ "EVENT" : "IOCTL RESP")); + dhd->rx_pktgetfail++; + return -1; + } + + pktlen = PKTLEN(dhd->osh, p); + + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + } + + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("Invalid physaddr 0\n")); + ASSERT(0); + goto free_pkt_return; + } + } + + DHD_GENERAL_LOCK(dhd, flags); + + rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (rxbuf_post == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n", + __FUNCTION__, __LINE__)); + +#ifdef IOCTLRESP_USE_CONSTMEM + if (event_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } + + /* CMN msg header */ + if (event_buf) { + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST; + } else { + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + map_handle = dhd->prot->pktid_map_handle_ioctl; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, + DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + map_handle = dhd->prot->pktid_map_handle; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, + p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, + event_buf ? 
PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX); + } + + if (pktid == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + } + DHD_GENERAL_UNLOCK(dhd, flags); + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + goto free_pkt_return; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + +#if defined(DHD_PCIE_PKTID) + if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + } + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef IOCTLRESP_USE_CONSTMEM + if (event_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } +#endif /* DHD_PCIE_PKTID */ + + rxbuf_post->cmn_hdr.flags = 0; +#ifndef IOCTLRESP_USE_CONSTMEM + rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); +#else + rxbuf_post->host_buf_len = htol16((uint16)pktlen); +#endif /* IOCTLRESP_USE_CONSTMEM */ + rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 1; + +free_pkt_return: +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + free_ioctl_return_buffer(dhd, &retbuf); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + dhd_prot_packet_free(dhd, p, + event_buf ? 
PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX, + FALSE); + } + + return -1; +} /* dhd_prot_rxbufpost_ctrl */ + +static uint16 +dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post) +{ + uint32 i = 0; + int32 ret_val; + + DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return 0; + } + + while (i < max_to_post) { + ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf); + if (ret_val < 0) { + break; + } + i++; + } + DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf)); + return (uint16)i; +} + +static void +dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + DHD_INFO(("ioctl resp buf post\n")); + max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n", + __FUNCTION__)); + return; + } + prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + FALSE, max_to_post); +} + +static void +dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max event buffers\n", + __FUNCTION__)); + return; + } + prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + TRUE, max_to_post); +} + +/** called when DHD needs to check for 'receive complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check RX bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +/** + * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring) + */ +void +dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) +{ + msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring; + + /* Update read pointer */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } + + DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n", + ring->idx, flowid, ring->wr, ring->rd)); + + /* Need more logic here, but for now use it directly */ + dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */ +} + +/** called when DHD needs to check for 'transmit complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; + + /* Process all the messages - 
DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +/** called when DHD needs to check for 'ioctl complete' messages from the dongle */ +int BCMFASTPATH +dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + } + + return 0; +} + +/** + * Consume messages out of the D2H ring. Ensure that the message's DMA to host + * memory has completed, before invoking the message handler via a table lookup + * of the cmn_msg_hdr::msg_type. + */ +static int BCMFASTPATH +dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) +{ + int buf_len = len; + uint16 item_len; + uint8 msg_type; + cmn_msg_hdr_t *msg = NULL; + int ret = BCME_OK; + + ASSERT(ring); + item_len = ring->item_len; + if (item_len == 0) { + DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n", + __FUNCTION__, ring->idx, item_len, buf_len)); + return BCME_ERROR; + } + + while (buf_len > 0) { + if (dhd->hang_was_sent) { + ret = BCME_ERROR; + goto done; + } + + msg = (cmn_msg_hdr_t *)buf; + + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. 
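	 *
	 * Editorial note (not part of the original patch): assuming curr_rd is
	 * always kept below max_items, the open-coded wrap that follows is
	 * equivalent to
	 *
	 *	ring->curr_rd = (ring->curr_rd + 1) % ring->max_items;
	 *
	 * but avoids a division in this BCMFASTPATH routine.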
+ */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + +#if defined(PCIE_D2H_SYNC) + /* Wait until DMA completes, then fetch msg_type */ + msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); +#else + msg_type = msg->msg_type; +#endif /* !PCIE_D2H_SYNC */ + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(buf + item_len); + + DHD_INFO(("msg_type %d item_len %d buf_len %d\n", + msg_type, item_len, buf_len)); + + if (msg_type == MSG_TYPE_LOOPBACK) { + bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len); + DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len)); + } + + ASSERT(msg_type < DHD_PROT_FUNCS); + if (msg_type >= DHD_PROT_FUNCS) { + DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n", + __FUNCTION__, msg_type, item_len, buf_len)); + ret = BCME_ERROR; + goto done; + } + + if (table_lookup[msg_type]) { + table_lookup[msg_type](dhd, buf); + } + + if (buf_len < item_len) { + ret = BCME_ERROR; + goto done; + } + buf_len = buf_len - item_len; + buf = buf + item_len; + } + +done: + +#ifdef DHD_RX_CHAINING + dhd_rxchain_commit(dhd); +#endif +#if defined(DHD_LB) + dhd_lb_dispatch(dhd, ring->idx); +#endif + return ret; +} /* dhd_prot_process_msgtype */ + +static void +dhd_prot_noop(dhd_pub_t *dhd, void *msg) +{ + return; +} + +/** called on MSG_TYPE_RING_STATUS message received from dongle */ +static void +dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg; + DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", + ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status, + ring_status->compl_hdr.flow_ring_id, ring_status->write_idx)); + /* How do we track this to pair it with ??? */ + return; +} + +/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */ +static void +dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg; + DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n", + gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, + gen_status->compl_hdr.flow_ring_id)); + + /* How do we track this to pair it with ??? */ + return; +} + +/** + * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the + * dongle received the ioctl message in dongle memory. 
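 *
 * Editorial illustration (not part of the original patch): the ioctl state bits
 * as this handler expects them; the submit-time assignment is inferred from the
 * check below and is not visible in this hunk.
 *
 *	prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;	// at submit (inferred)
 *	prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;		// here, on the ack
 *	// MSG_TYPE_IOCTL_CMPLT later delivers the response and wakes the waiter
 *	// via dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS)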
+ */ +static void +dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) +{ + uint32 pktid; + ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg; + unsigned long flags; + + pktid = ltoh32(ioct_ack->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */ + if (pktid != DHD_IOCTL_REQ_PKTID) { + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_TEST_IS_ALLOC) == BCME_ERROR) { + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && + (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING; + } else { + DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + DHD_GENERAL_UNLOCK(dhd, flags); + + DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n", + ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, + ioct_ack->compl_hdr.flow_ring_id)); + if (ioct_ack->compl_hdr.status != 0) { + DHD_ERROR(("got an error status for the ioctl request...need to handle that\n")); + } +} + +/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */ +static void +dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + uint32 pkt_id, xt_id; + ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg; + void *pkt; + unsigned long flags; + dhd_dma_buf_t retbuf; + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + { + int ret; +#ifndef IOCTLRESP_USE_CONSTMEM + ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, + DHD_DUPLICATE_FREE); +#else + ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id, + DHD_DUPLICATE_FREE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (ret == BCME_ERROR) { + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || + !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } +#ifndef IOCTLRESP_USE_CONSTMEM + pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); +#else + dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf); + pkt = retbuf.va; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (!pkt) { + prot->ioctl_state = 0; + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); + return; + } + DHD_GENERAL_UNLOCK(dhd, flags); + + prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); + prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); + xt_id = ltoh16(ioct_resp->trans_id); + if (xt_id != prot->ioctl_trans_id) { + ASSERT(0); + goto exit; + } + + DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", + pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); + + if (prot->ioctl_resplen > 0) { +#ifndef IOCTLRESP_USE_CONSTMEM + bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, 
prot->ioctl_resplen); +#else + bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + } + + /* wake up any dhd_os_ioctl_resp_wait() */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS); + +exit: +#ifndef IOCTLRESP_USE_CONSTMEM + dhd_prot_packet_free(dhd, pkt, + PKTTYPE_IOCTL_RX, FALSE); +#else + free_ioctl_return_buffer(dhd, &retbuf); +#endif /* !IOCTLRESP_USE_CONSTMEM */ +} + +/** called on MSG_TYPE_TX_STATUS message received from dongle */ +static void BCMFASTPATH +dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + host_txbuf_cmpl_t * txstatus; + unsigned long flags; + uint32 pktid; + void *pkt = NULL; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + + /* locks required to protect circular buffer accesses */ + DHD_GENERAL_LOCK(dhd, flags); + + txstatus = (host_txbuf_cmpl_t *)msg; + pktid = ltoh32(txstatus->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_txstatus_process:", + (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("txstatus for pktid 0x%04x\n", pktid)); + if (prot->active_tx_count) { + prot->active_tx_count--; + + /* Release the Lock when no more tx packets are pending */ + if (prot->active_tx_count == 0) + DHD_OS_WAKE_UNLOCK(dhd); + + } else { + DHD_ERROR(("Extra packets are freed\n")); + } + + ASSERT(pktid != 0); + +#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA) + { + int elem_ix; + void **elem; + bcm_workq_t *workq; + + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + + workq = &prot->tx_compl_prod; + /* + * Produce the packet into the tx_compl workq for the tx compl tasklet + * to consume. + */ + OSL_PREFETCH(PKTTAG(pkt)); + + /* fetch next available slot in workq */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa); + DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len); + + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n")); + goto workq_ring_full; + } + + elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix); + *elem = pkt; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->tx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", + __FUNCTION__, pkt, prot->tx_compl_prod_sync)); + + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + +workq_ring_full: + +#endif /* !DHD_LB_TXC */ + + /* + * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is + * defined but the tx_compl queue is full. 
+ */ + if (pkt == NULL) { + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + } + + if (pkt) { + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (dhd->prot->tx_metadata_offset) + offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN; + SECURE_DMA_UNMAP(dhd->osh, (uint) pa, + (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah, + secdma, offset); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } +#if defined(BCMPCIE) + dhd_txcomplete(dhd, pkt, true); +#endif + +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->tx_metadata_offset && txstatus->metadata_len) { + uchar *ptr; + /* The Ethernet header of TX frame was copied and removed. + * Here, move the data pointer forward by Ethernet header size. + */ + PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); + bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); + dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + PKTFREE(dhd->osh, pkt, TRUE); + DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, + txstatus->tx_status); + } + + DHD_GENERAL_UNLOCK(dhd, flags); + + return; +} /* dhd_prot_txstatus_process */ + +/** called on MSG_TYPE_WL_EVENT message received from dongle */ +static void +dhd_prot_event_process(dhd_pub_t *dhd, void *msg) +{ + wlevent_req_msg_t *evnt; + uint32 bufid; + uint16 buflen; + int ifidx = 0; + void* pkt; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + + /* Event complete header */ + evnt = (wlevent_req_msg_t *)msg; + bufid = ltoh32(evnt->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_event_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + buflen = ltoh16(evnt->event_data_len); + + ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); + + /* Post another rxbuf to the device */ + if (prot->cur_event_bufs_posted) { + prot->cur_event_bufs_posted--; + } + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + /* locks required to protect pktid_map */ + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + return; + } + + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) { + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + } + + PKTSETLEN(dhd->osh, pkt, buflen); + + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +} + +/** called on MSG_TYPE_RX_CMPLT message received from dongle */ +static void BCMFASTPATH +dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg) +{ + host_rxbuf_cmpl_t *rxcmplt_h; + uint16 data_offset; /* offset at which data starts */ + void *pkt; + unsigned long flags; + uint ifidx; + uint32 pktid; +#if defined(DHD_LB_RXC) + const bool free_pktid = FALSE; +#else + const bool free_pktid = TRUE; +#endif /* DHD_LB_RXC */ + + /* RXCMPLT HDR */ + rxcmplt_h = (host_rxbuf_cmpl_t *)msg; + + /* offset from which data starts is populated in rxstatus0 */ + data_offset = ltoh16(rxcmplt_h->data_offset); + + pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_rxcmplt_process:", + (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE); + } 
+#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + return; + } + + /* Post another set of rxbufs to the device */ + dhd_prot_return_rxbuf(dhd, pktid, 1); + + DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n", + ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len), + rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), + ltoh16(rxcmplt_h->metadata_len))); +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) { + uchar *ptr; + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset); + /* header followed by data */ + bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len); + dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + + if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { + DHD_INFO(("D11 frame rxed \n")); + } + + /* data_offset from buf start */ + if (data_offset) { + /* data offset given from dongle after split rx */ + PKTPULL(dhd->osh, pkt, data_offset); /* data offset */ + } else { + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) { + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + } + } + /* Actual length of the packet */ + PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len)); + + ifidx = rxcmplt_h->cmn_hdr.if_id; + +#if defined(DHD_LB_RXP) + dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); +#else /* ! DHD_LB_RXP */ +#ifdef DHD_RX_CHAINING + /* Chain the packets */ + dhd_rxchain_frame(dhd, pkt, ifidx); +#else /* ! DHD_RX_CHAINING */ + /* offset from which data starts is populated in rxstatus0 */ + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +#endif /* ! DHD_RX_CHAINING */ +#endif /* ! DHD_LB_RXP */ +} /* dhd_prot_rxcmplt_process */ + +/** Stop protocol: sync w/dongle state. */ +void dhd_prot_stop(dhd_pub_t *dhd) +{ + ASSERT(dhd); + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +} + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +void BCMFASTPATH +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ + return; +} + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + return 0; +} + + +#define PKTBUF pktbuf + +/** + * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in + * the corresponding flow ring. 
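 *
 * Editorial illustration (not part of the original patch): the caller-side
 * contract as a sketch. dhd_prot_txdata() and BCME_NORESOURCE are from this
 * file; the requeue step is hypothetical and only reflects the comment below
 * stating that the caller retries the same packet on failure.
 *
 *	// pkt already carries its flowid (read below via DHD_PKT_GET_FLOWID)
 *	if (dhd_prot_txdata(dhd, pkt, ifidx) == BCME_NORESOURCE) {
 *		// flow ring or pktid pool exhausted: requeue pkt on the flow
 *		// queue and retry after tx completions drain (hypothetical)
 *	}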
+ */ +int BCMFASTPATH +dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + host_txbuf_post_t *txdesc = NULL; + dmaaddr_t pa, meta_pa; + uint8 *pktdata; + uint32 pktlen; + uint32 pktid; + uint8 prio; + uint16 flowid = 0; + uint16 alloced = 0; + uint16 headroom; + msgbuf_ring_t *ring; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + + if (dhd->flow_ring_table == NULL) { + return BCME_NORESOURCE; + } + + flowid = DHD_PKT_GET_FLOWID(PKTBUF); + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + + DHD_GENERAL_LOCK(dhd, flags); + + /* Create a unique 32-bit packet id */ + pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + DHD_ERROR(("Pktid pool depleted.\n")); + /* + * If we return error here, the caller would queue the packet + * again. So we'll just free the skb allocated in DMA Zone. + * Since we have not freed the original SKB yet the caller would + * requeue the same. + */ + goto err_no_res_pktfree; + } +#endif /* DHD_PCIE_PKTID */ + + /* Reserve space in the circular buffer */ + txdesc = (host_txbuf_post_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (txdesc == NULL) { +#if defined(DHD_PCIE_PKTID) + void *dmah; + void *secdma; + /* Free up the PKTID. physaddr and pktlen will be garbage. */ + DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, + pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); +#endif /* DHD_PCIE_PKTID */ + DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", + __FUNCTION__, __LINE__, prot->active_tx_count)); + goto err_no_res_pktfree; + } + + /* Extract the data pointer and length information */ + pktdata = PKTDATA(dhd->osh, PKTBUF); + pktlen = PKTLEN(dhd->osh, PKTBUF); + + /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ + bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); + + /* Extract the ethernet header and adjust the data pointer and length */ + pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); + pktlen -= ETHER_HDR_LEN; + + /* Map the data pointer to a DMA-able address */ + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (prot->tx_metadata_offset) { + offset = prot->tx_metadata_offset + ETHER_HDR_LEN; + } + + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, + DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); + } + + if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) { + DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + ASSERT(0); + } + + /* No need to lock. 
Save the rest of the packet's metadata */ + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid, + pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX); + +#ifdef TXP_FLUSH_NITEMS + if (ring->pend_items_count == 0) { + ring->start_addr = (void *)txdesc; + } + ring->pend_items_count++; +#endif + + /* Form the Tx descriptor message buffer */ + + /* Common message hdr */ + txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; + txdesc->cmn_hdr.if_id = ifidx; + + txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; + prio = (uint8)PKTPRIO(PKTBUF); + + + txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; + txdesc->seg_cnt = 1; + + txdesc->data_len = htol16((uint16) pktlen); + txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + /* Move data pointer to keep ether header in local PKTBUF for later reference */ + PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); + + /* Handle Tx metadata */ + headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); + if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) { + DHD_ERROR(("No headroom for Metadata tx %d %d\n", + prot->tx_metadata_offset, headroom)); + } + + if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { + DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); + + /* Adjust the data pointer to account for meta data in DMA_MAP */ + PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + if (SECURE_DMA_ENAB(dhd->osh)) { + meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF, + 0, ring->dma_buf.secdma); + } else { + meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); + } + + if (PHYSADDRISZERO(meta_pa)) { + DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + ASSERT(0); + } + + /* Adjust the data pointer back to original value */ + PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + txdesc->metadata_buf_len = prot->tx_metadata_offset; + txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa)); + txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa)); + } else { + txdesc->metadata_buf_len = htol16(0); + txdesc->metadata_buf_addr.high_addr = 0; + txdesc->metadata_buf_addr.low_addr = 0; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, + DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + txdesc->cmn_hdr.request_id = htol32(pktid); + + DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len, + txdesc->cmn_hdr.request_id)); + + /* Update the write pointer in TCM & ring bell */ +#ifdef TXP_FLUSH_NITEMS + /* Flush if we have either hit the txp_threshold or if this msg is */ + /* occupying the last slot in the flow_ring - before wrap around. */ + if ((ring->pend_items_count == prot->txp_threshold) || + ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) { + dhd_prot_txdata_write_flush(dhd, flowid, TRUE); + } +#else + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, txdesc, 1); +#endif + + prot->active_tx_count++; + + /* + * Take a wake lock, do not sleep if we have atleast one packet + * to finish. 
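	 *
	 * Editorial note (not part of the original patch): the wake lock is
	 * effectively reference-counted by active_tx_count and paired with the
	 * release in dhd_prot_txstatus_process():
	 *
	 *	active_tx_count 0 -> 1 : DHD_OS_WAKE_LOCK(dhd)		(below)
	 *	active_tx_count 1 -> 0 : DHD_OS_WAKE_UNLOCK(dhd)	(last tx status)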
+ */ + if (prot->active_tx_count == 1) + DHD_OS_WAKE_LOCK(dhd); + + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; + +err_no_res_pktfree: + + + + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NORESOURCE; +} /* dhd_prot_txdata */ + +/* called with a lock */ +/** optimization to write "n" tx items at a time to ring */ +void BCMFASTPATH +dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock) +{ +#ifdef TXP_FLUSH_NITEMS + unsigned long flags = 0; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + msgbuf_ring_t *ring; + + if (dhd->flow_ring_table == NULL) { + return; + } + + if (!in_lock) { + DHD_GENERAL_LOCK(dhd, flags); + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + if (ring->pend_items_count) { + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ring->start_addr, + ring->pend_items_count); + ring->pend_items_count = 0; + ring->start_addr = NULL; + } + + if (!in_lock) { + DHD_GENERAL_UNLOCK(dhd, flags); + } +#endif /* TXP_FLUSH_NITEMS */ +} + +#undef PKTBUF /* Only defined in the above routine */ + +int BCMFASTPATH +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) +{ + return 0; +} + +/** post a set of receive buffers to the dongle */ +static void BCMFASTPATH +dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt) +{ + dhd_prot_t *prot = dhd->prot; +#if defined(DHD_LB_RXC) + int elem_ix; + uint32 *elem; + bcm_workq_t *workq; + + workq = &prot->rx_compl_prod; + + /* Produce the work item */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__)); + ASSERT(0); + return; + } + + elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + *elem = pktid; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->rx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n", + __FUNCTION__, pktid, prot->rx_compl_prod_sync)); + +#endif /* DHD_LB_RXC */ + + + if (prot->rxbufpost >= rxcnt) { + prot->rxbufpost -= rxcnt; + } else { + /* ASSERT(0); */ + prot->rxbufpost = 0; + } + +#if !defined(DHD_LB_RXC) + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + } +#endif /* !DHD_LB_RXC */ +} + +/* called before an ioctl is sent to the dongle */ +static void +dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf) +{ + dhd_prot_t *prot = dhd->prot; + + if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) { + int slen = 0; + pcie_bus_tput_params_t *tput_params; + + slen = strlen("pcie_bus_tput") + 1; + tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen); + bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr, + sizeof(tput_params->host_buf_addr)); + tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN; + } +} + + +/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */ +int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + int ret = -1; + uint8 action; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. 
we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if (dhd->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (ioc->cmd == WLC_SET_PM) { + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + } + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) { + goto done; + } + + action = ioc->set; + + dhd_prot_wlioctl_intercept(dhd, ioc, buf); + + if (action & WL_IOCTL_ACTION_SET) { + ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + } else { + ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) { + ioc->used = ret; + } + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) { + ret = 0; + } else { + DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret)); + dhd->dongle_error = ret; + } + + if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) { + /* Intercept the wme_dp ioctl here */ + if (!strcmp(buf, "wme_dp")) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) { + bcopy(((char *)buf + slen), &val, sizeof(int)); + } + dhd->wme_dp = (uint8) ltoh32(val); + } + + } + +done: + return ret; + +} /* dhd_prot_ioctl */ + +/** test / loopback */ + +int +dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + + ioct_reqst_hdr_t *ioct_rqst; + + uint16 hdrlen = sizeof(ioct_reqst_hdr_t); + uint16 msglen = len + hdrlen; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); + msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); + + DHD_GENERAL_LOCK(dhd, flags); + + ioct_rqst = (ioct_reqst_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (ioct_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + return 0; + } + + { + uint8 *ptr; + uint16 i; + + ptr = (uint8 *)ioct_rqst; + for (i = 0; i < msglen; i++) { + ptr[i] = i % 256; + } + } + + /* Common msg buf hdr */ + ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; + ioct_rqst->msg.if_id = 0; + + bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} + +/** test / loopback */ +void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) +{ + if (dmaxfer == NULL) { + return; + } + + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + dhd_dma_buf_free(dhd, &dmaxfer->dstmem); +} + +/** test / loopback */ +int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, + uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer) +{ + uint i; + if (!dmaxfer) { + return BCME_ERROR; + } + + /* First free up existing buffers */ + dmaxfer_free_dmaaddr(dhd, dmaxfer); + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) { + return BCME_NOMEM; + } + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) { + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + return BCME_NOMEM; + } + + dmaxfer->len = len; + + /* Populate source with a pattern */ + for (i = 0; i < dmaxfer->len; i++) { + ((uint8*)dmaxfer->srcmem.va)[i] = i % 256; + } + OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len); + + dmaxfer->srcdelay = srcdelay; + dmaxfer->destdelay = destdelay; + + return BCME_OK; +} /* dmaxfer_prepare_dmaaddr */ + +static void 
+dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + + OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len); + if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) { + if (memcmp(prot->dmaxfer.srcmem.va, + prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) { + bcm_print_bytes("XFER SRC: ", + prot->dmaxfer.srcmem.va, prot->dmaxfer.len); + bcm_print_bytes("XFER DST: ", + prot->dmaxfer.dstmem.va, prot->dmaxfer.len); + } else { + DHD_INFO(("DMA successful\n")); + } + } + dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + dhd->prot->dmaxfer.in_progress = FALSE; +} + +/** Test functionality. + * Transfers bytes from host to dongle and to host again using DMA + * This function is not reentrant, as prot->dmaxfer.in_progress is not protected + * by a spinlock. + */ +int +dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) +{ + unsigned long flags; + int ret = BCME_OK; + dhd_prot_t *prot = dhd->prot; + pcie_dma_xfer_params_t *dmap; + uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT); + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (prot->dmaxfer.in_progress) { + DHD_ERROR(("DMA is in progress...\n")); + return ret; + } + + prot->dmaxfer.in_progress = TRUE; + if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay, + &prot->dmaxfer)) != BCME_OK) { + prot->dmaxfer.in_progress = FALSE; + return ret; + } + + DHD_GENERAL_LOCK(dhd, flags); + + dmap = (pcie_dma_xfer_params_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (dmap == NULL) { + dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + prot->dmaxfer.in_progress = FALSE; + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; + dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID); + dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); + dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa)); + dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa)); + dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa)); + dmap->xfer_len = htol32(prot->dmaxfer.len); + dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); + dmap->destdelay = htol32(prot->dmaxfer.destdelay); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, dmap, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + DHD_ERROR(("DMA Started...\n")); + + return BCME_OK; +} /* dhdmsgbuf_dmaxfer_req */ + +/** Called in the process of submitting an ioctl to the dongle */ +static int +dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR && buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + + DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n", + action, ifidx, cmd, len)); + + /* wait for IOCTL completion message from dongle and get first fragment */ + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); + +done: + return ret; +} + +/** + * Waits for 
IOCTL completion message from the dongle, copies this into caller + * provided parameter 'buf'. + */ +static int +dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) +{ + dhd_prot_t *prot = dhd->prot; + int timeleft; + unsigned long flags; + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd->dongle_reset) { + ret = -EIO; + goto out; + } + + if (prot->cur_ioctlresp_bufs_posted) { + prot->cur_ioctlresp_bufs_posted--; + } + + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + + timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received); + if (timeleft == 0) { + dhd->rxcnt_timeout++; + dhd->rx_ctlerrs++; + DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d " + "trans_id %d state %d busstate=%d ioctl_received=%d\n", + __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd, + prot->ioctl_trans_id, prot->ioctl_state, + dhd->busstate, prot->ioctl_received)); + + dhd_prot_debug_info_print(dhd); + +#ifdef DHD_FW_COREDUMP + /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */ + if (dhd->memdump_enabled && !dhd->dongle_trap_occured) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ + if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) { +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + DHD_ERROR(("%s: timeout > MAX_CNTL_TX_TIMEOUT\n", __FUNCTION__)); + } + ret = -ETIMEDOUT; + goto out; + } else { + if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) { + DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n", + __FUNCTION__, prot->ioctl_received)); + ret = -ECONNABORTED; + goto out; + } + dhd->rxcnt_timeout = 0; + dhd->rx_ctlpkts++; + DHD_CTL(("%s: ioctl resp resumed, got %d\n", + __FUNCTION__, prot->ioctl_resplen)); + } + + if (dhd->dongle_trap_occured) { +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__)); + ret = -EREMOTEIO; + goto out; + } + + if (dhd->prot->ioctl_resplen > len) { + dhd->prot->ioctl_resplen = (uint16)len; + } + if (buf) { + bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen); + } + + ret = (int)(dhd->prot->ioctl_status); +out: + DHD_GENERAL_LOCK(dhd, flags); + dhd->prot->ioctl_state = 0; + dhd->prot->ioctl_resplen = 0; + dhd->prot->ioctl_received = IOCTL_WAIT; + dhd->prot->curr_ioctl_cmd = 0; + DHD_GENERAL_UNLOCK(dhd, flags); + + return ret; +} /* dhd_msgbuf_wait_ioctl_cmplt */ + +static int +dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + int ret = 0; + + DHD_TRACE(("%s: Enter \n", __FUNCTION__)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + /* Fill up msgbuf for ioctl req */ + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + + DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n", + action, ifidx, cmd, len)); + + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); + + return ret; +} + +/** Called by upper DHD layer. Handles a protocol control response asynchronously. 
*/ +int dhd_prot_ctl_complete(dhd_pub_t *dhd) +{ + return 0; +} + +/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */ +int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +/** Add prot dump output to a buffer */ +void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + +#if defined(PCIE_D2H_SYNC) + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) + bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); + else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) + bcm_bprintf(b, "\nd2h_sync: XORCSUM:"); + else + bcm_bprintf(b, "\nd2h_sync: NONE:"); + bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); +#endif /* PCIE_D2H_SYNC */ + + bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", + DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support), + DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support), + dhd->prot->rw_index_sz); +} + +/* Update local copy of dongle statistics */ +void dhd_prot_dstats(dhd_pub_t *dhd) +{ + return; +} + +/** Called by upper DHD layer */ +int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count) +{ + return 0; +} + +/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */ +int +dhd_post_dummy_msg(dhd_pub_t *dhd) +{ + unsigned long flags; + hostevent_hdr_t *hevent = NULL; + uint16 alloced = 0; + + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + hevent = (hostevent_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (hevent == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + return -1; + } + + /* CMN msg header */ + hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; + hevent->msg.if_id = 0; + + /* Event payload */ + hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); + + /* Since, we are filling the data directly into the bufptr obtained + * from the msgbuf, we can directly call the write_complete + */ + dhd_prot_ring_write_complete(dhd, ring, hevent, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} + +/** + * If exactly_nitems is true, this function will allocate space for nitems or fail + * If exactly_nitems is false, this function will allocate space for nitems or less + */ +static void * BCMFASTPATH +dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 * alloced, bool exactly_nitems) +{ + void * ret_buf; + + /* Alloc space for nitems in the ring */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + /* if alloc failed , invalidate cached read ptr */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx); + } + + /* Try allocating once more */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + DHD_INFO(("%s: Ring space not available \n", ring->name)); + return NULL; + } + } + + /* Return alloced space */ + return ret_buf; +} + +/** + * Non inline ioct request. 
+ * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer + * Form a separate request buffer where a 4 byte cmn header is added in the front + * buf contents from parent function is copied to remaining section of this buffer + */ +static int +dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx) +{ + dhd_prot_t *prot = dhd->prot; + ioctl_req_msg_t *ioct_rqst; + void * ioct_buf; /* For ioctl payload */ + uint16 rqstlen, resplen; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + rqstlen = len; + resplen = len; + + /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */ + /* 8K allocation of dongle buffer fails */ + /* dhd doesnt give separate input & output buf lens */ + /* so making the assumption that input length can never be more than 1.5k */ + rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE); + + DHD_GENERAL_LOCK(dhd, flags); + + if (prot->ioctl_state) { + DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state)); + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_BUSY; + } else { + prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING; + } + + /* Request for cbuf space */ + ioct_rqst = (ioctl_req_msg_t*) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (ioct_rqst == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n")); + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + DHD_GENERAL_UNLOCK(dhd, flags); + return -1; + } + + /* Common msg buf hdr */ + ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ; + ioct_rqst->cmn_hdr.if_id = (uint8)ifidx; + ioct_rqst->cmn_hdr.flags = 0; + ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID); + ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->cmd = htol32(cmd); + prot->curr_ioctl_cmd = cmd; + ioct_rqst->output_buf_len = htol16(resplen); + prot->ioctl_trans_id++; + ioct_rqst->trans_id = prot->ioctl_trans_id; + + /* populate ioctl buffer info */ + ioct_rqst->input_buf_len = htol16(rqstlen); + ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa)); + ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa)); + /* copy ioct payload */ + ioct_buf = (void *) prot->ioctbuf.va; + + if (buf) { + memcpy(ioct_buf, buf, len); + } + + OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); + + if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) { + DHD_ERROR(("host ioct address unaligned !!!!! \n")); + } + + DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", + ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, + ioct_rqst->trans_id)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} /* dhd_fillup_ioct_reqst */ + + +/** + * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a + * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring + * information is posted to the dongle. + * + * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for + * each flowring in pool of flowrings. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. 
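+ *
+ * Sizing sketch (illustrative; the numbers are hypothetical, not taken from
+ * this driver): a ring attached with max_items = 256 and item_len = 48 needs
+ * a DMA-able buffer of 256 * 48 = 12288 bytes. For a flowring carved out of
+ * the contiguous flowrings_dma_buf, the slice for a given flowid starts at
+ *
+ *   base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
+ *   dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
+ *
+ * so consecutive flowids map to consecutive, equally sized slices of the
+ * reserved region.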
+ */ +static int +dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, + uint16 max_items, uint16 item_len, uint16 ringid) +{ + int dma_buf_alloced = BCME_NOMEM; + uint32 dma_buf_len = max_items * item_len; + dhd_prot_t *prot = dhd->prot; + + ASSERT(ring); + ASSERT(name); + ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF)); + + /* Init name */ + strncpy(ring->name, name, RING_NAME_MAX_LENGTH); + ring->name[RING_NAME_MAX_LENGTH - 1] = '\0'; + + ring->idx = ringid; + + ring->max_items = max_items; + ring->item_len = item_len; + + /* A contiguous space may be reserved for all flowrings */ + if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) { + /* Carve out from the contiguous DMA-able flowring buffer */ + uint16 flowid; + uint32 base_offset; + + dhd_dma_buf_t *dma_buf = &ring->dma_buf; + dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf; + + flowid = DHD_RINGID_TO_FLOWID(ringid); + base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len; + + ASSERT(base_offset + dma_buf_len <= rsv_buf->len); + + dma_buf->len = dma_buf_len; + dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset); + PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa)); + PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset); + + /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */ + ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa)); + + dma_buf->dmah = rsv_buf->dmah; + dma_buf->secdma = rsv_buf->secdma; + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + } else { + /* Allocate a dhd_dma_buf */ + dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len); + if (dma_buf_alloced != BCME_OK) { + return BCME_NOMEM; + } + } + + /* CAUTION: Save ring::base_addr in little endian format! */ + dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa); + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t)); + if (ring->dma_buf.secdma == NULL) { + goto free_dma_buf; + } + } +#endif /* BCM_SECURE_DMA */ + + DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d " + "ring start %p buf phys addr %x:%x \n", + ring->name, ring->max_items, ring->item_len, + dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr))); + + return BCME_OK; + +#ifdef BCM_SECURE_DMA +free_dma_buf: + if (dma_buf_alloced == BCME_OK) { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } +#endif /* BCM_SECURE_DMA */ + + return BCME_NOMEM; + +} /* dhd_prot_ring_attach */ + + +/** + * dhd_prot_ring_init - Post the common ring information to dongle. + * + * Used only for common rings. + * + * The flowrings information is passed via the create flowring control message + * (tx_flowring_create_request_t) sent over the H2D control submission common + * ring. 
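+ *
+ * Bring-up sketch for a common ring (illustrative and simplified; the ring
+ * name is hypothetical and error handling is omitted):
+ *
+ *   dhd_prot_ring_attach(dhd, ring, "h2dctrl", max_items, item_len, ringid);
+ *   dhd_prot_ring_init(dhd, ring);
+ *
+ * where dhd_prot_ring_init() posts base_addr, max_items, item_len and
+ * WR = RD = 0 to the dongle and only then marks the ring as inited.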
+ */ +static void +dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + + /* CAUTION: ring::base_addr already in Little Endian */ + dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, + sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, + sizeof(uint16), RING_MAX_ITEMS, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, + sizeof(uint16), RING_ITEM_LEN, ring->idx); + + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + + /* ring inited */ + ring->inited = TRUE; + +} /* dhd_prot_ring_init */ + + +/** + * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush + * Reset WR and RD indices to 0. + */ +static void +dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + + dhd_dma_buf_reset(dhd, &ring->dma_buf); + + ring->rd = ring->wr = 0; + ring->curr_rd = 0; +} + + +/** + * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects + * hanging off the msgbuf_ring. + */ +static void +dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + dhd_prot_t *prot = dhd->prot; + ASSERT(ring); + + ring->inited = FALSE; + /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */ + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); + if (ring->dma_buf.secdma) { + MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t)); + } + ring->dma_buf.secdma = NULL; + } +#endif /* BCM_SECURE_DMA */ + + /* If the DMA-able buffer was carved out of a pre-reserved contiguous + * memory, then simply stop using it. + */ + if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) { + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); + } else { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } + +} /* dhd_prot_ring_detach */ + + +/* + * +---------------------------------------------------------------------------- + * Flowring Pool + * + * Unlike common rings, which are attached very early on (dhd_prot_attach), + * flowrings are dynamically instantiated. Moreover, flowrings may require a + * larger DMA-able buffer. To avoid issues with fragmented cache coherent + * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. + * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. + * + * Each DMA-able buffer may be allocated independently, or may be carved out + * of a single large contiguous region that is registered with the protocol + * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region + * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). + * + * No flowring pool action is performed in dhd_prot_attach(), as the number + * of h2d rings is not yet known. + * + * In dhd_prot_init(), the dongle advertized number of h2d rings is used to + * determine the number of flowrings required, and a pool of msgbuf_rings are + * allocated and a DMA-able buffer (carved or allocated) is attached. + * See: dhd_prot_flowrings_pool_attach() + * + * A flowring msgbuf_ring object may be fetched from this pool during flowring + * creation, using the flowid. Likewise, flowrings may be freed back into the + * pool on flowring deletion. 
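+ *
+ * A simplified, illustrative lifecycle for one flow (assuming a valid flowid
+ * in [DHD_FLOWRING_START_FLOWID, h2d_rings_total) and omitting locking and
+ * error handling):
+ *
+ *   ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);
+ *   ... post the flowring create request, then TX posts on this ring ...
+ *   dhd_prot_flowrings_pool_release(dhd, flowid, ring);
+ *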
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() + * + * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers + * are detached (returned back to the carved region or freed), and the pool of + * msgbuf_ring and any objects allocated against it are freed. + * See: dhd_prot_flowrings_pool_detach() + * + * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a + * state as-if upon an attach. All DMA-able buffers are retained. + * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring + * pool attach will notice that the pool persists and continue to use it. This + * will avoid the case of a fragmented DMA-able region. + * + * +---------------------------------------------------------------------------- + */ + +/* Fetch number of H2D flowrings given the total number of h2d rings */ +#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \ + ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Conversion of a flowid to a flowring pool index */ +#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ +#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ + (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid) + +/* Traverse each flowring in the flowring pool, assigning ring and flowid */ +#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \ + for ((flowid) = DHD_FLOWRING_START_FLOWID, \ + (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ + (flowid) < (prot)->h2d_rings_total; \ + (flowid)++, (ring)++) + +/** + * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. + * + * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings. + * Dongle includes common rings when it advertizes the number of H2D rings. + * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to + * allocate the DMA-able buffer and initialize each msgbuf_ring_t object. + * + * dhd_prot_ring_attach is invoked to perform the actual initialization and + * attaching the DMA-able buffer. + * + * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and + * initialized msgbuf_ring_t object. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. 
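+ *
+ * Sizing sketch (hypothetical count, for illustration): if the dongle
+ * advertises h2d_rings_total = 18, DHD_FLOWRINGS_POOL_TOTAL() subtracts the
+ * H2D common rings (ctrl and rxbuf post submission), leaving 16 flowrings;
+ * 16 msgbuf_ring_t objects are then allocated, each attached to its own
+ * DMA-able buffer or to a carved slice of flowrings_dma_buf.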
+ */
+static int
+dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
+{
+	uint16 flowid;
+	msgbuf_ring_t *ring;
+	uint16 h2d_flowrings_total; /* excludes H2D common rings */
+	dhd_prot_t *prot = dhd->prot;
+	char ring_name[RING_NAME_MAX_LENGTH];
+
+	if (prot->h2d_flowrings_pool != NULL) {
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+	}
+
+	ASSERT(prot->h2d_rings_total == 0);
+
+	/* h2d_rings_total includes the H2D common rings: ctrl and rxbuf subn */
+	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
+
+	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
+		DHD_ERROR(("%s: h2d_rings_total advertised as %u\n",
+			__FUNCTION__, prot->h2d_rings_total));
+		return BCME_ERROR;
+	}
+
+	/* Subtract the number of H2D common rings to determine the number of flowrings */
+	h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
+
+	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
+
+	/* Allocate a pool of msgbuf_ring_t objects for all flowrings */
+	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
+		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
+			__FUNCTION__, h2d_flowrings_total));
+		goto fail;
+	}
+
+	/* Set up and attach a DMA-able buffer to each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
+		ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
+		if (dhd_prot_ring_attach(dhd, ring, ring_name,
+		        H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
+			goto attach_fail;
+		}
+	}
+
+	return BCME_OK;
+
+attach_fail:
+	dhd_prot_flowrings_pool_detach(dhd); /* Free the entire pool of flowrings */
+
+fail:
+	prot->h2d_rings_total = 0;
+	return BCME_NOMEM;
+
+} /* dhd_prot_flowrings_pool_attach */
+
+
+/**
+ * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
+ * Invokes dhd_prot_ring_reset to perform the actual reset.
+ *
+ * Neither the DMA-able buffers nor the flowring pool itself are freed during
+ * reset.
+ *
+ * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
+ * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
+ * from the previous flowring pool instantiation will be reused.
+ *
+ * This avoids a fragmented DMA-able memory condition if dhd_prot_reset is
+ * invoked multiple times to reboot the dongle without a full detach/attach
+ * cycle.
+ */
+static void
+dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
+{
+	uint16 flowid;
+	msgbuf_ring_t *ring;
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		ASSERT(prot->h2d_rings_total == 0);
+		return;
+	}
+
+	/* Reset each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+		dhd_prot_ring_reset(dhd, ring);
+		ring->inited = FALSE;
+	}
+
+	/* Flowring pool state must be as if dhd_prot_flowrings_pool_attach had just run */
+}
+
+
+/**
+ * dhd_prot_flowrings_pool_detach - Free the pool of msgbuf_ring along with the
+ * DMA-able buffers for flowrings.
+ * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
+ * de-initialization of each msgbuf_ring_t.
+ */ +static void +dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) +{ + int flowid; + msgbuf_ring_t *ring; + int h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + + /* Detach the DMA-able buffer for each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + dhd_prot_ring_detach(dhd, ring); + } + + h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total); + + MFREE(prot->osh, prot->h2d_flowrings_pool, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL; + prot->h2d_rings_total = 0; + +} /* dhd_prot_flowrings_pool_detach */ + + +/** + * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized + * msgbuf_ring from the flowring pool, and assign it. + * + * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common + * ring information to the dongle, a flowring's information is passed via a + * flowring create control message. + * + * Only the ring state (WR, RD) index are initialized. + */ +static msgbuf_ring_t * +dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + /* ASSERT flow_ring->inited == FALSE */ + + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + ring->inited = TRUE; + + return ring; +} + + +/** + * dhd_prot_flowrings_pool_release - release a previously fetched flowring's + * msgbuf_ring back to the flow_ring pool. + */ +void +dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + ASSERT(ring == (msgbuf_ring_t*)flow_ring); + /* ASSERT flow_ring->inited == TRUE */ + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + + ring->wr = 0; + ring->rd = 0; + ring->inited = FALSE; + + ring->curr_rd = 0; +} + + +/* Assumes only one index is updated at a time */ +/* If exactly_nitems is true, this function will allocate space for nitems or fail */ +/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */ +/* If exactly_nitems is false, this function will allocate space for nitems or less */ +static void *BCMFASTPATH +dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, + bool exactly_nitems) +{ + void *ret_ptr = NULL; + uint16 ring_avail_cnt; + + ASSERT(nitems <= ring->max_items); + + ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items); + + if ((ring_avail_cnt == 0) || + (exactly_nitems && (ring_avail_cnt < nitems) && + ((ring->max_items - ring->wr) >= nitems))) { + DHD_INFO(("Space not available: ring %s items %d write %d read %d\n", + ring->name, nitems, ring->wr, ring->rd)); + return NULL; + } + *alloced = MIN(nitems, ring_avail_cnt); + + /* Return next available space */ + ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); + + /* Update write index */ + if ((ring->wr + *alloced) == ring->max_items) { + ring->wr = 0; + } else if ((ring->wr + *alloced) < ring->max_items) { + ring->wr += *alloced; + } else { + /* 
Should never hit this */ + ASSERT(0); + return NULL; + } + + return ret_ptr; +} /* dhd_prot_get_ring_space */ + + +/** + * dhd_prot_ring_write_complete - Host updates the new WR index on producing + * new messages in a H2D ring. The messages are flushed from cache prior to + * posting the new WR index. The new WR index will be updated in the DMA index + * array or directly in the dongle's ring state memory. + * A PCIE doorbell will be generated to wake up the dongle. + */ +static void BCMFASTPATH +dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + dhd_prot_t *prot = dhd->prot; + + /* cache flush */ + OSL_CACHE_FLUSH(p, ring->item_len * nitems); + + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + } + + /* raise h2d interrupt */ + prot->mb_ring_fn(dhd->bus, ring->wr); +} + + +/** + * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages + * from a D2H ring. The new RD index will be updated in the DMA Index array or + * directly in dongle's ring state memory. + */ +static void +dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) +{ + /* update read index */ + /* If dma'ing h2d indices supported + * update the r -indices in the + * host memory o/w in TCM + */ + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + } +} + + +/** + * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. + * Dongle will DMA the entire array (if DMA_INDX feature is enabled). + * See dhd_prot_dma_indx_init() + */ +static void +dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + *(uint16*)ptr = htol16(new_index); + + OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, new_index, type, ringid, ptr, offset)); + +} /* dhd_prot_dma_indx_set */ + + +/** + * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index + * array. + * Dongle DMAes an entire array to host memory (if the feature is enabled). 
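+ * The slot for a given ring is located by scaling the ring offset with the
+ * dongle-advertised index size (2 or 4 bytes). Illustrative sketch, mirroring
+ * the lookup done below for an H2D write index:
+ *
+ *   ptr  = (uint8 *)prot->h2d_dma_indx_wr_buf.va
+ *        + DHD_H2D_RING_OFFSET(ringid) * prot->rw_index_sz;
+ *   data = LTOH16(*(uint16 *)ptr);
+ *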
+ * See dhd_prot_dma_indx_init() + */ +static uint16 +dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 data; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case H2D_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return 0; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + OSL_CACHE_INV((void *)ptr, prot->rw_index_sz); + + data = LTOH16(*((uint16*)ptr)); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, data, type, ringid, ptr, offset)); + + return (data); + +} /* dhd_prot_dma_indx_get */ + +/** + * An array of DMA read/write indices, containing information about host rings, can be maintained + * either in host memory or in device memory, dependent on preprocessor options. This function is, + * dependent on these options, called during driver initialization. It reserves and initializes + * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical + * address of these host memory blocks are communicated to the dongle later on. By reading this host + * memory, the dongle learns about the state of the host rings. + */ + +static INLINE int +dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz) +{ + int rc; + + if ((dma_buf->len == bufsz) || (dma_buf->va != NULL)) + return BCME_OK; + + rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz); + + return rc; +} + +int +dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length) +{ + uint32 bufsz; + dhd_prot_t *prot = dhd->prot; + dhd_dma_buf_t *dma_buf; + + if (prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + + /* Dongle advertizes 2B or 4B RW index size */ + ASSERT(rw_index_sz != 0); + prot->rw_index_sz = rw_index_sz; + + bufsz = rw_index_sz * length; + + switch (type) { + case H2D_DMA_INDX_WR_BUF: + dma_buf = &prot->h2d_dma_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case H2D_DMA_INDX_RD_BUF: + dma_buf = &prot->h2d_dma_indx_rd_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case D2H_DMA_INDX_WR_BUF: + dma_buf = &prot->d2h_dma_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case D2H_DMA_INDX_RD_BUF: + dma_buf = &prot->d2h_dma_indx_rd_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + goto ret_no_mem; + } + DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + default: + DHD_ERROR(("%s: Unexpected option\n", 
		__FUNCTION__));
+		return BCME_BADOPTION;
+	}
+
+	return BCME_OK;
+
+ret_no_mem:
+	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
+		__FUNCTION__, type, bufsz));
+	return BCME_NOMEM;
+
+} /* dhd_prot_dma_indx_init */
+
+
+/**
+ * Called on checking for 'completion' messages from the dongle. Returns the next host buffer to
+ * read from, or NULL if there are no more messages to read.
+ */
+static uint8*
+dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
+{
+	uint16 wr;
+	uint16 rd;
+	uint16 depth;
+	uint16 items;
+	void *read_addr = NULL; /* address of the next msg to be read from the ring */
+	uint16 d2h_wr = 0;
+
+	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
+		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
+		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+
+	/* Remember the read index in a separate variable. This is because
+	 * ring->rd gets updated at the end of this function, so the exact read
+	 * index from which the message was read could otherwise not be printed.
+	 */
+	ring->curr_rd = ring->rd;
+
+	/* update the write pointer */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		/* DMAing of write/read indices is supported */
+		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+		ring->wr = d2h_wr;
+	} else {
+		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
+	}
+
+	wr = ring->wr;
+	rd = ring->rd;
+	depth = ring->max_items;
+
+	/* check for available items to read, in number of ring items */
+	items = READ_AVAIL_SPACE(wr, rd, depth);
+	if (items == 0) {
+		return NULL;
+	}
+
+	ASSERT(items < ring->max_items);
+
+	/*
+	 * Note that there are builds where ASSERT translates to just a printk,
+	 * so even if we hit this condition we would never halt, and
+	 * dhd_prot_process_msgtype could then get into a long loop if this
+	 * happens.
+ */ + if (items >= ring->max_items) { + DHD_ERROR(("\r\n======================= \r\n")); + DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", + __FUNCTION__, ring, ring->name, ring->max_items, items)); + DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth)); + DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("\r\n======================= \r\n")); + + *available_len = 0; + return NULL; + } + + /* if space is available, calculate address to be read */ + read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len); + + /* update read pointer */ + if ((ring->rd + items) >= ring->max_items) { + ring->rd = 0; + } else { + ring->rd += items; + } + + ASSERT(ring->rd < ring->max_items); + + /* convert items to bytes : available_len must be 32bits */ + *available_len = (uint32)(items * ring->item_len); + + OSL_CACHE_INV(read_addr, *available_len); + + /* return read address */ + return read_addr; + +} /* dhd_prot_get_read_addr */ + +/** Creates a flow ring and informs dongle of this event */ +int +dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_create_request_t *flow_create_rqst; + msgbuf_ring_t *flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ctrl_ring buffer space */ + flow_create_rqst = (tx_flowring_create_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_create_rqst == NULL) { + dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", + __FUNCTION__, flow_ring_node->flowid)); + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; + flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_create_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update flow create message */ + flow_create_rqst->tid = flow_ring_node->flow_info.tid; + flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); + memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); + /* CAUTION: ring::base_addr already in Little Endian */ + flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; + flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; + flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM); + flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); + DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* Update the 
flow_ring's WRITE index */ + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); + + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */ +static void +dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg; + + DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__, + ltoh16(flow_create_resp->cmplt.status), + ltoh16(flow_create_resp->cmplt.flow_ring_id))); + + dhd_bus_flow_ring_create_response(dhd->bus, + ltoh16(flow_create_resp->cmplt.flow_ring_id), + ltoh16(flow_create_resp->cmplt.status)); +} + +/** called on e.g. flow ring delete */ +void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) +{ + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + dhd_prot_ring_detach(dhd, flow_ring); + DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); +} + +void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt) +{ + const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n"; + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + uint16 rd, wr; + uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len; + + if (fmt == NULL) { + fmt = default_fmt; + } + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, + ltoh32(flow_ring->base_addr.high_addr), + ltoh32(flow_ring->base_addr.low_addr), dma_buf_len); +} + +void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + dhd_prot_t *prot = dhd->prot; + bcm_bprintf(strbuf, "CtrlPost: "); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL); + bcm_bprintf(strbuf, "CtrlCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL); + + bcm_bprintf(strbuf, "RxPost: "); + bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL); + bcm_bprintf(strbuf, "RxCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL); + + bcm_bprintf(strbuf, "TxCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL); + bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n", + dhd->prot->active_tx_count, + DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle)); +} + +int +dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_delete_request_t *flow_delete_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_delete_rqst = (tx_flowring_delete_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_delete_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ 
+ flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; + flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update Delete info */ + flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_delete_rqst->reason = htol16(BCME_OK); + + DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} + +static void +dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg; + + DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__, + flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id)); + + dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->cmplt.status); +} + +int +dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_flush_request_t *flow_flush_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_flush_rqst = (tx_flowring_flush_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (flow_flush_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; + flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_flush_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_flush_rqst->reason = htol16(BCME_OK); + + DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_flush */ + +static void +dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg; + + DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__, + flow_flush_resp->cmplt.status)); + + dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, + flow_flush_resp->cmplt.status); +} + +/** + * Request dongle to configure soft doorbells for D2H rings. Host populated soft + * doorbell information is transferred to dongle via the d2h ring config control + * message. 
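+ *
+ * Illustrative host-side setup (hypothetical placeholder values; this path is
+ * only compiled when DHD_D2H_SOFT_DOORBELL_SUPPORT is defined): before this
+ * call the host fills one bcmpcie_soft_doorbell_t per D2H common ring, e.g.
+ *
+ *   prot->soft_doorbell[0].value      = db_value;   /- doorbell value
+ *   prot->soft_doorbell[0].haddr.low  = db_addr_lo; /- host address, low 32
+ *   prot->soft_doorbell[0].haddr.high = db_addr_hi; /- host address, high 32
+ *   prot->soft_doorbell[0].items      = db_items;
+ *   prot->soft_doorbell[0].msecs      = db_msecs;
+ *
+ * dhd_msgbuf_ring_config_d2h_soft_doorbell() then copies each entry into a
+ * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL request on the control submission
+ * ring, one message per D2H common ring, and rings the H2D doorbell once.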
+ */ +void +dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) +{ +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + uint16 ring_idx; + uint8 *msg_next; + void *msg_start; + uint16 alloced = 0; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + ring_config_req_t *ring_config_req; + bcmpcie_soft_doorbell_t *soft_doorbell; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + + /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */ + DHD_GENERAL_LOCK(dhd, flags); + msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE); + + if (msg_start == NULL) { + DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n", + __FUNCTION__, d2h_rings)); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + + msg_next = (uint8*)msg_start; + + for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) { + + /* position the ring_config_req into the ctrl subm ring */ + ring_config_req = (ring_config_req_t *)msg_next; + + /* Common msg header */ + ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG; + ring_config_req->msg.if_id = 0; + ring_config_req->msg.flags = 0; + + ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */ + + /* Ring Config subtype and d2h ring_id */ + ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL); + ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx)); + + /* Host soft doorbell configuration */ + soft_doorbell = &prot->soft_doorbell[ring_idx]; + + ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value); + ring_config_req->soft_doorbell.haddr.high = + htol32(soft_doorbell->haddr.high); + ring_config_req->soft_doorbell.haddr.low = + htol32(soft_doorbell->haddr.low); + ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items); + ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs); + + DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n", + __FUNCTION__, ring_config_req->soft_doorbell.haddr.high, + ring_config_req->soft_doorbell.haddr.low, + ring_config_req->soft_doorbell.value)); + + msg_next = msg_next + ctrl_ring->item_len; + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings); + DHD_GENERAL_UNLOCK(dhd, flags); +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +} + +static void +dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg) +{ + DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", + __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status), + ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id))); +} + +int +dhd_prot_debug_info_print(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring; + uint16 rd, wr; + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 mbintstatus = 0; + uint32 d2h_mb_data = 0; + uint32 dma_buf_len; + + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); + + ring = &prot->h2dring_ctrl_subn; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, 
&wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + + ring = &prot->d2hring_ctrl_cpln; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum)); + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n", + intstatus, intmask, mbintstatus)); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask)); + + return 0; +} + +int +dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + uint32 *ptr; + uint32 value; + uint32 i; + uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus); + + OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va, + dhd->prot->d2h_dma_indx_wr_buf.len); + + ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va); + + bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues); + + bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); + + ptr++; + bcm_bprintf(b, "RPTR block Flow rings, 0x%04x\n", ptr); + for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { + value = ltoh32(*ptr); + bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); + ptr++; + } + + OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va, + dhd->prot->h2d_dma_indx_rd_buf.len); + + ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va); + + bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); + + return 0; +} + +uint32 +dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val) +{ + dhd_prot_t *prot = dhd->prot; +#if DHD_DBG_SHOW_METADATA + prot->metadata_dbg = val; +#endif + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadata_dbg_get(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + prot->rx_metadata_offset = (uint16)val; + else + prot->tx_metadata_offset = (uint16)val; + return dhd_prot_metadatalen_get(dhd, rx); +} + +uint32 +dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + return prot->rx_metadata_offset; + else + return prot->tx_metadata_offset; +} + +/** optimization to write "n" tx items at a time to ring */ +uint32 
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + dhd_prot_t *prot = dhd->prot; + if (set) + prot->txp_threshold = (uint16)val; + val = prot->txp_threshold; + return val; +} + +#ifdef DHD_RX_CHAINING + +static INLINE void BCMFASTPATH +dhd_rxchain_reset(rxchain_info_t *rxchain) +{ + rxchain->pkt_count = 0; +} + +static void BCMFASTPATH +dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) +{ + uint8 *eh; + uint8 prio; + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + ASSERT(!PKTISCHAINED(pkt)); + ASSERT(PKTCLINK(pkt) == NULL); + ASSERT(PKTCGETATTR(pkt) == 0); + + eh = PKTDATA(dhd->osh, pkt); + prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; + + if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, + rxchain->h_da, rxchain->h_prio))) { + /* Different flow - First release the existing chain */ + dhd_rxchain_commit(dhd); + } + + /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ + /* so that the chain can be handed off to CTF bridge as is. */ + if (rxchain->pkt_count == 0) { + /* First packet in chain */ + rxchain->pkthead = rxchain->pkttail = pkt; + + /* Keep a copy of ptr to ether_da, ether_sa and prio */ + rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; + rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; + rxchain->h_prio = prio; + rxchain->ifidx = ifidx; + rxchain->pkt_count++; + } else { + /* Same flow - keep chaining */ + PKTSETCLINK(rxchain->pkttail, pkt); + rxchain->pkttail = pkt; + rxchain->pkt_count++; + } + + if ((!ETHER_ISMULTI(rxchain->h_da)) && + ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || + (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { + PKTSETCHAINED(dhd->osh, pkt); + PKTCINCRCNT(rxchain->pkthead); + PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); + } else { + dhd_rxchain_commit(dhd); + return; + } + + /* If we have hit the max chain length, dispatch the chain and reset */ + if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { + dhd_rxchain_commit(dhd); + } +} + +static void BCMFASTPATH +dhd_rxchain_commit(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + if (rxchain->pkt_count == 0) + return; + + /* Release the packets to dhd_linux */ + dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); + + /* Reset the chain */ + dhd_rxchain_reset(rxchain); +} + +#endif /* DHD_RX_CHAINING */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.c b/drivers/net/wireless/bcmdhd/dhd_pcie.c new file mode 100644 index 000000000000..7438d581b4ac --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pcie.c @@ -0,0 +1,5700 @@ +/* + * DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie.c 609007 2015-12-30 07:44:52Z $ + */ + + +/* include files */ +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#ifdef PCIE_OOB +#include "ftdi_sio_external.h" +#endif /* PCIE_OOB */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ + +#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) +#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) +/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */ + +#if defined(SUPPORT_MULTIPLE_BOARD_REV) + extern unsigned int system_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV */ + +int dhd_dongle_memsize; +int dhd_dongle_ramsize; +static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG +static int dhdpcie_bus_readconsole(dhd_bus_t *bus); +#endif /* DHD_DEBUG */ +#if defined(DHD_FW_COREDUMP) +static int dhdpcie_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ + +static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); +static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, + const char *name, void *params, + int plen, void *arg, int len, int val_size); +static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval); +static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, + uint32 len, uint32 srcdelay, uint32 destdelay); +static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter); +static int _dhdpcie_download_firmware(struct dhd_bus *bus); +static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_bus_write_vars(dhd_bus_t *bus); +static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus); +static bool dhdpci_bus_read_frames(dhd_bus_t *bus); +static int dhdpcie_readshared(dhd_bus_t *bus); +static void dhdpcie_init_shared_addr(dhd_bus_t *bus); +static bool dhdpcie_dongle_attach(dhd_bus_t *bus); +static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size); +static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, + bool dongle_isolation, bool reset_flag); +static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len); +static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); +static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); +static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); +static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); +static void 
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); +static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); +static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size); +static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); +static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data); +static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); +extern void dhd_dpc_kill(dhd_pub_t *dhdp); + +#ifdef BCMEMBEDIMAGE +static int dhdpcie_download_code_array(dhd_bus_t *bus); +#endif /* BCMEMBEDIMAGE */ + + +#ifdef EXYNOS_PCIE_DEBUG +extern void exynos_pcie_register_dump(int ch_num); +#endif /* EXYNOS_PCIE_DEBUG */ + +#define PCI_VENDOR_ID_BROADCOM 0x14e4 + +static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val); +extern void wl_nddbg_wpp_log(const char *format, ...); +#ifdef PCIE_OOB +static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); + +#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ +static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT; + +#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */ +#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */ +#define BIT_WL_REG_ON 6 +#define BIT_BT_REG_ON 7 + +int gpio_handle_val = 0; +unsigned char gpio_port = 0; +unsigned char gpio_direction = 0; +#define OOB_PORT "ttyUSB0" +#endif /* PCIE_OOB */ +static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_MEMBYTES, + IOV_MEMSIZE, + IOV_SET_DOWNLOAD_STATE, + IOV_DEVRESET, + IOV_VARS, + IOV_MSI_SIM, + IOV_PCIE_LPBK, + IOV_CC_NVMSHADOW, + IOV_RAMSIZE, + IOV_RAMSTART, + IOV_SLEEP_ALLOWED, + IOV_PCIE_DMAXFER, + IOV_PCIE_SUSPEND, + IOV_PCIEREG, + IOV_PCIECFGREG, + IOV_PCIECOREREG, + IOV_PCIESERDESREG, + IOV_BAR0_SECWIN_REG, + IOV_SBREG, + IOV_DONGLEISOLATION, + IOV_LTRSLEEPON_UNLOOAD, + IOV_METADATA_DBG, + IOV_RX_METADATALEN, + IOV_TX_METADATALEN, + IOV_TXP_THRESHOLD, + IOV_BUZZZ_DUMP, + IOV_DUMP_RINGUPD_BLOCK, + IOV_DMA_RINGINDICES, + IOV_DB1_FOR_MB, + IOV_FLOW_PRIO_MAP, +#ifdef DHD_PCIE_RUNTIMEPM + IOV_IDLETIME, +#endif /* DHD_PCIE_RUNTIMEPM */ + IOV_RXBOUND, + IOV_TXBOUND, + IOV_HANGREPORT, +#ifdef PCIE_OOB + IOV_OOB_BT_REG_ON, + IOV_OOB_ENABLE +#endif /* PCIE_OOB */ +}; + + +const bcm_iovar_t dhdpcie_iovars[] = { + {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, + {"pcie_lpbk", IOV_PCIE_LPBK, 0, IOVT_UINT32, 0 }, + {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 }, + {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 }, + {"pciereg", IOV_PCIEREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 }, +#ifdef PCIE_OOB + {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 
IOVT_UINT32, 0 }, + {"oob_enable", IOV_OOB_ENABLE, 0, IOVT_UINT32, 0 }, +#endif /* PCIE_OOB */ + {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, + {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 }, + {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 }, + {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0}, + {"metadata_dbg", IOV_METADATA_DBG, 0, IOVT_BOOL, 0 }, + {"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 }, + {"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 }, + {"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 }, + {"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 }, + {"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 }, + {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 }, +#ifdef DHD_PCIE_RUNTIMEPM + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, +#endif /* DHD_PCIE_RUNTIMEPM */ + {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, + {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + + +#define MAX_READ_TIMEOUT 5 * 1000 * 1000 + +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 64 +#endif +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 64 +#endif +uint dhd_rxbound = DHD_RXBOUND; +uint dhd_txbound = DHD_TXBOUND; + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. + */ + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return dhdpcie_bus_register(); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhdpcie_bus_unregister(); + return; +} + + +/** returns a host virtual address */ +uint32 * +dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); + return; +} + +/** + * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096 + * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The + * precondition is that the PCIEBAR0Window register 'points' at the PCIe core. + * + * 'tcm' is the *host* virtual address at which tcm is mapped. 
+ */ +dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, void *pci_dev) +{ + dhd_bus_t *bus; + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + do { + if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + break; + } + + bus->regs = regs; + bus->tcm = tcm; + bus->osh = osh; + /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */ + bus->dev = (struct pci_dev *)pci_dev; + + dll_init(&bus->const_flowring); + + /* Attach pcie shared structure */ + if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) { + DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__)); + break; + } + + /* dhd_common_init(osh); */ + + if (dhdpcie_dongle_attach(bus)) { + DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__)); + break; + } + + /* software resources */ + if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + + break; + } + bus->dhd->busstate = DHD_BUS_DOWN; + bus->db1_for_mb = TRUE; + bus->dhd->hang_report = TRUE; + bus->irq_registered = FALSE; + + bus->d3_ack_war_cnt = 0; + + DHD_TRACE(("%s: EXIT SUCCESS\n", + __FUNCTION__)); + + return bus; + } while (0); + + DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__)); + + if (bus && bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + } + + if (bus) { + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + return NULL; +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +/** Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chip; +} + +/** Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chiprev; +} + +/** Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chippkg; +} + +/** Read and clear intstatus. This should be called with interupts disabled or inside isr */ +uint32 +dhdpcie_bus_intstatus(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + intstatus &= I_MB; + } else { + /* this is a PCIE core register..not a config register... */ + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + + /* this is a PCIE core register..not a config register... */ + intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + + /* + * The fourth argument to si_corereg is the "mask" fields of the register to update + * and the fifth field is the "value" to update. Now if we are interested in only + * few fields of the "mask" bit map, we should not be writing back what we read + * By doing so, we might clear/ack interrupts that are not handled yet. + */ + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask, + intstatus); + + intstatus &= intmask; + + /* Is device removed. 
intstatus & intmask read 0xffffffff */ + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__)); + intstatus = 0; +#ifdef CUSTOMER_HW4_DEBUG +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(bus->dhd); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ +#endif /* CUSTOMER_HW4_DEBUG */ + } + + intstatus &= bus->def_intmask; + } + + return intstatus; +} + +/** + * Name: dhdpcie_bus_isr + * Parameters: + * 1: IN int irq -- interrupt vector + * 2: IN void *arg -- handle to private data structure + * Return value: + * Status (TRUE or FALSE) + * + * Description: + * Interrupt Service routine checks for the status register, + * disable interrupt and queue DPC if mail box interrupts are raised. + */ +int32 +dhdpcie_bus_isr(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + + do { + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + /* verify argument */ + if (!bus) { + DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__)); + break; + } + + if (bus->dhd->dongle_reset) { + break; + } + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n", + __FUNCTION__)); + break; + } + + intstatus = dhdpcie_bus_intstatus(bus); + + /* Check if the interrupt is ours or not */ + if (intstatus == 0) { + break; + } + + /* save the intstatus */ + bus->intstatus = intstatus; + + /* Overall operation: + * - Mask further interrupts + * - Read/ack intstatus + * - Take action based on bits and state + * - Reenable interrupts (as per state) + */ + + /* Count the interrupt call */ + bus->intrcount++; + + /* read interrupt status register!! Status bits will be cleared in DPC !! */ + bus->ipend = TRUE; + dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */ + bus->intdis = TRUE; + +#if defined(PCIE_ISR_THREAD) + + DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + while (dhd_bus_dpc(bus)); + DHD_OS_WAKE_UNLOCK(bus->dhd); +#else + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); /* queue DPC now!! 
*/ +#endif /* defined(SDIO_ISR_THREAD) */ + + DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__)); + return TRUE; + + } while (0); + + DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__)); + return FALSE; +} + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +dhd_pub_t *link_recovery = NULL; +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +static bool +dhdpcie_dongle_attach(dhd_bus_t *bus) +{ + + osl_t *osh = bus->osh; + void *regsva = (void*)bus->regs; + uint16 devid = bus->cl_devid; + uint32 val; + sbpcieregs_t *sbpcieregs; + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY + link_recovery = bus->dhd; +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Set bar0 window to si_enum_base */ + dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE); + + /* Checking PCIe bus status with reading configuration space */ + val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32)); + if ((val & 0xFFFF) != VENDOR_BROADCOM) { + DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__)); + goto fail; + } + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + + + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + sbpcieregs = (sbpcieregs_t*)(bus->regs); + + /* WAR where the BAR1 window may not be sized properly */ + W_REG(osh, &sbpcieregs->configaddr, 0x4e0); + val = R_REG(osh, &sbpcieregs->configdata); + W_REG(osh, &sbpcieregs->configdata, val); + + /* Get info on the ARM and SOCRAM cores... */ + /* Should really be qualified by device id */ + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + bus->dongle_ram_base = CA7_4365_RAM_BASE; + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4339_CHIP_ID: + case BCM4335_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4358_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43567_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM4350_CHIP_ID: + case BCM43570_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + CASE_BCM4345_CHIP: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */ + ? 
CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + CASE_BCM43602_CHIP: + bus->dongle_ram_base = CR4_43602_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? + CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_memsize) + dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + + + bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1; + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + + bus->wait_for_d3_ack = 1; + bus->suspended = FALSE; + +#ifdef PCIE_OOB + gpio_handle_val = get_handle(OOB_PORT); + if (gpio_handle_val < 0) + { + DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__)); + ASSERT(FALSE); + } + + gpio_direction = 0; + ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG); + + /* Note BT core is also enabled here */ + gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + gpio_write_port(gpio_handle_val, gpio_port); + + gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG); + + bus->oob_enabled = TRUE; + + /* drive the Device_Wake GPIO low on startup */ + bus->device_wake_state = TRUE; + dhd_bus_set_device_wake(bus, FALSE); + dhd_bus_doorbell_timeout_reset(bus); +#endif /* PCIE_OOB */ + + DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__)); + return 0; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__)); + return -1; +} + +int +dhpcie_bus_unmask_interrupt(dhd_bus_t *bus) +{ + dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB); + return 0; +} +int +dhpcie_bus_mask_interrupt(dhd_bus_t *bus) +{ + dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0); + return 0; +} + +void +dhdpcie_bus_intr_enable(dhd_bus_t *bus) +{ + DHD_TRACE(("enable interrupts\n")); + if (bus && bus->sih && !bus->is_linkdown) { + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_unmask_interrupt(bus); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, + bus->def_intmask, bus->def_intmask); + } + } else { + DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); + DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", + bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1)); + } +} + +void +dhdpcie_bus_intr_disable(dhd_bus_t *bus) +{ + + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + + if (bus && bus->sih && !bus->is_linkdown) { + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_mask_interrupt(bus); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, + bus->def_intmask, 0); + } + } else { + DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); + DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", + bus, bus ? bus->sih : NULL, bus ? 
bus->is_linkdown: -1)); + } + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +/* + * dhdpcie_advertise_bus_cleanup advertises that clean up is under progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts + * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for + * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so + * they will exit from there itself without marking dhd_bus_busy_state as BUSY. + */ +static void +dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if (timeleft == 0) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + BUG_ON(1); + } + + return; +} + +static void +dhdpcie_bus_remove_prep(dhd_bus_t *bus) +{ + unsigned long flags; + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhd_os_sdlock(bus->dhd); + + dhdpcie_bus_intr_disable(bus); + if (!bus->dhd->dongle_isolation) { + pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + } + + dhd_os_sdunlock(bus->dhd); + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +/** Detach and free everything */ +void +dhdpcie_bus_release(dhd_bus_t *bus) +{ + bool dongle_isolation = FALSE; + osl_t *osh = NULL; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { + dhdpcie_advertise_bus_cleanup(bus->dhd); + dongle_isolation = bus->dhd->dongle_isolation; + dhdpcie_bus_remove_prep(bus); + + if (bus->intr) { + dhdpcie_bus_intr_disable(bus); + dhdpcie_free_irq(bus); + } + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_detach(bus->dhd); + dhd_free(bus->dhd); + bus->dhd = NULL; + } + + /* unmap the regs and tcm here!! 
*/ + if (bus->regs) { + dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE); + bus->regs = NULL; + } + if (bus->tcm) { + dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE); + bus->tcm = NULL; + } + + dhdpcie_bus_release_malloc(bus, osh); + /* Detach pcie shared structure */ + if (bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + bus->pcie_sh = NULL; + } + +#ifdef DHD_DEBUG + + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif + + + /* Finally free bus info */ + MFREE(osh, bus, sizeof(dhd_bus_t)); + + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); +} /* dhdpcie_bus_release */ + + +void +dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) { + DHD_TRACE(("%s Exit\n", __FUNCTION__)); + return; + } + + if (bus->sih) { + + if (!dongle_isolation) + pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + + if (bus->ltrsleep_on_unload) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); + } + + if (bus->sih->buscorerev == 13) + pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +uint32 +dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size) +{ + uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size); + return data; +} + +/** 32 bit config write */ +void +dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data); +} + +void +dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data); +} + +void +dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_MEMSIZE; + /* Restrict the memsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_memsize, min_size)); + if ((dhd_dongle_memsize > min_size) && + (dhd_dongle_memsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_memsize; +} + +void +dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); + return; + +} + +/** Stop bus module: clear pending frames, disable data flow */ +void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + uint32 status; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus->dhd) + return; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__)); + goto done; + } + + DHD_DISABLE_RUNTIME_PM(bus->dhd); + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhdpcie_bus_intr_disable(bus); + status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); + + if (!dhd_download_fw_on_driverload) { + dhd_dpc_kill(bus->dhd); 
+ } + + /* Clear rx control and wake any waiters */ + dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT); + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP); + +done: + return; +} + +/** Watchdog timer function */ +bool dhd_bus_watchdog(dhd_pub_t *dhd) +{ + unsigned long flags; +#ifdef DHD_DEBUG + dhd_bus_t *bus; + bus = dhd->bus; + + DHD_GENERAL_LOCK(dhd, flags); + if (dhd->busstate == DHD_BUS_DOWN || + dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; + } + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD; + DHD_GENERAL_UNLOCK(dhd, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + + + + /* Poll for console output periodically */ + if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd_console_ms) { + bus->console.count -= dhd_console_ms; + /* Make sure backplane clock is on */ + if (dhdpcie_bus_readconsole(bus) < 0) + dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef PCIE_OOB + /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */ + if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) && + dhd_timeout_expired(&bus->doorbell_timer)) { + dhd_bus_set_device_wake(bus, FALSE); + } +#endif /* PCIE_OOB */ + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD; + DHD_GENERAL_UNLOCK(dhd, flags); + + return TRUE; +} /* dhd_bus_watchdog */ + + +#define DEADBEEF_PATTERN 0xADDEADDE // "DeadDead" +#define MEMCHECKINFO "/data/.memcheck.info" + +static int +dhd_get_memcheck_info(void) +{ + struct file *fp = NULL; + uint32 mem_val = 0; + int ret = 0; + char *filepath = MEMCHECKINFO; + loff_t pos=0; + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } else { + ret = kernel_read(fp, (char *)&mem_val, 4, &pos); + if (ret < 0) { + DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + DHD_ERROR(("[WIFI_SEC]%s: MEMCHECK ENABLED = %d\n", __FUNCTION__, mem_val)); + filp_close(fp, NULL); + } +done: + return mem_val; +} + +static int +dhdpcie_mem_check(struct dhd_bus *bus) +{ + int bcmerror = BCME_OK; + int offset = 0; + int len = 0; + uint8 *memblock = NULL, *memptr; + int size = bus->ramsize; + int i; + uint32 memcheck_enabled; + + /* Read memcheck info from the file */ + /* 0 : Disable */ + /* 1 : "Dead Beef" pattern write */ + /* 2 : "Dead Beef" pattern write and checking the pattern value */ + + memcheck_enabled = dhd_get_memcheck_info(); + + DHD_ERROR(("%s: memcheck_enabled: %d \n", __FUNCTION__, memcheck_enabled)); + + if (memcheck_enabled == 0) { + return bcmerror; + } + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + + if ((ulong)memblock % DHD_SDALIGN) { + memptr += (DHD_SDALIGN - ((ulong)memblock % DHD_SDALIGN)); + } + + for (i = 0; i < MEMBLOCK; i = i + 4) { + *(ulong*)(memptr + i) = DEADBEEF_PATTERN; + } + + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + if (offset == 0) { + /* Add start of RAM address to the address given by user */ + offset += 
bus->dongle_ram_base; + } + } + + /* Write "DeadBeef" pattern with MEMBLOCK size */ + while (size) { + len = MIN(MEMBLOCK, size); + + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + if (memcheck_enabled == 2) { + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on read %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } else { + for (i = 0; i < len; i = i+4) { + if ((*(uint32*)(memptr + i)) != DEADBEEF_PATTERN) { + DHD_ERROR(("%s: error on reading pattern at " + "0x%08x\n", __FUNCTION__, (offset + i))); + bcmerror = BCME_ERROR; + goto err; + } + } + } + } + offset += MEMBLOCK; + size -= MEMBLOCK; + } + + DHD_ERROR(("%s: Writing the Dead Beef pattern is Done \n", __FUNCTION__)); + +err: + if (memblock) { + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + } + + return bcmerror; +} + +/* Download firmware image and nvram image */ +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + + DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + + dhdpcie_mem_check(bus); + + ret = dhdpcie_download_firmware(bus, osh); + + return ret; +} + +static int +dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) +{ + int ret = 0; +#if defined(BCM_REQUEST_FW) + uint chipid = bus->sih->chip; + uint revid = bus->sih->chiprev; + char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */ + char nv_path[64]; /* path to nvram vars file */ + bus->fw_path = fw_path; + bus->nv_path = nv_path; + switch (chipid) { + case BCM43570_CHIP_ID: + bcmstrncat(fw_path, "43570", 5); + switch (revid) { + case 0: + bcmstrncat(fw_path, "a0", 2); + break; + case 2: + bcmstrncat(fw_path, "a2", 2); + break; + default: + DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__, + revid)); + break; + } + break; + default: + DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, + chipid)); + return 0; + } + /* load board specific nvram file */ + snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path); + /* load firmware */ + snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path); +#endif /* BCM_REQUEST_FW */ + + DHD_OS_WAKE_LOCK(bus->dhd); + + ret = _dhdpcie_download_firmware(bus); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +static int +dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = BCME_ERROR; + int offset = 0; + int len = 0; + char *imgbuf = NULL; + uint8 *memblock = NULL, *memptr; + + int offset_end = bus->ramsize; + + DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + /* Should succeed in opening image if it is actually given through registry + * entry or in module param. 
+ */ + imgbuf = dhd_os_open_image(pfw_path); + if (imgbuf == NULL) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + DHD_INFO_HW4(("%s: dongle_ram_base: 0x%x ramsize: 0x%x tcm: %p\n", + __FUNCTION__, bus->dongle_ram_base, bus->ramsize, bus->tcm)); + /* Download image with MEMBLOCK size */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + /* check if CR4/CA7 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + offset_end += offset; + } + } + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + offset += MEMBLOCK; + + if (offset >= offset_end) { + DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", + __FUNCTION__, offset, offset_end)); + bcmerror = BCME_ERROR; + goto err; + } + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + if (imgbuf) + dhd_os_close_image(imgbuf); + + return bcmerror; +} /* dhdpcie_download_code_file */ + +#ifdef CUSTOMER_HW4_DEBUG +#define MIN_NVRAMVARS_SIZE 128 +#endif /* CUSTOMER_HW4_DEBUG */ + +static int +dhdpcie_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = BCME_ERROR; + uint len; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + bool nvram_uefi_exists = FALSE; + bool local_alloc = FALSE; + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* First try UEFI */ + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len); + + /* If UEFI empty, then read from file system */ + if ((len == 0) || (memblock[0] == '\0')) { + + if (nvram_file_exists) { + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len); + if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { + goto err; + } + } + else { + /* For SROM OTP no external file or UEFI required */ + bcmerror = BCME_OK; + } + } else { + nvram_uefi_exists = TRUE; + } + + DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); + + if (len > 0 && len <= MAX_NVRAMBUF_SIZE) { + bufp = (char *) memblock; + +#ifdef CACHE_FW_IMAGES + if (bus->processed_nvram_params_len) { + len = bus->processed_nvram_params_len; + } + + if (!bus->processed_nvram_params_len) { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + bus->processed_nvram_params_len = len; + } + } else +#else + { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + } + } +#endif /* CACHE_FW_IMAGES */ + + DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len)); +#ifdef CUSTOMER_HW4_DEBUG + if (len < MIN_NVRAMVARS_SIZE) { + DHD_ERROR(("%s: invalid nvram size in 
process_nvram_vars \n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto err; + } +#endif /* CUSTOMER_HW4_DEBUG */ + + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + + +err: + if (memblock) { + if (local_alloc) { + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + } else { + dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE); + } + } + + return bcmerror; +} + + +#ifdef BCMEMBEDIMAGE +int +dhdpcie_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + unsigned char *p_dlarray = NULL; + unsigned int dlarray_size = 0; + unsigned int downloded_len, remaining_len, len; + char *p_dlimagename, *p_dlimagever, *p_dlimagedate; + uint8 *memblock = NULL, *memptr; + + downloded_len = 0; + remaining_len = 0; + len = 0; + + p_dlarray = dlarray; + dlarray_size = sizeof(dlarray); + p_dlimagename = dlimagename; + p_dlimagever = dlimagever; + p_dlimagedate = dlimagedate; + + if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) || + (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0)) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + while (downloded_len < dlarray_size) { + remaining_len = dlarray_size - downloded_len; + if (remaining_len >= MEMBLOCK) + len = MEMBLOCK; + else + len = remaining_len; + + memcpy(memptr, (p_dlarray + downloded_len), len); + /* check if CR4/CA7 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + downloded_len += len; + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + offset += MEMBLOCK; + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + unsigned char *ularray = NULL; + unsigned int uploded_len; + uploded_len = 0; + bcmerror = -1; + ularray = MALLOC(bus->dhd->osh, dlarray_size); + if (ularray == NULL) + goto upload_err; + /* Upload image to verify downloaded contents. 
+ */ + offset = bus->dongle_ram_base; + memset(ularray, 0xaa, dlarray_size); + while (uploded_len < dlarray_size) { + remaining_len = dlarray_size - uploded_len; + if (remaining_len >= MEMBLOCK) + len = MEMBLOCK; + else + len = remaining_len; + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, + (uint8 *)(ularray + uploded_len), len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto upload_err; + } + + uploded_len += len; + offset += MEMBLOCK; + } + + if (memcmp(p_dlarray, ularray, dlarray_size)) { + DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n", + __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate)); + goto upload_err; + + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n", + __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate)); +upload_err: + if (ularray) + MFREE(bus->dhd->osh, ularray, dlarray_size); + } +#endif /* DHD_DEBUG */ +err: + + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + return bcmerror; +} /* dhdpcie_download_code_array */ +#endif /* BCMEMBEDIMAGE */ + + +static int +_dhdpcie_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + DHD_ERROR(("%s: no firmware file\n", __FUNCTION__)); + return 0; +#endif + } + + /* Keep arm in reset */ + if (dhdpcie_bus_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdpcie_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } else { + embed = FALSE; + dlok = TRUE; + } + } + +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdpcie_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } else { + dlok = TRUE; + } + } +#else + BCM_REFERENCE(embed); +#endif + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* EXAMPLE: nvram_array */ + /* If a valid nvram_array is specified as above, it can be passed down to dongle */ + /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */ + + + /* External nvram takes precedence if specified */ + if (dhdpcie_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdpcie_bus_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} /* _dhdpcie_download_firmware */ + +#define CONSOLE_LINE_MAX 192 + +#ifdef DHD_DEBUG +static int +dhdpcie_bus_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return -1; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, 
sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); + + } + } +break2: + + return BCME_OK; +} /* dhdpcie_bus_readconsole */ +#endif /* DHD_DEBUG */ + +static int +dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + trap_t tr; + pciedev_shared_t *pciedev_shared = bus->pcie_sh; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) { + return 0; + } + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. + */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdpcie_readshared(bus)) < 0) { + goto done; + } + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + pciedev_shared->msgtrace_addr, pciedev_shared->console_addr); + + if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (bus->pcie_sh->assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (bus->pcie_sh->assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line); + } + + if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { + bus->dhd->dongle_trap_occured = TRUE; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) { + goto done; + } + + bcm_bprintf(&strbuf, + "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + " lp 0x%x, rpc 0x%x" + "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), + ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), + ltoh32(bus->pcie_sh->trap_addr), + ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), + ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { + goto printbuf; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) { + goto printbuf; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) { + goto printbuf; + } + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { + goto printbuf; + } + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) { + goto printbuf; + } + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro + * will truncate a lot of the printfs + */ + + printf("CONSOLE: %s\n", line); + } + } + } + } + +printbuf: + if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { + printf("%s: %s\n", __FUNCTION__, strbuf.origbuf); + + /* wake up IOCTL wait event */ + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + + + } + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + + return bcmerror; +} /* dhdpcie_checkdied */ + + +/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */ +void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) +{ + int ret = 0; + int size; /* Full mem size */ + int start; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *databuf = buf; + + if (bus == NULL) { + return; + } + + start = bus->dongle_ram_base; + /* Get full mem size */ + size = bus->ramsize; + /* Read mem content */ + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) { + return; + } + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + bus->dhd->soc_ram = buf; + bus->dhd->soc_ram_length = bus->ramsize; + return; +} + + +#if defined(DHD_FW_COREDUMP) +static int +dhdpcie_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = bus->dongle_ram_base; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + +#ifdef EXYNOS_PCIE_DEBUG + exynos_pcie_register_dump(1); +#endif /* EXYNOS_PCIE_DEBUG */ + +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down so skip\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + + /* Get full mem size */ + size = bus->ramsize; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + buf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_MEMDUMP_BUF, size); + bzero(buf, size); +#else + buf = MALLOC(bus->dhd->osh, size); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (!buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); + return BCME_ERROR; + } + + /* Read mem content */ + DHD_TRACE_HW4(("Dump dongle memory")); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) + { + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); + if (buf) { + MFREE(bus->dhd->osh, buf, size); + } + return BCME_ERROR; + } + DHD_TRACE((".")); + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + + DHD_TRACE_HW4(("%s FUNC: Copy fw image to the embedded buffer \n", __FUNCTION__)); + + dhd_save_fwdump(bus->dhd, buf, bus->ramsize); + dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + + return ret; +} + +int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + if (bus->suspended) { + DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__)); + return 0; + } + + return dhdpcie_mem_dump(bus); +} +#endif /* DHD_FW_COREDUMP */ + 
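The two dump helpers above, dhdpcie_mem_dump_bugcheck() and dhdpcie_mem_dump(), walk dongle RAM the same way: start at bus->dongle_ram_base, pull MEMBLOCK bytes per dhdpcie_bus_membytes() call, and advance until bus->ramsize bytes have been copied. The sketch below restates that access pattern in isolation as a usage example of dhdpcie_bus_membytes(); it only uses symbols already defined in this file (dhdpcie_bus_membytes, MEMBLOCK, MIN, MALLOC/MFREE, BCME_* codes), while the function name dump_dongle_ram_sketch and its minimal error handling are illustrative and not part of the patch.

/* Minimal sketch (illustration only, not driver code): chunked dongle RAM read */
static int
dump_dongle_ram_sketch(dhd_bus_t *bus)
{
	int start = bus->dongle_ram_base;	/* backplane address where dongle RAM begins */
	int size = bus->ramsize;		/* bytes remaining to copy */
	int read_size;
	uint8 *buf, *databuf;
	int ret;

	buf = MALLOC(bus->dhd->osh, bus->ramsize);
	if (buf == NULL)
		return BCME_NOMEM;

	databuf = buf;
	while (size) {
		/* cap each backplane transaction at one MEMBLOCK */
		read_size = MIN(MEMBLOCK, size);
		ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
		if (ret) {
			MFREE(bus->dhd->osh, buf, bus->ramsize);
			return ret;
		}
		size -= read_size;
		start += read_size;
		databuf += read_size;
	}

	/* hand 'buf' to whatever consumer needs the snapshot, then release it */
	MFREE(bus->dhd->osh, buf, bus->ramsize);
	return BCME_OK;
}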
+int +dhd_socram_dump(dhd_bus_t *bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdpcie_mem_dump(bus)); +#else + return -1; +#endif +} + +/** + * Transfers bytes from host to dongle using pio mode. + * Parameter 'address' is a backplane address. + */ +static int +dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size) +{ + uint dsize; + int detect_endian_flag = 0x01; + bool little_endian; + + if (write && bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Detect endianness. */ + little_endian = *(char *)&detect_endian_flag; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + + /* Determine initial transfer parameters */ + dsize = sizeof(uint64); + + /* Do the transfer(s) */ + if (write) { + while (size) { + if (size >= sizeof(uint64) && little_endian && +#ifdef CONFIG_64BIT + !(address % 8) && +#endif /* CONFIG_64BIT */ + 1) { + dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data)); + } else { + dsize = sizeof(uint8); + dhdpcie_bus_wtcm8(bus, address, *data); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + } + } + } else { + while (size) { + if (size >= sizeof(uint64) && little_endian && +#ifdef CONFIG_64BIT + !(address % 8) && +#endif /* CONFIG_64BIT */ + 1) { + *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address); + } else { + dsize = sizeof(uint8); + *data = dhdpcie_bus_rtcm8(bus, address); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize) > 0) { + data += dsize; + address += dsize; + } + } + } + return BCME_OK; +} /* dhdpcie_bus_membytes */ + +/** + * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue + * to the (non flow controlled) flow ring. + */ +int BCMFASTPATH +dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) +{ + flow_ring_node_t *flow_ring_node; + int ret = BCME_OK; +#ifdef DHD_LOSSLESS_ROAMING + dhd_pub_t *dhdp = bus->dhd; +#endif + DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); + + /* ASSERT on flow_id */ + if (flow_id >= bus->max_sub_queues) { + DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, + flow_id, bus->max_sub_queues)); + return 0; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id); + +#ifdef DHD_LOSSLESS_ROAMING + if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) { + DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n", + __FUNCTION__, flow_ring_node->flow_info.tid)); + return BCME_OK; + } +#endif /* DHD_LOSSLESS_ROAMING */ + + { + unsigned long flags; + void *txp = NULL; + flow_queue_t *queue; +#ifdef DHD_LOSSLESS_ROAMING + struct ether_header *eh; + uint8 *pktdata; +#endif /* DHD_LOSSLESS_ROAMING */ + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + return BCME_NOTREADY; + } + + while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTORPHAN(txp); + + /* + * Modifying the packet length caused P2P cert failures. + * Specifically on test cases where a packet of size 52 bytes + * was injected, the sniffer capture showed 62 bytes because of + * which the cert tests failed. So making the below change + * only Router specific. 
+ */ + +#ifdef DHDTCPACK_SUPPRESS + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) { + ret = dhd_tcpack_check_xmit(bus->dhd, txp); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n", + __FUNCTION__)); + } + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_LOSSLESS_ROAMING + pktdata = (uint8 *)PKTDATA(OSH_NULL, txp); + eh = (struct ether_header *) pktdata; + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + uint8 prio = (uint8)PKTPRIO(txp); + + /* Restore to original priority for 802.1X packet */ + if (prio == PRIO_8021D_NC) { + PKTSETPRIO(txp, PRIO_8021D_BE); + } + } +#endif /* DHD_LOSSLESS_ROAMING */ + + /* Attempt to transfer packet over flow ring */ + ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex); + if (ret != BCME_OK) { /* may not have resources in flow ring */ + DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret)); + dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE); + /* reinsert at head */ + dhd_flow_queue_reinsert(bus->dhd, queue, txp); + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* If we are able to requeue back, return success */ + return BCME_OK; + } + } + + dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; +} /* dhd_bus_schedule_queue */ + +/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */ +int BCMFASTPATH +dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) +{ + uint16 flowid; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + int ret = BCME_OK; + void *txp_pend = NULL; + + if (!bus->dhd->flowid_allocator) { + DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); + goto toss; + } + + flowid = DHD_PKT_GET_FLOWID(txp); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + + DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if ((flowid >= bus->dhd->num_flow_rings) || + (!flow_ring_node->active) || + (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) || + (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + ret = BCME_ERROR; + goto toss; + } + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) { + txp_pend = txp; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status) { + DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + if (txp_pend) { + txp = txp_pend; + goto toss; + } + return BCME_OK; + } + ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + /* If we have anything pending, try to push into q */ + if (txp_pend) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + txp = txp_pend; + goto toss; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; + +toss: + DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); + PKTCFREE(bus->dhd->osh, txp, TRUE); + return ret; +} /* dhd_bus_txdata */ + + +void 
+dhd_bus_stop_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); + bus->bus_flowctrl = TRUE; +} + +void +dhd_bus_start_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + bus->bus_flowctrl = TRUE; +} + +#if defined(DHD_DEBUG) +/* Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhd->bus; + uint32 addr, val; + int rv; + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* generate an interrupt to dongle to indicate that it needs to process cons command */ + dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT); +done: + return rv; +} /* dhd_bus_console_in */ +#endif /* defined(DHD_DEBUG) */ + +/** + * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is + * contained in 'pkt'. Processes rx frame, forwards up the layer to netif. + */ +void BCMFASTPATH +dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) +{ + dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0); +} + +/** 'offset' is a backplane address */ +void +dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) +{ + *(volatile uint8 *)(bus->tcm + offset) = (uint8)data; +} + +uint8 +dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) +{ + volatile uint8 data; + + data = *(volatile uint8 *)(bus->tcm + offset); + + return data; +} + +void +dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) +{ + *(volatile uint32 *)(bus->tcm + offset) = (uint32)data; +} +void +dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) +{ + *(volatile uint16 *)(bus->tcm + offset) = (uint16)data; +} +void +dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) +{ + *(volatile uint64 *)(bus->tcm + offset) = (uint64)data; +} + +uint16 +dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) +{ + volatile uint16 data; + + data = *(volatile uint16 *)(bus->tcm + offset); + + return data; +} + +uint32 +dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) +{ + volatile uint32 data; + + data = *(volatile uint32 *)(bus->tcm + offset); + + return data; +} + +uint64 +dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) +{ + volatile uint64 data; + + data = *(volatile uint64 *)(bus->tcm + offset); + + return data; +} + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) +{ + uint64 long_data; + ulong tcm_offset; + + DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + switch (type) { + case D2H_DMA_SCRATCH_BUF: + { + pciedev_shared_t *sh = 
(pciedev_shared_t*)bus->shared_addr; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)&(sh->host_dma_scratch_buffer); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case D2H_DMA_SCRATCH_BUF_LEN: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; + tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len); + dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data)); + prhex(__FUNCTION__, data, len); + break; + } + + case H2D_DMA_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case H2D_DMA_INDX_RD_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case D2H_DMA_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case D2H_DMA_INDX_RD_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case RING_ITEM_LEN: + tcm_offset = bus->ring_sh[ringid].ring_mem_addr; + tcm_offset += OFFSETOF(ring_mem_t, len_items); + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_MAX_ITEMS: + tcm_offset = bus->ring_sh[ringid].ring_mem_addr; + tcm_offset += OFFSETOF(ring_mem_t, max_item); + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_BUF_ADDR: + long_data = HTOL64(*(uint64 *)data); + tcm_offset = bus->ring_sh[ringid].ring_mem_addr; + tcm_offset += OFFSETOF(ring_mem_t, base_addr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + + case RING_WR_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_w; + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_RD_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_r; + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case D2H_MB_DATA: + dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr, + (uint32) HTOL32(*(uint32 *)data)); + break; + + case H2D_MB_DATA: + dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr, + (uint32) HTOL32(*(uint32 *)data)); + break; + + default: + break; + } +} /* dhd_bus_cmn_writeshared */ + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) +{ + ulong tcm_offset; + + switch (type) { + case RING_WR_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_w; + *(uint16*)data = 
LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); + break; + case RING_RD_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_r; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); + break; + case TOTAL_LFRAG_PACKET_CNT: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, + (ulong) &sh->total_lfrag_pkt_cnt)); + break; + } + case H2D_MB_DATA: + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr)); + break; + case D2H_MB_DATA: + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr)); + break; + case MAX_HOST_RXBUFS: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, + (ulong) &sh->max_host_rxbufs)); + break; + } + default : + break; + } +} + +uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus) +{ + return ((pciedev_shared_t*)bus->pcie_sh)->flags; +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) { + goto exit; + } + + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} /* dhd_bus_iovar_op */ + +#ifdef BCM_BUZZZ +#include + +int +dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log, + const int num_counters) +{ + int bytes = 0; + uint32 ctr; + uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX]; + uint32 delta[BCM_BUZZZ_COUNTERS_MAX]; + + /* Compute elapsed counter values per counter event type */ + for (ctr = 0U; ctr < num_counters; ctr++) { + prev[ctr] = core[ctr]; + curr[ctr] = *log++; + core[ctr] = curr[ctr]; /* saved for next log */ + + if (curr[ctr] < prev[ctr]) + delta[ctr] = curr[ctr] + (~0U - prev[ctr]); + else + delta[ctr] = (curr[ctr] - prev[ctr]); + + bytes += sprintf(p + bytes, "%12u ", delta[ctr]); + } + + return bytes; +} + +typedef union cm3_cnts { /* export this in bcm_buzzz.h */ + uint32 u32; + uint8 u8[4]; + struct { + uint8 cpicnt; + uint8 exccnt; + uint8 sleepcnt; + uint8 lsucnt; + }; +} cm3_cnts_t; + +int +dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log) +{ + int bytes = 0; + + uint32 cyccnt, instrcnt; + cm3_cnts_t cm3_cnts; + uint8 foldcnt; + + { /* 32bit cyccnt */ + uint32 curr, prev, delta; + prev = core[0]; curr = *log++; core[0] = curr; + if (curr < prev) + delta = curr + (~0U - prev); + else + delta = (curr - prev); + + bytes += sprintf(p + bytes, "%12u ", delta); + cyccnt = delta; + } + + { /* Extract the 4 cnts: cpi, exc, sleep and lsu */ + int i; + uint8 max8 = ~0; + cm3_cnts_t curr, prev, delta; + prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32; + for (i = 0; i < 4; i++) { + if (curr.u8[i] < prev.u8[i]) + delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]); + else + delta.u8[i] = (curr.u8[i] - prev.u8[i]); + bytes += sprintf(p + bytes, "%4u ", delta.u8[i]); + } + cm3_cnts.u32 = delta.u32; + } + + { /* Extract the foldcnt from arg0 */ + uint8 curr, prev, delta, max8 = ~0; + bcm_buzzz_arg0_t arg0; arg0.u32 = *log; + prev = core[2]; curr = arg0.klog.cnt; core[2] = curr; + if (curr < prev) + delta = curr + (max8 - prev); + else + delta = (curr - prev); + bytes += sprintf(p + bytes, "%4u ", delta); + foldcnt = delta; + } + + instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2] + + cm3_cnts.u8[3]) + foldcnt; + if (instrcnt > 0xFFFFFF00) + bytes += sprintf(p + bytes, "[%10s] ", "~"); + else + bytes += sprintf(p + bytes, "[%10u] ", instrcnt); + return bytes; +} + +int +dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz) +{ + int bytes = 0; + bcm_buzzz_arg0_t arg0; + static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS; + + if (buzzz->counters == 6) { + bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log); + log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */ + } else { + bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters); + log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */ + } + + /* Dump the logged arguments using the registered formats */ + arg0.u32 = *log++; + + switch (arg0.klog.args) { + case 0: + bytes += sprintf(p + bytes, fmt[arg0.klog.id]); + break; + case 1: + { + uint32 arg1 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1); + break; + } + case 2: + { + uint32 arg1, arg2; + arg1 = *log++; arg2 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2); + break; + } + case 3: + { + uint32 arg1, arg2, arg3; + arg1 = *log++; arg2 = *log++; arg3 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3); + break; + } + case 4: + { + uint32 arg1, 
arg2, arg3, arg4; + arg1 = *log++; arg2 = *log++; + arg3 = *log++; arg4 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4); + break; + } + default: + printf("Maximum one argument supported\n"); + break; + } + + bytes += sprintf(p + bytes, "\n"); + + return bytes; +} + +void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p) +{ + int i; + uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX]; + void * log; + + for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) { + core[i] = 0; + } + + log_sz = buzzz_p->log_sz; + + part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz; + + if (buzzz_p->wrap == TRUE) { + part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz; + total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz; + } else { + part2 = 0U; + total = buzzz_p->count; + } + + if (total == 0U) { + printf("bcm_buzzz_dump total<%u> done\n", total); + return; + } else { + printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", + total, part2, part1); + } + + if (part2) { /* with wrap */ + log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log)); + while (part2--) { /* from cur to end : part2 */ + p[0] = '\0'; + dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); + printf("%s", p); + log = (void*)((size_t)log + buzzz_p->log_sz); + } + } + + log = (void*)buffer_p; + while (part1--) { + p[0] = '\0'; + dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); + printf("%s", p); + log = (void*)((size_t)log + buzzz_p->log_sz); + } + + printf("bcm_buzzz_dump done.\n"); +} + +int dhd_buzzz_dump_dngl(dhd_bus_t *bus) +{ + bcm_buzzz_t * buzzz_p = NULL; + void * buffer_p = NULL; + char * page_p = NULL; + pciedev_shared_t *sh; + int ret = 0; + + if (bus->dhd->busstate != DHD_BUS_DATA) { + return BCME_UNSUPPORTED; + } + if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) { + printf("Page memory allocation failure\n"); + goto done; + } + if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) { + printf("BCM BUZZZ memory allocation failure\n"); + goto done; + } + + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + goto done; + } + + sh = bus->pcie_sh; + + DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz)); + + if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */ + + dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz, + (uint8 *)buzzz_p, sizeof(bcm_buzzz_t)); + + printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> " + "count<%u> status<%u> wrap<%u>\n" + "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n", + (int)sh->buzzz, + (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end, + buzzz_p->count, buzzz_p->status, buzzz_p->wrap, + buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group, + buzzz_p->buffer_sz, buzzz_p->log_sz); + + if (buzzz_p->count == 0) { + printf("Empty dongle BUZZZ trace\n\n"); + goto done; + } + + /* Allocate memory for trace buffer and format strings */ + buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz); + if (buffer_p == NULL) { + printf("Buffer memory allocation failure\n"); + goto done; + } + + /* Fetch the trace. 
format strings are exported via bcm_buzzz.h */ + dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */ + (uint8 *)buffer_p, buzzz_p->buffer_sz); + + /* Process and display the trace using formatted output */ + + { + int ctr; + for (ctr = 0; ctr < buzzz_p->counters; ctr++) { + printf(" ", buzzz_p->eventid[ctr]); + } + printf("\n"); + } + + dhd_buzzz_dump(buzzz_p, buffer_p, page_p); + + printf("----- End of dongle BCM BUZZZ Trace -----\n\n"); + + MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL; + } + +done: + + if (page_p) MFREE(bus->dhd->osh, page_p, 4096); + if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t)); + if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); + + return BCME_OK; +} +#endif /* BCM_BUZZZ */ + +#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \ + ((sih)->buscoretype == PCIE2_CORE_ID)) + +static bool +pcie2_mdiosetblock(dhd_bus_t *bus, uint blk) +{ + uint mdiodata, mdioctrl, i = 0; + uint pcie_serdes_spinwait = 200; + + mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF); + mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE; + + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata); + + OSL_DELAY(10); + /* retry till the transaction is complete */ + while (i < pcie_serdes_spinwait) { + uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, + 0, 0); + if (!(mdioctrl_read & MDIODATA2_DONE)) { + break; + } + OSL_DELAY(1000); + i++; + } + + if (i >= pcie_serdes_spinwait) { + DHD_ERROR(("pcie_mdiosetblock: timed out\n")); + return FALSE; + } + + return TRUE; +} + + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + dhd_bus_t *bus = dhdp->bus; + int bcmerror = 0; + unsigned long flags; +#ifdef CONFIG_ARCH_MSM + int retry = POWERUP_MAX_RETRY; +#endif /* CONFIG_ARCH_MSM */ + + if (dhd_download_fw_on_driverload) { + bcmerror = dhd_bus_start(dhdp); + } else { + if (flag == TRUE) { /* Turn off WLAN */ + /* Removing Power */ + DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + + bus->dhd->up = FALSE; + + if (bus->dhd->busstate != DHD_BUS_DOWN) { + dhdpcie_advertise_bus_cleanup(bus->dhd); + if (bus->intr) { + dhdpcie_bus_intr_disable(bus); + dhdpcie_free_irq(bus); + } +#ifdef BCMPCIE_OOB_HOST_WAKE + /* Clean up any pending host wake IRQ */ + dhd_bus_oob_intr_set(bus->dhd, FALSE); + dhd_bus_oob_intr_unregister(bus->dhd); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + dhd_os_wd_timer(dhdp, 0); + dhd_bus_stop(bus, TRUE); + dhd_prot_reset(dhdp); + dhd_clear(dhdp); + dhd_bus_release_dongle(bus); + dhdpcie_bus_free_resource(bus); + bcmerror = dhdpcie_bus_disable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_bus_clock_stop(bus); + if (bcmerror) { + DHD_ERROR(("%s: host clock stop failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#endif /* CONFIG_ARCH_MSM */ + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } else { + if (bus->intr) { + dhdpcie_free_irq(bus); + } +#ifdef BCMPCIE_OOB_HOST_WAKE + /* Clean up any pending host wake IRQ */ + dhd_bus_oob_intr_set(bus->dhd, FALSE); + dhd_bus_oob_intr_unregister(bus->dhd); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + dhd_prot_reset(dhdp); + dhd_clear(dhdp); + dhd_bus_release_dongle(bus); + dhdpcie_bus_free_resource(bus); + bcmerror = 
dhdpcie_bus_disable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_bus_clock_stop(bus); + if (bcmerror) { + DHD_ERROR(("%s: host clock stop failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#endif /* CONFIG_ARCH_MSM */ + } + + bus->dhd->dongle_reset = TRUE; + DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__)); + + } else { /* Turn on WLAN */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + /* Powering On */ + DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__)); +#ifdef CONFIG_ARCH_MSM + while (--retry) { + bcmerror = dhdpcie_bus_clock_start(bus); + if (!bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n", + __FUNCTION__)); + break; + } else { + OSL_SLEEP(10); + } + } + + if (bcmerror && !retry) { + DHD_ERROR(("%s: host pcie clock enable failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 0; + bus->pci_d3hot_done = 0; + bcmerror = dhdpcie_bus_enable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: host configuration restore failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhdpcie_bus_alloc_resource(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhdpcie_bus_dongle_attach(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhd_bus_request_irq(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bus->dhd->dongle_reset = FALSE; + + bcmerror = dhd_bus_start(dhdp); + if (bcmerror) { + DHD_ERROR(("%s: dhd_bus_start: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bus->dhd->up = TRUE; + DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: what should we do here\n", __FUNCTION__)); + goto done; + } + } + } + +done: + if (bcmerror) { + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + + return bcmerror; +} + +static int +pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val, + bool slave_bypass) +{ + uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl; + uint32 reg32; + + pcie2_mdiosetblock(bus, physmedia); + + /* enable mdio access to SERDES */ + mdio_ctrl = MDIOCTL2_DIVISOR_VAL; + mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF); + + if (slave_bypass) + mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS; + + if (!write) + mdio_ctrl |= MDIOCTL2_READ; + + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl); + + if (write) { + reg32 = PCIE2_MDIO_WR_DATA; + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, + *val | MDIODATA2_DONE); + } else + reg32 = PCIE2_MDIO_RD_DATA; + + /* retry till the transaction is complete */ + while (i < pcie_serdes_spinwait) { + uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0); + if (!(done_val & MDIODATA2_DONE)) { + if (!write) { + *val = si_corereg(bus->sih, bus->sih->buscoreidx, + PCIE2_MDIO_RD_DATA, 0, 0); + *val = *val & MDIODATA2_MASK; + } + return 0; + } + OSL_DELAY(1000); + i++; + } + return -1; +} + +static int +dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + 
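+	/* int_val, int_val2 and int_val3 receive up to three 32-bit IOVAR parameters copied out of 'params' below. */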
int32 int_val2 = 0; + int32 int_val3 = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + if (plen >= (int)sizeof(int_val) * 2) + bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2)); + + if (plen >= (int)sizeof(int_val) * 3) + bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + /* Check if dongle is in reset. If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + switch (actionid) { + + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdpcie_downloadvars(bus, arg, len); + break; + + case IOV_SVAL(IOV_PCIEREG): + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + int_val); + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, + int_val2); + break; + + case IOV_GVAL(IOV_PCIEREG): + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + int_val); + int_val = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIECOREREG): + si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2); + break; + case IOV_GVAL(IOV_BAR0_SECWIN_REG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_BAR0_SECWIN_REG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset | SI_ENUM_BASE; + size = sdreg.func; + + if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset | SI_ENUM_BASE; + size = sdreg.func; + if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_GVAL(IOV_PCIESERDESREG): + { + uint val; + if (!PCIE_GEN2(bus->sih)) { + DHD_ERROR(("supported only in pcie gen2\n")); + bcmerror = BCME_ERROR; + break; + } + + if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) { + bcopy(&val, arg, sizeof(int32)); + } else { + DHD_ERROR(("pcie2_mdioop failed.\n")); + bcmerror = BCME_ERROR; + } + break; + } + + case 
IOV_SVAL(IOV_PCIESERDESREG): + if (!PCIE_GEN2(bus->sih)) { + DHD_ERROR(("supported only in pcie gen2\n")); + bcmerror = BCME_ERROR; + break; + } + if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) { + DHD_ERROR(("pcie2_mdioop failed.\n")); + bcmerror = BCME_ERROR; + } + break; + case IOV_GVAL(IOV_PCIECOREREG): + int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIECFGREG): + OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2); + break; + + case IOV_GVAL(IOV_PCIECFGREG): + int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIE_LPBK): + bcmerror = dhdpcie_bus_lpback_req(bus, int_val); + break; + + case IOV_SVAL(IOV_PCIE_DMAXFER): + bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3); + break; + + case IOV_GVAL(IOV_PCIE_SUSPEND): + int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PCIE_SUSPEND): + dhdpcie_bus_suspend(bus, bool_val); + break; + + case IOV_GVAL(IOV_MEMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; /* absolute backplane address */ + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__, + (set ? "write" : "read"), size, address, dsize)); + + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (set && address == bus->dongle_ram_base) { + bus->resetinstr = *(((uint32*)params) + 2); + } + } else { + /* If we know about SOCRAM, check for a fit */ + if ((bus->orig_ramsize) && + ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) + { + uint8 enable, protect, remap; + si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); + if (!enable || protect) { + DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", + __FUNCTION__, bus->orig_ramsize, size, address)); + DHD_ERROR(("%s: socram enable %d, protect %d\n", + __FUNCTION__, enable, protect)); + bcmerror = BCME_BADARG; + break; + } + + if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) { + uint32 devramsize = si_socdevram_size(bus->sih); + if ((address < SOCDEVRAM_ARM_ADDR) || + (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) { + DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", + __FUNCTION__, address, size)); + DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", + __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize)); + bcmerror = BCME_BADARG; + break; + } + /* move it such that address is real now */ + address -= SOCDEVRAM_ARM_ADDR; + address += SOCDEVRAM_BP_ADDR; + DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", + __FUNCTION__, (set ? 
"write" : "read"), size, address)); + } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) { + /* Can not access remap region while devram remap bit is set + * ROM content would be returned in this case + */ + DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n", + __FUNCTION__, address)); + bcmerror = BCME_ERROR; + break; + } + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size); + + break; + } + +#ifdef BCM_BUZZZ + /* Dump dongle side buzzz trace to console */ + case IOV_GVAL(IOV_BUZZZ_DUMP): + bcmerror = dhd_buzzz_dump_dngl(bus); + break; +#endif /* BCM_BUZZZ */ + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdpcie_bus_download_state(bus, bool_val); + break; + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_CC_NVMSHADOW): + { + struct bcmstrbuf dump_b; + + bcm_binit(&dump_b, arg, len); + bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b); + break; + } + + case IOV_GVAL(IOV_SLEEP_ALLOWED): + bool_val = bus->sleep_allowed; + bcopy(&bool_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SLEEP_ALLOWED): + bus->sleep_allowed = bool_val; + break; + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD): + int_val = bus->ltrsleep_on_unload; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD): + bus->ltrsleep_on_unload = bool_val; + break; + + case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b); + break; + } + case IOV_GVAL(IOV_DMA_RINGINDICES): + { int h2d_support, d2h_support; + + d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0; + h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0; + int_val = d2h_support | (h2d_support << 1); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_DMA_RINGINDICES): + /* Can change it only during initialization/FW download */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + if ((int_val > 3) || (int_val < 0)) { + DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n")); + bcmerror = BCME_BADARG; + } else { + bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE; + bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? 
TRUE : FALSE; + } + } else { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + } + break; + + case IOV_GVAL(IOV_METADATA_DBG): + int_val = dhd_prot_metadata_dbg_get(bus->dhd); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_METADATA_DBG): + dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0)); + break; + + case IOV_GVAL(IOV_RX_METADATALEN): + int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RX_METADATALEN): + if (int_val > 64) { + bcmerror = BCME_BUFTOOLONG; + break; + } + dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE); + break; + + case IOV_SVAL(IOV_TXP_THRESHOLD): + dhd_prot_txp_threshold(bus->dhd, TRUE, int_val); + break; + + case IOV_GVAL(IOV_TXP_THRESHOLD): + int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DB1_FOR_MB): + if (int_val) + bus->db1_for_mb = TRUE; + else + bus->db1_for_mb = FALSE; + break; + + case IOV_GVAL(IOV_DB1_FOR_MB): + if (bus->db1_for_mb) + int_val = 1; + else + int_val = 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_TX_METADATALEN): + int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TX_METADATALEN): + if (int_val > 64) { + bcmerror = BCME_BUFTOOLONG; + break; + } + dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE); + break; + + case IOV_SVAL(IOV_DEVRESET): + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + break; + + case IOV_GVAL(IOV_FLOW_PRIO_MAP): + int_val = bus->dhd->flow_prio_map_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FLOW_PRIO_MAP): + int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val); + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_PCIE_RUNTIMEPM + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; +#endif /* DHD_PCIE_RUNTIMEPM */ + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", + __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + return bcmerror; +} /* dhdpcie_bus_doiovar */ + +/** Transfers bytes from host to dongle using pio mode */ +static int +dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) +{ + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return 0; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return 0; + } + if (bus->dhd->busstate != DHD_BUS_DATA) { + DHD_ERROR(("not in a readystate to LPBK is not inited\n")); + return 0; + } + dhdmsgbuf_lpbk_req(bus->dhd, len); + return 0; +} + +int +dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) +{ + int timeleft; + unsigned long flags; + int rc = 0; + + if (bus->dhd == NULL) { + DHD_ERROR(("bus not 
inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + DHD_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { + DHD_ERROR(("not in a readystate to LPBK is not inited\n")); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; + } + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); + return -EIO; + } + + if (bus->suspended == state) { /* Set to same state */ + DHD_ERROR(("Bus is already in SUSPEND state.\n")); + return BCME_OK; + } + + if (state) { + int idle_retry = 0; + int active; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down, state=%d\n", + __FUNCTION__, state)); + return BCME_ERROR; + } + + /* Suspend */ + DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__)); + bus->wait_for_d3_ack = 0; + bus->suspended = TRUE; + + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* stop all interface network queue. */ + dhd_bus_stop_queue(bus); + bus->dhd->busstate = DHD_BUS_SUSPEND; + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->suspended = FALSE; + return -EBUSY; + } + + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT); + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + + { + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + /* If wait_for_d3_ack was not updated because D2H MB was not received */ + if (bus->wait_for_d3_ack == 0) { + /* Read the Mb data to see if the Dongle has actually sent D3 ACK */ + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + + if (d2h_mb_data & D2H_DEV_D3_ACK) { + DHD_ERROR(("*** D3 WAR for missing interrupt ***\r\n")); + /* Clear the MB Data */ + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), + D2H_MB_DATA, 0); + + /* Consider that D3 ACK is received */ + bus->wait_for_d3_ack = 1; + bus->d3_ack_war_cnt++; + + } /* d2h_mb_data & D2H_DEV_D3_ACK */ + } /* bus->wait_for_d3_ack was 0 */ + } + + /* To allow threads that got pre-empted to complete. + */ + while ((active = dhd_os_check_wakelock_all(bus->dhd)) && + (idle_retry < MAX_WKLK_IDLE_CHECK)) { + msleep(1); + idle_retry++; + } + + if (bus->wait_for_d3_ack) { + DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__)); + /* Got D3 Ack. Suspend the bus */ + if (active) { + DHD_ERROR(("%s():Suspend failed because of wakelock restoring " + "Dongle to D0\n", __FUNCTION__)); + + /* + * Dongle still thinks that it has to be in D3 state + * until gets a D0 Inform, but we are backing off from suspend. + * Ensure that Dongle is brought back to D0. + * + * Bringing back Dongle from D3 Ack state to D0 state + * is a 2 step process. Dongle would want to know that D0 Inform + * would be sent as a MB interrupt + * to bring it out of D3 Ack state to D0 state. + * So we have to send both this message. 
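+					 * Both messages are ORed into the single dhdpcie_send_mb_data() call below.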
+ */ + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, + (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + rc = BCME_ERROR; + } else { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + dhdpcie_bus_intr_disable(bus); + rc = dhdpcie_pci_suspend_resume(bus, state); + dhd_bus_set_device_wake(bus, FALSE); + } + bus->dhd->d3ackcnt_timeout = 0; +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhdpcie_oob_intr_set(bus, TRUE); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + } else if (timeleft == 0) { + bus->dhd->d3ackcnt_timeout++; + DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n", + __FUNCTION__, bus->dhd->d3ackcnt_timeout)); + dhd_prot_debug_info_print(bus->dhd); +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + /* write core dump to file */ + bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) { + DHD_ERROR(("%s: Event HANG send up " + "due to PCIe linkdown\n", __FUNCTION__)); +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT); + } + rc = -ETIMEDOUT; + + } + + bus->wait_for_d3_ack = 1; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND; + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } else { + /* Resume */ +#if defined(BCMPCIE_OOB_HOST_WAKE) + DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + rc = dhdpcie_pci_suspend_resume(bus, state); + if (bus->dhd->busstate == DHD_BUS_SUSPEND) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + dhd_bus_set_device_wake(bus, TRUE); + } + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DATA; + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME; +#ifdef DHD_PCIE_RUNTIMEPM + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + bus->bus_wake = 1; + OSL_SMP_WMB(); + wake_up_interruptible(&bus->rpm_queue); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + /* resume all interface network queue. 
*/ + dhd_bus_start_queue(bus); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + dhdpcie_bus_intr_enable(bus); + } + return rc; +} + +/** Transfers bytes from host to dongle and to host again using DMA */ +static int +dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay) +{ + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->busstate != DHD_BUS_DATA) { + DHD_ERROR(("not in a readystate to LPBK is not inited\n")); + return BCME_ERROR; + } + + if (len < 5 || len > 4194296) { + DHD_ERROR(("len is too small or too large\n")); + return BCME_ERROR; + } + return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay); +} + + + +static int +dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) +{ + int bcmerror = 0; + uint32 *cr4_regs; + + if (!bus->sih) { + DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); + return BCME_ERROR; + } + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). + */ + if (enter) { + bus->alp_only = TRUE; + + /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */ + cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + + if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + /* reset last 4 bytes of RAM address. to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } else if (cr4_regs == NULL) { /* no CR4 present on chip */ + si_core_disable(bus->sih, 0); + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (BCM43602_CHIP(bus->sih->chip)) { + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); + } + /* reset last 4 bytes of RAM address. 
to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } + } else { + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + /* now remove reset and halt and continue to run CA7 */ + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* Enable remap before ARM reset but after vars. + * No backplane access in remap mode + */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + if (BCM43602_CHIP(bus->sih->chip)) { + /* Firmware crashes on SOCSRAM access when core is in reset */ + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + } + + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to PCIE core */ + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + + return bcmerror; +} /* dhdpcie_bus_download_state */ + +static int +dhdpcie_bus_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? 
ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + /* Write the vars list */ + DHD_INFO_HW4(("%s: tcm: %p varaddr: 0x%x varsize: %d\n", + __FUNCTION__, bus->tcm, varaddr, varsize)); + bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); + + /* Implement read back and verify later */ +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) + return BCME_NOMEM; + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + phys_size, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + bus->nvram_csm = varsizew; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + bus->nvram_csm = varsizew; + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + DHD_INFO_HW4(("%s: tcm: %p phys_size: 0x%x varsizew: %x\n", + __FUNCTION__, bus->tcm, phys_size, varsizew)); + bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} /* dhdpcie_bus_write_vars */ + +int +dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? 
len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); + + +err: + return bcmerror; +} + +#ifndef BCMPCIE_OOB_HOST_WAKE +/* loop through the capability list and see if the pcie capabilty exists */ +uint8 +dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id) +{ + uint8 cap_id; + uint8 cap_ptr = 0; + uint8 byte_val; + + /* check for Header type 0 */ + byte_val = read_pci_cfg_byte(PCI_CFG_HDR); + if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) { + DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__)); + goto end; + } + + /* check if the capability pointer field exists */ + byte_val = read_pci_cfg_byte(PCI_CFG_STAT); + if (!(byte_val & PCI_CAPPTR_PRESENT)) { + DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__)); + goto end; + } + + cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR); + /* check if the capability pointer is 0x00 */ + if (cap_ptr == 0x00) { + DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__)); + goto end; + } + + /* loop thr'u the capability list and see if the pcie capabilty exists */ + + cap_id = read_pci_cfg_byte(cap_ptr); + + while (cap_id != req_cap_id) { + cap_ptr = read_pci_cfg_byte((cap_ptr + 1)); + if (cap_ptr == 0x00) break; + cap_id = read_pci_cfg_byte(cap_ptr); + } + +end: + return cap_ptr; +} + +void +dhdpcie_pme_active(osl_t *osh, bool enable) +{ + uint8 cap_ptr; + uint32 pme_csr; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return; + } + + pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32)); + DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr)); + + pme_csr |= PME_CSR_PME_STAT; + if (enable) { + pme_csr |= PME_CSR_PME_EN; + } else { + pme_csr &= ~PME_CSR_PME_EN; + } + + OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr); +} + +bool +dhdpcie_pme_cap(osl_t *osh) +{ + uint8 cap_ptr; + uint32 pme_cap; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return FALSE; + } + + pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32)); + + DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap)); + + return ((pme_cap & PME_CAP_PM_STATES) != 0); +} +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + +void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 mbintstatus = 0; + uint32 d2h_mb_data = 0; + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n", + intstatus, intmask, mbintstatus); + bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", + d2h_mb_data, dhd->bus->def_intmask); +} + +/** Add bus dump output to a buffer */ +void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + uint16 flowid; + int ix = 0; + flow_ring_node_t *flow_ring_node; + flow_info_t *flow_info; + char eabuf[ETHER_ADDR_STR_LEN]; + + if (dhdp->busstate != DHD_BUS_DATA) + return; + + 
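+	/*
+	 * Bus is up: dump protocol-layer state first, then the PCIe
+	 * interrupt/mailbox registers, followed by one table row per
+	 * active tx flow ring (queue depth, cumulative length, overflow
+	 * count and the ring read/write indices).
+	 */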
dhd_prot_print_info(dhdp, strbuf); + dhd_dump_intr_registers(dhdp, strbuf); + bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", + dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); + bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); + bcm_bprintf(strbuf, + "%s %4s %2s %4s %17s %4s %4s %10s %4s %4s ", + "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", + "Overflows", "RD", "WR"); + bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack"); + + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (flow_ring_node->active) { + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, + "%3d. %4d %2d %4d %17s %4d %4d %10u ", ix++, + flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, + bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf), + DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), + DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); + dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, + "%4d %4d "); + bcm_bprintf(strbuf, + "%5s %6s %5s\n", "NA", "NA", "NA"); + } + } + bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); + bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); + bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); + bcm_bprintf(strbuf, "D3 Ack WAR cnt %d\n", dhdp->bus->d3_ack_war_cnt); +} + +/** + * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their + * flow queue to their flow ring. + */ +static void +dhd_update_txflowrings(dhd_pub_t *dhd) +{ + unsigned long flags; + dll_t *item, *next; + flow_ring_node_t *flow_ring_node; + struct dhd_bus *bus = dhd->bus; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + for (item = dll_head_p(&bus->const_flowring); + (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item)); + item = next) { + if (dhd->hang_was_sent) { + break; + } + + next = dll_next_p(item); + flow_ring_node = dhd_constlist_to_flowring(item); + + /* Ensure that flow_ring_node in the list is Not Null */ + ASSERT(flow_ring_node != NULL); + + /* Ensure that the flowring node has valid contents */ + ASSERT(flow_ring_node->prot_info != NULL); + + dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info); + } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); +} + +/** Mailbox ringbell Function */ +static void +dhd_bus_gen_devmb_intr(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + DHD_ERROR(("mailbox communication not supported\n")); + return; + } + if (bus->db1_for_mb) { + /* this is a pcie core register, not the config register */ + DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n")); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678); + } else { + DHD_INFO(("writing a mail box interrupt to the device, through config space\n")); + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + } +} + +static void +dhd_bus_set_device_wake(struct dhd_bus *bus, bool val) +{ + if (bus->device_wake_state != val) + { + DHD_INFO(("Set Device_Wake to %d\n", val)); +#ifdef PCIE_OOB + if (bus->oob_enabled) + { + if (val) + { + gpio_port = gpio_port | (1 << DEVICE_WAKE); + 
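+				/* push the updated DEVICE_WAKE bit out through the OOB GPIO port */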
gpio_write_port_non_block(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE)); + gpio_write_port_non_block(gpio_handle_val, gpio_port); + } + } +#endif /* PCIE_OOB */ + bus->device_wake_state = val; + } +} + +#ifdef PCIE_OOB +void +dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val) +{ + DHD_INFO(("Set Device_Wake to %d\n", val)); + if (val) + { + gpio_port = gpio_port | (1 << BIT_BT_REG_ON); + gpio_write_port(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON)); + gpio_write_port(gpio_handle_val, gpio_port); + } +} + +int +dhd_oob_get_bt_reg_on(struct dhd_bus *bus) +{ + int ret; + uint8 val; + ret = gpio_read_port(gpio_handle_val, &val); + + if (ret < 0) { + DHD_ERROR(("gpio_read_port returns %d\n", ret)); + return ret; + } + + if (val & (1 << BIT_BT_REG_ON)) + { + ret = 1; + } else { + ret = 0; + } + + return ret; +} + +static void +dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus) +{ + if (dhd_doorbell_timeout) + dhd_timeout_start(&bus->doorbell_timer, + (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms); + else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) + dhd_bus_set_device_wake(bus, FALSE); +} +#endif /* PCIE_OOB */ + +/** mailbox doorbell ring function */ +void +dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB); + } else { + /* this is a pcie core register, not the config regsiter */ + DHD_INFO(("writing a door bell to the device\n")); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678); + } +} + +void +dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) +{ +#ifdef PCIE_OOB + dhd_bus_set_device_wake(bus, TRUE); + dhd_bus_doorbell_timeout_reset(bus); +#endif + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); +} + +static void +dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) +{ + uint32 w; + w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); +} + +dhd_mb_ring_t +dhd_bus_get_mbintr_fn(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + PCIMailBoxInt); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhd_bus_ringbell_oldpcie; + } + } else { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + PCIH2D_MailBox); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_fast; + } + } + return dhd_bus_ringbell; +} + +bool BCMFASTPATH +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS + * to avoid IOCTL Resumed On timeout when ioctl is waiting for response + * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS + * and if we return from here, then IOCTL response will never be handled + */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return 0; + } + 
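+	/*
+	 * Mark the bus busy while this DPC runs so parallel suspend/unload
+	 * paths can wait for it; the flag is cleared and waiters are woken
+	 * via dhd_os_busbusy_wake() after the mailbox interrupt has been
+	 * processed below.
+	 */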
bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); + if (!resched) { + bus->intstatus = 0; + if (!bus->pci_d3hot_done) { + dhdpcie_bus_intr_enable(bus); + } else { + DHD_ERROR(("%s: dhdpcie_bus_intr_enable skip in pci D3hot state \n", + __FUNCTION__)); + } + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC; + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return resched; + +} + + +static void +dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) +{ + uint32 cur_h2d_mb_data = 0; + + DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + + if (cur_h2d_mb_data != 0) { + uint32 i = 0; + DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data)); + while ((i++ < 100) && cur_h2d_mb_data) { + OSL_DELAY(10); + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + } + if (i >= 100) { + DHD_ERROR(("%s : waited 1ms for the dngl " + "to ack the previous mb transaction\n", __FUNCTION__)); + DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n", + __FUNCTION__, cur_h2d_mb_data)); + } + } + + dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0); + dhd_bus_gen_devmb_intr(bus); + + if (h2d_mb_data == H2D_HOST_D3_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); + bus->d3_inform_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__)); + bus->d0_inform_in_use_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__)); + bus->d0_inform_cnt++; + } +} + +static void +dhdpcie_handle_mb_data(dhd_bus_t *bus) +{ + uint32 d2h_mb_data = 0; + uint32 zero = 0; + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (!d2h_mb_data) { + DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n", + __FUNCTION__, d2h_mb_data)); + return; + } + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + DHD_INFO_HW4(("D2H_MB_DATA: 0x%08x\n", d2h_mb_data)); + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); + /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ + bus->dhd->busstate = DHD_BUS_DOWN; + return; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n")); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n")); + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n")); + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n")); + if (!bus->wait_for_d3_ack) { + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); + } + } +} + +/* Inform Dongle to print HW Registers for Livelock Debug */ +void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus) +{ + dhdpcie_send_mb_data(bus, H2D_FW_TRAP); +} + +static bool +dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) +{ + bool resched = FALSE; + + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + /* 
Msg stream interrupt */ + if (intstatus & I_BIT1) { + resched = dhdpci_bus_read_frames(bus); + } else if (intstatus & I_BIT0) { + /* do nothing for Now */ + } + } else { + if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) + dhdpcie_handle_mb_data(bus); + + if (bus->dhd->busstate == DHD_BUS_SUSPEND) { + goto exit; + } + + if (intstatus & PCIE_MB_D2H_MB_MASK) { + resched = dhdpci_bus_read_frames(bus); + } + } + +exit: + return resched; +} + +static bool +dhdpci_bus_read_frames(dhd_bus_t *bus) +{ + bool more = FALSE; + + /* There may be frames in both ctrl buf and data buf; check ctrl buf first */ + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + dhd_prot_process_ctrlbuf(bus->dhd); + /* Unlock to give chance for resp to be handled */ + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + /* update the flow ring cpls */ + dhd_update_txflowrings(bus->dhd); + + /* With heavy TX traffic, we could get a lot of TxStatus + * so add bound + */ + more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound); + + /* With heavy RX traffic, this routine potentially could spend some time + * processing RX frames without RX bound + */ + more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound); + + /* don't talk to the dongle if fw is about to be reloaded */ + if (bus->dhd->hang_was_sent) { + more = FALSE; + } + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + + return more; +} + +bool +dhdpcie_tcm_valid(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv; + uint32 shaddr = 0; + pciedev_shared_t sh; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n", + __FUNCTION__, addr)); + return FALSE; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return FALSE; + } + + /* Compare any field in pciedev_shared_t */ + if (sh.console_addr != bus->pcie_sh->console_addr) { + DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n")); + return FALSE; + } + + return TRUE; +} + +static bool +dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) +{ + DHD_INFO(("firmware api revision %d, host api revision %d\n", + firmware_api_version, host_api_version)); + if (firmware_api_version <= host_api_version) + return TRUE; + if ((firmware_api_version == 6) && (host_api_version == 5)) + return TRUE; + if ((firmware_api_version == 5) && (host_api_version == 6)) + return TRUE; + return FALSE; +} + +static int +dhdpcie_readshared(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv, dma_indx_wr_buf, dma_indx_rd_buf; + uint32 shaddr = 0; + pciedev_shared_t *sh = bus->pcie_sh; + dhd_timeout_t tmo; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + DHD_INFO_HW4(("%s: ram_base: 0x%x ramsize 0x%x tcm: %p shaddr: 0x%x nvram_csm: 0x%x\n", + __FUNCTION__, bus->dongle_ram_base, bus->ramsize, + bus->tcm, shaddr, bus->nvram_csm)); + /* start a timer for 5 seconds */ + dhd_timeout_start(&tmo, MAX_READ_TIMEOUT); + + while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) { + /* Read last word in 
memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + } + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n", + __FUNCTION__, addr)); + DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed)); + return BCME_ERROR; + } else { + bus->shared_addr = (ulong)addr; + DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec " + "before dongle is ready\n", addr, tmo.elapsed)); + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return rv; + } + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); + sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); + +#ifdef DHD_DEBUG + /* load bus console address */ + bus->console_addr = sh->console_addr; +#endif + + /* Read the dma rx offset */ + bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset; + dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset); + + DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset)); + + if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK, + PCIE_SHARED_VERSION))) + { + DHD_ERROR(("%s: pcie_shared version %d in dhd " + "is older than pciedev_shared version %d in dongle\n", + __FUNCTION__, PCIE_SHARED_VERSION, + sh->flags & PCIE_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? + sizeof(uint16) : sizeof(uint32); + DHD_ERROR(("%s: Dongle advertizes %d size indices\n", + __FUNCTION__, bus->rw_index_sz)); + + /* Does the FW support DMA'ing r/w indices */ + if (sh->flags & PCIE_SHARED_DMA_INDEX) { + + + DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", + __FUNCTION__, + (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0), + (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0))); + + } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) || + DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { + +#ifdef BCM_INDX_DMA + DHD_ERROR(("%s: Incompatible FW. 
FW does not support DMAing indices\n", + __FUNCTION__)); + return BCME_ERROR; +#endif + DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n", + __FUNCTION__)); + bus->dhd->dma_d2h_ring_upd_support = FALSE; + bus->dhd->dma_h2d_ring_upd_support = FALSE; + } + + + /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */ + { + ring_info_t ring_info; + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr, + (uint8 *)&ring_info, sizeof(ring_info_t))) < 0) + return rv; + + bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr); + bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr); + + + bus->max_sub_queues = ltoh16(ring_info.max_sub_queues); + + /* If both FW and Host support DMA'ing indices, allocate memory and notify FW + * The max_sub_queues is read from FW initialized ring_info + */ + if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_WR_BUF, bus->max_sub_queues); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices" + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_h2d_ring_upd_support = FALSE; + } + } + + if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_RD_BUF, bus->max_sub_queues); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices" + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_d2h_ring_upd_support = FALSE; + } + } + + /* read ringmem and ringstate ptrs from shared area and store in host variables */ + dhd_fillup_ring_sharedptr_info(bus, &ring_info); + + bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); + DHD_INFO(("ring_info\n")); + + DHD_ERROR(("%s: max H2D queues %d\n", + __FUNCTION__, ltoh16(ring_info.max_sub_queues))); + + DHD_INFO(("mail box address\n")); + DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->h2d_mb_data_ptr_addr)); + DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->d2h_mb_data_ptr_addr)); + } + + bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; + DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", + __FUNCTION__, bus->dhd->d2h_sync_mode)); + + return BCME_OK; +} /* dhdpcie_readshared */ + +/** Read ring mem and ring state ptr info from shared memory area in device memory */ +static void +dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) +{ + uint16 i = 0; + uint16 j = 0; + uint32 tcm_memloc; + uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr; + + /* Ring mem ptr info */ + /* Alloated in the order + H2D_MSGRING_CONTROL_SUBMIT 0 + H2D_MSGRING_RXPOST_SUBMIT 1 + D2H_MSGRING_CONTROL_COMPLETE 2 + D2H_MSGRING_TX_COMPLETE 3 + D2H_MSGRING_RX_COMPLETE 4 + */ + + { + /* ringmemptr holds start of the mem block address space */ + tcm_memloc = ltoh32(ring_info->ringmem_ptr); + + /* Find out ringmem ptr for each ring common ring */ + for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) { + bus->ring_sh[i].ring_mem_addr = tcm_memloc; + /* Update mem block */ + 
tcm_memloc = tcm_memloc + sizeof(ring_mem_t); + DHD_INFO(("ring id %d ring mem addr 0x%04x \n", + i, bus->ring_sh[i].ring_mem_addr)); + } + } + + /* Ring state mem ptr info */ + { + d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr); + d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr); + h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr); + h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr); + + /* Store h2d common ring write/read pointers */ + for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store d2h common ring write/read pointers */ + for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) { + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + + /* update mem block */ + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store txflow ring write/read pointers */ + for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS); + i++, j++) + { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); + } + } +} /* dhd_fillup_ring_sharedptr_info */ + +/** + * Initialize bus module: prepare for communication with the dongle. Called after downloading + * firmware into the dongle. + */ +int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + /* Make sure we're talking to the core. */ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* before opening up bus for data transfer, check if shared are is intact */ + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + return ret; + } + + /* Make sure we're talking to the core. 
*/ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + if (!dhd_download_fw_on_driverload) + dhd_dpc_enable(bus->dhd); + + /* Enable the interrupt after device is up */ + dhdpcie_bus_intr_enable(bus); + + /* bcmsdh_intr_unmask(bus->sdh); */ + +#ifdef DHD_PCIE_RUNTIMEPM + bus->idlecount = 0; + bus->idletime = (int32)MAX_IDLE_COUNT; + init_waitqueue_head(&bus->rpm_queue); + mutex_init(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + + bus->d3_ack_war_cnt = 0; + + return ret; +} + +static void +dhdpcie_init_shared_addr(dhd_bus_t *bus) +{ + uint32 addr = 0; + uint32 val = 0; + addr = bus->dongle_ram_base + bus->ramsize - 4; +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + DHD_INFO_HW4(("%s: tcm: %p, addr: 0x%x val: 0x%x\n", __FUNCTION__, bus->tcm, addr, val)); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); +} + + +bool +dhdpcie_chipmatch(uint16 vendor, uint16 device) +{ + if (vendor != PCI_VENDOR_ID_BROADCOM) { + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, + vendor, device)); + return (-ENODEV); + } + + if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) || + (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) || + (device == BCM43569_CHIP_ID)) + return 0; + + if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) || + (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) + return 0; + + if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) || + (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) + return 0; + + if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) || + (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) + return 0; + + if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) || + (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) + return 0; + + if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) || + (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) + return 0; + + if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) || + (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) + return 0; + + if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) || + (device == BCM4358_D11AC5G_ID)) + return 0; + + if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) || + (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) + return 0; + + if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) || + (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) + return 0; + + if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) || + (device == BCM4359_D11AC5G_ID)) + return 0; + + if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) || + (device == BCM43596_D11AC5G_ID)) + return 0; + + + if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) || + (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) + return 0; + + if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) || + (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID)) + return 0; + + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); + return (-ENODEV); +} /* dhdpcie_chipmatch */ + +/** + * Name: dhdpcie_cc_nvmshadow + * + * Description: + * A shadow of OTP/SPROM exists in 
ChipCommon Region + * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). + * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size + * can also be read from ChipCommon Registers. + */ +static int +dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) +{ + uint16 dump_offset = 0; + uint32 dump_size = 0, otp_size = 0, sprom_size = 0; + + /* Table for 65nm OTP Size (in bits) */ + int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024}; + + volatile uint16 *nvm_shadow; + + uint cur_coreid; + uint chipc_corerev; + chipcregs_t *chipcregs; + + /* Save the current core */ + cur_coreid = si_coreid(bus->sih); + /* Switch to ChipC */ + chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0); + ASSERT(chipcregs != NULL); + + chipc_corerev = si_corerev(bus->sih); + + /* Check ChipcommonCore Rev */ + if (chipc_corerev < 44) { + DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev)); + return BCME_UNSUPPORTED; + } + + /* Check ChipID */ + if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n", + __FUNCTION__)); + return BCME_UNSUPPORTED; + } + + /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */ + if (chipcregs->sromcontrol & SRC_PRESENT) { + /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */ + sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK) + >> SRC_SIZE_SHIFT))) * 1024; + bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size); + } + + if (chipcregs->sromcontrol & SRC_OTPPRESENT) { + bcm_bprintf(b, "\nOTP Present"); + + if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT) + == OTPL_WRAP_TYPE_40NM) { + /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */ + otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } else { + /* This part is untested since newer chips have 40nm OTP */ + otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT]; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n", + __FUNCTION__)); + } + } + + if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && + ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) { + DHD_ERROR(("%s: SPROM and OTP could not be found \n", + __FUNCTION__)); + return BCME_NOTFOUND; + } + + /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */ + if ((chipcregs->sromcontrol & SRC_OTPSEL) && + (chipcregs->sromcontrol & SRC_OTPPRESENT)) { + + bcm_bprintf(b, "OTP Strap selected.\n" + "\nOTP Shadow in ChipCommon:\n"); + + dump_size = otp_size / 16 ; /* 16bit words */ + + } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) && + (chipcregs->sromcontrol & SRC_PRESENT)) { + + bcm_bprintf(b, "SPROM Strap selected\n" + "\nSPROM Shadow in ChipCommon:\n"); + + /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */ + /* dump_size in 16bit words */ + dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16; + } else { + DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n", + __FUNCTION__)); + return BCME_NOTFOUND; + } + + if (bus->regs == NULL) { + DHD_ERROR(("ChipCommon Regs. not initialized\n")); + return BCME_NOTREADY; + } else { + bcm_bprintf(b, "\n OffSet:"); + + /* Point to the SPROM/OTP shadow in ChipCommon */ + nvm_shadow = chipcregs->sromotp; + + /* + * Read 16 bits / iteration. 
+ * dump_size & dump_offset in 16-bit words + */ + while (dump_offset < dump_size) { + if (dump_offset % 2 == 0) + /* Print the offset in the shadow space in Bytes */ + bcm_bprintf(b, "\n 0x%04x", dump_offset * 2); + + bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset)); + dump_offset += 0x1; + } + } + + /* Switch back to the original core */ + si_setcore(bus->sih, cur_coreid, 0); + + return BCME_OK; +} /* dhdpcie_cc_nvmshadow */ + +/** Flow rings are dynamically created and destroyed */ +void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) +{ + void *pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node; + unsigned long flags; + + queue = &flow_ring_node->queue; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* clean up BUS level info */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + flow_ring_node->status = FLOW_RING_STATUS_CLOSED; + flow_ring_node->active = FALSE; + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_delete(&flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + /* Release the flowring object back into the pool */ + dhd_prot_flowrings_pool_release(bus->dhd, + flow_ring_node->flowid, flow_ring_node->prot_info); + + /* Free the flowid back to the flowid allocator */ + dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, + flow_ring_node->flowid); +} + +/** + * Allocate a Flow ring buffer, + * Init Ring buffer, send Msg to device about flow ring creation +*/ +int +dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_INFO(("%s :Flow create\n", __FUNCTION__)); + + /* Send Msg to device about flow ring creation */ + if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK) + return BCME_NOMEM; + + return BCME_OK; +} + +/** Handle response from dongle on a 'flow ring create' request */ +void +dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow create Response failure error status = %d \n", + __FUNCTION__, status)); + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + return; + } + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Now add the Flow ring node into the active list + * Note that this code to add the newly created node to the active + * list was living in dhd_flowid_lookup. But note that after + * adding the node to the active list the contents of node is being + * filled in dhd_prot_flow_ring_create. + * If there is a D2H interrupt after the node gets added to the + * active list and before the node gets populated with values + * from the Bottom half dhd_update_txflowrings would be called. 
+ * which will then try to walk through the active flow ring list, + * pickup the nodes and operate on them. Now note that since + * the function dhd_prot_flow_ring_create is not finished yet + * the contents of flow_ring_node can still be NULL leading to + * crashes. Hence the flow_ring_node should be added to the + * active list only after its truely created, which is after + * receiving the create response message from the Host. + */ + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_prepend(&bus->const_flowring, &flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + return; +} + +int +dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) +{ + void * pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Delete\n", __FUNCTION__)); + + flow_ring_node = (flow_ring_node_t *)arg; + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_ERROR(("%s :Delete Pending Flow %d\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_ERROR; + } + flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING; + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring deletion */ + dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +void +dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow Delete Response failure error status = %d \n", + __FUNCTION__, status)); + return; + } + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + + return; + +} + +/** This function is not called. Obsolete ? */ +int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) +{ + void *pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Delete\n", __FUNCTION__)); + + flow_ring_node = (flow_ring_node_t *)arg; + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. 
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring flush */ + dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node); + + flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; + return BCME_OK; +} + +void +dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow flush Response failure error status = %d \n", + __FUNCTION__, status)); + return; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + return; +} + +uint32 +dhd_bus_max_h2d_queues(struct dhd_bus *bus) +{ + return bus->max_sub_queues; +} + +/* To be symmetric with SDIO */ +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + return; +} + +void +dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) +{ + dhdp->bus->is_linkdown = val; +} + +int +dhdpcie_bus_clock_start(struct dhd_bus *bus) +{ + return dhdpcie_start_host_pcieclock(bus); +} + +int +dhdpcie_bus_clock_stop(struct dhd_bus *bus) +{ + return dhdpcie_stop_host_pcieclock(bus); +} + +int +dhdpcie_bus_disable_device(struct dhd_bus *bus) +{ + return dhdpcie_disable_device(bus); +} + +int +dhdpcie_bus_enable_device(struct dhd_bus *bus) +{ + return dhdpcie_enable_device(bus); +} + +int +dhdpcie_bus_alloc_resource(struct dhd_bus *bus) +{ + return dhdpcie_alloc_resource(bus); +} + +void +dhdpcie_bus_free_resource(struct dhd_bus *bus) +{ + dhdpcie_free_resource(bus); +} + +int +dhd_bus_request_irq(struct dhd_bus *bus) +{ + return dhdpcie_bus_request_irq(bus); +} + +bool +dhdpcie_bus_dongle_attach(struct dhd_bus *bus) +{ + return dhdpcie_dongle_attach(bus); +} + +int +dhd_bus_release_dongle(struct dhd_bus *bus) +{ + bool dongle_isolation; + osl_t *osh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { + dongle_isolation = bus->dhd->dongle_isolation; + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + } + } + + return 0; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +int +dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +{ + return dhdpcie_oob_intr_register(dhdp->bus); +} + +void +dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +{ + dhdpcie_oob_intr_unregister(dhdp->bus); +} + +void +dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +{ + dhdpcie_oob_intr_set(dhdp->bus, enable); +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.h b/drivers/net/wireless/bcmdhd/dhd_pcie.h new file mode 100644 index 000000000000..511d00e8ce2c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pcie.h @@ -0,0 +1,315 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy 
and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie.h 607608 2015-12-21 13:14:19Z $ + */ + + +#ifndef dhd_pcie_h +#define dhd_pcie_h + +#include +#include +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM +#ifdef CONFIG_PCI_MSM +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 +#include +extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg); +extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +#ifdef DHD_PCIE_RUNTIMEPM +#include +#include + +#define DEFAULT_DHD_RUNTIME_MS 100 +#ifndef CUSTOM_DHD_RUNTIME_MS +#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS +#endif /* CUSTOM_DHD_RUNTIME_MS */ + + +#ifndef MAX_IDLE_COUNT +#define MAX_IDLE_COUNT 16 +#endif /* MAX_IDLE_COUNT */ + +#ifndef MAX_RESUME_WAIT +#define MAX_RESUME_WAIT 100 +#endif /* MAX_RESUME_WAIT */ +#endif /* DHD_PCIE_RUNTIMEPM */ + +/* defines */ + +#define PCMSGBUF_HDRLEN 0 +#define DONGLE_REG_MAP_SIZE (32 * 1024) +#define DONGLE_TCM_MAP_SIZE (4096 * 1024) +#define DONGLE_MIN_MEMSIZE (128 *1024) +#ifdef DHD_DEBUG +#define DHD_PCIE_SUCCESS 0 +#define DHD_PCIE_FAILURE 1 +#endif /* DHD_DEBUG */ +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) + +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM +#define struct_pcie_notify struct msm_pcie_notify +#define struct_pcie_register_event struct msm_pcie_register_event +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 +#define struct_pcie_notify struct exynos_pcie_notify +#define struct_pcie_register_event struct exynos_pcie_register_event +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +/* + * Router with 4366 can have 128 stations and 16 BSS, + * hence (128 stations x 4 access categories for ucast) + 16 bc/mc flowrings + */ +#define MAX_DHD_TX_FLOWS 320 + +/* user defined data structures */ +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX (8 * 1024) + +#ifndef MAX_CNTL_D3ACK_TIMEOUT +#define MAX_CNTL_D3ACK_TIMEOUT 2 +#endif /* MAX_CNTL_D3ACK_TIMEOUT */ + +#ifdef DHD_DEBUG + +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; +#endif /* DHD_DEBUG */ +typedef struct ring_sh_info { + uint32 ring_mem_addr; + uint32 ring_state_w; + uint32 ring_state_r; +} ring_sh_info_t; + +typedef struct dhd_bus { + dhd_pub_t *dhd; + struct 
pci_dev *dev; /* pci device handle */ + dll_t const_flowring; /* constructed list of tx flowring queues */ + + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + sbpcieregs_t *reg; /* Registers for PCIE core */ + + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ +#ifdef CACHE_FW_IMAGES + int processed_nvram_params_len; /* Modified len of NVRAM info */ +#endif + + + struct pktq txq; /* Queue length used for flow-control */ + + bool intr; /* Use interrupts */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + bool alp_only; /* Don't use HT clock (ALP only) */ + + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + uint32 resetinstr; + uint32 dongle_ram_base; + + ulong shared_addr; + pciedev_shared_t *pcie_sh; + bool bus_flowctrl; + uint32 dma_rxoffset; + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + osl_t *osh; + uint32 nvram_csm; /* Nvram checksum */ + uint16 pollrate; + uint16 polltick; + + uint32 *pcie_mb_intr_addr; + void *pcie_mb_intr_osh; + bool sleep_allowed; + + /* version 3 shared struct related info start */ + ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; + uint8 h2d_ring_count; + uint8 d2h_ring_count; + uint32 ringmem_ptr; + uint32 ring_state_ptr; + + uint32 d2h_dma_scratch_buffer_mem_addr; + + uint32 h2d_mb_data_ptr_addr; + uint32 d2h_mb_data_ptr_addr; + /* version 3 shared struct related info end */ + + uint32 def_intmask; + bool ltrsleep_on_unload; + uint wait_for_d3_ack; + uint32 max_sub_queues; + uint32 rw_index_sz; + bool db1_for_mb; + bool suspended; + + dhd_timeout_t doorbell_timer; + bool device_wake_state; + bool irq_registered; +#ifdef PCIE_OOB + bool oob_enabled; +#endif /* PCIE_OOB */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ + defined(CONFIG_SOC_EXYNOS8890)) +#ifdef CONFIG_ARCH_MSM + uint8 no_cfg_restore; +#endif /* CONFIG_ARCH_MSM */ + struct_pcie_register_event pcie_event; +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#ifdef DHD_PCIE_RUNTIMEPM + int32 idlecount; /* Activity timeout counter */ + int32 idletime; /* Control for activity timeout */ + int32 bus_wake; /* For wake up the bus */ + bool runtime_resume_done; /* For check runtime suspend end */ + struct mutex pm_lock; /* Synchronize for system PM & runtime PM */ 
+ wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */ +#endif /* DHD_PCIE_RUNTIMEPM */ + uint32 d3_inform_cnt; + uint32 d0_inform_cnt; + uint32 d0_inform_in_use_cnt; + uint8 force_suspend; + uint32 d3_ack_war_cnt; + uint8 is_linkdown; + uint32 pci_d3hot_done; +} dhd_bus_t; + +/* function declarations */ + +extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); +extern int dhdpcie_bus_register(void); +extern void dhdpcie_bus_unregister(void); +extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); + +extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, void *pci_dev); +extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); +extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); +extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); +extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); +extern void dhdpcie_bus_release(struct dhd_bus *bus); +extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); +extern void dhdpcie_free_irq(dhd_bus_t *bus); +extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); +extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); +extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); +extern void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus); +#ifndef BCMPCIE_OOB_HOST_WAKE +extern void dhdpcie_pme_active(osl_t *osh, bool enable); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ +extern bool dhdpcie_pme_cap(osl_t *osh); +extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_disable_device(dhd_bus_t *bus); +extern int dhdpcie_enable_device(dhd_bus_t *bus); +extern int dhdpcie_alloc_resource(dhd_bus_t *bus); +extern void dhdpcie_free_resource(dhd_bus_t *bus); +extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); +#ifdef BCMPCIE_OOB_HOST_WAKE +extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_OOB +extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val); +extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus); +#endif /* PCIE_OOB */ + +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH +#if defined(CONFIG_MACH_UNIVERSAL5433) +#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3 +#define SAMSUNG_PCIE_CH_NUM +#elif defined(CONFIG_MACH_UNIVERSAL7420) +#define SAMSUNG_PCIE_DEVICE_ID 0xa575 +#define SAMSUNG_PCIE_CH_NUM 1 +#elif defined(CONFIG_SOC_EXYNOS8890) +#define SAMSUNG_PCIE_DEVICE_ID 0xa544 +#define SAMSUNG_PCIE_CH_NUM 0 +#else +#error "Not supported platform" +#endif +#ifdef CONFIG_MACH_UNIVERSAL5433 +extern int exynos_pcie_pm_suspend(void); +extern int exynos_pcie_pm_resume(void); +#else +extern int exynos_pcie_pm_suspend(int ch_num); +extern int exynos_pcie_pm_resume(int ch_num); +#endif /* CONFIG_MACH_UNIVERSAL5433 */ +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ + +extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); +#endif /* dhd_pcie_h */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c new file mode 100644 index 000000000000..e9814d22e559 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c @@ -0,0 +1,1562 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom 
execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie_linux.c 610267 2016-01-06 16:03:53Z $ + */ + + +/* include files */ +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARCH_MSM +#ifdef CONFIG_PCI_MSM +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ + +#define PCI_CFG_RETRY 10 +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */ + +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) + + +/* user defined data structures */ + +typedef struct dhd_pc_res { + uint32 bar0_size; + void* bar0_addr; + uint32 bar1_size; + void* bar1_addr; +} pci_config_res, *pPci_config_res; + +typedef bool (*dhdpcie_cb_fn_t)(void *); + +typedef struct dhdpcie_info +{ + dhd_bus_t *bus; + osl_t *osh; + struct pci_dev *dev; /* pci device handle */ + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + uint32 tcm_size; /* pci device memory size */ + struct pcos_info *pcos_info; + uint16 last_intrstatus; /* to cache intrstatus */ + int irq; + char pciname[32]; + struct pci_saved_state* default_state; + struct pci_saved_state* state; +#ifdef BCMPCIE_OOB_HOST_WAKE + void *os_cxt; /* Pointer to per-OS private data */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +} dhdpcie_info_t; + + +struct pcos_info { + dhdpcie_info_t *pc; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; + struct timer_list tuning_timer; + int tuning_timer_exp; + atomic_t timer_enab; + struct tasklet_struct tuning_tasklet; +}; + +#ifdef BCMPCIE_OOB_HOST_WAKE +typedef struct dhdpcie_os_info { + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + void *dev; /* handle to the underlying device */ +} dhdpcie_os_info_t; +#endif /* 
BCMPCIE_OOB_HOST_WAKE */ + +/* function declarations */ +static int __devinit +dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit +dhdpcie_pci_remove(struct pci_dev *pdev); +static int dhdpcie_init(struct pci_dev *pdev); +static irqreturn_t dhdpcie_isr(int irq, void *arg); +/* OS Routine functions for PCI suspend/resume */ + +static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state); +static int dhdpcie_resume_host_dev(dhd_bus_t *bus); +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); +static int dhdpcie_resume_dev(struct pci_dev *dev); +static int dhdpcie_suspend_dev(struct pci_dev *dev); +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev); +static int dhdpcie_pm_prepare(struct device *dev); +static int dhdpcie_pm_resume(struct device *dev); +static void dhdpcie_pm_complete(struct device *dev); +#else +static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); +static int dhdpcie_pci_resume(struct pci_dev *dev); +#endif /* DHD_PCIE_RUNTIMEPM */ +static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { + { vendor: 0x14e4, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: PCI_CLASS_NETWORK_OTHER << 8, + class_mask: 0xffff00, + driver_data: 0, + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); + +/* Power Management Hooks */ +#ifdef DHD_PCIE_RUNTIMEPM +static const struct dev_pm_ops dhd_pcie_pm_ops = { + .prepare = dhdpcie_pm_prepare, + .suspend = dhdpcie_pm_suspend, + .resume = dhdpcie_pm_resume, + .complete = dhdpcie_pm_complete, +}; +#endif /* DHD_PCIE_RUNTIMEPM */ + +static struct pci_driver dhdpcie_driver = { + node: {}, + name: "pcieh", + id_table: dhdpcie_pci_devid, + probe: dhdpcie_pci_probe, + remove: dhdpcie_pci_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + save_state: NULL, +#endif +#ifdef DHD_PCIE_RUNTIMEPM + .driver.pm = &dhd_pcie_pm_ops, +#else + suspend: dhdpcie_pci_suspend, + resume: dhdpcie_pci_resume, +#endif /* DHD_PCIE_RUNTIMEPM */ +}; + +int dhdpcie_init_succeeded = FALSE; + +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + return dhdpcie_set_suspend_resume(pdev, TRUE); +} + +static int dhdpcie_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + DHD_DISABLE_RUNTIME_PM(bus->dhd); + } + + return 0; +} + +static int dhdpcie_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + return dhdpcie_set_suspend_resume(pdev, FALSE); +} + +static void dhdpcie_pm_complete(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + DHD_ENABLE_RUNTIME_PM(bus->dhd); + } + + return; +} +#else +static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) +{ + BCM_REFERENCE(state); + return dhdpcie_set_suspend_resume(pdev, TRUE); +} + +static int dhdpcie_pci_resume(struct pci_dev *pdev) +{ + return dhdpcie_set_suspend_resume(pdev, FALSE); +} + +#endif /* DHD_PCIE_RUNTIMEPM */ + +static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + } + +#ifdef DHD_PCIE_RUNTIMEPM + if (bus && !bus->dhd->dongle_reset) { + /* if wakelock is held during 
suspend, return failed */ + if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) { + return -EBUSY; + } + + mutex_lock(&bus->pm_lock); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* When firmware is not loaded do the PCI bus */ + /* suspend/resume only */ + if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && + !bus->dhd->dongle_reset) { + ret = dhdpcie_pci_suspend_resume(bus, state); +#ifdef DHD_PCIE_RUNTIMEPM + mutex_unlock(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; + } + + if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| + (bus->dhd->busstate == DHD_BUS_DATA)) && + (bus->suspended != state)) { + ret = dhdpcie_bus_suspend(bus, state); + } + +#ifdef DHD_PCIE_RUNTIMEPM + if (bus && !bus->dhd->dongle_reset) { + mutex_unlock(&bus->pm_lock); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; +} + +static int dhdpcie_suspend_dev(struct pci_dev *dev) +{ + int ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch = pci_get_drvdata(dev); + dhd_bus_t *bus = pch->bus; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + bus->pci_d3hot_done = 1; +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + pci_save_state(dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + pch->state = pci_store_saved_state(dev); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + pci_enable_wake(dev, PCI_D0, TRUE); + if (pci_is_enabled(dev)) { + pci_disable_device(dev); + } + ret = pci_set_power_state(dev, PCI_D3hot); + if (ret) { + DHD_ERROR(("%s: pci_set_power_state error %d\n", + __FUNCTION__, ret)); + } + disable_irq(dev->irq); + return ret; +} + +static int dhdpcie_resume_dev(struct pci_dev *dev) +{ + int err = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch = pci_get_drvdata(dev); + dhd_bus_t *bus = pch->bus; + pci_load_and_free_saved_state(dev, &pch->state); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + bus->pci_d3hot_done = 0; +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + pci_restore_state(dev); + err = pci_enable_device(dev); + if (err) { + printf("%s:pci_enable_device error %d \n", __FUNCTION__, err); + goto out; + } + pci_set_master(dev); + err = pci_set_power_state(dev, PCI_D0); + if (err) { + printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err); + goto out; + } + +out: + enable_irq(dev->irq); + return err; +} + +static int dhdpcie_resume_host_dev(dhd_bus_t *bus) +{ + int bcmerror = 0; +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH + bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_start_host_pcieclock(bus); +#endif /* CONFIG_ARCH_MSM */ + if (bcmerror < 0) { + DHD_ERROR(("%s: PCIe RC resume failed!!! 
(%d)\n", + __FUNCTION__, bcmerror)); + bus->is_linkdown = 1; +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + } + + return bcmerror; +} + +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus) +{ + int bcmerror = 0; +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH + struct pci_dev *rc_pci_dev; + rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL); + if (rc_pci_dev) { + pci_save_state(rc_pci_dev); + } + exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM); +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_stop_host_pcieclock(bus); +#endif /* CONFIG_ARCH_MSM */ + return bcmerror; +} + +int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) +{ + int rc; + + struct pci_dev *dev = bus->dev; + + if (state) { + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } +#ifndef BCMPCIE_OOB_HOST_WAKE + dhdpcie_pme_active(bus->osh, state); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + rc = dhdpcie_suspend_dev(dev); + if (!rc) { + dhdpcie_suspend_host_dev(bus); + } + } else { + dhdpcie_resume_host_dev(bus); + rc = dhdpcie_resume_dev(dev); +#ifndef BCMPCIE_OOB_HOST_WAKE + dhdpcie_pme_active(bus->osh, state); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (bus->is_linkdown) { + bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; + dhd_os_send_hang_message(bus->dhd); + } +#endif + } + return rc; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +static int dhdpcie_device_scan(struct device *dev, void *data) +{ + struct pci_dev *pcidev; + int *cnt = data; + + pcidev = container_of(dev, struct pci_dev, dev); + if (pcidev->vendor != 0x14e4) + return 0; + + DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device)); + *cnt += 1; + if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name)) + DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n", + pcidev->device, pcidev->driver->name)); + + return 0; +} +#endif /* LINUX_VERSION >= 2.6.0 */ + +int +dhdpcie_bus_register(void) +{ + int error = 0; + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (!(error = pci_module_init(&dhdpcie_driver))) + return 0; + + DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); +#else + if (!(error = pci_register_driver(&dhdpcie_driver))) { + bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan); + if (!error) { + DHD_ERROR(("No Broadcom PCI device enumerated!\n")); + } else if (!dhdpcie_init_succeeded) { + DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__)); + } else { + return 0; + } + + pci_unregister_driver(&dhdpcie_driver); + error = BCME_ERROR; + } +#endif /* LINUX_VERSION < 2.6.0 */ + + return error; +} + + +void +dhdpcie_bus_unregister(void) +{ + pci_unregister_driver(&dhdpcie_driver); +} + +int __devinit +dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + + if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) { + DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__)); + return -ENODEV; + } + printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X" + "(good PCI location)\n", pdev->bus->number, + PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device); + + if (dhdpcie_init (pdev)) { + DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__)); + return -ENODEV; + } + +#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND + /* disable async suspend */ + device_disable_async_suspend(&pdev->dev); +#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */ + + DHD_TRACE(("%s: 
PCIe Enumeration done!!\n", __FUNCTION__)); + return 0; +} + +int +dhdpcie_detach(dhdpcie_info_t *pch) +{ + if (pch) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (!dhd_download_fw_on_driverload) { + pci_load_and_free_saved_state(pch->dev, &pch->default_state); + } +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + MFREE(pch->osh, pch, sizeof(dhdpcie_info_t)); + } + return 0; +} + + +void __devexit +dhdpcie_pci_remove(struct pci_dev *pdev) +{ + osl_t *osh = NULL; + dhdpcie_info_t *pch = NULL; + dhd_bus_t *bus = NULL; + + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + pch = pci_get_drvdata(pdev); + bus = pch->bus; + osh = pch->osh; + +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus) { +#ifdef CONFIG_ARCH_MSM + msm_pcie_deregister_event(&bus->pcie_event); +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 + exynos_pcie_deregister_event(&bus->pcie_event); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhdpcie_bus_release(bus); + pci_disable_device(pdev); +#ifdef BCMPCIE_OOB_HOST_WAKE + /* pcie os info detach */ + MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t)); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + /* pcie info detach */ + dhdpcie_detach(pch); + /* osl detach */ + osl_detach(osh); + + dhdpcie_init_succeeded = FALSE; + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); + + return; +} + +/* Free Linux irq */ +int +dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info) +{ + dhd_bus_t *bus = dhdpcie_info->bus; + struct pci_dev *pdev = dhdpcie_info->bus->dev; + + if (!bus->irq_registered) { + snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), + "dhdpcie:%s", pci_name(pdev)); + if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, + dhdpcie_info->pciname, bus) < 0) { + DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); + return -1; + } else { + bus->irq_registered = TRUE; + } + } else { + DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__)); + } + + DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname)); + + + return 0; /* SUCCESS */ +} + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define PRINTF_RESOURCE "0x%016llx" +#else +#define PRINTF_RESOURCE "0x%08x" +#endif + +/* + +Name: osl_pci_get_resource + +Parametrs: + +1: struct pci_dev *pdev -- pci device structure +2: pci_res -- structure containing pci configuration space values + + +Return value: + +int - Status (TRUE or FALSE) + +Description: +Access PCI configuration space, retrieve PCI allocated resources , updates in resource structure. 
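+BAR0 (resource index 0) is mapped for the dongle register space and resource index 2 (the second BAR) for the dongle TCM window; both ranges are ioremapped below via REG_MAP() using DONGLE_REG_MAP_SIZE and DONGLE_TCM_MAP_SIZE.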
+ + */ +int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) +{ + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + struct pci_dev *pdev = NULL; + pdev = dhdpcie_info->dev; + do { + if (pci_enable_device(pdev)) { + printf("%s: Cannot enable PCI device\n", __FUNCTION__); + break; + } + pci_set_master(pdev); + bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */ + bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */ + + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(pdev, 2); + + if ((bar1_size == 0) || (bar1_addr == 0)) { + printf("%s: BAR1 Not enabled for this device size(%ld)," + " addr(0x"PRINTF_RESOURCE")\n", + __FUNCTION__, bar1_size, bar1_addr); + goto err; + } + + dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); + dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; + + if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { + DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); + break; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (!dhd_download_fw_on_driverload) { + /* Backup PCIe configuration so as to use Wi-Fi on/off process + * in case of built in driver + */ + pci_save_state(pdev); + dhdpcie_info->default_state = pci_store_saved_state(pdev); + + if (dhdpcie_info->default_state == NULL) { + DHD_ERROR(("%s pci_store_saved_state returns NULL\n", + __FUNCTION__)); + REG_UNMAP(dhdpcie_info->regs); + REG_UNMAP(dhdpcie_info->tcm); + pci_disable_device(pdev); + break; + } + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + + DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->regs, bar0_addr)); + DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); + + return 0; /* SUCCESS */ + } while (0); +err: + return -1; /* FAILURE */ +} + +int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info) +{ + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + do { + /* define it here only!! 
*/ + if (dhdpcie_get_resource (dhdpcie_info)) { + DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__)); + break; + } + DHD_TRACE(("%s:Exit - SUCCESS \n", + __FUNCTION__)); + + return 0; /* SUCCESS */ + + } while (0); + + DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); + + return -1; /* FAILURE */ + +} + +#ifdef SUPPORT_LINKDOWN_RECOVERY +#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ + defined(CONFIG_SOC_EXYNOS8890)) +void dhdpcie_linkdown_cb(struct_pcie_notify *noti) +{ + struct pci_dev *pdev = (struct pci_dev *)noti->user; + dhdpcie_info_t *pch = NULL; + + if (pdev) { + pch = pci_get_drvdata(pdev); + if (pch) { + dhd_bus_t *bus = pch->bus; + if (bus) { + dhd_pub_t *dhd = bus->dhd; + if (dhd) { + DHD_ERROR(("%s: Event HANG send up " + "due to PCIe linkdown\n", + __FUNCTION__)); +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 1; + DHD_OS_WAKE_LOCK(dhd); + dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(dhd); + } + } + } + } + +} +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + +int dhdpcie_init(struct pci_dev *pdev) +{ + + osl_t *osh = NULL; + dhd_bus_t *bus = NULL; + dhdpcie_info_t *dhdpcie_info = NULL; + wifi_adapter_info_t *adapter = NULL; +#ifdef BCMPCIE_OOB_HOST_WAKE + dhdpcie_os_info_t *dhdpcie_osinfo = NULL; +#endif /* BCMPCIE_OOB_HOST_WAKE */ + + do { + /* osl attach */ + if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { + DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__)); + break; + } + + /* initialize static buffer */ + adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number, + PCI_SLOT(pdev->devfn)); + if (adapter != NULL) + DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name)); + else + DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__)); + osl_static_mem_init(osh, adapter); + + /* Set ACP coherence flag */ + if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT()) + osl_flag_set(osh, OSL_ACP_COHERENCE); + + /* allocate linux spcific pcie structure here */ + if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + break; + } + bzero(dhdpcie_info, sizeof(dhdpcie_info_t)); + dhdpcie_info->osh = osh; + dhdpcie_info->dev = pdev; + +#ifdef BCMPCIE_OOB_HOST_WAKE + /* allocate OS speicific structure */ + dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t)); + if (dhdpcie_osinfo == NULL) { + DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n", + __FUNCTION__)); + break; + } + bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); + dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo; + + /* Initialize host wake IRQ */ + spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock); + /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */ + dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter, + &dhdpcie_osinfo->oob_irq_flags); + if (dhdpcie_osinfo->oob_irq_num < 0) { + DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__)); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ + + /* Find the PCI resources, verify the */ + /* vendor and device ID, map BAR regions and irq, update in structures */ + if (dhdpcie_scan_resource(dhdpcie_info)) { + DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__)); + + break; + } + + /* Bus initialization */ + bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev); + if (!bus) { + 
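+			/* bus attach failed: break out so the common error unwind
+			 * at the end of dhdpcie_init() releases the OOB context,
+			 * dhdpcie_info and the osl handle.
+			 */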
DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__)); + break; + } + + dhdpcie_info->bus = bus; + bus->is_linkdown = 0; + bus->pci_d3hot_done = 0; +#ifdef DONGLE_ENABLE_ISOLATION + bus->dhd->dongle_isolation = TRUE; +#endif /* DONGLE_ENABLE_ISOLATION */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN; + bus->pcie_event.user = pdev; + bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK; + bus->pcie_event.callback = dhdpcie_linkdown_cb; + bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY; + msm_pcie_register_event(&bus->pcie_event); + bus->no_cfg_restore = 0; +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 + bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN; + bus->pcie_event.user = pdev; + bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK; + bus->pcie_event.callback = dhdpcie_linkdown_cb; + exynos_pcie_register_event(&bus->pcie_event); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); + dhdpcie_bus_intr_disable(bus); + + if (dhdpcie_request_irq(dhdpcie_info)) { + DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); + break; + } + } else { + bus->pollrate = 1; + DHD_INFO(("%s: PCIe interrupt function is NOT registered " + "due to polling mode\n", __FUNCTION__)); + } + +#if defined(BCM_REQUEST_FW) + if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) { + DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__)); + } + bus->nv_path = NULL; + bus->fw_path = NULL; +#endif /* BCM_REQUEST_FW */ + + /* set private data for pci_dev */ + pci_set_drvdata(pdev, dhdpcie_info); + + if (dhd_download_fw_on_driverload) { + if (dhd_bus_start(bus->dhd)) { + DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__)); + if (!allow_delay_fwdl) + break; + } + } else { + /* Set ramdom MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } + + /* Attach to the OS network interface */ + DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); + if (dhd_register_if(bus->dhd, 0, TRUE)) { + DHD_ERROR(("%s(): ERROR.. 
dhd_register_if() failed\n", __FUNCTION__)); + break; + } + + dhdpcie_init_succeeded = TRUE; + + DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__)); + return 0; /* return SUCCESS */ + + } while (0); + /* reverse the initialization in order in case of error */ + + if (bus) + dhdpcie_bus_release(bus); + +#ifdef BCMPCIE_OOB_HOST_WAKE + if (dhdpcie_osinfo) { + MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ + + if (dhdpcie_info) + dhdpcie_detach(dhdpcie_info); + pci_disable_device(pdev); + if (osh) + osl_detach(osh); + + dhdpcie_init_succeeded = FALSE; + + DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); + + return -1; /* return FAILURE */ +} + +/* Free Linux irq */ +void +dhdpcie_free_irq(dhd_bus_t *bus) +{ + struct pci_dev *pdev = NULL; + + DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__)); + if (!bus) { + return; + } + + if (bus->irq_registered) { + pdev = bus->dev; + free_irq(pdev->irq, bus); + bus->irq_registered = FALSE; + } else { + DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); + } + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); + return; +} + +/* + +Name: dhdpcie_isr + +Parametrs: + +1: IN int irq -- interrupt vector +2: IN void *arg -- handle to private data structure + +Return value: + +Status (TRUE or FALSE) + +Description: +Interrupt Service routine checks for the status register, +disable interrupt and queue DPC if mail box interrupts are raised. +*/ + + +irqreturn_t +dhdpcie_isr(int irq, void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + if (dhdpcie_bus_isr(bus)) + return TRUE; + else + return FALSE; +} + +int +dhdpcie_start_host_pcieclock(dhd_bus_t *bus) +{ + int ret = 0; +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + int options = 0; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->no_cfg_restore) { + options = MSM_PCIE_CONFIG_NO_CFG_RESTORE; + } + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, + bus->dev, NULL, options); + if (bus->no_cfg_restore && !ret) { + msm_pcie_recover_config(bus->dev); + bus->no_cfg_restore = 0; + } +#else + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, + bus->dev, NULL, 0); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + if (ret) { + DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__)); + goto done; + } + +done: +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Exit:\n", __FUNCTION__)); + return ret; +} + +int +dhdpcie_stop_host_pcieclock(dhd_bus_t *bus) +{ + int ret = 0; +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + int options = 0; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM */ + + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#ifdef CONFIG_ARCH_MSM +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->no_cfg_restore) { + options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN; + } + + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + bus->dev, NULL, options); +#else + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + bus->dev, NULL, 0); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + if (ret) { + DHD_ERROR(("Failed to stop PCIe link\n")); + goto done; + } +done: +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Exit:\n", 
__FUNCTION__)); + return ret; +} + +int +dhdpcie_disable_device(dhd_bus_t *bus) +{ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + + pci_disable_device(bus->dev); + + return 0; +} + +int +dhdpcie_enable_device(dhd_bus_t *bus) +{ + int ret = BCME_ERROR; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !defined(CONFIG_SOC_EXYNOS8890) + /* Updated with pci_load_and_free_saved_state to compatible + * with kernel 3.14 or higher + */ + pci_load_and_free_saved_state(bus->dev, &pch->default_state); + pch->default_state = pci_store_saved_state(bus->dev); +#else + pci_load_saved_state(bus->dev, pch->default_state); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !CONFIG_SOC_EXYNOS8890 */ + + pci_restore_state(bus->dev); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ + + ret = pci_enable_device(bus->dev); + if (ret) { + pci_disable_device(bus->dev); + } else { + pci_set_master(bus->dev); + } + + return ret; +} + +int +dhdpcie_alloc_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *dhdpcie_info; + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + + do { + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + break; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + break; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + break; + } + + bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */ + bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */ + + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(bus->dev, 2); + + if ((bar1_size == 0) || (bar1_addr == 0)) { + printf("%s: BAR1 Not enabled for this device size(%ld)," + " addr(0x"PRINTF_RESOURCE")\n", + __FUNCTION__, bar1_size, bar1_addr); + break; + } + + dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); + if (!dhdpcie_info->regs) { + DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); + break; + } + + bus->regs = dhdpcie_info->regs; + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); + dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; + if (!dhdpcie_info->tcm) { + DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); + REG_UNMAP(dhdpcie_info->regs); + bus->regs = NULL; + break; + } + + bus->tcm = dhdpcie_info->tcm; + + DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->regs, bar0_addr)); + DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); + + return 0; + } while (0); + + return BCME_ERROR; +} + +void +dhdpcie_free_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *dhdpcie_info; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + 
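+	/* per-device context stored with pci_set_drvdata() at probe time;
+	 * it owns the BAR register/TCM mappings that are unmapped below.
+	 */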
if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + return; + } + + if (bus->regs) { + REG_UNMAP(dhdpcie_info->regs); + bus->regs = NULL; + } + + if (bus->tcm) { + REG_UNMAP(dhdpcie_info->tcm); + bus->tcm = NULL; + } +} + +int +dhdpcie_bus_request_irq(struct dhd_bus *bus) +{ + dhdpcie_info_t *dhdpcie_info; + int ret = 0; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); + dhdpcie_bus_intr_disable(bus); + ret = dhdpcie_request_irq(dhdpcie_info); + if (ret) { + DHD_ERROR(("%s: request_irq() failed, ret=%d\n", + __FUNCTION__, ret)); + return ret; + } + } + + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) +{ + unsigned long flags; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags); + if ((dhdpcie_osinfo->oob_irq_enabled != enable) && + (dhdpcie_osinfo->oob_irq_num > 0)) { + if (enable) { + enable_irq(dhdpcie_osinfo->oob_irq_num); + } else { + disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); + } + dhdpcie_osinfo->oob_irq_enabled = enable; + } + spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *data) +{ + dhd_bus_t *bus; + DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); + bus = (dhd_bus_t *)data; + dhdpcie_oob_intr_set(bus, FALSE); +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq); +#endif /* DHD_PCIE_RUNTIMPM */ + if (bus->dhd->up && bus->suspended) { + DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); + } + return IRQ_HANDLED; +} + +int dhdpcie_oob_intr_register(dhd_bus_t *bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } + + if (dhdpcie_osinfo->oob_irq_num > 0) { + DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__, + (int)dhdpcie_osinfo->oob_irq_num, + (int)dhdpcie_osinfo->oob_irq_flags)); + err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq, + dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake", + bus); + if (err) { + DHD_ERROR(("%s: request_irq failed with %d\n", + __FUNCTION__, err)); + 
return err; + } + err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; + } + dhdpcie_osinfo->oob_irq_enabled = TRUE; + } + + dhdpcie_osinfo->oob_irq_registered = TRUE; + + return err; +} + +void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (!dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (dhdpcie_osinfo->oob_irq_num > 0) { + if (dhdpcie_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; + } + } + if (dhdpcie_osinfo->oob_irq_enabled) { + disable_irq(dhdpcie_osinfo->oob_irq_num); + dhdpcie_osinfo->oob_irq_enabled = FALSE; + } + free_irq(dhdpcie_osinfo->oob_irq_num, bus); + } + dhdpcie_osinfo->oob_irq_registered = FALSE; +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_PCIE_RUNTIMEPM +bool dhd_runtimepm_state(dhd_pub_t *dhd) +{ + dhd_bus_t *bus; + unsigned long flags; + bus = dhd->bus; + + DHD_GENERAL_LOCK(dhd, flags); + if (bus->suspended == TRUE) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_INFO(("Bus is already suspended system PM: %d\n", bus->suspended)); + return FALSE; + } + + bus->idlecount++; + + DHD_TRACE(("%s : Enter \n", __FUNCTION__)); + if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { + bus->idlecount = 0; + if (dhd->dhd_bus_busy_state == 0 && dhd->busstate != DHD_BUS_DOWN && + dhd->busstate != DHD_BUS_DOWN_IN_PROGRESS) { + bus->bus_wake = 0; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + bus->runtime_resume_done = FALSE; + /* stop all interface network queue. */ + dhd_bus_stop_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: DHD Idle state!! 
- idletime :%d, wdtick :%d \n", + __FUNCTION__, bus->idletime, dhd_runtimepm_ms)); + /* RPM suspend is failed, return FALSE then re-trying */ + if (dhdpcie_set_suspend_resume(bus->dev, TRUE)) { + DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__)); + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + bus->runtime_resume_done = TRUE; + /* It can make stuck NET TX Queue without below */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + smp_wmb(); + wake_up_interruptible(&bus->rpm_queue); + return FALSE; + } + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE; + /* For making sure NET TX Queue active */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + + wait_event_interruptible(bus->rpm_queue, bus->bus_wake); + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; + DHD_GENERAL_UNLOCK(dhd, flags); + + dhdpcie_set_suspend_resume(bus->dev, FALSE); + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; + /* Inform the wake up context that Resume is over */ + bus->runtime_resume_done = TRUE; + /* For making sure NET TX Queue active */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + + smp_wmb(); + wake_up_interruptible(&bus->rpm_queue); + DHD_ERROR(("%s : runtime resume ended\n", __FUNCTION__)); + return TRUE; + } else { + DHD_GENERAL_UNLOCK(dhd, flags); + /* Since one of the contexts are busy (TX, IOVAR or RX) + * we should not suspend + */ + DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, dhd->dhd_bus_busy_state)); + return FALSE; + } + } + + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; +} /* dhd_runtimepm_state */ + +/* + * dhd_runtime_bus_wake + * TRUE - related with runtime pm context + * FALSE - It isn't invloved in runtime pm context + */ +bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) +{ + unsigned long flags; + bus->idlecount = 0; + DHD_TRACE(("%s : enter\n", __FUNCTION__)); + if (bus->dhd->up == FALSE) { + DHD_INFO(("%s : dhd is not up\n", __FUNCTION__)); + return FALSE; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) { + /* Wake up RPM state thread if it is suspend in progress or suspended */ + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS || + bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + bus->bus_wake = 1; + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr)); + smp_wmb(); + wake_up_interruptible(&bus->rpm_queue); + /* No need to wake up the RPM state thread */ + } else if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + + /* If wait is TRUE, function with wait = TRUE will be wait in here */ + if (wait) { + wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done); + } else { + DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__)); + } + /* If it is called from RPM context, it returns TRUE */ + return TRUE; + } + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return FALSE; +} + +bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr) +{ + dhd_bus_t *bus = dhdp->bus; + return dhd_runtime_bus_wake(bus, wait, func_addr); 
+} + +void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bus->idletime = 0; +} + +bool dhdpcie_is_resume_done(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->runtime_resume_done; +} +#endif /* DHD_PCIE_RUNTIMEPM */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c new file mode 100644 index 000000000000..90e6877ec686 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pno.c @@ -0,0 +1,3889 @@ +/* + * Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload and Wi-Fi Location Service(WLS) code. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pno.c 606280 2015-12-15 05:28:25Z $ + */ + +#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT) +#error "GSCAN needs PNO to be enabled!" 
+#endif + +#ifdef PNO_SUPPORT +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef GSCAN_SUPPORT +#include +#endif /* GSCAN_SUPPORT */ + +#ifdef __BIG_ENDIAN +#include +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) +#endif /* IL_BIGENDINA */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) +#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state) +#define PNO_BESTNET_LEN 1024 +#define PNO_ON 1 +#define PNO_OFF 0 +#define CHANNEL_2G_MAX 14 +#define CHANNEL_5G_MAX 165 +#define MAX_NODE_CNT 5 +#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE) +#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \ + - (uint32)(timestamp2/1000))) +#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \ + - (uint32)(timestamp2))) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") +#define TIME_MIN_DIFF 5 +static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, + dhd_pno_status_info_t *pno_state); +#ifdef GSCAN_SUPPORT +static wl_pfn_gscan_channel_bucket_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state, +uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); +#endif /* GSCAN_SUPPORT */ + +static inline bool +is_dfs(uint16 channel) +{ + if (channel >= 52 && channel <= 64) /* class 2 */ + return TRUE; + else if (channel >= 100 && channel <= 140) /* class 4 */ + return TRUE; + else + return FALSE; +} +int +dhd_pno_clean(dhd_pub_t *dhd) +{ + int pfn = 0; + int err; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", + __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = DHD_PNO_DISABLED; + err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", + __FUNCTION__, err)); + } +exit: + return err; +} + +bool +dhd_is_pno_supported(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return WLS_SUPPORTED(_pno_state); +} + +int +dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); + return BCME_ERROR; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + if (ETHER_ISMULTI(oui)) { + DHD_ERROR(("Expected unicast OUI\n")); + err = BCME_ERROR; + } else { + memcpy(_pno_state->pno_oui, oui, DOT11_OUI_LEN); + DHD_PNO(("PNO mac oui 
to be used - %02x:%02x:%02x\n", _pno_state->pno_oui[0], + _pno_state->pno_oui[1], _pno_state->pno_oui[2])); + } + + return err; +} + +#ifdef GSCAN_SUPPORT +static uint64 +convert_fw_rel_time_to_systime(uint32 fw_ts_ms) +{ + struct timespec ts; + + get_monotonic_boottime(&ts); + return ((uint64)(TIMESPEC_TO_US(ts)) - (uint64)(fw_ts_ms * 1000)); +} + +static int +_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +static bool +is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params) +{ + smp_rmb(); + return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE); +} +#endif /* GSCAN_SUPPORT */ + +static int +dhd_pno_set_mac_addr(dhd_pub_t *dhd, struct ether_addr *macaddr) +{ + int err; + wl_pfn_macaddr_cfg_t cfg; + + cfg.version = WL_PFN_MACADDR_CFG_VER; + if (ETHER_ISNULLADDR(macaddr)) { + cfg.flags = 0; + } else { + cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); + } + memcpy(&cfg.macaddr, macaddr, ETHER_ADDR_LEN); + + err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&cfg, sizeof(cfg), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_macaddr\n", __FUNCTION__)); + } + + return err; +} + +static int +_dhd_pno_suspend(dhd_pub_t *dhd) +{ + int err; + int suspend = 1; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err)); + goto exit; + + } + _pno_state->pno_status = DHD_PNO_SUSPEND; +exit: + return err; +} +static int +_dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (enable & 0xfffe) { + DHD_ERROR(("%s invalid value\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + if (!dhd_support_sta_mode(dhd)) { + DHD_ERROR(("PNO is not allowed for non-STA mode")); + err = BCME_BADOPTION; + goto exit; + } + if (enable) { + if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + dhd_is_associated(dhd, 0, NULL)) { + DHD_ERROR(("%s Legacy PNO mode cannot be enabled " + "in assoc mode , ignore it\n", __FUNCTION__)); + err = BCME_BADOPTION; + goto exit; + } + } + /* Enable/Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = (enable)? + DHD_PNO_ENABLED : DHD_PNO_DISABLED; + if (!enable) + _pno_state->pno_mode = DHD_PNO_NONE_MODE; + + DHD_PNO(("%s set pno as %s\n", + __FUNCTION__, enable ? 
"Enable" : "Disable")); +exit: + return err; +} + +static int +_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode) +{ + int err = BCME_OK; + wl_pfn_param_t pfn_param; + dhd_pno_params_t *_params; + dhd_pno_status_info_t *_pno_state; + bool combined_scan = FALSE; + struct ether_addr macaddr; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + memset(&pfn_param, 0, sizeof(pfn_param)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) | + (ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT)); + if (mode == DHD_PNO_LEGACY_MODE) { + /* check and set extra pno params */ + if ((pno_params->params_legacy.pno_repeat != 0) || + (pno_params->params_legacy.pno_freq_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat); + pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max); + } + /* set up pno scan fr */ + if (pno_params->params_legacy.scan_fr != 0) + pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr); + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n")); + mode |= DHD_PNO_BATCH_MODE; + combined_scan = TRUE; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n")); + mode |= DHD_PNO_HOTLIST_MODE; + combined_scan = TRUE; + } +#ifdef GSCAN_SUPPORT + else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n")); + mode |= DHD_PNO_GSCAN_MODE; + } +#endif /* GSCAN_SUPPORT */ + } + if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* Scan frequency of 30 sec */ + pfn_param.scan_freq = htod32(30); + /* slow adapt scan is off by default */ + pfn_param.slow_freq = htod32(0); + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* Network timeout 60 sec */ + pfn_param.lost_network_timeout = htod32(60); + /* best n = 2 by default */ + pfn_param.bestn = DEFAULT_BESTN; + /* mscan m=0 by default, so not record best networks by default */ + pfn_param.mscan = DEFAULT_MSCAN; + /* default repeat = 10 */ + pfn_param.repeat = DEFAULT_REPEAT; + /* by default, maximum scan interval = 2^2 + * scan_freq when adaptive scan is turned on + */ + pfn_param.exp = DEFAULT_EXP; + if (mode == DHD_PNO_BATCH_MODE) { + /* In case of BATCH SCAN */ + if (pno_params->params_batch.bestn) + pfn_param.bestn = pno_params->params_batch.bestn; + if (pno_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr); + if (pno_params->params_batch.mscan) + pfn_param.mscan = pno_params->params_batch.mscan; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } else if (mode == DHD_PNO_HOTLIST_MODE) { + /* In case of HOTLIST SCAN */ + if (pno_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + if (combined_scan) { + /* Disable Adaptive Scan */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + pfn_param.repeat = 0; + pfn_param.exp = 0; + 
if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* In case of Legacy PNO + BATCH SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params->params_batch.bestn) + pfn_param.bestn = _params->params_batch.bestn; + if (_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(_params->params_batch.scan_fr); + if (_params->params_batch.mscan) + pfn_param.mscan = _params->params_batch.mscan; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* In case of Legacy PNO + HOTLIST SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + } + } + } +#ifdef GSCAN_SUPPORT + if (mode & DHD_PNO_GSCAN_MODE) { + uint32 lost_network_timeout; + + pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr); + if (pno_params->params_gscan.mscan) { + pfn_param.bestn = pno_params->params_gscan.bestn; + pfn_param.mscan = pno_params->params_gscan.mscan; + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* ADAPTIVE turned off */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.repeat = 0; + pfn_param.exp = 0; + pfn_param.slow_freq = 0; + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + dhd_pno_params_t *_params; + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + + pfn_param.scan_freq = htod32(MIN(pno_params->params_gscan.scan_fr, + _params->params_legacy.scan_fr)); + } + + lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq * + pfn_param.scan_freq * + pno_params->params_gscan.lost_ap_window); + if (lost_network_timeout) { + pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout, + GSCAN_MIN_BSSID_TIMEOUT)); + } else { + pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT); + } + } else +#endif /* GSCAN_SUPPORT */ + { + if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) || + pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) { + DHD_ERROR(("%s pno freq(%d sec) is not valid \n", + __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + err = BCME_BADARG; + goto exit; + } + } + + memset(&macaddr, 0, ETHER_ADDR_LEN); + memcpy(&macaddr, _pno_state->pno_oui, DOT11_OUI_LEN); + + DHD_PNO(("Setting mac oui to FW - %02x:%02x:%02x\n", _pno_state->pno_oui[0], + _pno_state->pno_oui[1], _pno_state->pno_oui[2])); + err = dhd_pno_set_mac_addr(dhd, &macaddr); + if (err < 0) { + DHD_ERROR(("%s : failed to set pno mac address, error - %d\n", __FUNCTION__, err)); + goto exit; + } + + +#ifdef GSCAN_SUPPORT + if (mode == DHD_PNO_BATCH_MODE || + ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) { +#else + if (mode == DHD_PNO_BATCH_MODE) { +#endif /* GSCAN_SUPPORT */ + + int _tmp = pfn_param.bestn; + /* set bestn to calculate the max mscan which firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__)); + goto exit; + } + /* get max mscan which the firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0); + if (err < 0) { + DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__)); + goto exit; + } + DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn)); + pfn_param.mscan = MIN(pfn_param.mscan, _tmp); + } + err = 
dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err)); + goto exit; + } + /* need to return mscan if this is for batch scan instead of err */ + err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err; +exit: + return err; +} +static int +_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssids_list, int nssid) +{ + int err = BCME_OK; + int i = 0; + wl_pfn_t pfn_element; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nssid) { + NULL_CHECK(ssids_list, "ssid list is NULL", err); + } + memset(&pfn_element, 0, sizeof(pfn_element)); + { + int j; + for (j = 0; j < nssid; j++) { + DHD_PNO(("%d: scan for %s size = %d hidden = %d\n", j, + ssids_list[j].SSID, ssids_list[j].SSID_len, ssids_list[j].hidden)); + } + } + /* Check for broadcast ssid */ + for (i = 0; i < nssid; i++) { + if (!ssids_list[i].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i)); + err = BCME_ERROR; + goto exit; + } + } + /* set all pfn ssid */ + for (i = 0; i < nssid; i++) { + pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); + pfn_element.auth = (DOT11_OPEN_SYSTEM); + pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); + pfn_element.wsec = htod32(0); + pfn_element.infra = htod32(1); + if (ssids_list[i].hidden) { + pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); + } else { + pfn_element.flags = 0; + } + memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID, + ssids_list[i].SSID_len); + pfn_element.ssid.SSID_len = ssids_list[i].SSID_len; + err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element, + sizeof(pfn_element), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); + goto exit; + } + } +exit: + return err; +} +/* qsort compare function */ +static int +_dhd_pno_cmpfunc(const void *a, const void *b) +{ + return (*(uint16*)a - *(uint16*)b); +} +static int +_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan, + uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2) +{ + int err = BCME_OK; + int i = 0, j = 0, k = 0; + uint16 tmp; + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + NULL_CHECK(nchan, "nchan is NULL", err); + NULL_CHECK(chan_list1, "chan_list1 is NULL", err); + NULL_CHECK(chan_list2, "chan_list2 is NULL", err); + /* chan_list1 and chan_list2 should be sorted at first */ + while (i < nchan1 && j < nchan2) { + tmp = chan_list1[i] < chan_list2[j]? 
+ chan_list1[i++] : chan_list2[j++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + } + + while (i < nchan1) { + tmp = chan_list1[i++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + d_chan_list[k++] = tmp; + } + + while (j < nchan2) { + tmp = chan_list2[j++]; + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + + } + *nchan = k; + return err; +} +static int +_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, + int *nchan, uint8 band, bool skip_dfs) +{ + int err = BCME_OK; + int i, j; + uint32 chan_buf[WL_NUMCHANNELS + 1]; + wl_uint32_list_t *list; + NULL_CHECK(dhd, "dhd is NULL", err); + if (*nchan) { + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + } + list = (wl_uint32_list_t *) (void *)chan_buf; + list->count = htod32(WL_NUMCHANNELS); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0); + if (err < 0) { + DHD_ERROR(("failed to get channel list (err: %d)\n", err)); + goto exit; + } + for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) { + if (band == WLC_BAND_2G) { + if (dtoh32(list->element[i]) > CHANNEL_2G_MAX) + continue; + } else if (band == WLC_BAND_5G) { + if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX) + continue; + if (skip_dfs && is_dfs(dtoh32(list->element[i]))) + continue; + + + } else if (band == WLC_BAND_AUTO) { + if (skip_dfs || !is_dfs(dtoh32(list->element[i]))) + continue; + } else { /* All channels */ + if (skip_dfs && is_dfs(dtoh32(list->element[i]))) + continue; + } + if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) { + d_chan_list[j++] = (uint16) dtoh32(list->element[i]); + } else { + err = BCME_BADCHAN; + goto exit; + } + } + *nchan = j; +exit: + return err; +} +static int +_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch, + char *buf, int nbufsize) +{ + int err = BCME_OK; + int bytes_written = 0, nreadsize = 0; + int t_delta = 0; + int nleftsize = nbufsize; + uint8 cnt = 0; + char *bp = buf; + char eabuf[ETHER_ADDR_STR_LEN]; +#ifdef PNO_DEBUG + char *_base_bp; + char msg[150]; +#endif + dhd_pno_bestnet_entry_t *iter, *next; + dhd_pno_scan_results_t *siter, *snext; + dhd_pno_best_header_t *phead, *pprev; + NULL_CHECK(params_batch, "params_batch is NULL", err); + if (nbufsize > 0) + NULL_CHECK(buf, "buf is NULL", err); + /* initialize the buffer */ + memset(buf, 0, nbufsize); + DHD_PNO(("%s enter \n", __FUNCTION__)); + /* # of scans */ + if (!params_batch->get_batch.batch_started) { + bp += nreadsize = sprintf(bp, "scancount=%d\n", + params_batch->get_batch.expired_tot_scan_cnt); + nleftsize -= nreadsize; + params_batch->get_batch.batch_started = TRUE; + } + DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt)); + /* preestimate scan count until which scan result this report is going to end */ + list_for_each_entry_safe(siter, snext, + ¶ms_batch->get_batch.expired_scan_results_list, list) { + phead = siter->bestnetheader; + while (phead != NULL) { + /* if left_size is less than bestheader total size , stop this */ + if (nleftsize <= + (phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD)) + goto exit; + /* increase scan count */ + cnt++; + /* # best of each scan */ + DHD_PNO(("\n\n", cnt - 1, phead->tot_cnt)); + /* attribute of the scan */ + if (phead->reason & PNO_STATUS_ABORT_MASK) { + bp += nreadsize = sprintf(bp, "trunc\n"); + nleftsize -= nreadsize; + } + list_for_each_entry_safe(iter, next, + &phead->entry_list, 
+ t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+ _base_bp = bp;
+ memset(msg, 0, sizeof(msg));
+#endif
+ /* BSSID info */
+ bp += nreadsize = sprintf(bp, "bssid=%s\n",
+ bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+ nleftsize -= nreadsize;
+ /* SSID */
+ bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID);
+ nleftsize -= nreadsize;
+ /* channel */
+ bp += nreadsize = sprintf(bp, "freq=%d\n",
+ wf_channel2mhz(iter->channel,
+ iter->channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ nleftsize -= nreadsize;
+ /* RSSI */
+ bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI);
+ nleftsize -= nreadsize;
+ /* add the time consumed in Driver to the timestamp of firmware */
+ iter->timestamp += t_delta;
+ bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp);
+ nleftsize -= nreadsize;
+ /* RTT0 */
+ bp += nreadsize = sprintf(bp, "dist=%d\n",
+ (iter->rtt0 == 0)? -1 : iter->rtt0);
+ nleftsize -= nreadsize;
+ /* RTT1 */
+ bp += nreadsize = sprintf(bp, "distSd=%d\n",
+ (iter->rtt0 == 0)? -1 : iter->rtt1);
+ nleftsize -= nreadsize;
+ bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER);
+ nleftsize -= nreadsize;
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+ memcpy(msg, _base_bp, bp - _base_bp);
+ DHD_PNO(("Entry : \n%s", msg));
+#endif
+ }
+ bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER);
+ DHD_PNO(("%s", SCAN_END_MARKER));
+ nleftsize -= nreadsize;
+ pprev = phead;
+ /* reset the header */
+ siter->bestnetheader = phead = phead->next;
+ MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+ siter->cnt_header--;
+ }
+ if (phead == NULL) {
+ /* we stored all entries of this scan, so it is ok to delete */
+ list_del(&siter->list);
+ MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+ }
+ }
+exit:
+ if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+ DHD_ERROR(("Buffer size is too small to save all batch entries,"
+ " cnt : %d (remained_scan_cnt): %d\n",
+ cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+ }
+ params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+ /* set FALSE only if the linked list is empty after returning the data */
+ if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+ params_batch->get_batch.batch_started = FALSE;
+ bp += sprintf(bp, "%s", RESULTS_END_MARKER);
+ DHD_PNO(("%s", RESULTS_END_MARKER));
+ DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+ }
+ /* return used memory in buffer */
+ bytes_written = (int32)(bp - buf);
+ return bytes_written;
+}
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+ int err = BCME_OK;
+ int removed_scan_cnt = 0;
+ dhd_pno_scan_results_t *siter, *snext;
+ dhd_pno_best_header_t *phead, *pprev;
+ dhd_pno_bestnet_entry_t *iter, *next;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(head, "head is NULL", err);
+ NULL_CHECK(head->next, "head->next is NULL", err);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ list_for_each_entry_safe(siter, snext,
+ head, list) {
+ if (only_last) {
+ /* in case we need to delete only the last one */
+ if (!list_is_last(&siter->list, head)) {
+ /* skip if the one is not last */
+ continue;
+ }
+ }
+ /* delete all data belonging to this node */
+ phead = siter->bestnetheader;
+ while (phead != NULL) {
+ removed_scan_cnt++;
+ list_for_each_entry_safe(iter, next,
+ &phead->entry_list, list) {
+ list_del(&iter->list);
+ MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+ }
+ pprev = phead;
+ phead = phead->next;
+ MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+ }
+ if (phead == NULL) {
+ /* it is ok to delete top node */
+ list_del(&siter->list);
+ MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+ }
+ }
+ return removed_scan_cnt;
+}
+
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+ int err = BCME_OK;
+ int i = 0;
+ wl_pfn_cfg_t pfncfg_param;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ if (nchan) {
+ NULL_CHECK(channel_list, "channel_list is NULL", err);
+ }
+ DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan));
+ memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t));
+ /* Setup default values */
+ pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+ pfncfg_param.channel_num = htod32(0);
+
+ for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++)
+ pfncfg_param.channel_list[i] = channel_list[i];
+
+ pfncfg_param.channel_num = htod32(nchan);
+ err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+ goto exit;
+ }
+exit:
+ return err;
+}
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ mutex_lock(&_pno_state->pno_mutex);
+ switch (mode) {
+ case DHD_PNO_LEGACY_MODE: {
+ struct dhd_pno_ssid *iter, *next;
+ if (params->params_legacy.nssid > 0) {
+ list_for_each_entry_safe(iter, next,
+ &params->params_legacy.ssid_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+ params->params_legacy.nssid = 0;
+ params->params_legacy.scan_fr = 0;
+ params->params_legacy.pno_freq_expo_max = 0;
+ params->params_legacy.pno_repeat = 0;
+ params->params_legacy.nchan = 0;
+ memset(params->params_legacy.chan_list, 0,
+ sizeof(params->params_legacy.chan_list));
+ break;
+ }
+ case DHD_PNO_BATCH_MODE: {
+ params->params_batch.scan_fr = 0;
+ params->params_batch.mscan = 0;
+ params->params_batch.nchan = 0;
+ params->params_batch.rtt = 0;
+ params->params_batch.bestn = 0;
+ params->params_batch.nchan = 0;
+ params->params_batch.band = WLC_BAND_AUTO;
+ memset(params->params_batch.chan_list, 0,
+ sizeof(params->params_batch.chan_list));
+ params->params_batch.get_batch.batch_started = FALSE;
+ params->params_batch.get_batch.buf = NULL;
+ params->params_batch.get_batch.bufsize = 0;
+ params->params_batch.get_batch.reason = 0;
+ _dhd_pno_clear_all_batch_results(dhd,
+ &params->params_batch.get_batch.scan_results_list, FALSE);
+ _dhd_pno_clear_all_batch_results(dhd,
+ &params->params_batch.get_batch.expired_scan_results_list, FALSE);
+ params->params_batch.get_batch.tot_scan_cnt = 0;
+ params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+ params->params_batch.get_batch.top_node_cnt = 0;
+ INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+ INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+ break;
+ }
+ case DHD_PNO_HOTLIST_MODE: {
+ struct dhd_pno_bssid *iter, *next;
+ if (params->params_hotlist.nbssid > 0) {
+ list_for_each_entry_safe(iter, next,
+ &params->params_hotlist.bssid_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+ params->params_hotlist.scan_fr = 0;
+ params->params_hotlist.nbssid = 0;
+ params->params_hotlist.nchan = 0;
+ params->params_batch.band = WLC_BAND_AUTO;
+ memset(params->params_hotlist.chan_list, 0,
+
sizeof(params->params_hotlist.chan_list)); + break; + } + default: + DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode)); + break; + } + mutex_unlock(&_pno_state->pno_mutex); + return err; +} +static int +_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nbssid) { + NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err); + } + err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * nbssid, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +#ifdef GSCAN_SUPPORT +static int +_dhd_pno_add_significant_bssid(dhd_pub_t *dhd, + wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + if (!nbssid) { + err = BCME_ERROR; + goto exit; + } + + NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err); + + err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid", (char *)p_pfn_significant_bssid, + sizeof(wl_pfn_significant_bssid_t) * nbssid, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_significant_bssid %d\n", __FUNCTION__, err)); + goto exit; + } +exit: + return err; +} +#endif /* GSCAN_SUPPORT */ + +int +dhd_pno_stop_for_ssid(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + NULL_CHECK(dhd, "dev is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) { + DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &_params->params_gscan; + + if (gscan_params->mscan) + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + /* Restart gscan */ + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + /* restart Batch mode if the batch mode is on */ + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + dhd_pno_clean(dhd); + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* restart HOTLIST SCAN */ + struct dhd_pno_bssid *iter, *next; + _params = 
&(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + /* convert dhd_pno_bssid to wl_pfn_bssid */ + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid->flags = iter->flags; + p_pfn_bssid++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + kfree(p_pfn_bssid); + return err; +} + +int +dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + return (_dhd_pno_enable(dhd, enable)); +} + +static wlc_ssid_ext_t * +dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state) +{ + int err = BCME_OK; + int i; + struct dhd_pno_ssid *iter, *next; + dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + wlc_ssid_ext_t *p_ssid_list; + + p_ssid_list = kzalloc(sizeof(wlc_ssid_ext_t) * + _params1->params_legacy.nssid, GFP_KERNEL); + if (p_ssid_list == NULL) { + DHD_ERROR(("%s : failed to allocate wlc_ssid_ext_t array (count: %d)", + __FUNCTION__, _params1->params_legacy.nssid)); + err = BCME_ERROR; + pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + goto exit; + } + i = 0; + /* convert dhd_pno_ssid to wlc_ssid_ext_t */ + list_for_each_entry_safe(iter, next, &_params1->params_legacy.ssid_list, list) { + p_ssid_list[i].SSID_len = iter->SSID_len; + p_ssid_list[i].hidden = iter->hidden; + memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len); + i++; + } +exit: + return p_ssid_list; +} + +static int +dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list, + int nssid) +{ + int ret = 0; + int i; + struct dhd_pno_ssid *_pno_ssid; + + for (i = 0; i < nssid; i++) { + if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s : Invalid SSID length %d\n", + __FUNCTION__, ssid_list[i].SSID_len)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL); + if (_pno_ssid == NULL) { + DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid->SSID_len = ssid_list[i].SSID_len; + _pno_ssid->hidden = ssid_list[i].hidden; + memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); + list_add_tail(&_pno_ssid->list, ¶ms->params_legacy.ssid_list); + } + +exit: + return ret; +} + +int +dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + uint16 _chan_list[WL_NUMCHANNELS]; + int32 tot_nchan = 0; + int err = BCME_OK; + int i; + int mode = 0; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, 
"pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_clear; + } + DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d," + "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, + scan_fr, pno_repeat, pno_freq_expo_max, nchan)); + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + /* If GSCAN is also ON will handle this down below */ +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE && + !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { +#else + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { +#endif /* GSCAN_SUPPORT */ + DHD_ERROR(("%s : Legacy PNO mode was already started, " + "will disable previous one to start new one\n", __FUNCTION__)); + err = dhd_pno_stop_for_ssid(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", + __FUNCTION__, err)); + goto exit_no_clear; + } + } + _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", + __FUNCTION__, err)); + goto exit_no_clear; + } + memset(_chan_list, 0, sizeof(_chan_list)); + tot_nchan = MIN(nchan, WL_NUMCHANNELS); + if (tot_nchan > 0 && channel_list) { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i]; + } +#ifdef GSCAN_SUPPORT + else { + tot_nchan = WL_NUMCHANNELS; + err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan, + (WLC_BAND_2G | WLC_BAND_5G), TRUE); + if (err < 0) { + tot_nchan = 0; + DHD_PNO(("Could not get channel list for PNO SSID\n")); + } else { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i]; + } + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + DHD_PNO(("BATCH SCAN is on progress in firmware\n")); + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit_no_clear; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* use superset of channel list between two mode */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params2->params_batch.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_batch.chan_list[0], + _params2->params_batch.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit_no_clear; + } + } else { + DHD_PNO(("superset channel will use" + " all channels in firmware\n")); + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_hotlist.chan_list[0], + _params2->params_hotlist.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and hotlist\n", + __FUNCTION__)); + goto exit_no_clear; + } + } + } + } + _params->params_legacy.scan_fr = scan_fr; + _params->params_legacy.pno_repeat 
= pno_repeat; + _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; + _params->params_legacy.nchan = tot_nchan; + _params->params_legacy.nssid = nssid; + INIT_LIST_HEAD(&_params->params_legacy.ssid_list); +#ifdef GSCAN_SUPPORT + /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */ + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { + err = BCME_ERROR; + goto exit; + } + DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n")); + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) { + DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); + goto exit; + } + if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { + err = BCME_ERROR; + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + if (err < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } +exit_no_clear: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + } + } + return err; +} +int +dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) +{ + int err = BCME_OK; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0, tot_nchan = 0; + int mode = 0, mscan = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(batch_params, "batch_params is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + _pno_state->pno_mode |= DHD_PNO_BATCH_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } else { + /* batch mode is already started */ + return -EBUSY; + } + _params->params_batch.scan_fr = batch_params->scan_fr; + _params->params_batch.bestn = batch_params->bestn; + _params->params_batch.mscan = (batch_params->mscan)? 
+ batch_params->mscan : DEFAULT_BATCH_MSCAN; + _params->params_batch.nchan = batch_params->nchan; + memcpy(_params->params_batch.chan_list, batch_params->chan_list, + sizeof(_params->params_batch.chan_list)); + + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan; + if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_batch.chan_list[batch_params->nchan], + &rem_nchan, batch_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, batch_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_batch.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_batch.chan_list, _params->params_batch.nchan, + sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", _params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif + if (_params->params_batch.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list)); + tot_nchan = _params->params_batch.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is on progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset for channelist between two mode */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_batch.chan_list[0], _params->params_batch.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit; + } + } else { + DHD_PNO(("superset channel will use all channels in firmware\n")); + } + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list, + _params2->params_legacy.nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } else { + /* we need to return mscan */ + mscan = err; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + else { + /* return #max scan firmware can do */ 
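+ /* Note: mscan was captured above from the _dhd_pno_set() return value
+ * (the firmware "pfn_set" result), so on success this function returns
+ * the maximum number of scans the firmware can buffer, not BCME_OK.
+ */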
+ err = mscan; + } + if (p_ssid_list) + kfree(p_ssid_list); + return err; +} + + +#ifdef GSCAN_SUPPORT +static void +dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params, + dhd_pno_status_info_t *_pno_state, uint8 flags) +{ + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (flags & GSCAN_FLUSH_SCAN_CFG) { + _params->params_gscan.bestn = 0; + _params->params_gscan.mscan = 0; + _params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + _params->params_gscan.scan_fr = 0; + _params->params_gscan.send_all_results_flag = 0; + memset(_params->params_gscan.channel_bucket, 0, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + _params->params_gscan.nchannel_buckets = 0; + DHD_PNO(("Flush Scan config\n")); + } + if (flags & GSCAN_FLUSH_HOTLIST_CFG) { + struct dhd_pno_bssid *iter, *next; + if (_params->params_gscan.nbssid_hotlist > 0) { + list_for_each_entry_safe(iter, next, + &_params->params_gscan.hotlist_bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + _params->params_gscan.nbssid_hotlist = 0; + DHD_PNO(("Flush Hotlist Config\n")); + } + if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG) { + dhd_pno_significant_bssid_t *iter, *next; + + if (_params->params_gscan.nbssid_significant_change > 0) { + list_for_each_entry_safe(iter, next, + &_params->params_gscan.significant_bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + _params->params_gscan.nbssid_significant_change = 0; + DHD_PNO(("Flush Significant Change Config\n")); + } + + return; +} + +void +dhd_pno_lock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + return; +} + +void +dhd_pno_unlock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_unlock(&_pno_state->pno_mutex); + return; +} + +void +dhd_wait_batch_results_complete(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + /* Has the workqueue finished its job already?? */ + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) { + DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__)); + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */ + gscan_results_cache_t *iter; + uint16 num_results = 0; + int err; + + mutex_lock(&_pno_state->pno_mutex); + iter = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + iter = iter->next; + } + mutex_unlock(&_pno_state->pno_mutex); + + /* All results consumed/No results cached?? 
+ * Get fresh results from FW + */ + if (!num_results) { + DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__)); + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } + } + DHD_PNO(("%s: Wait complete\n", __FUNCTION__)); + + return; +} + +static void * +dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) +{ + gscan_results_cache_t *iter, *results; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + uint16 num_scan_ids = 0, num_results = 0; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + iter = results = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + num_scan_ids++; + iter = iter->next; + } + + *len = ((num_results << 16) | (num_scan_ids)); + return results; +} + +void * +dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + void *ret = NULL; + dhd_pno_gscan_capabilities_t *ptr; + + if (!len) { + DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); + return ret; + } + + switch (type) { + case DHD_PNO_GET_CAPABILITIES: + ptr = (dhd_pno_gscan_capabilities_t *) + kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL); + if (!ptr) + break; + /* Hardcoding these values for now, need to get + * these values from FW, will change in a later check-in + */ + ptr->max_scan_cache_size = 12; + ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; + ptr->max_ap_cache_per_scan = 16; + ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX; + ptr->max_scan_reporting_threshold = 100; + ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS; + ret = (void *)ptr; + *len = sizeof(dhd_pno_gscan_capabilities_t); + break; + + case DHD_PNO_GET_BATCH_RESULTS: + ret = dhd_get_gscan_batch_results(dhd, len); + break; + case DHD_PNO_GET_CHANNEL_LIST: + if (info) { + uint16 ch_list[WL_NUMCHANNELS]; + uint32 *ptr, mem_needed, i; + int32 err, nchan = WL_NUMCHANNELS; + uint32 *gscan_band = (uint32 *) info; + uint8 band = 0; + + /* No band specified?, nothing to do */ + if ((*gscan_band & GSCAN_BAND_MASK) == 0) { + DHD_PNO(("No band specified\n")); + *len = 0; + break; + } + + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (*gscan_band & GSCAN_BG_BAND_MASK) { + band |= WLC_BAND_2G; + } + if (*gscan_band & GSCAN_A_BAND_MASK) { + band |= WLC_BAND_5G; + } + + err = _dhd_pno_get_channels(dhd, ch_list, &nchan, + (band & GSCAN_ABG_BAND_MASK), + !(*gscan_band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list\n", + __FUNCTION__)); + *len = 0; + } else { + mem_needed = sizeof(uint32) * nchan; + ptr = (uint32 *) kmalloc(mem_needed, GFP_KERNEL); + if (!ptr) { + DHD_ERROR(("%s: Unable to malloc %d bytes\n", + __FUNCTION__, mem_needed)); + break; + } + for (i = 0; i < nchan; i++) { + ptr[i] = wf_channel2mhz(ch_list[i], + (ch_list[i] <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + } + ret = ptr; + *len = mem_needed; + } + } else { + *len = 0; + DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); + } + break; + + default: + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } + + return ret; + +} + +int +dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush) +{ + int err = BCME_OK; + dhd_pno_params_t *_params; + int i; + dhd_pno_status_info_t *_pno_state; + + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + + switch (type) { + case DHD_PNO_BATCH_SCAN_CFG_ID: + { + gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf; + _params->params_gscan.bestn = ptr->bestn; + _params->params_gscan.mscan = ptr->mscan; + _params->params_gscan.buffer_threshold = ptr->buffer_threshold; + break; + } + case DHD_PNO_GEOFENCE_SCAN_CFG_ID: + { + gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf; + struct dhd_pno_bssid *_pno_bssid; + struct bssid_t *bssid_ptr; + int8 flags; + + if (flush) { + dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_HOTLIST_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_hotlist) { + INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); + } + if ((_params->params_gscan.nbssid_hotlist + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of hotlist APs programmed %d\n", + (_params->params_gscan.nbssid_hotlist + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { + _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); + + if (!_pno_bssid) { + DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes", + sizeof(struct dhd_pno_bssid))); + err = BCME_NOMEM; + goto exit; + } + memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN); + + flags = (int8) bssid_ptr->rssi_reporting_threshold; + _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT; + list_add_tail(&_pno_bssid->list, + &_params->params_gscan.hotlist_bssid_list); + } + + _params->params_gscan.nbssid_hotlist += ptr->nbssid; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + break; + } + case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID: + { + gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf; + dhd_pno_significant_bssid_t *_pno_significant_change_bssid; + wl_pfn_significant_bssid_t *significant_bssid_ptr; + + if (flush) { + dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_SIGNIFICANT_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_significant_change) { + INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list); + } + if ((_params->params_gscan.nbssid_significant_change + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of SWC APs programmed %d\n", + (_params->params_gscan.nbssid_significant_change + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list; + i < ptr->nbssid; i++, significant_bssid_ptr++) { + _pno_significant_change_bssid = + kzalloc(sizeof(dhd_pno_significant_bssid_t), + GFP_KERNEL); + + if (!_pno_significant_change_bssid) { + DHD_ERROR(("SWC bssidptr is NULL, cannot kalloc %zd bytes", + sizeof(dhd_pno_significant_bssid_t))); + err = BCME_NOMEM; + goto exit; + } + 
memcpy(&_pno_significant_change_bssid->BSSID, + &significant_bssid_ptr->macaddr, ETHER_ADDR_LEN); + _pno_significant_change_bssid->rssi_low_threshold = + significant_bssid_ptr->rssi_low_threshold; + _pno_significant_change_bssid->rssi_high_threshold = + significant_bssid_ptr->rssi_high_threshold; + list_add_tail(&_pno_significant_change_bssid->list, + &_params->params_gscan.significant_bssid_list); + } + + _params->params_gscan.swc_nbssid_threshold = ptr->swc_threshold; + _params->params_gscan.swc_rssi_window_size = ptr->rssi_window; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + _params->params_gscan.nbssid_significant_change += ptr->nbssid; + break; + } + case DHD_PNO_SCAN_CFG_ID: + { + int i, k, valid = 0; + uint16 band, min; + gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) { + _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets; + + memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + min = ptr->channel_bucket[0].bucket_freq_multiple; + ch_bucket = _params->params_gscan.channel_bucket; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + band = ch_bucket[i].band; + for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) { + ch_bucket[i].chan_list[k] = + wf_mhz2channel(ptr->channel_bucket[i].chan_list[k], + 0); + } + ch_bucket[i].band = 0; + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (band & GSCAN_BG_BAND_MASK) + ch_bucket[i].band |= WLC_BAND_2G; + + if (band & GSCAN_A_BAND_MASK) + ch_bucket[i].band |= WLC_BAND_5G; + + if (band & GSCAN_DFS_MASK) + ch_bucket[i].band |= GSCAN_DFS_MASK; + if (ptr->scan_fr == + ptr->channel_bucket[i].bucket_freq_multiple) { + valid = 1; + } + if (ptr->channel_bucket[i].bucket_freq_multiple < min) + min = ptr->channel_bucket[i].bucket_freq_multiple; + + DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band, + ch_bucket[i].report_flag)); + } + if (!valid) + ptr->scan_fr = min; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple/ptr->scan_fr; + } + _params->params_gscan.scan_fr = ptr->scan_fr; + + DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets, + _params->params_gscan.scan_fr)); + } else { + err = BCME_BADARG; + } + break; + } + default: + err = BCME_BADARG; + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } +exit: + mutex_unlock(&_pno_state->pno_mutex); + return err; + +} + + +static bool +validate_gscan_params(struct dhd_pno_gscan_params *gscan_params) +{ + unsigned int i, k; + + if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) { + DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n", + __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets)); + return false; + } + + for (i = 0; i < gscan_params->nchannel_buckets; i++) { + if (!gscan_params->channel_bucket[i].band) { + for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) { + if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) { + DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__, + gscan_params->channel_bucket[i].chan_list[k])); + return false; + } + } + } + } + + return true; +} + +static int +dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) +{ + int err = BCME_OK; + int 
mode, i = 0, k; + uint16 _chan_list[WL_NUMCHANNELS]; + int tot_nchan = 0; + int num_buckets_to_fw, tot_num_buckets, gscan_param_size; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL; + wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL; + wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + wlc_ssid_ext_t *pssid_list = NULL; + dhd_pno_params_t *params_legacy; + dhd_pno_params_t *_params; + + params_legacy = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(gscan_params, "gscan_params is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!validate_gscan_params(gscan_params)) { + DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + /* Create channel list based on channel buckets */ + if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state, + _chan_list, &tot_num_buckets, &num_buckets_to_fw))) { + goto exit; + } + + if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) { + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + } + + _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE; + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + + if (!pssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + + if ((err = _dhd_pno_add_ssid(dhd, pssid_list, + params_legacy->params_legacy.nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + + gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) + + (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t); + pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size); + + if (!pfn_gscan_cfg_t) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, gscan_param_size)); + err = BCME_NOMEM; + goto exit; + } + + + if (gscan_params->mscan) { + pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold; + } else { + pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET; + } + if (gscan_params->nbssid_significant_change) { + pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold; + pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size; + pfn_gscan_cfg_t->lost_ap_window = gscan_params->lost_ap_window; + } else { + pfn_gscan_cfg_t->swc_nbssid_threshold = 0; + pfn_gscan_cfg_t->swc_rssi_window_size = 0; + pfn_gscan_cfg_t->lost_ap_window = 0; + } + + pfn_gscan_cfg_t->flags = + (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK); + pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw; + + + for (i = 0, k = 0; i < tot_num_buckets; i++) { + if 
(ch_bucket[i].bucket_end_index != CHANNEL_BUCKET_EMPTY_INDEX) { + pfn_gscan_cfg_t->channel_bucket[k].bucket_end_index = + ch_bucket[i].bucket_end_index; + pfn_gscan_cfg_t->channel_bucket[k].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[k].report_flag = + ch_bucket[i].report_flag; + k++; + } + } + + tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1; + DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d \n", tot_nchan, + tot_num_buckets, num_buckets_to_fw)); + + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + + if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) { + DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (gscan_params->nbssid_significant_change) { + dhd_pno_significant_bssid_t *iter, *next; + + + p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) * + gscan_params->nbssid_significant_change, GFP_KERNEL); + if (p_pfn_significant_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate memory %zd\n", + __FUNCTION__, + sizeof(wl_pfn_significant_bssid_t) * + gscan_params->nbssid_significant_change)); + err = BCME_NOMEM; + goto exit; + } + i = 0; + /* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */ + list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) { + p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold; + p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold; + memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN); + i++; + } + DHD_PNO(("nbssid_significant_change %d \n", + gscan_params->nbssid_significant_change)); + err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid, + gscan_params->nbssid_significant_change); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if (gscan_params->nbssid_hotlist) { + struct dhd_pno_bssid *iter, *next; + wl_pfn_bssid_t *ptr; + p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) * + gscan_params->nbssid_hotlist, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_NOMEM; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + ptr = p_pfn_bssid; + /* convert dhd_pno_bssid to wl_pfn_bssid */ + DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist)); + list_for_each_entry_safe(iter, next, + &gscan_params->hotlist_bssid_list, list) { + memcpy(&ptr->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + ptr->flags = iter->flags; + ptr++; + } + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) { + DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err)); + } + +exit: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE; + } + } + kfree(pssid_list); + 
kfree(p_pfn_significant_bssid); + kfree(p_pfn_bssid); + if (pfn_gscan_cfg_t) { + MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size); + } + if (ch_bucket) { + MFREE(dhd->osh, ch_bucket, + (tot_num_buckets * sizeof(wl_pfn_gscan_channel_bucket_t))); + } + return err; + +} + + +static void +dhd_pno_merge_gscan_pno_channels(dhd_pno_status_info_t *pno_state, + uint16 *chan_list, + uint8 *ch_scratch_pad, + wl_pfn_gscan_channel_bucket_t *ch_bucket, + uint32 *num_buckets_to_fw, + int num_channels) +{ + uint16 chan_buf[WL_NUMCHANNELS]; + int i, j = 0, ch_bucket_idx = 0; + dhd_pno_params_t *_params = &pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + uint16 *legacy_chan_list = _params1->params_legacy.chan_list; + bool is_legacy_scan_freq_higher; + uint8 report_flag = CH_BUCKET_REPORT_REGULAR; + + if (!_params1->params_legacy.scan_fr) + _params1->params_legacy.scan_fr = PNO_SCAN_MIN_FW_SEC; + + is_legacy_scan_freq_higher = + _params->params_gscan.scan_fr < _params1->params_legacy.scan_fr; + + /* Calculate new Legacy scan multiple of base scan_freq + * The legacy PNO channel bucket is added at the end of the + * channel bucket list. + */ + if (is_legacy_scan_freq_higher) { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = + _params1->params_legacy.scan_fr/_params->params_gscan.scan_fr; + + } else { + uint16 max = 0; + + /* Calculate new multiple of base scan_freq for gscan buckets */ + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = 1; + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; + ch_bucket[i].bucket_freq_multiple /= _params1->params_legacy.scan_fr; + if (max < ch_bucket[i].bucket_freq_multiple) + max = ch_bucket[i].bucket_freq_multiple; + } + _params->params_gscan.max_ch_bucket_freq = max; + } + + /* Off to remove duplicates!! + * Find channels that are already being serviced by gscan before legacy bucket + * These have to be removed from legacy bucket. + * !!Assuming chan_list channels are validated list of channels!! + * ch_scratch_pad is 1 at gscan bucket locations see dhd_pno_gscan_create_channel_list() + */ + for (i = 0; i < _params1->params_legacy.nchan; i++) + ch_scratch_pad[legacy_chan_list[i]] += 2; + + ch_bucket_idx = 0; + memcpy(chan_buf, chan_list, num_channels * sizeof(uint16)); + + /* Finally create channel list and bucket + * At this point ch_scratch_pad can have 4 values: + * 0 - Channel not present in either Gscan or Legacy PNO bucket + * 1 - Channel present only in Gscan bucket + * 2 - Channel present only in Legacy PNO bucket + * 3 - Channel present in both Gscan and Legacy PNO buckets + * Thus Gscan buckets can have values 1 or 3 and Legacy 2 or 3 + * For channel buckets with scan_freq < legacy accept all + * channels i.e. 
ch_scratch_pad = 1 and 3 + * else accept only ch_scratch_pad = 1 and mark rejects as + * ch_scratch_pad = 4 so that they go in legacy + */ + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + if (ch_bucket[i].bucket_freq_multiple <= + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple) { + for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++, j++) + chan_list[j] = chan_buf[ch_bucket_idx]; + + ch_bucket[i].bucket_end_index = j - 1; + } else { + num_channels = 0; + for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++) { + if (ch_scratch_pad[chan_buf[ch_bucket_idx]] == 1) { + chan_list[j] = chan_buf[ch_bucket_idx]; + j++; + num_channels++; + } else { + ch_scratch_pad[chan_buf[ch_bucket_idx]] = 4; + /* If Gscan channel is merged off to legacy bucket and + * if the gscan channel bucket has a report flag > 0 + * use the same for legacy + */ + if (report_flag < ch_bucket[i].report_flag) + report_flag = ch_bucket[i].report_flag; + } + } + + if (num_channels) { + ch_bucket[i].bucket_end_index = j - 1; + } else { + ch_bucket[i].bucket_end_index = CHANNEL_BUCKET_EMPTY_INDEX; + *num_buckets_to_fw = *num_buckets_to_fw - 1; + } + } + + } + + num_channels = 0; + ch_bucket[_params->params_gscan.nchannel_buckets].report_flag = report_flag; + /* Now add channels to the legacy scan bucket + * ch_scratch_pad = 0 to 4 at this point, for legacy -> 2,3,4. 2 means exclusively + * Legacy so add to bucket. 4 means it is a reject of gscan bucket and must + * be added to Legacy bucket,reject 3 + */ + for (i = 0; i < _params1->params_legacy.nchan; i++) { + if (ch_scratch_pad[legacy_chan_list[i]] != 3) { + chan_list[j] = legacy_chan_list[i]; + j++; + num_channels++; + } + } + if (num_channels) { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = j - 1; + } + else { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = + CHANNEL_BUCKET_EMPTY_INDEX; + *num_buckets_to_fw = *num_buckets_to_fw - 1; + } + + return; +} +static wl_pfn_gscan_channel_bucket_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, + dhd_pno_status_info_t *_pno_state, + uint16 *chan_list, + uint32 *num_buckets, + uint32 *num_buckets_to_fw) +{ + int i, num_channels, err, nchan = WL_NUMCHANNELS; + uint16 *ptr = chan_list, max; + uint8 *ch_scratch_pad; + wl_pfn_gscan_channel_bucket_t *ch_bucket; + dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + bool is_pno_legacy_running = _pno_state->pno_mode & DHD_PNO_LEGACY_MODE; + dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket; + + if (is_pno_legacy_running) + *num_buckets = _params->params_gscan.nchannel_buckets + 1; + else + *num_buckets = _params->params_gscan.nchannel_buckets; + + + *num_buckets_to_fw = *num_buckets; + + + ch_bucket = (wl_pfn_gscan_channel_bucket_t *) MALLOC(dhd->osh, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + + if (!ch_bucket) { + DHD_ERROR(("%s: failed to malloc memory of size %zd\n", + __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + max = gscan_buckets[0].bucket_freq_multiple; + num_channels = 0; + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + if (!gscan_buckets[i].band) { + num_channels += gscan_buckets[i].num_channels; + memcpy(ptr, gscan_buckets[i].chan_list, + gscan_buckets[i].num_channels * sizeof(uint16)); + ptr = ptr + gscan_buckets[i].num_channels; + } else { + /* get a valid channel 
list based on band B or A */
+ err = _dhd_pno_get_channels(dhd, ptr,
+ &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK),
+ !(gscan_buckets[i].band & GSCAN_DFS_MASK));
+
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+ __FUNCTION__, gscan_buckets[i].band));
+ MFREE(dhd->osh, ch_bucket,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ num_channels += nchan;
+ ptr = ptr + nchan;
+ }
+
+ ch_bucket[i].bucket_end_index = num_channels - 1;
+ ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple;
+ ch_bucket[i].report_flag = gscan_buckets[i].report_flag;
+ if (max < gscan_buckets[i].bucket_freq_multiple)
+ max = gscan_buckets[i].bucket_freq_multiple;
+ nchan = WL_NUMCHANNELS - num_channels;
+ DHD_PNO(("end_idx %d freq_mult - %d\n",
+ ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple));
+ }
+
+ ch_scratch_pad = (uint8 *) kzalloc(CHANNEL_5G_MAX, GFP_KERNEL);
+ if (!ch_scratch_pad) {
+ DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+ __FUNCTION__, CHANNEL_5G_MAX));
+ MFREE(dhd->osh, ch_bucket,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ /* Look for duplicates in the gscan buckets in case the framework programmed
+ * them badly; for now, return an error if there are duplicates.
+ * As a side effect, every channel present in a Gscan bucket is marked
+ * with 1 in ch_scratch_pad for dhd_pno_merge_gscan_pno_channels().
+ */
+ for (i = 0; i < num_channels; i++) {
+ if (!ch_scratch_pad[chan_list[i]]) {
+ ch_scratch_pad[chan_list[i]] = 1;
+ } else {
+ DHD_ERROR(("%s: Duplicate channel - %d programmed in channel bucket\n",
+ __FUNCTION__, chan_list[i]));
+ MFREE(dhd->osh, ch_bucket, ((*num_buckets) *
+ sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ kfree(ch_scratch_pad);
+ return NULL;
+ }
+ }
+ _params->params_gscan.max_ch_bucket_freq = max;
+ /* Legacy PNO may be running, which means we need to create a legacy PNO bucket.
+ * We also need to remove duplicates, as the legacy PNO chan_list may have channels
+ * in common with the gscan buckets.
+ * If a channel is to be scanned more frequently per the gscan requirements,
+ * it is removed from the legacy PNO ch_bucket. Similarly, if legacy wants a
+ * channel scanned more often, it is removed from the Gscan channel bucket.
+ * In the end both are satisfied.
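+ * Illustrative example (made-up numbers): with a gscan bucket period of
+ * 20s and a legacy PNO period of 60s, a channel present in both stays in
+ * the 20s gscan bucket and is dropped from the legacy bucket; if the
+ * legacy period were 10s instead, the shared channel would be moved out
+ * of the gscan bucket and scanned from the legacy bucket.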
+ */ + if (is_pno_legacy_running) + dhd_pno_merge_gscan_pno_channels(_pno_state, chan_list, + ch_scratch_pad, ch_bucket, num_buckets_to_fw, num_channels); + + kfree(ch_scratch_pad); + return ch_bucket; +} + +static int +dhd_pno_stop_for_gscan(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode; + wlc_ssid_ext_t *pssid_list = NULL; + dhd_pno_status_info_t *_pno_state; + + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE; + err = dhd_pno_clean(dhd); + if (err < 0) { + + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + return err; + } + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + + /* Reprogram Legacy PNO if it was running */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *params_legacy; + uint16 chan_list[WL_NUMCHANNELS]; + + params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!pssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + + DHD_PNO(("Restarting Legacy PNO SSID scan...\n")); + memcpy(chan_list, params_legacy->chan_list, + (params_legacy->nchan * sizeof(uint16))); + err = dhd_pno_set_for_ssid(dhd, pssid_list, params_legacy->nssid, + params_legacy->scan_fr, params_legacy->pno_repeat, + params_legacy->pno_freq_expo_max, chan_list, + params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + } + +exit: + kfree(pssid_list); + return err; +} + +int +dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush)); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + if (run) { + err = dhd_pno_set_for_gscan(dhd, gscan_params); + } else { + if (flush) { + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } + /* Need to stop all gscan */ + err = dhd_pno_stop_for_gscan(dhd); + } + + return err; +} + +int +dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + uint8 old_flag; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi 
location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + mutex_lock(&_pno_state->pno_mutex); + + old_flag = gscan_params->send_all_results_flag; + gscan_params->send_all_results_flag = (uint8) real_time_flag; + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (old_flag != gscan_params->send_all_results_flag) { + wl_pfn_gscan_cfg_t gscan_cfg; + gscan_cfg.flags = (gscan_params->send_all_results_flag & + GSCAN_SEND_ALL_RESULTS_MASK); + gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK; + + if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg, + sizeof(wl_pfn_gscan_cfg_t))) < 0) { + DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + } else { + DHD_PNO(("No change in flag - %d\n", old_flag)); + } + } else { + DHD_PNO(("Gscan not started\n")); + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + return err; +} + +int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) +{ + int ret = 0; + dhd_pno_params_t *params; + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter, *tmp; + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + iter = gscan_params->gscan_batch_cache; + + while (iter) { + if (iter->tot_consumed == iter->tot_count) { + tmp = iter->next; + kfree(iter); + iter = tmp; + } else + break; +} + gscan_params->gscan_batch_cache = iter; + ret = (iter == NULL); + return ret; +} + +static int +_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 timestamp = 0, ts = 0, i, j, timediff; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + wl_pfn_lnet_info_t *plnetinfo; + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_lscanresults_t *plbestnet = NULL; + gscan_results_cache_t *iter, *tail; + wifi_gscan_result_t *result; + uint8 *nAPs_per_scan = NULL; + uint8 num_scans_in_cur_iter; + uint16 count, scan_id = 0; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + gscan_params = ¶ms->params_gscan; + nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan); + + if (!nAPs_per_scan) { + DHD_ERROR(("%s :Out of memory!! 
Cant malloc %d bytes\n", __FUNCTION__, + gscan_params->mscan)); + err = BCME_NOMEM; + goto exit; + } + + plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + + mutex_lock(&_pno_state->pno_mutex); + iter = gscan_params->gscan_batch_cache; + /* If a cache has not been consumed, just delete it */ + while (iter) { + iter->tot_consumed = iter->tot_count; + iter = iter->next; + } + dhd_gscan_batch_cache_cleanup(dhd); + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit_mutex_unlock; + } + + timediff = gscan_params->scan_fr * 1000; + timediff = timediff >> 1; + + /* OK, now let's start getting results from the FW */ + plbestnet->status = PFN_INCOMPLETE; + tail = gscan_params->gscan_batch_cache; + while (plbestnet->status != PFN_COMPLETE) { + memset(plbestnet, 0, PNO_BESTNET_LEN); + err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + if (err < 0) { + DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, + plbestnet->status, plbestnet->count)); + if (plbestnet->version != PFN_SCANRESULT_VERSION) { + err = BCME_VERSION; + DHD_ERROR(("bestnet version(%d) does not match Driver version(%d)\n", + plbestnet->version, PFN_SCANRESULT_VERSION)); + goto exit_mutex_unlock; + } + + num_scans_in_cur_iter = 0; + timestamp = plbestnet->netinfo[0].timestamp; + /* find out how many scans' results we got in this batch of FW results */ + for (i = 0, count = 0; i < plbestnet->count; i++, count++) { + plnetinfo = &plbestnet->netinfo[i]; + /* Unlikely to happen, but just in case the results from + * FW don't make sense, assume they are part of one single scan + */ + if (num_scans_in_cur_iter > gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = plbestnet->count; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo->timestamp; + } + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + plnetinfo = &plbestnet->netinfo[0]; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + kzalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t), GFP_KERNEL); + if (!iter) { + DHD_ERROR(("%s :Out of memory!! 
Cant malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * maybe a continuation of previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) { + iter->scan_id = ++scan_id; + } else { + iter->scan_id = scan_id; + } + DHD_PNO(("scan_id %d tot_count %d\n", scan_id, nAPs_per_scan[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->tot_consumed = 0; + + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) { + result = &iter->results[j]; + + result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) plnetinfo->RSSI; + /* Info not available & not expected */ + result->beacon_period = 0; + result->capability = 0; + result->ie_length = 0; + result->rtt = (uint64) plnetinfo->rtt0; + result->rtt_sd = (uint64) plnetinfo->rtt1; + result->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + ts = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(result->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + result->macaddr.octet[0], + result->macaddr.octet[1], + result->macaddr.octet[2], + result->macaddr.octet[3], + result->macaddr.octet[4], + result->macaddr.octet[5])); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo->rtt0, plnetinfo->rtt1)); + + } + } + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE; + smp_wmb(); + wake_up_interruptible(&_pno_state->batch_get_wait); + if (nAPs_per_scan) { + MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan); + } + if (plbestnet) { + MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); + } + DHD_PNO(("Batch retrieval done!\n")); + return err; +} +#endif /* GSCAN_SUPPORT */ + +static int +_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + int i, j; + uint32 timestamp = 0; + dhd_pno_params_t *_params = NULL; + dhd_pno_status_info_t *_pno_state = NULL; + wl_pfn_lscanresults_t *plbestnet = NULL; + wl_pfn_lnet_info_t *plnetinfo; + dhd_pno_bestnet_entry_t *pbestnet_entry; + dhd_pno_best_header_t *pbestnetheader = NULL; + dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; + bool allocate_header = FALSE; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_unlock; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = 
PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit_no_unlock; + } +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_GSCAN_MODE))) { +#else + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { +#endif /* GSCAN_SUPPORT */ + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + goto exit_no_unlock; + } + mutex_lock(&_pno_state->pno_mutex); + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (buf && bufsize) { + if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) { + /* need to check whether we have cached data or not */ + DHD_PNO(("%s: have cached batching data in Driver\n", + __FUNCTION__)); + /* convert to results format */ + goto convert_format; + } else { + /* this is the first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + goto convert_format; + } + } + } + /* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */ + pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE); + if (pscan_results == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n")); + goto exit; + } + pscan_results->bestnetheader = NULL; + pscan_results->cnt_header = 0; + /* add the element into the list while total node cnt is less than MAX_NODE_CNT */ + if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) { + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + _params->params_batch.get_batch.top_node_cnt++; + } else { + int _removed_scan_cnt; + /* remove oldest one and add new one */ + DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__)); + _removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd, + &_params->params_batch.get_batch.scan_results_list, TRUE); + _params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt; + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + + } + plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + NULL_CHECK(plbestnet, "failed to allocate buffer for bestnet", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + memset(plbestnet, 0, PNO_BESTNET_LEN); + while (plbestnet->status != PFN_COMPLETE) { + memset(plbestnet, 0, PNO_BESTNET_LEN); + err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + if (err < 0) { + if (err == BCME_EPERM) { + DHD_ERROR(("we cannot get the batching data " + "during scanning in firmware, try again\n")); + msleep(500); + continue; + } else { + DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, + plbestnet->status, plbestnet->count)); + if (plbestnet->version != PFN_SCANRESULT_VERSION) { + err = BCME_VERSION; + DHD_ERROR(("bestnet version(%d) does not match Driver version(%d)\n", + 
plbestnet->version, PFN_SCANRESULT_VERSION)); + goto exit; + } + plnetinfo = plbestnet->netinfo; + for (i = 0; i < plbestnet->count; i++) { + pbestnet_entry = (dhd_pno_bestnet_entry_t *) + MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); + if (pbestnet_entry == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); + pbestnet_entry->recorded_time = jiffies; /* record the current time */ + /* create header for the first entry */ + allocate_header = (i == 0)? TRUE : FALSE; + /* check whether the new generation is started or not */ + if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp) + > TIME_MIN_DIFF)) + allocate_header = TRUE; + timestamp = plnetinfo->timestamp; + if (allocate_header) { + pbestnetheader = (dhd_pno_best_header_t *) + MALLOC(dhd->osh, BEST_HEADER_SIZE); + if (pbestnetheader == NULL) { + err = BCME_NOMEM; + if (pbestnet_entry) + MFREE(dhd->osh, pbestnet_entry, + BESTNET_ENTRY_SIZE); + DHD_ERROR(("failed to allocate dhd_pno_best_header_t\n")); + goto exit; + } + /* increase total cnt of bestnet header */ + pscan_results->cnt_header++; + /* need to record the reason to call dhd_pno_get_for_batch */ + if (reason) + pbestnetheader->reason = (ENABLE << reason); + memset(pbestnetheader, 0, BEST_HEADER_SIZE); + /* initialize the head of linked list */ + INIT_LIST_HEAD(&(pbestnetheader->entry_list)); + /* link the pbestnet header into the existing list */ + if (pscan_results->bestnetheader == NULL) + /* first header in the list */ + pscan_results->bestnetheader = pbestnetheader; + else { + dhd_pno_best_header_t *head = pscan_results->bestnetheader; + pscan_results->bestnetheader = pbestnetheader; + pbestnetheader->next = head; + } + } + /* fill in the best network info */ + pbestnet_entry->channel = plnetinfo->pfnsubnet.channel; + pbestnet_entry->RSSI = plnetinfo->RSSI; + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + /* if RSSI is positive value, we assume that + * this scan is aborted by other scan + */ + DHD_PNO(("This scan is aborted\n")); + pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); + } + pbestnet_entry->rtt0 = plnetinfo->rtt0; + pbestnet_entry->rtt1 = plnetinfo->rtt1; + pbestnet_entry->timestamp = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len; + memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID, + pbestnet_entry->SSID_len); + memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + /* add the element into list */ + list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); + /* increase best entry count */ + pbestnetheader->tot_cnt++; + pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; + DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); + DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + plnetinfo->pfnsubnet.BSSID.octet[0], + plnetinfo->pfnsubnet.BSSID.octet[1], + plnetinfo->pfnsubnet.BSSID.octet[2], + plnetinfo->pfnsubnet.BSSID.octet[3], + plnetinfo->pfnsubnet.BSSID.octet[4], + plnetinfo->pfnsubnet.BSSID.octet[5])); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, 
plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1)); + plnetinfo++; + } + } + if (pscan_results->cnt_header == 0) { + /* In case we didn't get any data from the firmware, + * remove the current scan_result list from get_batch.scan_results_list. + */ + DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n")); + list_del(&pscan_results->list); + MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); + _params->params_batch.get_batch.top_node_cnt--; + } + /* increase total scan count using current scan count */ + _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; + + if (buf && bufsize) { + /* This is the first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + /* reset global values after moving to expired list */ + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + } +convert_format: + err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize); + if (err < 0) { + DHD_ERROR(("failed to convert the data into upper layer format\n")); + goto exit; + } + } +exit: + if (plbestnet) + MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); + if (_params) { + _params->params_batch.get_batch.buf = NULL; + _params->params_batch.get_batch.bufsize = 0; + _params->params_batch.get_batch.bytes_written = err; + } + mutex_unlock(&_pno_state->pno_mutex); +exit_no_unlock: + if (waitqueue_active(&_pno_state->get_batch_done.wait)) + complete(&_pno_state->get_batch_done); + return err; +} +static void +_dhd_pno_get_batch_handler(struct work_struct *work) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pub_t *dhd; + struct dhd_pno_batch_params *params_batch; + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = container_of(work, struct dhd_pno_status_info, work); + dhd = _pno_state->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _dhd_pno_get_gscan_batch_from_fw(dhd); + return; + } else +#endif /* GSCAN_SUPPORT */ + { + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + + _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, + params_batch->get_batch.bufsize, params_batch->get_batch.reason); + } + +} + +int +dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + char *pbuf = buf; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_batch_params *params_batch; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + 
gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + gscan_params->reason = reason; + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(gscan_params), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } else +#endif + { + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + memset(pbuf, 0, bufsize); + pbuf += sprintf(pbuf, "scancount=%d\n", 0); + sprintf(pbuf, "%s", RESULTS_END_MARKER); + err = strlen(buf); + goto exit; + } + params_batch->get_batch.buf = buf; + params_batch->get_batch.bufsize = bufsize; + params_batch->get_batch.reason = reason; + params_batch->get_batch.bytes_written = 0; + schedule_work(&_pno_state->work); + wait_for_completion(&_pno_state->get_batch_done); + } + +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#endif + err = params_batch->get_batch.bytes_written; +exit: + return err; +} + +int +dhd_pno_stop_for_batch(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode = 0; + int i = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("Gscan is ongoing, nothing to stop here\n")); + return err; + } +#endif + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { + mode = _pno_state->pno_mode; + dhd_pno_clean(dhd); + _pno_state->pno_mode = mode; + /* restart Legacy PNO if the Legacy PNO is on */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, + _params_legacy->scan_fr, _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, + _params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + i = 0; + /* convert 
dhd_pno_bssid to wl_pfn_bssid */ + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid[i].flags = iter->flags; + i++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + kfree(p_ssid_list); + kfree(p_pfn_bssid); + return err; +} + +int +dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + int err = BCME_OK; + int i; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0; + int tot_nchan = 0; + int mode = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + struct dhd_pno_bssid *_pno_bssid; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(hotlist_params, "hotlist_params is NULL", err); + NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } + _params->params_batch.nchan = hotlist_params->nchan; + _params->params_batch.scan_fr = hotlist_params->scan_fr; + if (hotlist_params->nchan) + memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list, + sizeof(_params->params_hotlist.chan_list)); + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan; + if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_hotlist.chan_list[hotlist_params->nchan], + &rem_nchan, hotlist_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, hotlist_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_hotlist.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan, + sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + int i; + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", _params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif + if (_params->params_hotlist.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, 
_params->params_hotlist.chan_list, + sizeof(_chan_list)); + tot_nchan = _params->params_hotlist.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is in progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset of the channel lists of the two modes */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && + _params->params_hotlist.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_hotlist.chan_list[0], + _params->params_hotlist.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list " + "between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + + } + + INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list)); + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) { + DHD_ERROR(("%s : failed to call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + for (i = 0; i < hotlist_params->nbssid; i++) { + _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); + NULL_CHECK(_pno_bssid, "_pno_bssid is NULL", err); + memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN); + _pno_bssid->flags = p_pfn_bssid[i].flags; + list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list); + } + _params->params_hotlist.nbssid = hotlist_params->nbssid; + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + return err; +} + +int +dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + DHD_ERROR(("%s : Hotlist MODE is not enabled\n", + __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno mode */ + 
_pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + /* restart Legacy PNO Scan */ + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, + _params_legacy->scan_fr, _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, + _params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* restart Batching Scan */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + kfree(p_ssid_list); + return err; +} + +#ifdef GSCAN_SUPPORT +int +dhd_retreive_batch_scan_results(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_batch_params *params_batch; + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) { + DHD_PNO(("Retreive batch results\n")); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS; + schedule_work(&_pno_state->work); + } else { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval" + "already in progress, will skip\n", __FUNCTION__)); + err = BCME_ERROR; + } + + return err; +} + +/* Handle Significant WiFi Change (SWC) event from FW + * Send event to HAL when all results arrive from FW + */ +void * +dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + struct dhd_pno_swc_evt_param *params; + wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data; + wl_pfn_significant_net_t *change_array; + int i; + + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + params = &(gscan_params->param_significant); + + if (!results->total_count) { + *send_evt_bytes = 0; + return ptr; + } + + if (!params->results_rxed_so_far) { + if (!params->change_array) { + params->change_array = (wl_pfn_significant_net_t *) + kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count, + GFP_KERNEL); + + if (!params->change_array) { + DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__, + sizeof(wl_pfn_significant_net_t) * results->total_count)); + *send_evt_bytes = 0; + return ptr; + } + } else { + DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not complete!!")); + 
*send_evt_bytes = 0; + return ptr; + } + + } + + DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__, + results->pkt_count, results->total_count)); + + for (i = 0; i < results->pkt_count; i++) { + DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n", + results->list[i].BSSID.octet[0], + results->list[i].BSSID.octet[1], + results->list[i].BSSID.octet[2], + results->list[i].BSSID.octet[3], + results->list[i].BSSID.octet[4], + results->list[i].BSSID.octet[5])); + } + + change_array = ¶ms->change_array[params->results_rxed_so_far]; + memcpy(change_array, results->list, sizeof(wl_pfn_significant_net_t) * results->pkt_count); + params->results_rxed_so_far += results->pkt_count; + + if (params->results_rxed_so_far == results->total_count) { + params->results_rxed_so_far = 0; + *send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count; + /* Pack up change buffer to send up and reset + * results_rxed_so_far, after its done. + */ + ptr = (void *) params->change_array; + /* expecting the callee to free this mem chunk */ + params->change_array = NULL; + } + else { + *send_evt_bytes = 0; + } + + return ptr; +} + +void +dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) +{ + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + gscan_results_cache_t *iter, *tmp; + + if (!_pno_state) { + return; + } + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (type == HOTLIST_FOUND) { + iter = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = NULL; + } else { + iter = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = NULL; + } + + while (iter) { + tmp = iter->next; + kfree(iter); + iter = tmp; + } + + return; +} + +void * +dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size) +{ + wl_bss_info_t *bi = NULL; + wl_gscan_result_t *gscan_result; + wifi_gscan_result_t *result = NULL; + u32 bi_length = 0; + uint8 channel; + uint32 mem_needed; + + struct timespec ts; + + *size = 0; + + gscan_result = (wl_gscan_result_t *)data; + + if (!gscan_result) { + DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); + goto exit; + } + if (!gscan_result->bss_info) { + DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n")); + goto exit; + } + bi = &gscan_result->bss_info[0].info; + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(gscan_result->buflen) - + WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { + DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + if (bi->SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len)); + bi->SSID_len = DOT11_MAX_SSID_LEN; + } + + mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length; + result = kmalloc(mem_needed, GFP_KERNEL); + + if (!result) { + DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", + __FUNCTION__, mem_needed)); + goto exit; + } + + memcpy(result->ssid, bi->SSID, bi->SSID_len); + result->ssid[bi->SSID_len] = '\0'; + channel = wf_chspec_ctlchan(bi->chanspec); + result->channel = wf_channel2mhz(channel, + (channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) bi->RSSI; + result->rtt = 0; + result->rtt_sd = 0; + get_monotonic_boottime(&ts); + result->ts = (uint64) TIMESPEC_TO_US(ts); + result->beacon_period = dtoh16(bi->beacon_period); + result->capability = dtoh16(bi->capability); + result->ie_length = dtoh32(bi->ie_length); + memcpy(&result->macaddr, &bi->BSSID, ETHER_ADDR_LEN); + memcpy(result->ie_data, ((uint8 *)bi + bi->ie_offset), bi->ie_length); + *size = mem_needed; +exit: + return result; +} + +void * +dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_scanresults_t *results = (wl_pfn_scanresults_t *)event_data; + wifi_gscan_result_t *hotlist_found_array; + wl_pfn_net_info_t *plnetinfo; + gscan_results_cache_t *gscan_hotlist_cache; + int malloc_size = 0, i, total = 0; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!results->count) { + *send_evt_bytes = 0; + return ptr; + } + + malloc_size = sizeof(gscan_results_cache_t) + + ((results->count - 1) * sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL); + + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return ptr; + } + + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count)); + } + + gscan_hotlist_cache->tot_count = results->count; + gscan_hotlist_cache->tot_consumed = 0; + plnetinfo = results->netinfo; + + for (i = 0; i < results->count; i++, plnetinfo++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) plnetinfo->RSSI; + /* Info not available & not expected */ + hotlist_found_array->beacon_period = 0; + hotlist_found_array->capability = 0; + hotlist_found_array->ie_length = 0; + + hotlist_found_array->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + + memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid, + hotlist_found_array->macaddr.octet[0], + hotlist_found_array->macaddr.octet[1], + hotlist_found_array->macaddr.octet[2], + hotlist_found_array->macaddr.octet[3], + hotlist_found_array->macaddr.octet[4], + hotlist_found_array->macaddr.octet[5], + hotlist_found_array->rssi)); + } + + + if (results->status == PFN_COMPLETE) { + ptr = (void *) gscan_hotlist_cache; + while (gscan_hotlist_cache) { + total += gscan_hotlist_cache->tot_count; + gscan_hotlist_cache = gscan_hotlist_cache->next; + } + *send_evt_bytes = total * sizeof(wifi_gscan_result_t); + } + + return ptr; +} +#endif /* GSCAN_SUPPORT */ +int +dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + uint status, event_type, flags, datalen; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + datalen = ntoh32(event->datalen); + DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type)); + switch (event_type) { + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BSSID_NET_LOST: + /* TODO : need to implement event logic using generic netlink */ + break; + case WLC_E_PFN_BEST_BATCHING: +#ifndef GSCAN_SUPPORT + { + struct dhd_pno_batch_params *params_batch; + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (!waitqueue_active(&_pno_state->get_batch_done.wait)) { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__)); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + schedule_work(&_pno_state->work); + } else + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING" + "will skip this event\n", __FUNCTION__)); + break; + } +#else + break; +#endif /* !GSCAN_SUPPORT */ + default: + DHD_ERROR(("unknown event : %d\n", event_type)); + } +exit: + return err; +} + +int dhd_pno_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + UNUSED_PARAMETER(_dhd_pno_suspend); + if (dhd->pno_state) + goto exit; + dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t)); + NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err); + memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t)); + /* need to check whether 
current firmware supports batching and hotlist scan */ + _pno_state = PNO_GET_PNOSTATE(dhd); + _pno_state->wls_supported = TRUE; + _pno_state->dhd = dhd; + mutex_init(&_pno_state->pno_mutex); + INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler); + init_completion(&_pno_state->get_batch_done); +#ifdef GSCAN_SUPPORT + init_waitqueue_head(&_pno_state->batch_get_wait); +#endif /* GSCAN_SUPPORT */ + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0); + if (err == BCME_UNSUPPORTED) { + _pno_state->wls_supported = FALSE; + DHD_INFO(("Current firmware doesn't support" + " Android Location Service\n")); + } else { + DHD_ERROR(("%s: Supports Android Location Service\n", + __FUNCTION__)); + } +exit: + return err; +} + +int dhd_pno_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + NULL_CHECK(_pno_state, "pno_state is NULL", err); + /* may need to free legacy ssid_list */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + /* clear resource if the BATCH MODE is on */ + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + } + cancel_work_sync(&_pno_state->work); + MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t)); + dhd->pno_state = NULL; + return err; +} +#endif /* PNO_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h new file mode 100644 index 000000000000..990ec6ce4ad1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pno.h @@ -0,0 +1,498 @@ +/* + * Header file of Broadcom Dongle Host Driver (DHD) + * Preferred Network Offload code and Wi-Fi Location Service (WLS) code. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pno.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef __DHD_PNO_H__ +#define __DHD_PNO_H__ + +#if defined(PNO_SUPPORT) +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBTYPE_LEGACY_PNO '2' +#define PNO_TLV_RESERVED '0' +#define PNO_BATCHING_SET "SET" +#define PNO_BATCHING_GET "GET" +#define PNO_BATCHING_STOP "STOP" +#define PNO_PARAMS_DELIMETER " " +#define PNO_PARAM_CHANNEL_DELIMETER "," +#define PNO_PARAM_VALUE_DELLIMETER '=' +#define PNO_PARAM_SCANFREQ "SCANFREQ" +#define PNO_PARAM_BESTN "BESTN" +#define PNO_PARAM_MSCAN "MSCAN" +#define PNO_PARAM_CHANNEL "CHANNEL" +#define PNO_PARAM_RTT "RTT" + +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' + +#define MAXNUM_SSID_PER_ADD 16 +#define MAXNUM_PNO_PARAMS 2 +#define PNO_TLV_COMMON_LENGTH 1 +#define DEFAULT_BATCH_MSCAN 16 + +#define RESULTS_END_MARKER "----\n" +#define SCAN_END_MARKER "####\n" +#define AP_END_MARKER "====\n" +#define PNO_RSSI_MARGIN_DBM 30 + +#ifdef GSCAN_SUPPORT + +#define GSCAN_MAX_CH_BUCKETS 8 +#define GSCAN_BG_BAND_MASK (1 << 0) +#define GSCAN_A_BAND_MASK (1 << 1) +#define GSCAN_DFS_MASK (1 << 2) +#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK) +#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK) + +#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0) +#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1) +#define GSCAN_FLUSH_SCAN_CFG (1 << 2) +#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \ + GSCAN_FLUSH_SIGNIFICANT_CFG | \ + GSCAN_FLUSH_HOTLIST_CFG) +/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */ +#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0 +#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1 +#define GSCAN_BATCH_NO_THR_SET 101 +#define GSCAN_LOST_AP_WINDOW_DEFAULT 4 +#define GSCAN_MIN_BSSID_TIMEOUT 90 +#define GSCAN_BATCH_GET_MAX_WAIT 500 +#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF +#define GSCAN_RETRY_THRESHOLD 3 +#endif /* GSCAN_SUPPORT */ + +enum scan_status { + /* SCAN ABORT by other scan */ + PNO_STATUS_ABORT, + /* RTT is presence or not */ + PNO_STATUS_RTT_PRESENCE, + /* Disable PNO by Driver */ + PNO_STATUS_DISABLE, + /* NORMAL BATCHING GET */ + PNO_STATUS_NORMAL, + /* WLC_E_PFN_BEST_BATCHING */ + PNO_STATUS_EVENT, + PNO_STATUS_MAX +}; +#define PNO_STATUS_ABORT_MASK 0x0001 +#define PNO_STATUS_RTT_MASK 0x0002 +#define PNO_STATUS_DISABLE_MASK 0x0004 +#define PNO_STATUS_OOM_MASK 0x0010 + +enum index_mode { + INDEX_OF_LEGACY_PARAMS, + INDEX_OF_BATCH_PARAMS, + INDEX_OF_HOTLIST_PARAMS, + /* GSCAN includes hotlist scan and they do not run + * independent of each other + */ +#ifdef GSCAN_SUPPORT + INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS, +#endif /* GSCAN_SUPPORT */ + INDEX_MODE_MAX +}; +enum dhd_pno_status { + DHD_PNO_DISABLED, + DHD_PNO_ENABLED, + DHD_PNO_SUSPEND +}; +typedef struct cmd_tlv { + char prefix; + char version; + char subtype; + char reserved; +} cmd_tlv_t; +#ifdef GSCAN_SUPPORT +typedef enum { + WIFI_BAND_UNSPECIFIED, + WIFI_BAND_BG = 1, /* 2.4 GHz */ + WIFI_BAND_A = 2, /* 5 GHz without DFS */ + WIFI_BAND_A_DFS = 4, /* 5 GHz DFS only */ + WIFI_BAND_A_WITH_DFS = 6, /* 5 GHz with DFS */ + WIFI_BAND_ABG = 3, /* 2.4 GHz + 5 GHz; no DFS */ + WIFI_BAND_ABG_WITH_DFS = 7, /* 2.4 GHz + 5 GHz with DFS */ +} gscan_wifi_band_t; + +typedef enum { + HOTLIST_LOST, + HOTLIST_FOUND +} hotlist_type_t; + +typedef enum dhd_pno_gscan_cmd_cfg { + DHD_PNO_BATCH_SCAN_CFG_ID, + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + DHD_PNO_SCAN_CFG_ID, + 
DHD_PNO_GET_CAPABILITIES, + DHD_PNO_GET_BATCH_RESULTS, + DHD_PNO_GET_CHANNEL_LIST +} dhd_pno_gscan_cmd_cfg_t; + +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)), + /* Wi-Fi Google Android SCAN Mode */ + DHD_PNO_GSCAN_MODE = (1 << (3)) +} dhd_pno_mode_t; +#else +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)) +} dhd_pno_mode_t; +#endif /* GSCAN_SUPPORT */ +struct dhd_pno_ssid { + bool hidden; + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; + struct list_head list; +}; +struct dhd_pno_bssid { + struct ether_addr macaddr; + /* Bit4: suppress_lost, Bit3: suppress_found */ + uint16 flags; + struct list_head list; +}; +typedef struct dhd_pno_bestnet_entry { + struct ether_addr BSSID; + uint8 SSID_len; + uint8 SSID[DOT11_MAX_SSID_LEN]; + int8 RSSI; + uint8 channel; + uint32 timestamp; + uint16 rtt0; /* distance_cm based on RTT */ + uint16 rtt1; /* distance_cm based on sample standard deviation */ + unsigned long recorded_time; + struct list_head list; +} dhd_pno_bestnet_entry_t; +#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t)) + +typedef struct dhd_pno_bestnet_header { + struct dhd_pno_bestnet_header *next; + uint8 reason; + uint32 tot_cnt; + uint32 tot_size; + struct list_head entry_list; +} dhd_pno_best_header_t; +#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t)) + +typedef struct dhd_pno_scan_results { + dhd_pno_best_header_t *bestnetheader; + uint8 cnt_header; + struct list_head list; +} dhd_pno_scan_results_t; +#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t)) + +struct dhd_pno_get_batch_info { + /* info related to get batch */ + char *buf; + bool batch_started; + uint32 tot_scan_cnt; + uint32 expired_tot_scan_cnt; + uint32 top_node_cnt; + uint32 bufsize; + uint32 bytes_written; + int reason; + struct list_head scan_results_list; + struct list_head expired_scan_results_list; +}; +struct dhd_pno_legacy_params { + uint16 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + int pno_repeat; + int pno_freq_expo_max; + int nssid; + struct list_head ssid_list; +}; +struct dhd_pno_batch_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 band; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 rtt; + struct dhd_pno_get_batch_info get_batch; +}; +struct dhd_pno_hotlist_params { + uint8 band; + int32 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 nbssid; + struct list_head bssid_list; +}; +#ifdef GSCAN_SUPPORT +typedef struct dhd_pno_gscan_channel_bucket { + uint16 bucket_freq_multiple; + /* band = 1 All bg band channels, + * band = 2 All a band channels, + * band = 0 chan_list channels + */ + uint16 band; + uint8 report_flag; + uint8 num_channels; + uint16 chan_list[GSCAN_MAX_CH_BUCKETS]; +} dhd_pno_gscan_channel_bucket_t; + +typedef struct dhd_pno_swc_evt_param { + uint16 results_rxed_so_far; + wl_pfn_significant_net_t *change_array; +} dhd_pno_swc_evt_param_t; + +typedef struct wifi_gscan_result { + uint64 ts; /* Time of discovery */ + char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */ + struct ether_addr macaddr; /* BSSID */ + uint32 channel; /* channel frequency in MHz */ + 
int32 rssi; /* in db */ + uint64 rtt; /* in nanoseconds */ + uint64 rtt_sd; /* standard deviation in rtt */ + uint16 beacon_period; /* units are Kusec */ + uint16 capability; /* Capability information */ + uint32 ie_length; /* byte length of Information Elements */ + char ie_data[1]; /* IE data to follow */ +} wifi_gscan_result_t; + +typedef struct gscan_results_cache { + struct gscan_results_cache *next; + uint8 scan_id; + uint8 flag; + uint8 tot_count; + uint8 tot_consumed; + wifi_gscan_result_t results[1]; +} gscan_results_cache_t; + +typedef struct dhd_pno_gscan_capabilities { + int max_scan_cache_size; + int max_scan_buckets; + int max_ap_cache_per_scan; + int max_rssi_sample_size; + int max_scan_reporting_threshold; + int max_hotlist_aps; + int max_significant_wifi_change_aps; +} dhd_pno_gscan_capabilities_t; + +struct dhd_pno_gscan_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; + uint8 swc_nbssid_threshold; + uint8 swc_rssi_window_size; + uint8 lost_ap_window; + uint8 nchannel_buckets; + uint8 reason; + uint8 get_batch_flag; + uint8 send_all_results_flag; + uint16 max_ch_bucket_freq; + gscan_results_cache_t *gscan_batch_cache; + gscan_results_cache_t *gscan_hotlist_found; + gscan_results_cache_t *gscan_hotlist_lost; + uint16 nbssid_significant_change; + uint16 nbssid_hotlist; + struct dhd_pno_swc_evt_param param_significant; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; + struct list_head hotlist_bssid_list; + struct list_head significant_bssid_list; +}; + +typedef struct gscan_scan_params { + int32 scan_fr; + uint16 nchannel_buckets; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; +} gscan_scan_params_t; + +typedef struct gscan_batch_params { + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; +} gscan_batch_params_t; + +struct bssid_t { + struct ether_addr macaddr; + int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */ +}; + +typedef struct gscan_hotlist_scan_params { + uint16 lost_ap_window; /* number of scans to declare LOST */ + uint16 nbssid; /* number of bssids */ + struct bssid_t bssid[1]; /* n bssids to follow */ +} gscan_hotlist_scan_params_t; + +/* SWC (Significant WiFi Change) params */ +typedef struct gscan_swc_params { + /* Rssi averaging window size */ + uint8 rssi_window; + /* Number of scans that the AP has to be absent before + * being declared LOST + */ + uint8 lost_ap_window; + /* if x Aps have a significant change generate an event. 
*/ + uint8 swc_threshold; + uint8 nbssid; + wl_pfn_significant_bssid_t bssid_elem_list[1]; +} gscan_swc_params_t; + +typedef struct dhd_pno_significant_bssid { + struct ether_addr BSSID; + int8 rssi_low_threshold; + int8 rssi_high_threshold; + struct list_head list; +} dhd_pno_significant_bssid_t; +#endif /* GSCAN_SUPPORT */ +typedef union dhd_pno_params { + struct dhd_pno_legacy_params params_legacy; + struct dhd_pno_batch_params params_batch; + struct dhd_pno_hotlist_params params_hotlist; +#ifdef GSCAN_SUPPORT + struct dhd_pno_gscan_params params_gscan; +#endif /* GSCAN_SUPPORT */ +} dhd_pno_params_t; +typedef struct dhd_pno_status_info { + uint8 pno_oui[DOT11_OUI_LEN]; + dhd_pub_t *dhd; + struct work_struct work; + struct mutex pno_mutex; +#ifdef GSCAN_SUPPORT + wait_queue_head_t batch_get_wait; +#endif /* GSCAN_SUPPORT */ + struct completion get_batch_done; + bool wls_supported; /* wifi location service supported or not */ + enum dhd_pno_status pno_status; + enum dhd_pno_mode pno_mode; + dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX]; + struct list_head head_list; +} dhd_pno_status_info_t; + +/* wrapper functions */ +extern int +dhd_dev_pno_enable(struct net_device *dev, int enable); + +extern int +dhd_dev_pno_stop_for_ssid(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int +dhd_dev_pno_set_for_batch(struct net_device *dev, + struct dhd_pno_batch_params *batch_params); + +extern int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize); + +extern int +dhd_dev_pno_stop_for_batch(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); +extern int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui); +#ifdef GSCAN_SUPPORT +extern int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush); +extern void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +void dhd_dev_pno_lock_access_batch_results(struct net_device *dev); +void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev); +extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush); +extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time); +extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data, + int *send_evt_bytes); +int dhd_retreive_batch_scan_results(dhd_pub_t *dhd); +extern void * dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type); +void * dhd_dev_process_full_gscan_result(struct net_device *dev, + const void *data, int *send_evt_bytes); +extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev); +extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type); +extern void dhd_dev_wait_batch_results_complete(struct net_device *dev); +#endif /* GSCAN_SUPPORT */ +/* dhd pno fuctions */ +extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd); +extern int dhd_pno_enable(dhd_pub_t *dhd, int enable); +extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, 
struct dhd_pno_batch_params *batch_params); + +extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason); + + +extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd); + +extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); + +extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd); + +extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); +extern int dhd_pno_init(dhd_pub_t *dhd); +extern int dhd_pno_deinit(dhd_pub_t *dhd); +extern bool dhd_is_pno_supported(dhd_pub_t *dhd); +extern int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui); +#ifdef GSCAN_SUPPORT +extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush); +extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +extern void dhd_pno_lock_batch_results(dhd_pub_t *dhd); +extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd); +extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush); +extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag); +extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf); +extern int dhd_dev_retrieve_batch_scan(struct net_device *dev); +extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes); +extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type); +extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes); +extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd); +extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type); +extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd); +#endif /* GSCAN_SUPPORT */ +#endif + +#endif /* __DHD_PNO_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h new file mode 100644 index 000000000000..6dcb56328140 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_proto.h @@ -0,0 +1,169 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_proto.h 604483 2015-12-07 14:47:36Z $ + */ + +#ifndef _dhd_proto_h_ +#define _dhd_proto_h_ + +#include +#include +#ifdef BCMPCIE +#include +#endif + +#define DEFAULT_IOCTL_RESP_TIMEOUT 2000 +#ifndef IOCTL_RESP_TIMEOUT +/* In milli second default value for Production FW */ +#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT +#endif /* IOCTL_RESP_TIMEOUT */ + +#ifndef MFG_IOCTL_RESP_TIMEOUT +#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ +#endif /* MFG_IOCTL_RESP_TIMEOUT */ + +#define DEFAULT_D3_ACK_RESP_TIMEOUT 4000 +#ifndef D3_ACK_RESP_TIMEOUT +#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT +#endif /* D3_ACK_RESP_TIMEOUT */ + +#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000) +#ifndef DHD_BUS_BUSY_TIMEOUT +#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT +#endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */ + +#define IOCTL_DISABLE_TIMEOUT 0 +/* + * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) + */ + +/* Linkage, sets prot link and updates hdrlen in pub */ +extern int dhd_prot_attach(dhd_pub_t *dhdp); + +/* Initilizes the index block for dma'ing indices */ +extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz, + uint8 type, uint32 length); + +/* Unlink, frees allocated protocol memory (including dhd_prot) */ +extern void dhd_prot_detach(dhd_pub_t *dhdp); + +/* Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +extern int dhd_sync_with_dongle(dhd_pub_t *dhdp); + +/* Protocol initialization needed for IOCTL/IOVAR path */ +extern int dhd_prot_init(dhd_pub_t *dhd); + +/* Stop protocol: sync w/dongle state. */ +extern void dhd_prot_stop(dhd_pub_t *dhdp); + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp); +extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp); + +/* Remove any protocol-specific data header. 
*/ +extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len); + +/* Use protocol to issue ioctl to dongle */ +extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len); + +/* Handles a protocol control response asynchronously */ +extern int dhd_prot_ctl_complete(dhd_pub_t *dhd); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add prot dump output to a buffer */ +extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Update local copy of dongle statistics */ +extern void dhd_prot_dstats(dhd_pub_t *dhdp); + +extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen); + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count); + +#ifdef BCMPCIE +extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound); +extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound); +extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd); +extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd); +extern int dhd_post_dummy_msg(dhd_pub_t *dhd); +extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len); +extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); +extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); +extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay); + +extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma); +extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, + uint16 flowid, void *msgbuf_ring); +extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex); +extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b); +extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val); +extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd); +extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx); +extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx); +extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt); +extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf); +extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info); +extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock); +extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); +extern void dhd_prot_reset(dhd_pub_t *dhd); +#ifdef DHD_LB +extern void dhd_lb_tx_compl_handler(unsigned long data); +extern void dhd_lb_rx_compl_handler(unsigned long data); +extern void dhd_lb_rx_process_handler(unsigned long data); +#endif /* DHD_LB */ +void dhd_prot_collect_memdump(dhd_pub_t *dhd); +#endif /* BCMPCIE */ +/******************************** + * For version-string expansion * + */ +#if defined(BDC) +#define DHD_PROTOCOL "bdc" +#elif defined(CDC) +#define DHD_PROTOCOL "cdc" +#else +#define DHD_PROTOCOL "unknown" +#endif /* proto */ + 
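+/* + * DHD_PROTOCOL is a plain string literal ("bdc", "cdc" or "unknown") selected at + * compile time; it is intended for version-string expansion, e.g. spliced into a + * banner by string-literal concatenation (illustrative only): "dhd-" DHD_PROTOCOL "-driver". + */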
+#endif /* _dhd_proto_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.c b/drivers/net/wireless/bcmdhd/dhd_rtt.c new file mode 100644 index 000000000000..cc0ebb2ecd2d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_rtt.c @@ -0,0 +1,731 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_rtt.c 606280 2015-12-15 05:28:25Z $ + */ +#ifdef RTT_SUPPORT +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) +static DEFINE_SPINLOCK(noti_list_lock); +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) + +#define RTT_TWO_SIDED(capability) \ + do { \ + if ((capability & RTT_CAP_ONE_WAY) == (uint8) (RTT_CAP_ONE_WAY)) \ + return FALSE; \ + else \ + return TRUE; \ + } while (0) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) +struct rtt_noti_callback { + struct list_head list; + void *ctx; + dhd_rtt_compl_noti_fn noti_fn; +}; + +typedef struct rtt_status_info { + dhd_pub_t *dhd; + int8 status; /* current status for the current entry */ + int8 cur_idx; /* current entry to do RTT */ + int32 capability; /* rtt capability */ + struct mutex rtt_mutex; + rtt_config_params_t rtt_config; + struct work_struct work; + struct list_head noti_fn_list; + struct list_head rtt_results_cache; /* store results for RTT */ +} rtt_status_info_t; + +static int dhd_rtt_start(dhd_pub_t *dhd); + +chanspec_t +dhd_rtt_convert_to_chspec(wifi_channel_info_t channel) +{ + int bw; + /* set witdh to 20MHZ for 2.4G HZ */ + if (channel.center_freq >= 2400 && channel.center_freq <= 2500) { + channel.width = WIFI_CHAN_WIDTH_20; + } + switch (channel.width) { + case WIFI_CHAN_WIDTH_20: + bw = WL_CHANSPEC_BW_20; + break; + case WIFI_CHAN_WIDTH_40: + bw = WL_CHANSPEC_BW_40; + break; + case WIFI_CHAN_WIDTH_80: + bw = WL_CHANSPEC_BW_80; + break; + case WIFI_CHAN_WIDTH_160: + bw = WL_CHANSPEC_BW_160; + break; + default: + DHD_ERROR(("doesn't support this bandwith : %d", channel.width)); + bw = -1; + break; + } + return wf_channel2chspec(wf_mhz2channel(channel.center_freq, 0), bw); +} + +int +dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params) +{ 
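+	/* Cache the caller-supplied RTT configuration, mark the session as started, + * skip over any disabled targets and, if an enabled target remains, schedule + * the work item that kicks off the measurement (dhd_rtt_work -> dhd_rtt_start). + */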
+ int err = BCME_OK; + int idx; + rtt_status_info_t *rtt_status; + NULL_CHECK(params, "params is NULL", err); + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->capability == RTT_CAP_NONE) { + DHD_ERROR(("doesn't support RTT \n")); + return BCME_ERROR; + } + if (rtt_status->status == RTT_STARTED) { + DHD_ERROR(("rtt is already started\n")); + return BCME_BUSY; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + bcopy(params, &rtt_status->rtt_config, sizeof(rtt_config_params_t)); + rtt_status->status = RTT_STARTED; + /* start to measure RTT from 1th device */ + /* find next target to trigger RTT */ + for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx)); + schedule_work(&rtt_status->work); + } + return err; +} + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) +{ + int err = BCME_OK; + int i = 0, j = 0; + rtt_status_info_t *rtt_status; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->status == RTT_STOPPED) { + DHD_ERROR(("rtt is not started\n")); + return BCME_OK; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + mutex_lock(&rtt_status->rtt_mutex); + for (i = 0; i < mac_cnt; i++) { + for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) { + if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr, + ETHER_ADDR_LEN)) { + rtt_status->rtt_config.target_info[j].disable = TRUE; + } + } + } + mutex_unlock(&rtt_status->rtt_mutex); + return err; +} + +static int +dhd_rtt_start(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mpc = 0; + int nss, mcs, bw; + uint32 rspec = 0; + int8 eabuf[ETHER_ADDR_STR_LEN]; + int8 chanbuf[CHANSPEC_STR_LEN]; + bool set_mpc = FALSE; + wl_proxd_iovar_t proxd_iovar; + wl_proxd_params_iovar_t proxd_params; + wl_proxd_params_iovar_t proxd_tune; + wl_proxd_params_tof_method_t *tof_params = &proxd_params.u.tof_params; + rtt_status_info_t *rtt_status; + rtt_target_info_t *rtt_target; + NULL_CHECK(dhd, "dhd is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + /* turn off mpc in case of non-associted */ + if (!dhd_is_associated(dhd, 0, NULL)) { + err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_tune\n", __FUNCTION__)); + goto exit; + } + set_mpc = TRUE; + } + + if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { + err = BCME_RANGE; + goto exit; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + bzero(&proxd_tune, sizeof(proxd_tune)); + bzero(&proxd_params, sizeof(proxd_params)); + mutex_lock(&rtt_status->rtt_mutex); + /* Get a target information */ + rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + mutex_unlock(&rtt_status->rtt_mutex); + /* set role */ + proxd_iovar.method = PROXD_TOF_METHOD; + proxd_iovar.mode = WL_PROXD_MODE_INITIATOR; + + /* make sure that proxd is stop */ + /* dhd_iovar(dhd, 0, "proxd_stop", (char *)NULL, 0, 1); */ + + err = dhd_iovar(dhd, 0, "proxd", (char *)&proxd_iovar, sizeof(proxd_iovar), 1); + if (err < 0 && err != BCME_BUSY) { + 
DHD_ERROR(("%s : failed to set proxd %d\n", __FUNCTION__, err)); + goto exit; + } + if (err == BCME_BUSY) { + DHD_RTT(("BCME_BUSY occurred\n")); + } + /* mac address */ + bcopy(&rtt_target->addr, &tof_params->tgt_mac, ETHER_ADDR_LEN); + /* frame count */ + if (rtt_target->ftm_cnt > RTT_MAX_FRAME_CNT) { + rtt_target->ftm_cnt = RTT_MAX_FRAME_CNT; + } + + if (rtt_target->ftm_cnt) { + tof_params->ftm_cnt = htol16(rtt_target->ftm_cnt); + } else { + tof_params->ftm_cnt = htol16(DEFAULT_FTM_CNT); + } + + if (rtt_target->retry_cnt > RTT_MAX_RETRY_CNT) { + rtt_target->retry_cnt = RTT_MAX_RETRY_CNT; + } + + /* retry count */ + if (rtt_target->retry_cnt) { + tof_params->retry_cnt = htol16(rtt_target->retry_cnt); + } else { + tof_params->retry_cnt = htol16(DEFAULT_RETRY_CNT); + } + + /* chanspec */ + tof_params->chanspec = htol16(rtt_target->chanspec); + /* set parameter */ + DHD_RTT(("Target addr(Idx %d) %s, Channel : %s for RTT (ftm_cnt %d, rety_cnt : %d)\n", + rtt_status->cur_idx, + bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf), + wf_chspec_ntoa(rtt_target->chanspec, chanbuf), rtt_target->ftm_cnt, + rtt_target->retry_cnt)); + + if (rtt_target->type == RTT_ONE_WAY) { + proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_ONEWAY); + /* report RTT results for initiator */ + proxd_tune.u.tof_tune.flags |= htol32(WL_PROXD_FLAG_INITIATOR_RPTRTT); + proxd_tune.u.tof_tune.vhtack = 0; + tof_params->tx_rate = htol16(WL_RATE_6M); + tof_params->vht_rate = htol16((WL_RATE_6M >> 16)); + } else { /* RTT TWO WAY */ + /* initiator will send the rtt result to the target */ + proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_INITIATOR_REPORT); + tof_params->timeout = 10; /* 10ms for timeout */ + rspec = WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ + nss = 1; /* default Nss = 1 */ + mcs = 0; /* default MCS 0 */ + rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + bw = 0; + switch (CHSPEC_BW(rtt_target->chanspec)) { + case WL_CHANSPEC_BW_20: + bw = WL_RSPEC_BW_20MHZ; + break; + case WL_CHANSPEC_BW_40: + bw = WL_RSPEC_BW_40MHZ; + break; + case WL_CHANSPEC_BW_80: + bw = WL_RSPEC_BW_80MHZ; + break; + case WL_CHANSPEC_BW_160: + bw = WL_RSPEC_BW_160MHZ; + break; + default: + DHD_ERROR(("CHSPEC_BW not supported : %d", + CHSPEC_BW(rtt_target->chanspec))); + goto exit; + } + rspec |= bw; + tof_params->tx_rate = htol16(rspec & 0xffff); + tof_params->vht_rate = htol16(rspec >> 16); + } + + /* Set Method to TOF */ + proxd_tune.method = PROXD_TOF_METHOD; + err = dhd_iovar(dhd, 0, "proxd_tune", (char *)&proxd_tune, sizeof(proxd_tune), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_tune %d\n", __FUNCTION__, err)); + goto exit; + } + + /* Set Method to TOF */ + proxd_params.method = PROXD_TOF_METHOD; + err = dhd_iovar(dhd, 0, "proxd_params", (char *)&proxd_params, sizeof(proxd_params), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_params %d\n", __FUNCTION__, err)); + goto exit; + } + err = dhd_iovar(dhd, 0, "proxd_find", (char *)NULL, 0, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_find %d\n", __FUNCTION__, err)); + goto exit; + } +exit: + if (err < 0) { + rtt_status->status = RTT_STOPPED; + if (set_mpc) { + /* enable mpc again in case of error */ + mpc = 1; + err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); + } + } + return err; +} + +int +dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + 
NULL_CHECK(noti_fn, "noti_fn is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + spin_lock_bh(&noti_list_lock); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + if (iter->noti_fn == noti_fn) { + goto exit; + } + } + cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC); + if (!cb) { + err = -ENOMEM; + goto exit; + } + cb->noti_fn = noti_fn; + cb->ctx = ctx; + list_add(&cb->list, &rtt_status->noti_fn_list); +exit: + spin_unlock_bh(&noti_list_lock); + return err; +} + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(noti_fn, "noti_fn is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + spin_lock_bh(&noti_list_lock); + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + if (iter->noti_fn == noti_fn) { + cb = iter; + list_del(&cb->list); + break; + } + } + spin_unlock_bh(&noti_list_lock); + if (cb) { + kfree(cb); + } + return err; +} + +static int +dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* evp) +{ + int err = BCME_OK; + int i; + char eabuf[ETHER_ADDR_STR_LEN]; + char diststr[40]; + struct timespec ts; + NULL_CHECK(rtt_results, "rtt_results is NULL", err); + NULL_CHECK(evp, "evp is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + rtt_results->distance = ntoh32(evp->distance); + rtt_results->sdrtt = ntoh32(evp->sdrtt); + rtt_results->ftm_cnt = ntoh16(evp->ftm_cnt); + rtt_results->avg_rssi = ntoh16(evp->avg_rssi); + rtt_results->validfrmcnt = ntoh16(evp->validfrmcnt); + rtt_results->meanrtt = ntoh32(evp->meanrtt); + rtt_results->modertt = ntoh32(evp->modertt); + rtt_results->medianrtt = ntoh32(evp->medianrtt); + rtt_results->err_code = evp->err_code; + rtt_results->tx_rate.preamble = (evp->OFDM_frame_type == TOF_FRAME_RATE_VHT)? 3 : 0; + rtt_results->tx_rate.nss = 0; /* 1 x 1 */ + rtt_results->tx_rate.bw = + (evp->bandwidth == TOF_BW_80MHZ)? 2 : (evp->bandwidth == TOF_BW_40MHZ)?
1 : 0; + rtt_results->TOF_type = evp->TOF_type; + if (evp->TOF_type == TOF_TYPE_ONE_WAY) { + /* convert to 100kbps unit */ + rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; + rtt_results->tx_rate.rateMcsIdx = WL_RATE_6M; + } else { + rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; + rtt_results->tx_rate.rateMcsIdx = 0; /* MCS 0 */ + } + memset(diststr, 0, sizeof(diststr)); + if (rtt_results->distance == 0xffffffff || rtt_results->distance == 0) { + sprintf(diststr, "distance=-1m\n"); + } else { + sprintf(diststr, "distance=%d.%d m\n", + rtt_results->distance >> 4, ((rtt_results->distance & 0xf) * 125) >> 1); + } + + if (ntoh32(evp->mode) == WL_PROXD_MODE_INITIATOR) { + DHD_RTT(("Target:(%s) %s;\n", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); + DHD_RTT(("RTT : mean %d mode %d median %d\n", rtt_results->meanrtt, + rtt_results->modertt, rtt_results->medianrtt)); + } else { + DHD_RTT(("Initiator:(%s) %s; ", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); + } + if (rtt_results->sdrtt > 0) { + DHD_RTT(("sigma:%d.%d\n", rtt_results->sdrtt/10, rtt_results->sdrtt % 10)); + } else { + DHD_RTT(("sigma:0\n")); + } + + DHD_RTT(("rssi:%d validfrmcnt %d, err_code : %d\n", rtt_results->avg_rssi, + rtt_results->validfrmcnt, evp->err_code)); + + switch (evp->err_code) { + case TOF_REASON_OK: + rtt_results->err_code = RTT_REASON_SUCCESS; + break; + case TOF_REASON_TIMEOUT: + rtt_results->err_code = RTT_REASON_TIMEOUT; + break; + case TOF_REASON_NOACK: + rtt_results->err_code = RTT_REASON_NO_RSP; + break; + case TOF_REASON_ABORT: + rtt_results->err_code = RTT_REASON_ABORT; + break; + default: + rtt_results->err_code = RTT_REASON_FAILURE; + break; + } + rtt_results->peer_mac = evp->peer_mac; + /* get the time elapsed from boot time */ + get_monotonic_boottime(&ts); + rtt_results->ts = (uint64) TIMESPEC_TO_US(ts); + + for (i = 0; i < rtt_results->ftm_cnt; i++) { + rtt_results->ftm_buff[i].value = ltoh32(evp->ftm_buff[i].value); + rtt_results->ftm_buff[i].rssi = ltoh32(evp->ftm_buff[i].rssi); + } + return err; +} + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + int len = 0; + int idx; + uint status, event_type, flags, reason, ftm_cnt; + rtt_status_info_t *rtt_status; + wl_proxd_event_data_t* evp; + struct rtt_noti_callback *iter; + rtt_result_t *rtt_result, *entry, *next; + gfp_t kflags; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + event_type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + reason = ntoh32_ua((void *)&event->reason); + + if (event_type != WLC_E_PROXD) { + goto exit; + } + kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL; + evp = (wl_proxd_event_data_t*)event_data; + DHD_RTT(("%s enter : mode: %s, reason :%d \n", __FUNCTION__, + (ntoh16(evp->mode) == WL_PROXD_MODE_INITIATOR)? 
+ "initiator":"target", reason)); + switch (reason) { + case WLC_E_PROXD_STOP: + DHD_RTT(("WLC_E_PROXD_STOP\n")); + break; + case WLC_E_PROXD_ERROR: + case WLC_E_PROXD_COMPLETED: + if (reason == WLC_E_PROXD_ERROR) { + DHD_RTT(("WLC_E_PROXD_ERROR\n")); + } else { + DHD_RTT(("WLC_E_PROXD_COMPLETED\n")); + } + + if (!in_atomic()) { + mutex_lock(&rtt_status->rtt_mutex); + } + ftm_cnt = ntoh16(evp->ftm_cnt); + + if (ftm_cnt > 0) { + len = OFFSETOF(rtt_result_t, ftm_buff); + } else { + len = sizeof(rtt_result_t); + } + /* check whether the results is already reported or not */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + if (!memcmp(&entry->peer_mac, &evp->peer_mac, ETHER_ADDR_LEN)) { + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + goto exit; + } + } + rtt_result = kzalloc(len + sizeof(ftm_sample_t) * ftm_cnt, kflags); + if (!rtt_result) { + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + err = -ENOMEM; + goto exit; + } + /* point to target_info in status struct and increase pointer */ + rtt_result->target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + /* find next target to trigger RTT */ + for (idx = (rtt_status->cur_idx + 1); + idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + /* convert the event results to host format */ + dhd_rtt_convert_to_host(rtt_result, evp); + list_add_tail(&rtt_result->list, &rtt_status->rtt_results_cache); + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + /* restart to measure RTT from next device */ + schedule_work(&rtt_status->work); + } else { + DHD_RTT(("RTT_STOPPED\n")); + rtt_status->status = RTT_STOPPED; + /* to turn on mpc mode */ + schedule_work(&rtt_status->work); + /* notify the completed information to others */ + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* remove the rtt results in cache */ + list_for_each_entry_safe(rtt_result, next, + &rtt_status->rtt_results_cache, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + /* reinit the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + bzero(&rtt_status->rtt_config, sizeof(rtt_status->rtt_config)); + rtt_status->cur_idx = 0; + } + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + + break; + case WLC_E_PROXD_GONE: + DHD_RTT(("WLC_E_PROXD_GONE\n")); + break; + case WLC_E_PROXD_START: + /* event for targets / accesspoints */ + DHD_RTT(("WLC_E_PROXD_START\n")); + break; + case WLC_E_PROXD_COLLECT_START: + DHD_RTT(("WLC_E_PROXD_COLLECT_START\n")); + break; + case WLC_E_PROXD_COLLECT_STOP: + DHD_RTT(("WLC_E_PROXD_COLLECT_STOP\n")); + break; + case WLC_E_PROXD_COLLECT_COMPLETED: + DHD_RTT(("WLC_E_PROXD_COLLECT_COMPLETED\n")); + break; + case WLC_E_PROXD_COLLECT_ERROR: + DHD_RTT(("WLC_E_PROXD_COLLECT_ERROR; ")); + break; + default: + DHD_ERROR(("WLC_E_PROXD: supported EVENT reason code:%d\n", reason)); + break; + } + +exit: + return err; +} + +static void +dhd_rtt_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status; + dhd_pub_t *dhd; + rtt_status = container_of(work, rtt_status_info_t, work); + if (rtt_status == NULL) { + DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__)); + return; + } + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd 
is NULL\n", __FUNCTION__)); + return; + } + (void) dhd_rtt_start(dhd); +} + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) +{ + rtt_status_info_t *rtt_status; + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + NULL_CHECK(capa, "capa is NULL", err); + bzero(capa, sizeof(rtt_capabilities_t)); + + if (rtt_status->capability & RTT_CAP_ONE_WAY) { + capa->rtt_one_sided_supported = 1; + } + if (rtt_status->capability & RTT_CAP_11V_WAY) { + capa->rtt_11v_supported = 1; + } + if (rtt_status->capability & RTT_CAP_11MC_WAY) { + capa->rtt_ftm_supported = 1; + } + if (rtt_status->capability & RTT_CAP_VS_WAY) { + capa->rtt_vs_supported = 1; + } + + return err; +} + +int +dhd_rtt_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + if (dhd->rtt_state) { + goto exit; + } + dhd->rtt_state = MALLOC(dhd->osh, sizeof(rtt_status_info_t)); + if (dhd->rtt_state == NULL) { + DHD_ERROR(("failed to create rtt_state\n")); + goto exit; + } + bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); + rtt_status = GET_RTTSTATE(dhd); + rtt_status->dhd = dhd; + err = dhd_iovar(dhd, 0, "proxd_params", NULL, 0, 1); + if (err != BCME_UNSUPPORTED) { + rtt_status->capability |= RTT_CAP_ONE_WAY; + rtt_status->capability |= RTT_CAP_VS_WAY; + DHD_ERROR(("%s: Support RTT Service\n", __FUNCTION__)); + } + mutex_init(&rtt_status->rtt_mutex); + INIT_LIST_HEAD(&rtt_status->noti_fn_list); + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + INIT_WORK(&rtt_status->work, dhd_rtt_work); +exit: + return err; +} + +int +dhd_rtt_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + rtt_result_t *rtt_result, *next; + struct rtt_noti_callback *iter, *iter2; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + rtt_status->status = RTT_STOPPED; + /* clear evt callback list */ + if (!list_empty(&rtt_status->noti_fn_list)) { + list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + /* remove the rtt results */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + list_for_each_entry_safe(rtt_result, next, &rtt_status->rtt_results_cache, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + } + MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t)); + dhd->rtt_state = NULL; + return err; +} +#endif /* RTT_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.h b/drivers/net/wireless/bcmdhd/dhd_rtt.h new file mode 100644 index 000000000000..2fbb9c973cd3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_rtt.h @@ -0,0 +1,234 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the 
license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_rtt.h 558438 2015-05-22 06:05:11Z $ + */ +#ifndef __DHD_RTT_H__ +#define __DHD_RTT_H__ + +#include "dngl_stats.h" + +#define RTT_MAX_TARGET_CNT 10 +#define RTT_MAX_FRAME_CNT 25 +#define RTT_MAX_RETRY_CNT 10 +#define DEFAULT_FTM_CNT 6 +#define DEFAULT_RETRY_CNT 6 + + +/* DSSS, CCK and 802.11n rates in [500kbps] units */ +#define WL_MAXRATE 108 /* in 500kbps units */ +#define WL_RATE_1M 2 /* in 500kbps units */ +#define WL_RATE_2M 4 /* in 500kbps units */ +#define WL_RATE_5M5 11 /* in 500kbps units */ +#define WL_RATE_11M 22 /* in 500kbps units */ +#define WL_RATE_6M 12 /* in 500kbps units */ +#define WL_RATE_9M 18 /* in 500kbps units */ +#define WL_RATE_12M 24 /* in 500kbps units */ +#define WL_RATE_18M 36 /* in 500kbps units */ +#define WL_RATE_24M 48 /* in 500kbps units */ +#define WL_RATE_36M 72 /* in 500kbps units */ +#define WL_RATE_48M 96 /* in 500kbps units */ +#define WL_RATE_54M 108 /* in 500kbps units */ + + +enum rtt_role { + RTT_INITIATOR = 0, + RTT_TARGET = 1 +}; +enum rtt_status { + RTT_STOPPED = 0, + RTT_STARTED = 1 +}; +typedef int64_t wifi_timestamp; /* In microseconds (us) */ +typedef int64_t wifi_timespan; +typedef int wifi_rssi; + +typedef enum { + RTT_INVALID, + RTT_ONE_WAY, + RTT_TWO_WAY, + RTT_AUTO +} rtt_type_t; + +typedef enum { + RTT_PEER_STA, + RTT_PEER_AP, + RTT_PEER_P2P, + RTT_PEER_NAN, + RTT_PEER_INVALID +} rtt_peer_type_t; + +typedef enum rtt_reason { + RTT_REASON_SUCCESS, + RTT_REASON_FAILURE, + RTT_REASON_NO_RSP, + RTT_REASON_REJECTED, + RTT_REASON_NOT_SCHEDULED_YET, + RTT_REASON_TIMEOUT, + RTT_REASON_AP_ON_DIFF_CH, + RTT_REASON_AP_NO_CAP, + RTT_REASON_ABORT +} rtt_reason_t; + +typedef enum rtt_capability { + RTT_CAP_NONE = 0, + RTT_CAP_ONE_WAY = (1 << (0)), + RTT_CAP_11V_WAY = (1 << (1)), /* IEEE802.11v */ + RTT_CAP_11MC_WAY = (1 << (2)), /* IEEE802.11mc */ + RTT_CAP_VS_WAY = (1 << (3)) /* BRCM vendor specific */ +} rtt_capability_t; + +typedef struct wifi_channel_info { + wifi_channel_width_t width; + wifi_channel center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center freq (MHz) first segment */ + wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */ +} wifi_channel_info_t; + +typedef struct wifi_rate { + uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */ + uint32 nss :2; /* 0 : 1x1, 1: 2x2, 3: 3x3, 4: 4x4 */ + uint32 bw :3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */ + /* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb + * HT/VHT it would be mcs index + */ + uint32 rateMcsIdx :8; + uint32 reserved :16; /* reserved */ + uint32 bitrate; /* unit of 100 Kbps */ +} wifi_rate_t; + +typedef struct rtt_target_info { + struct ether_addr addr; + rtt_type_t type; /* rtt_type */ + rtt_peer_type_t peer; /* peer type */ + wifi_channel_info_t channel; /* channel information */ + chanspec_t chanspec; /* chanspec for channel */ + int8 continuous; /* 0 = single shot or 1 = continous raging */ + bool disable; /* disable for RTT measurement */ + uint32 interval; /* interval of RTT measurement (unit ms) when continuous = true */ + uint32 measure_cnt; /* 
total number of RTT measurement when continuous */ + uint32 ftm_cnt; /* num of packets in each RTT measurement */ + uint32 retry_cnt; /* num of retries if sampling fails */ +} rtt_target_info_t; + +typedef struct rtt_result { + struct list_head list; + uint16 ver; /* version */ + rtt_target_info_t *target_info; /* target info */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + wifi_rate_t tx_rate; /* tx rate */ + struct ether_addr peer_mac; /* (e.g for tgt:initiator's */ + int32 distance; /* dst to tgt, units (meter * 16) */ + uint32 meanrtt; /* mean delta */ + uint32 modertt; /* Mode delta */ + uint32 medianrtt; /* median RTT */ + uint32 sdrtt; /* Standard deviation of RTT */ + int16 avg_rssi; /* avg rssi across the ftm frames */ + int16 validfrmcnt; /* Firmware's valid frame counts */ + wifi_timestamp ts; /* the time elapsed from boot time when driver get this result */ + uint16 ftm_cnt; /* num of rtd measurments/length in the ftm buffer */ + ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */ +} rtt_result_t; + +typedef struct rtt_report { + struct ether_addr addr; + uint num_measurement; /* measurement number in case of continous raging */ + rtt_reason_t status; /* raging status */ + rtt_type_t type; /* rtt type */ + rtt_peer_type_t peer; /* peer type */ + wifi_channel_info_t channel; /* channel information */ + wifi_rssi rssi; /* avg rssi accroos the ftm frames */ + wifi_rssi rssi_spread; /* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */ + wifi_rate_t tx_rate; /* tx rate */ + wifi_timespan rtt; /* round trip time in nanoseconds */ + wifi_timespan rtt_sd; /* rtt standard deviation in nanoseconds */ + wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */ + int32 distance; /* distance in cm (optional) */ + int32 distance_sd; /* standard deviation in cm (optional) */ + int32 distance_spread; /* difference between max and min distance recorded (optional) */ + wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */ +} rtt_report_t; + +/* RTT Capabilities */ +typedef struct rtt_capabilities { + uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */ + uint8 rtt_11v_supported; /* if 11v rtt data collection is supported */ + uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */ + uint8 rtt_vs_supported; /* if vendor specific data collection supported */ +} rtt_capabilities_t; + +typedef struct rtt_config_params { + int8 rtt_target_cnt; + rtt_target_info_t target_info[RTT_MAX_TARGET_CNT]; +} rtt_config_params_t; + +typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data); +/* Linux wrapper to call common dhd_rtt_set_cfg */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf); + +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt); + +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, + dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa); + +/* export to upper layer */ +chanspec_t +dhd_rtt_convert_to_chspec(wifi_channel_info_t channel); + +int +dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params); + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt); + + +int 
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa); + +int +dhd_rtt_init(dhd_pub_t *dhd); + +int +dhd_rtt_deinit(dhd_pub_t *dhd); +#endif /* __DHD_RTT_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c new file mode 100644 index 000000000000..2a9830e3dc03 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c @@ -0,0 +1,8407 @@ +/* + * DHD Bus Module for SDIO + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_sdio.c 593728 2015-10-19 09:20:32Z $ + */ + +#include +#include +#include + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +bool dhd_mp_halting(dhd_pub_t *dhdp); +extern void bcmsdh_waitfor_iodrain(void *sdh); +extern void bcmsdh_reject_ioreqs(void *sdh, bool reject); +extern bool bcmsdh_fatal_error(void *sdh); + +#ifndef DHDSDIO_MEM_DUMP_FNAME +#define DHDSDIO_MEM_DUMP_FNAME "mem_dump" +#endif + +#define QLEN (1024) /* bulk rx and tx queue lengths */ +#define FCHI (QLEN - 10) +#define FCLOW (FCHI / 2) +#define PRIOMASK 7 + +#define TXRETRIES 2 /* # of retries for tx frames */ +#define READ_FRM_CNT_RETRIES 3 +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */ +#endif + +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */ +#endif + +#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */ + +#ifndef DHD_FIRSTREAD +#define DHD_FIRSTREAD 32 +#endif +#if !ISPOWEROF2(DHD_FIRSTREAD) +#error DHD_FIRSTREAD is not a power of 2! 
+#endif + +/* Total length of frame header for dongle protocol */ +#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN) +#define SDPCM_HDRLEN_TXGLOM (SDPCM_HDRLEN + SDPCM_HWEXT_LEN) +#define MAX_TX_PKTCHAIN_CNT SDPCM_MAXGLOM_SIZE + +#ifdef SDTEST +#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN) +#else +#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN) +#endif + +/* Space for header read, limit for data packets */ +#ifndef MAX_HDR_READ +#define MAX_HDR_READ 32 +#endif +#if !ISPOWEROF2(MAX_HDR_READ) +#error MAX_HDR_READ is not a power of 2! +#endif + +#define MAX_RX_DATASZ 2048 + +/* Maximum milliseconds to wait for F2 to come up */ +#define DHD_WAIT_F2RDY 3000 + +/* Bump up limit on waiting for HT to account for first startup; + * if the image is doing a CRC calculation before programming the PMU + * for HT availability, it could take a couple hundred ms more, so + * max out at a 1 second (1000000us). + */ +#if (PMU_MAX_TRANSITION_DLY <= 1000000) +#undef PMU_MAX_TRANSITION_DLY +#define PMU_MAX_TRANSITION_DLY 1000000 +#endif + +/* hooks for limiting threshold custom tx num in rx processing */ +#define DEFAULT_TXINRX_THRES 0 +#ifndef CUSTOM_TXINRX_THRES +#define CUSTOM_TXINRX_THRES DEFAULT_TXINRX_THRES +#endif + +/* Value for ChipClockCSR during initial setup */ +#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ) +#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP) + +/* Flags for SDH calls */ +#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) + +/* Packet free applicable unconditionally for sdio and sdspi. Conditional if + * bufpool was present for gspi bus. + */ +#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ + PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); + + +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX 2024 +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; + +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) +#define KSO_ENAB(bus) ((bus)->kso) +#define SR_ENAB(bus) ((bus)->_srenab) +#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto)) +#define MIN_RSRC_ADDR (SI_ENUM_BASE + 0x618) +#define MIN_RSRC_SR 0x3 +#define CORE_CAPEXT_ADDR (SI_ENUM_BASE + 0x64c) +#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1) +#define RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define RCTL_LOGIC_DISABLE_MASK (1 << 27) + +#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup) +#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */ +#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */ +#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */ +#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0) +#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2) +#define OVERFLOW_BLKSZ512_WM 96 +#define OVERFLOW_BLKSZ512_MES 80 + +#define CC_PMUCC3 (0x3) +/* Private data for SDIO bus interaction */ +typedef struct dhd_bus { + dhd_pub_t *dhd; + + bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */ + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, 
invalid) */ + + sdpcmd_regs_t *regs; /* Registers for SDIO core */ + uint sdpcmrev; /* SDIO core revision */ + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 bus_num; /* bus number */ + uint32 slot_num; /* slot ID */ + uint32 hostintmask; /* Copy of Host Interrupt Mask */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + uint8 flowcontrol; /* per prio flow control bitmask */ + uint8 tx_seq; /* Transmit sequence number (next) */ + uint8 tx_max; /* Maximum transmit sequence allowed */ + + uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN]; + uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + uint16 nextlen; /* Next Read Len from last header */ + uint8 rx_seq; /* Receive sequence number (expected) */ + bool rxskip; /* Skip receive (awaiting NAK ACK) */ + + void *glomd; /* Packet containing glomming descriptor */ + void *glom; /* Packet chain for glommed superframe */ + uint glomerr; /* Glom packet read errors */ + + uint8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + uint8 *rxctl; /* Aligned pointer into rxbuf */ + uint8 *databuf; /* Buffer for receiving big glom packet */ + uint8 *dataptr; /* Aligned pointer into databuf */ + uint rxlen; /* Length of valid data in buffer */ + + uint8 sdpcm_ver; /* Bus protocol reported by dongle */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint spurious; /* Count of spurious interrupts */ + uint pollrate; /* Ticks between device polls */ + uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + uint regfails; /* Count of R_REG/W_REG failures */ + + uint clkstate; /* State of sd and backplane clock(s) */ + bool activity; /* Activity flag for clock down */ + int32 idletime; /* Control for activity timeout */ + int32 idlecount; /* Activity timeout counter */ + int32 idleclock; /* How to set bus driver when idle */ + int32 sd_divisor; /* Speed control to bus driver */ + int32 sd_mode; /* Mode control to bus driver */ + int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */ + bool use_rxchain; /* If dhd should use PKT chains */ + bool sleeping; /* Is SDIO bus sleeping? 
*/ +#if defined(SUPPORT_P2P_GO_PS) + wait_queue_head_t bus_sleep; +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + uint rxflow_mode; /* Rx flow control mode */ + bool rxflow; /* Is rx flow control on */ + uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */ + bool alp_only; /* Don't use HT clock (ALP only) */ + /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ + bool usebufpool; + int32 txinrx_thres; /* num of in-queued pkts */ + int32 dotxinrx; /* tx first in dhdsdio_readframes */ +#ifdef SDTEST + /* external loopback */ + bool ext_loop; + uint8 loopid; + + /* pktgen configuration */ + uint pktgen_freq; /* Ticks between bursts */ + uint pktgen_count; /* Packets to send each burst */ + uint pktgen_print; /* Bursts between count displays */ + uint pktgen_total; /* Stop after this many */ + uint pktgen_minlen; /* Minimum packet data len */ + uint pktgen_maxlen; /* Maximum packet data len */ + uint pktgen_mode; /* Configured mode: tx, rx, or echo */ + uint pktgen_stop; /* Number of tx failures causing stop */ + + /* active pktgen fields */ + uint pktgen_tick; /* Tick counter for bursts */ + uint pktgen_ptick; /* Burst counter for printing */ + uint pktgen_sent; /* Number of test packets generated */ + uint pktgen_rcvd; /* Number of test packets received */ + uint pktgen_prev_time; /* Time at which previous stats where printed */ + uint pktgen_prev_sent; /* Number of test packets generated when + * previous stats were printed + */ + uint pktgen_prev_rcvd; /* Number of test packets received when + * previous stats were printed + */ + uint pktgen_fail; /* Number of failed send attempts */ + uint16 pktgen_len; /* Length of next packet to send */ +#define PKTGEN_RCV_IDLE (0) +#define PKTGEN_RCV_ONGOING (1) + uint16 pktgen_rcv_state; /* receive state */ + uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */ +#endif /* SDTEST */ + + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) 
*/ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ +#ifdef DHDENABLE_TAILPAD + uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */ + uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */ +#endif /* DHDENABLE_TAILPAD */ + uint8 *ctrl_frame_buf; + uint32 ctrl_frame_len; + bool ctrl_frame_stat; + uint32 rxint_mode; /* rx interrupt mode */ + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + bool kso; + bool _slpauto; + bool _oobwakeup; + bool _srenab; + bool readframes; + bool reqbussleep; + uint32 resetinstr; + uint32 dongle_ram_base; + + void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE]; /* Array of pkts for glomming */ + uint32 txglom_cnt; /* Number of pkts in the glom array */ + uint32 txglom_total_len; /* Total length of pkts in glom array */ + bool txglom_enable; /* Flag to indicate whether tx glom is enabled/disabled */ + uint32 txglomsize; /* Glom size limitation */ +#ifdef DHDENABLE_TAILPAD + void *pad_pkt; +#endif /* DHDENABLE_TAILPAD */ +} dhd_bus_t; + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 /* Not used yet */ +#define CLK_AVAIL 3 + +#define DHD_NOPMU(dhd) (FALSE) + +#if defined(BCMSDIOH_STD) +#define BLK_64_MAXTXGLOM 20 +#endif /* BCMSDIOH_STD */ + +#ifdef DHD_DEBUG +static int qcount[NUMPRIO]; +static int tx_packets[NUMPRIO]; +#endif /* DHD_DEBUG */ + +/* Deferred transmit */ +const uint dhd_deferred_tx = 1; + +extern uint dhd_watchdog_ms; + +extern void dhd_os_wd_timer(void *bus, uint wdtick); +int dhd_enableOOB(dhd_pub_t *dhd, bool sleep); + +/* Tx/Rx bounds */ +uint dhd_txbound; +uint dhd_rxbound; +uint dhd_txminmax = DHD_TXMINMAX; + +/* override the RAM size if possible */ +#define DONGLE_MIN_RAMSIZE (128 *1024) +int dhd_dongle_ramsize; + +uint dhd_doflow = TRUE; +uint dhd_dpcpoll = FALSE; + +module_param(dhd_doflow, uint, 0644); +module_param(dhd_dpcpoll, uint, 0644); + +static bool dhd_alignctl; + +static bool sd1idle; + +static bool retrydata; +#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata) + +static uint watermark = 8; +static uint mesbusyctrl = 0; +static const uint firstread = DHD_FIRSTREAD; + +/* Retry count for register access failures */ +static const uint retry_limit = 2; + +/* Force even SD lengths (some host controllers mess up on odd bytes) */ +static bool forcealign; + +#define ALIGNMENT 4 + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable); +#endif + +#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) +#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD +#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */ +#define PKTALIGN(osh, p, len, align) \ + do { \ + uintptr datalign; \ + datalign = (uintptr)PKTDATA((osh), (p)); \ + datalign = ROUNDUP(datalign, (align)) - datalign; \ + ASSERT(datalign < (align)); \ + ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \ + if (datalign) \ 
+ PKTPULL((osh), (p), (uint)datalign); \ + PKTSETLEN((osh), (p), (len)); \ + } while (0) + +/* Limit on rounding up frames */ +static const uint max_roundup = 512; + +/* Try doing readahead */ +static bool dhd_readahead; + +/* To check if there's window offered */ +#define DATAOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* To check if there's window offered for ctrl frame */ +#define TXCTLOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* Number of pkts available in dongle for data RX */ +#define DATABUFCNT(bus) \ + ((uint8)(bus->tx_max - bus->tx_seq) - 1) + +/* Macros to get register read/write status */ +/* NOTE: these assume a local dhdsdio_bus_t *bus! */ +#define R_SDREG(regvar, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + regvar = R_REG(bus->dhd->osh, regaddr); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) { \ + DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + regvar = 0; \ + } \ + } \ +} while (0) + +#define W_SDREG(regval, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + W_REG(bus->dhd->osh, regaddr, regval); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) \ + DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + } \ +} while (0) + +#define BUS_WAKE(bus) \ + do { \ + bus->idlecount = 0; \ + if ((bus)->sleeping) \ + dhdsdio_bussleep((bus), FALSE); \ + } while (0); + +/* + * pktavail interrupts from dongle to host can be managed in 3 different ways + * whenever there is a packet available in dongle to transmit to host. + * + * Mode 0: Dongle writes the software host mailbox and host is interrupted. + * Mode 1: (sdiod core rev >= 4) + * Device sets a new bit in the intstatus whenever there is a packet + * available in fifo. Host can't clear this specific status bit until all the + * packets are read from the FIFO. No need to ack dongle intstatus. + * Mode 2: (sdiod core rev >= 4) + * Device sets a bit in the intstatus, and host acks this by writing + * one to this bit. Dongle won't generate anymore packet interrupts + * until host reads all the packets from the dongle and reads a zero to + * figure that there are no more packets. No need to disable host ints. + * Need to ack the intstatus. + */ + +#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */ +#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */ +#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */ + + +#define FRAME_AVAIL_MASK(bus) \ + ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? 
I_HMB_FRAME_IND : I_XMTDATA_AVAIL) + +#define DHD_BUS SDIO_BUS + +#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus))) + +#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) + +#define GSPI_PR55150_BAILOUT + +#ifdef SDTEST +static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); +static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count); +#endif + +static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG +static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); +#endif /* DHD_DEBUG */ + +#if defined(DHD_FW_COREDUMP) +static int dhdsdio_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ +static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap); +static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); + +static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_disconnect(void *ptr); +static bool dhdsdio_chipmatch(uint16 chipid); +static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, + void * regsva, uint16 devid); +static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); +static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, + bool reset_flag); + +static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size); +static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); +static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry); +static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt); +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt); +static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt); + +static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_firmware(dhd_bus_t *bus); + +static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path); +static int dhdsdio_download_nvram(dhd_bus_t *bus); +#ifdef BCMEMBEDIMAGE +static int dhdsdio_download_code_array(dhd_bus_t *bus); +#endif +static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep); +static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok); +static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus); + +#ifdef WLMEDIA_HTSF +#include +extern uint32 dhd_get_htsf(void *dhd, int ifidx); +#endif /* WLMEDIA_HTSF */ + +static void +dhdsdio_tune_fifoparam(struct dhd_bus *bus) +{ + int err; + uint8 devctl, wm, mes; + + if (bus->sih->buscorerev >= 15) { + /* See .ppt in PR for these recommended values */ + if (bus->blocksize == 512) { + wm = OVERFLOW_BLKSZ512_WM; + mes = OVERFLOW_BLKSZ512_MES; + } else { + mes = bus->blocksize/4; + wm = bus->blocksize/4; + } + + watermark = wm; + mesbusyctrl = mes; + } else { + DHD_INFO(("skip fifotune: SdioRev(%d) is lower than minimal requested ver\n", + bus->sih->buscorerev)); + return; + } + + /* Update watermark */ + if (wm > 0) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err); + + devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + 
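+		/* Descriptive note added for clarity (not in the original patch):
+		 * SBSDIO_DEVCTL_F2WM_ENAB makes the device honour the F2 watermark
+		 * programmed above; for block sizes other than 512 both wm and mes are
+		 * simply blocksize/4, e.g. a 256-byte block size yields wm = mes = 64.
+		 */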
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + /* Update MES */ + if (mes > 0) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, + (mes | SBSDIO_MESBUSYCTRL_ENAB), &err); + } + + DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n", + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err), + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err), + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err))); +} + +static void +dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_RAMSIZE; + /* Restrict the ramsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_ramsize, min_size)); + if ((dhd_dongle_ramsize > min_size) && + (dhd_dongle_ramsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_ramsize; +} + +static int +dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address) +{ + int err = 0; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + return err; +} + + +#ifdef USE_OOB_GPIO1 +static int +dhdsdio_oobwakeup_init(dhd_bus_t *bus) +{ + uint32 val, addr, data; + + bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP); + + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + + /* Set device for gpio1 wakeup */ + bcmsdh_reg_write(bus->sdh, addr, 4, 2); + val = bcmsdh_reg_read(bus->sdh, data, 4); + val |= CC_CHIPCTRL2_GPIO1_WAKEUP; + bcmsdh_reg_write(bus->sdh, data, 4, val); + + bus->_oobwakeup = TRUE; + + return 0; +} +#endif /* USE_OOB_GPIO1 */ + +/* + * Query if FW is in SR mode + */ +static bool +dhdsdio_sr_cap(dhd_bus_t *bus) +{ + bool cap = FALSE; + uint32 core_capext, addr, data; + + if (bus->sih->chip == BCM43430_CHIP_ID) { + /* check if fw initialized sr engine */ + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1); + if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0) + cap = TRUE; + + return cap; + } + if (bus->sih->chip == BCM4324_CHIP_ID) { + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + bcmsdh_reg_write(bus->sdh, addr, 4, 3); + core_capext = bcmsdh_reg_read(bus->sdh, data, 4); + } else if (bus->sih->chip == BCM4330_CHIP_ID) { + core_capext = FALSE; + } else if ((bus->sih->chip == BCM4335_CHIP_ID) || + (bus->sih->chip == BCM4339_CHIP_ID) || + (bus->sih->chip == BCM43349_CHIP_ID) || + (bus->sih->chip == BCM4345_CHIP_ID) || + (bus->sih->chip == BCM43454_CHIP_ID) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4356_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID) || + (BCM4349_CHIP(bus->sih->chip)) || + (bus->sih->chip == BCM4350_CHIP_ID)) { + core_capext = TRUE; + } else { + core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4); + core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK); + } + if (!(core_capext)) + return FALSE; + + if (bus->sih->chip == BCM4324_CHIP_ID) { + /* FIX: Should change to query SR control register instead */ + cap = TRUE; + } else if ((bus->sih->chip == BCM4335_CHIP_ID) || + (bus->sih->chip == BCM4339_CHIP_ID) || + (bus->sih->chip == BCM43349_CHIP_ID) || + 
(bus->sih->chip == BCM4345_CHIP_ID) || + (bus->sih->chip == BCM43454_CHIP_ID) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4356_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM4350_CHIP_ID)) { + uint32 enabval = 0; + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3); + enabval = bcmsdh_reg_read(bus->sdh, data, 4); + + if ((bus->sih->chip == BCM4350_CHIP_ID) || + (bus->sih->chip == BCM4345_CHIP_ID) || + (bus->sih->chip == BCM43454_CHIP_ID) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4356_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID)) + enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE; + + if (enabval) + cap = TRUE; + } else { + data = bcmsdh_reg_read(bus->sdh, + SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4); + if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0) + cap = TRUE; + } + + return cap; +} + +static int +dhdsdio_srwar_init(dhd_bus_t *bus) +{ + bcmsdh_gpio_init(bus->sdh); + +#ifdef USE_OOB_GPIO1 + dhdsdio_oobwakeup_init(bus); +#endif + + + return 0; +} + +static int +dhdsdio_sr_init(dhd_bus_t *bus) +{ + uint8 val; + int err = 0; + + if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) + dhdsdio_srwar_init(bus); + + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, + 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err); + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + +#ifdef USE_CMD14 + /* Add CMD14 Support */ + dhdsdio_devcap_set(bus, + (SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT)); +#endif /* USE_CMD14 */ + + dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err); + + bus->_slpauto = dhd_slpauto ? TRUE : FALSE; + + bus->_srenab = TRUE; + + return 0; +} + +/* + * FIX: Be sure KSO bit is enabled + * Currently, it's defaulting to 0 which should be 1. + */ +static int +dhdsdio_clk_kso_init(dhd_bus_t *bus) +{ + uint8 val; + int err = 0; + + /* set flag */ + bus->kso = TRUE; + + /* + * Enable KeepSdioOn (KSO) bit for normal operation + * Default is 0 (4334A0) so set it. Fixed in B0. + */ + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL); + if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err); + if (err) + DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err)); + } + + return 0; +} + +#define KSO_DBG(x) +#define KSO_WAIT_US 50 +#define KSO_WAIT_MS 1 +#define KSO_SLEEP_RETRY_COUNT 20 +#define KSO_WAKE_RETRY_COUNT 100 +#define ERROR_BCME_NODEVICE_MAX 1 + +#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US) +#ifndef CUSTOM_MAX_KSO_ATTEMPTS +#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS +#endif + +static int +dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on) +{ + uint8 wr_val = 0, rd_val, cmp_val, bmask; + int err = 0; + int try_cnt = 0; + + KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? 
"KSO_SET" : "KSO_CLR"))); + + wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + + if (on) { + cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK; + bmask = cmp_val; + + OSL_SLEEP(3); + + } else { + /* Put device to sleep, turn off KSO */ + cmp_val = 0; + bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK; + } + + do { + rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + if (((rd_val & bmask) == cmp_val) && !err) + break; + + KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err)); + + if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) { + OSL_SLEEP(KSO_WAIT_MS); + } else + OSL_DELAY(KSO_WAIT_US); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + } while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS); + + + if (try_cnt > 2) + KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); + + if (try_cnt > CUSTOM_MAX_KSO_ATTEMPTS) { + DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); + } + + return err; +} + +static int +dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on) +{ + int err = 0; + + if (on == FALSE) { + + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); + dhdsdio_clk_kso_enab(bus, FALSE); + } else { + DHD_ERROR(("%s: KSO enable\n", __FUNCTION__)); + + /* Make sure we have SD bus access */ + if (bus->clkstate == CLK_NONE) { + DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__)); + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + + dhdsdio_clk_kso_enab(bus, TRUE); + + DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__, + dhdsdio_sleepcsr_get(bus))); + } + + bus->kso = on; + BCM_REFERENCE(err); + + return 0; +} + +static uint8 +dhdsdio_sleepcsr_get(dhd_bus_t *bus) +{ + int err = 0; + uint8 val = 0; + + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + if (err) + DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err)); + + return val; +} + +uint8 +dhdsdio_devcap_get(dhd_bus_t *bus) +{ + return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL); +} + +static int +dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap) +{ + int err = 0; + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err); + if (err) + DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err)); + + return 0; +} + +static int +dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) +{ + int err = 0, retry; + uint8 val; + + retry = 0; + if (on == TRUE) { + /* Enter Sleep */ + + /* Be sure we request clk before going to sleep + * so we can wake-up with clk request already set + * else device can go back to sleep immediately + */ + if (!SLPAUTO_ENAB(bus)) + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + else { + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if ((val & SBSDIO_CSR_MASK) == 0) { + DHD_ERROR(("%s: No clock before enter sleep:0x%x\n", + __FUNCTION__, val)); + + /* Reset clock request */ + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_ALP_AVAIL_REQ, &err); + DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); + } + } + + DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__, + 
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); +#ifdef USE_CMD14 + err = bcmsdh_sleep(bus->sdh, TRUE); +#else + + + err = dhdsdio_clk_kso_enab(bus, FALSE); + if (OOB_WAKEUP_ENAB(bus)) + { + err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */ + } +#endif /* USE_CMD14 */ + } else { + /* Exit Sleep */ + /* Make sure we have SD bus access */ + if (bus->clkstate == CLK_NONE) { + DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__)); + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + + if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE), + GPIO_DEV_SRSTATE_TIMEOUT); + + if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) { + DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n")); + } + } +#ifdef USE_CMD14 + err = bcmsdh_sleep(bus->sdh, FALSE); + if (SLPAUTO_ENAB(bus) && (err != 0)) { + OSL_DELAY(10000); + DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__)); + + /* Toggle sleep to resync with host and device */ + err = bcmsdh_sleep(bus->sdh, TRUE); + OSL_DELAY(10000); + err = bcmsdh_sleep(bus->sdh, FALSE); + + if (err) { + OSL_DELAY(10000); + DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__)); + + /* Toggle sleep to resync with host and device */ + err = bcmsdh_sleep(bus->sdh, TRUE); + OSL_DELAY(10000); + err = bcmsdh_sleep(bus->sdh, FALSE); + if (err) { + DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__)); + DHD_ERROR(("%s: FATAL: Device non-response!\n", + __FUNCTION__)); + err = 0; + } + } + } +#else + if (OOB_WAKEUP_ENAB(bus)) + { + err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */ + } + do { + err = dhdsdio_clk_kso_enab(bus, TRUE); + if (err) + OSL_SLEEP(10); + } while ((err != 0) && (++retry < 3)); + + if (err != 0) { + DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry)); + err = 0; /* continue anyway */ + } + + +#endif /* !USE_CMD14 */ + + if (err == 0) { + uint8 csr; + + /* Wait for device ready during transition to wake-up */ + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = dhdsdio_sleepcsr_get(bus)) & + SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) != + (SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000)); + + DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr)); + + if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) { + DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) != + (SBSDIO_HT_AVAIL)), (10000)); + + DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr)); + if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) { + DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + } + } + + /* Update if successful */ + if (err == 0) + bus->kso = on ? 
FALSE : TRUE; + else { + DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n", + __FUNCTION__, bus->kso, on, err)); + if (!on && retry > 2) + bus->kso = FALSE; + } + + return err; +} + +/* Turn backplane clock on or off */ +static int +dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) +{ +#define HT_AVAIL_ERROR_MAX 10 + static int ht_avail_error = 0; + int err; + uint8 clkctl, clkreq, devctl; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + clkctl = 0; + sdh = bus->sdh; + + + if (!KSO_ENAB(bus)) + return BCME_OK; + + if (SLPAUTO_ENAB(bus)) { + bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY); + return BCME_OK; + } + + if (on) { + /* Request HT Avail */ + clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + if (err) { + ht_avail_error++; + if (ht_avail_error < HT_AVAIL_ERROR_MAX) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + else if (ht_avail_error == HT_AVAIL_ERROR_MAX) { + bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR; + dhd_os_send_hang_message(bus->dhd); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + return BCME_ERROR; + } else { + ht_avail_error = 0; + } + + + /* Check current status */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + +#if !defined(OOB_INTR_ONLY) + /* Go to pending and await interrupt if appropriate */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { + /* Allow only clock-available interrupt */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: Devctl access error setting CA: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + DHD_INFO(("CLKCTL: set PENDING\n")); + bus->clkstate = CLK_PENDING; + return BCME_OK; + } else +#endif /* !defined (OOB_INTR_ONLY) */ + { + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + } + + /* Otherwise, wait here (polling) for HT Avail */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)), + !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY); + } + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", + __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl)); + return BCME_ERROR; + } + + /* Mark clock available */ + bus->clkstate = CLK_AVAIL; + DHD_INFO(("CLKCTL: turned ON\n")); + +#if defined(DHD_DEBUG) + if (bus->alp_only == TRUE) { +#if !defined(BCMLXSDMMC) + if (!SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__)); + } +#endif /* !defined(BCMLXSDMMC) */ + } else { + if (SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__)); + } + } +#endif /* defined (DHD_DEBUG) */ + + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* 
DHD_USE_IDLECOUNT */ + } else { + clkreq = 0; + + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + bus->clkstate = CLK_SDONLY; + if (!SR_ENAB(bus)) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + DHD_INFO(("CLKCTL: turned OFF\n")); + if (err) { + DHD_ERROR(("%s: Failed access turning clock off: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + } + return BCME_OK; +} + +/* Change idle/active SD state */ +static int +dhdsdio_sdclk(dhd_bus_t *bus, bool on) +{ + int err; + int32 iovalue; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (on) { + if (bus->idleclock == DHD_IDLE_STOP) { + /* Turn on clock and restore mode */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error enabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + iovalue = bus->sd_mode; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_mode: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Restore clock speed */ + iovalue = bus->sd_divisor; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error restoring sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_SDONLY; + } else { + /* Stop or slow the SD clock itself */ + if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) { + DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n", + __FUNCTION__, bus->sd_divisor, bus->sd_mode)); + return BCME_ERROR; + } + if (bus->idleclock == DHD_IDLE_STOP) { + if (sd1idle) { + /* Change to SD1 mode and turn off clock */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + + iovalue = 0; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error disabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Set divisor to idle value */ + iovalue = bus->idleclock; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_NONE; + } + + return BCME_OK; +} + +/* Transition SD and backplane clock readiness */ +static int +dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) +{ + int ret = BCME_OK; +#ifdef DHD_DEBUG + uint oldstate = bus->clkstate; +#endif /* DHD_DEBUG */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Early exit if we're already there */ + if (bus->clkstate == target) { + if (target == CLK_AVAIL) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } + return ret; + } + + switch (target) { + case CLK_AVAIL: + /* Make sure SD clock is available */ + if (bus->clkstate == CLK_NONE) + dhdsdio_sdclk(bus, TRUE); + /* Now request HT 
Avail on the backplane */ + ret = dhdsdio_htclk(bus, TRUE, pendok); + if (ret == BCME_OK) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } + break; + + case CLK_SDONLY: + /* Remove HT request, or bring up SD clock */ + if (bus->clkstate == CLK_NONE) + ret = dhdsdio_sdclk(bus, TRUE); + else if (bus->clkstate == CLK_AVAIL) + ret = dhdsdio_htclk(bus, FALSE, FALSE); + else + DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n", + bus->clkstate, target)); + if (ret == BCME_OK) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + } + break; + + case CLK_NONE: + /* Make sure to remove HT request */ + if (bus->clkstate == CLK_AVAIL) + ret = dhdsdio_htclk(bus, FALSE, FALSE); + /* Now remove the SD clock */ + ret = dhdsdio_sdclk(bus, FALSE); +#ifdef DHD_DEBUG + if (dhd_console_ms == 0) +#endif /* DHD_DEBUG */ + if (bus->poll == 0) + dhd_os_wd_timer(bus->dhd, 0); + break; + } +#ifdef DHD_DEBUG + DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate)); +#endif /* DHD_DEBUG */ + + return ret; +} + +static int +dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) +{ + int err = 0; + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n", + (sleep ? "SLEEP" : "WAKE"), + (bus->sleeping ? "SLEEP" : "WAKE"))); + + if (bus->dhd->hang_was_sent) + return BCME_ERROR; + + /* Done if we're already in the requested state */ + if (sleep == bus->sleeping) + return BCME_OK; + + /* Going to sleep: set the alarm and turn off the lights... */ + if (sleep) { + /* Don't sleep if something is pending */ +#ifdef DHD_USE_IDLECOUNT + if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes || + bus->ctrl_frame_stat) +#else + if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq)) +#endif /* DHD_USE_IDLECOUNT */ + return BCME_BUSY; + + + if (!SLPAUTO_ENAB(bus)) { + /* Disable SDIO interrupts (no longer interested) */ + bcmsdh_intr_disable(bus->sdh); + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); + + /* Isolate the bus */ + if (bus->sih->chip != BCM4329_CHIP_ID && + bus->sih->chip != BCM4319_CHIP_ID) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, + SBSDIO_DEVCTL_PADS_ISO, NULL); + } + } else { + /* Leave interrupts enabled since device can exit sleep and + * interrupt host + */ + err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */); + } + + /* Change state */ + bus->sleeping = TRUE; +#if defined(SUPPORT_P2P_GO_PS) + wake_up(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + } else { + /* Waking up: bus power up is ok, set local state */ + + if (!SLPAUTO_ENAB(bus)) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err); + + /* Force pad isolation off if possible (in case power never toggled) */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL); + + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, 
retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Enable interrupts again */ + if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) { + bus->intdis = FALSE; + bcmsdh_intr_enable(bus->sdh); + } + } else { + err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */); + } + + if (err == 0) { + /* Change state */ + bus->sleeping = FALSE; + } + } + + return err; +} + +#ifdef USE_DYNAMIC_F2_BLKSIZE +int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) +{ + int func_blk_size = function_num; + int bcmerr = 0; + int result; + + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size, + sizeof(int), &result, sizeof(int), IOV_GET); + + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num)); + return BCME_ERROR; + } + + if (result != block_size) { + DHD_TRACE_HW4(("%s: F%d Block size set from %d to %d\n", + __FUNCTION__, function_num, result, block_size)); + func_blk_size = function_num << 16 | block_size; + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL, + 0, &func_blk_size, sizeof(int32), IOV_SET); + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__)); + return BCME_ERROR; + } + } + + return BCME_OK; +} +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + +#if defined(OOB_INTR_ONLY) +void +dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) +{ +#if defined(HW_OOB) + bcmsdh_enable_hw_oob_intr(bus->sdh, enable); +#else + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (enable == TRUE) { + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + } else { + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + } + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); +#endif /* !defined(HW_OOB) */ +} +#endif + +int +dhd_bus_txdata(struct dhd_bus *bus, void *pkt) +{ + int ret = BCME_ERROR; + osl_t *osh; + uint datalen, prec; +#if defined(DHD_TX_DUMP) + uint8 *dump_data; + uint16 protocol; +#endif /* DHD_TX_DUMP */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + osh = bus->dhd->osh; + datalen = PKTLEN(osh, pkt); + +#ifdef SDTEST + /* Push the test header if doing loopback */ + if (bus->ext_loop) { + uint8* data; + PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN); + data = PKTDATA(osh, pkt); + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->loopid++; + *data++ = (datalen >> 0); + *data++ = (datalen >> 8); + datalen += SDPCM_TEST_HDRLEN; + } +#else /* SDTEST */ + BCM_REFERENCE(datalen); +#endif /* SDTEST */ + +#if defined(DHD_TX_DUMP) + dump_data = PKTDATA(osh, pkt); + dump_data += 4; /* skip 4 bytes header */ + protocol = (dump_data[12] << 8) | dump_data[13]; + + if (protocol == ETHER_TYPE_802_1X) { + DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n", + dump_data[14], dump_data[15], dump_data[30])); + } +#endif /* DHD_TX_DUMP */ + +#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP) + { + int i; + DHD_ERROR(("TX DUMP\n")); + + for (i = 0; i < (datalen - 4); i++) { + DHD_ERROR(("%02X ", dump_data[i])); + if ((i & 15) == 15) + 
printk("\n"); + } + DHD_ERROR(("\n")); + } +#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */ + + prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK)); + + /* Check for existing queue, current flow-control, pending event, or pending clock */ + if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched || + (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) || + (bus->clkstate != CLK_AVAIL)) { + bool deq_ret; + int pkq_len; + + DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq))); + bus->fcqueued++; + + /* Priority based enq */ + dhd_os_sdlock_txq(bus->dhd); + deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec); + dhd_os_sdunlock_txq(bus->dhd); + + if (!deq_ret) { +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0) +#endif /* PROP_TXSTATUS */ + { +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + dhd_txcomplete(bus->dhd, pkt, FALSE); + PKTFREE(osh, pkt, TRUE); + } + ret = BCME_NORESOURCE; + } else + ret = BCME_OK; + + dhd_os_sdlock_txq(bus->dhd); + pkq_len = pktq_len(&bus->txq); + dhd_os_sdunlock_txq(bus->dhd); + if (pkq_len >= FCHI) { + bool wlfc_enabled = FALSE; +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) != + WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled && dhd_doflow) { + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); + } + } + +#ifdef DHD_DEBUG + dhd_os_sdlock_txq(bus->dhd); + if (pktq_plen(&bus->txq, prec) > qcount[prec]) + qcount[prec] = pktq_plen(&bus->txq, prec); + dhd_os_sdunlock_txq(bus->dhd); +#endif + + /* Schedule DPC if needed to send queued packet(s) */ + if (dhd_deferred_tx && !bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } else { + int chan = SDPCM_DATA_CHANNEL; + +#ifdef SDTEST + chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL); +#endif + /* Lock: we're about to use shared data/code (and SDIO) */ + dhd_os_sdlock(bus->dhd); + + /* Otherwise, send it now */ + BUS_WAKE(bus); + /* Make sure back plane ht clk is on, no pending allowed */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + + ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE); + + if (ret != BCME_OK) + bus->dhd->tx_errors++; + else + bus->dhd->dstats.tx_bytes += datalen; + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + } + + return ret; +} + +/* align packet data pointer and packet length to n-byte boundary, process packet headers, + * a new packet may be allocated if there is not enough head and/or tail from for padding. 
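+ * (worked example, assumptions only: if DHD_SDALIGN is 32 - its usual value,
+ *  though not guaranteed here - a data pointer ending in 0x54 needs
+ *  0x54 % 32 = 20 bytes of head padding, so the pointer is pushed back to an
+ *  address ending in 0x40; if the packet has less than 20 bytes of headroom a
+ *  fresh, already-aligned packet is allocated instead)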
+ * the caller is responsible for updating the glom size in the head packet (when glom is + * used) + * + * pad_pkt_len: returns the length of extra padding needed from the padding packet, this parameter + * is taken in tx glom mode only + * + * new_pkt: out, pointer of the new packet allocated due to insufficient head room for alignment + * padding, NULL if not needed, the caller is responsible for freeing the new packet + * + * return: positive value - length of the packet, including head and tail padding + * negative value - errors + */ +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt) +{ + osl_t *osh; + uint8 *frame; + int pkt_len; + int modulo; + int head_padding; + int tail_padding = 0; + uint32 swheader; + uint32 swhdr_offset; + bool alloc_new_pkt = FALSE; + uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; + + *new_pkt = NULL; + osh = bus->dhd->osh; + +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* Add space for the SDPCM hardware/software headers */ + PKTPUSH(osh, pkt, sdpcm_hdrlen); + ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2)); + + frame = (uint8*)PKTDATA(osh, pkt); + pkt_len = (uint16)PKTLEN(osh, pkt); + +#ifdef WLMEDIA_HTSF + frame = (uint8*)PKTDATA(osh, pkt); + if (PKTLEN(osh, pkt) >= 100) { + htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12); + if (htsf_ts->magic == HTSFMAGIC) { + htsf_ts->c20 = get_cycles(); + htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0); + } + } +#endif /* WLMEDIA_HTSF */ +#ifdef DHD_DEBUG + if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) + tx_packets[PKTPRIO(pkt)]++; +#endif /* DHD_DEBUG */ + + /* align the data pointer, allocate a new packet if there is not enough space (new + * packet data pointer will be aligned thus no padding will be needed) + */ + head_padding = (ulong)frame % DHD_SDALIGN; + if (PKTHEADROOM(osh, pkt) < head_padding) { + head_padding = 0; + alloc_new_pkt = TRUE; + } else { + uint cur_chain_total_len; + int chain_tail_padding = 0; + + /* All packets need to be aligned by DHD_SDALIGN */ + modulo = (pkt_len + head_padding) % DHD_SDALIGN; + tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0; + + /* Total pkt chain length needs to be aligned by block size, + * unless it is a single pkt chain with total length less than one block size, + * which we prefer sending by byte mode. + * + * Do the chain alignment here if + * 1. This is the last pkt of the chain of multiple pkts or a single pkt. + * 2-1. This chain is of multiple pkts, or + * 2-2. This is a single pkt whose size is longer than one block size. + */ + cur_chain_total_len = prev_chain_total_len + + (head_padding + pkt_len + tail_padding); + if (last_chained_pkt && bus->blocksize != 0 && + (cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) { + modulo = cur_chain_total_len % bus->blocksize; + chain_tail_padding = modulo > 0 ? 
(bus->blocksize - modulo) : 0; + } + +#ifdef DHDENABLE_TAILPAD + if (PKTTAILROOM(osh, pkt) < tail_padding) { + /* We don't have tail room to align by DHD_SDALIGN */ + alloc_new_pkt = TRUE; + bus->tx_tailpad_pktget++; + } else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) { + /* We have tail room for tail_padding of this pkt itself, but not for + * total pkt chain alignment by block size. + * Use the padding packet to avoid memory copy if applicable, + * otherwise, just allocate a new pkt. + */ + if (bus->pad_pkt) { + *pad_pkt_len = chain_tail_padding; + bus->tx_tailpad_chain++; + } else { + alloc_new_pkt = TRUE; + bus->tx_tailpad_pktget++; + } + } else + /* This last pkt's tailroom is sufficient to hold both tail_padding + * of the pkt itself and chain_tail_padding of total pkt chain + */ +#endif /* DHDENABLE_TAILPAD */ + tail_padding += chain_tail_padding; + } + + DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n", + __FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len)); + + if (alloc_new_pkt) { + void *tmp_pkt; + int newpkt_size; + int cur_total_len; + + ASSERT(*pad_pkt_len == 0); + + DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__)); + + /* head pointer is aligned now, no padding needed */ + head_padding = 0; + + /* update the tail padding as it depends on the head padding, since a new packet is + * allocated, the head padding is non longer needed and packet length is chagned + */ + + cur_total_len = prev_chain_total_len + pkt_len; + if (last_chained_pkt && bus->blocksize != 0 && + (cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) { + modulo = cur_total_len % bus->blocksize; + tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0; + } + else { + modulo = pkt_len % DHD_SDALIGN; + tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0; + } + + newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN; + bus->dhd->tx_realloc++; + tmp_pkt = PKTGET(osh, newpkt_size, TRUE); + if (tmp_pkt == NULL) { + DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size)); + return BCME_NOMEM; + } + PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN); + bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt)); + *new_pkt = tmp_pkt; + pkt = tmp_pkt; + } + + if (head_padding) + PKTPUSH(osh, pkt, head_padding); + + frame = (uint8*)PKTDATA(osh, pkt); + bzero(frame, head_padding + sdpcm_hdrlen); + pkt_len = (uint16)PKTLEN(osh, pkt); + + /* the header has the followming format + * 4-byte HW frame tag: length, ~length (for glom this is the total length) + * + * 8-byte HW extesion flags (glom mode only) as the following: + * 2-byte packet length, excluding HW tag and padding + * 2-byte frame channel and frame flags (e.g. next frame following) + * 2-byte header length + * 2-byte tail padding size + * + * 8-byte SW frame tags as the following + * 4-byte flags: host tx seq, channel, data offset + * 4-byte flags: TBD + */ + + swhdr_offset = SDPCM_FRAMETAG_LEN; + + /* hardware frame tag: + * + * in tx-glom mode, dongle only checks the hardware frame tag in the first + * packet and sees it as the total lenght of the glom (including tail padding), + * for each packet in the glom, the packet length needs to be updated, (see + * below PKTSETLEN) + * + * in non tx-glom mode, PKTLEN still need to include tail padding as to be + * referred to in sdioh_request_buffer(). The tail length will be excluded in + * dhdsdio_txpkt_postprocess(). 
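+	 * (illustrative example, not part of the original comment: a length value
+	 *  of 64 (0x0040) is stored little-endian as 40 00 bf ff, i.e. the 16-bit
+	 *  length followed by its bitwise complement)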
+ */ + *(uint16*)frame = (uint16)htol16(pkt_len); + *(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len); + pkt_len += tail_padding; + + /* hardware extesion flags */ + if (bus->txglom_enable) { + uint32 hwheader1; + uint32 hwheader2; + + swhdr_offset += SDPCM_HWEXT_LEN; + hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) | + (last_chained_pkt << 24); + hwheader2 = (tail_padding) << 16; + htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); + } + PKTSETLEN((osh), (pkt), (pkt_len)); + + /* software frame tags */ + swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | (txseq % SDPCM_SEQUENCE_WRAP) | + (((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + swhdr_offset); + htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader)); + + return pkt_len; +} + +static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt) +{ + osl_t *osh; + uint8 *frame; + int data_offset; + int tail_padding; + int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0); + + (void)osh; + osh = bus->dhd->osh; + + /* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */ + frame = (uint8*)PKTDATA(osh, pkt); + + DHD_INFO(("%s PKTLEN before postprocess %d", + __FUNCTION__, PKTLEN(osh, pkt))); + + /* PKTLEN still includes tail_padding, so exclude it. + * We shall have head_padding + original pkt_len for PKTLEN afterwards. + */ + if (bus->txglom_enable) { + /* txglom pkts have tail_padding length in HW ext header */ + tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16; + PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding); + DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n", + tail_padding, PKTLEN(osh, pkt))); + } else { + /* non-txglom pkts have head_padding + original pkt length in HW frame tag. + * We cannot refer to this field for txglom pkts as the first pkt of the chain will + * have the field for the total length of the chain. 
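+	 * (illustrative example, an assumption for clarity: if the tag records 96
+	 *  bytes of head padding + SDPCM header + payload and 32 bytes of tail
+	 *  padding were appended later, the PKTSETLEN() below trims the packet
+	 *  from 128 back to 96 bytes)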
+ */ + PKTSETLEN(osh, pkt, *(uint16*)frame); + DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n", + *(uint16*)frame, PKTLEN(osh, pkt))); + } + + data_offset = ltoh32_ua(frame + swhdr_offset); + data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT; + /* Get rid of sdpcm header + head_padding */ + PKTPULL(osh, pkt, data_offset); + + DHD_INFO(("%s data_offset %d, PKTLEN %d\n", + __FUNCTION__, data_offset, PKTLEN(osh, pkt))); + + return BCME_OK; +} + +static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt) +{ + int i; + int ret = 0; + osl_t *osh; + bcmsdh_info_t *sdh; + void *pkt = NULL; + void *pkt_chain; + int total_len = 0; + void *head_pkt = NULL; + void *prev_pkt = NULL; + int pad_pkt_len = 0; + int new_pkt_num = 0; + void *new_pkts[MAX_TX_PKTCHAIN_CNT]; + bool wlfc_enabled = FALSE; + + if (bus->dhd->dongle_reset) + return BCME_NOTREADY; + + sdh = bus->sdh; + osh = bus->dhd->osh; + /* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */ + new_pkts[0] = NULL; + + for (i = 0; i < num_pkt; i++) { + int pkt_len; + bool last_pkt; + void *new_pkt = NULL; + + pkt = pkts[i]; + ASSERT(pkt); + last_pkt = (i == num_pkt - 1); + pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i, + total_len, last_pkt, &pad_pkt_len, &new_pkt); + if (pkt_len <= 0) + goto done; + if (new_pkt) { + pkt = new_pkt; + new_pkts[new_pkt_num++] = new_pkt; + } + total_len += pkt_len; + + PKTSETNEXT(osh, pkt, NULL); + /* insert the packet into the list */ + head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt); + prev_pkt = pkt; + + } + + /* Update the HW frame tag (total length) in the first pkt of the glom */ + if (bus->txglom_enable) { + uint8 *frame; + + total_len += pad_pkt_len; + frame = (uint8*)PKTDATA(osh, head_pkt); + *(uint16*)frame = (uint16)htol16(total_len); + *(((uint16*)frame) + 1) = (uint16)htol16(~total_len); + + } + +#ifdef DHDENABLE_TAILPAD + /* if a padding packet if needed, insert it to the end of the link list */ + if (pad_pkt_len) { + PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len); + PKTSETNEXT(osh, pkt, bus->pad_pkt); + } +#endif /* DHDENABLE_TAILPAD */ + + /* dhd_bcmsdh_send_buf ignores the buffer pointer if he packet + * parameter is not NULL, for non packet chian we pass NULL pkt pointer + * so it will take the aligned length and buffer pointer. + */ + pkt_chain = PKTNEXT(osh, head_pkt) ? 
head_pkt : NULL; + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES); + if (ret == BCME_OK) + bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP; + + /* if a padding packet was needed, remove it from the link list as it not a data pkt */ + if (pad_pkt_len && pkt) + PKTSETNEXT(osh, pkt, NULL); + +done: + pkt = head_pkt; + while (pkt) { + void *pkt_next = PKTNEXT(osh, pkt); + PKTSETNEXT(osh, pkt, NULL); + dhdsdio_txpkt_postprocess(bus, pkt); + pkt = pkt_next; + } + + /* new packets might be allocated due to insufficient room for padding, but we + * still have to indicate the original packets to upper layer + */ + for (i = 0; i < num_pkt; i++) { + pkt = pkts[i]; + wlfc_enabled = FALSE; +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) { + wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) != + WLFC_UNSUPPORTED); + } +#endif /* PROP_TXSTATUS */ + if (!wlfc_enabled) { + PKTSETNEXT(osh, pkt, NULL); + dhd_txcomplete(bus->dhd, pkt, ret != 0); + if (free_pkt) + PKTFREE(osh, pkt, TRUE); + } + } + + for (i = 0; i < new_pkt_num; i++) + PKTFREE(osh, new_pkts[i], TRUE); + + return ret; +} + +static uint +dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) +{ + uint cnt = 0; + uint8 tx_prec_map; + uint16 txpktqlen = 0; + uint32 intstatus = 0; + uint retries = 0; + osl_t *osh; + uint datalen = 0; + dhd_pub_t *dhd = bus->dhd; + sdpcmd_regs_t *regs = bus->regs; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + osh = dhd->osh; + tx_prec_map = ~bus->flowcontrol; +#ifdef DHD_LOSSLESS_ROAMING + tx_prec_map &= dhd->dequeue_prec_map; +#endif + for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) { + int i; + int num_pkt = 1; + void *pkts[MAX_TX_PKTCHAIN_CNT]; + int prec_out; + + dhd_os_sdlock_txq(bus->dhd); + if (bus->txglom_enable) { + uint32 glomlimit = (uint32)bus->txglomsize; +#if defined(BCMSDIOH_STD) + if (bus->blocksize == 64) { + glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM); + } +#endif /* BCMSDIOH_STD */ + num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit); + num_pkt = MIN(num_pkt, ARRAYSIZE(pkts)); + } + num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map)); + for (i = 0; i < num_pkt; i++) { + pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out); + if (!pkts[i]) { + DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n", + __FUNCTION__)); + ASSERT(0); + break; + } + PKTORPHAN(pkts[i]); + datalen += PKTLEN(osh, pkts[i]); + } + dhd_os_sdunlock_txq(bus->dhd); + + if (i == 0) + break; + if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK) + dhd->tx_errors++; + else + dhd->dstats.tx_bytes += datalen; + cnt += i; + + /* In poll mode, need to check for other events */ + if (!bus->intr && cnt) + { + /* Check device status, signal pending interrupt */ + R_SDREG(intstatus, ®s->intstatus, retries); + bus->f2txdata++; + if (bcmsdh_regfail(bus->sdh)) + break; + if (intstatus & bus->hostintmask) + bus->ipend = TRUE; + } + + } + + dhd_os_sdlock_txq(bus->dhd); + txpktqlen = pktq_len(&bus->txq); + dhd_os_sdunlock_txq(bus->dhd); + + /* Do flow-control if needed */ + if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) { + bool wlfc_enabled = FALSE; +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled && dhd_doflow && dhd->txoff) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + 
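+		/* Note added for clarity (not in the original patch): together with the
+		 * FCHI check in dhd_bus_txdata() this forms simple hysteresis - host
+		 * flow control is asserted once the tx queue reaches FCHI packets and
+		 * released here after it drains below FCLOW.
+		 */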
} + } + + return cnt; +} + +static void +dhdsdio_sendpendctl(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + int ret; + uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN; + + if (bus->txglom_enable) + frame_seq += SDPCM_HWEXT_LEN; + + if (*frame_seq != bus->tx_seq) { + DHD_INFO(("%s IOCTL frame seq lag detected!" + " frm_seq:%d != bus->tx_seq:%d, corrected\n", + __FUNCTION__, *frame_seq, bus->tx_seq)); + *frame_seq = bus->tx_seq; + } + + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len, + NULL, NULL, NULL, 1); + if (ret == BCME_OK) + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + + bus->ctrl_frame_stat = FALSE; + dhd_wait_event_wakeup(bus->dhd); +} + +int +dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + static int err_nodevice = 0; + uint8 *frame; + uint16 len; + uint32 swheader; + bcmsdh_info_t *sdh = bus->sdh; + uint8 doff = 0; + int ret = -1; + uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Back the pointer to make a room for bus header */ + frame = msg - sdpcm_hdrlen; + len = (msglen += sdpcm_hdrlen); + + /* Add alignment padding (optional for ctl frames) */ + if (dhd_alignctl) { + if ((doff = ((uintptr)frame % DHD_SDALIGN))) { + frame -= doff; + len += doff; + msglen += doff; + bzero(frame, doff + sdpcm_hdrlen); + } + ASSERT(doff < DHD_SDALIGN); + } + doff += sdpcm_hdrlen; + + /* Round send length to next SDIO block */ + if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { + uint16 pad = bus->blocksize - (len % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize)) + len += pad; + } else if (len % DHD_SDALIGN) { + len += DHD_SDALIGN - (len % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (len & (ALIGNMENT - 1))) + len = ROUNDUP(len, ALIGNMENT); + + ASSERT(ISALIGNED((uintptr)frame, 2)); + + + /* Need to lock here to protect txseq and SDIO tx calls */ + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ + *(uint16*)frame = htol16((uint16)msglen); + *(((uint16*)frame) + 1) = htol16(~msglen); + + if (bus->txglom_enable) { + uint32 hwheader1, hwheader2; + /* Software tag: channel, sequence number, data offset */ + swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | bus->tx_seq + | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + + SDPCM_HWEXT_LEN + sizeof(swheader)); + + hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24); + hwheader2 = (len - (msglen)) << 16; + htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); + + *(uint16*)frame = htol16(len); + *(((uint16*)frame) + 1) = htol16(~(len)); + } else { + /* Software tag: channel, sequence number, data offset */ + swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) + | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); + } + if (!TXCTLOK(bus)) { + DHD_INFO(("%s: No bus credit 
bus->tx_max %d, bus->tx_seq %d\n", + __FUNCTION__, bus->tx_max, bus->tx_seq)); + bus->ctrl_frame_stat = TRUE; + /* Send from dpc */ + bus->ctrl_frame_buf = frame; + bus->ctrl_frame_len = len; + + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + if (bus->ctrl_frame_stat) { + dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + } + + if (bus->ctrl_frame_stat == FALSE) { + DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); + ret = 0; + } else { + bus->dhd->txcnt_timeout++; + if (!bus->dhd->hang_was_sent) { +#ifdef CUSTOMER_HW4_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n", + __FUNCTION__, status)); + DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n", + __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate)); +#endif /* CUSTOMER_HW4_DEBUG */ + DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n", + __FUNCTION__, bus->dhd->txcnt_timeout)); + } + ret = -1; + bus->ctrl_frame_stat = FALSE; + goto done; + } + } + + bus->dhd->txcnt_timeout = 0; + bus->ctrl_frame_stat = TRUE; + + if (ret == -1) { +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("Tx Frame", frame, len); + } else if (DHD_HDRS_ON()) { + prhex("TxHdr", frame, MIN(len, 16)); + } +#endif + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + frame, len, NULL, NULL, NULL, TXRETRIES); + if (ret == BCME_OK) + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + bus->ctrl_frame_stat = FALSE; + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + if (ret) + bus->dhd->tx_ctlerrs++; + else + bus->dhd->tx_ctlpkts++; + + if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) + return -ETIMEDOUT; + + if (ret == BCME_NODEVICE) + err_nodevice++; + else + err_nodevice = 0; + + return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? 
-ETIMEDOUT : -EIO : 0; +} + +int +dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + + if (rxlen) { + DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", + __FUNCTION__, rxlen, msglen)); + } else if (timeleft == 0) { +#ifdef DHD_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n", + __FUNCTION__, status)); +#else + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#endif /* DHD_DEBUG */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + if (timeleft == 0) { + if (rxlen == 0) + bus->dhd->rxcnt_timeout++; + DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__, + bus->dhd->rxcnt_timeout, rxlen)); + } + else + bus->dhd->rxcnt_timeout = 0; + + if (rxlen) + bus->dhd->rx_ctlpkts++; + else + bus->dhd->rx_ctlerrs++; + + if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) + return -ETIMEDOUT; + + if (bus->dhd->dongle_trap_occured) + return -EREMOTEIO; + + return rxlen ? (int)rxlen : -EIO; +} + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_POLLRATE, + IOV_SDREG, + IOV_SBREG, + IOV_SDCIS, + IOV_MEMBYTES, + IOV_RAMSIZE, + IOV_RAMSTART, +#ifdef DHD_DEBUG + IOV_CHECKDIED, + IOV_SERIALCONS, +#endif /* DHD_DEBUG */ + IOV_SET_DOWNLOAD_STATE, + IOV_SOCRAM_STATE, + IOV_FORCEEVEN, + IOV_SDIOD_DRIVE, + IOV_READAHEAD, + IOV_SDRXCHAIN, + IOV_ALIGNCTL, + IOV_SDALIGN, + IOV_DEVRESET, + IOV_CPU, +#if defined(USE_SDIOFIFO_IOVAR) + IOV_WATERMARK, + IOV_MESBUSYCTRL, +#endif /* USE_SDIOFIFO_IOVAR */ +#ifdef SDTEST + IOV_PKTGEN, + IOV_EXTLOOP, +#endif /* SDTEST */ + IOV_SPROM, + IOV_TXBOUND, + IOV_RXBOUND, + IOV_TXMINMAX, + IOV_IDLETIME, + IOV_IDLECLOCK, + IOV_SD1IDLE, + IOV_SLEEP, + IOV_DONGLEISOLATION, + IOV_KSO, + IOV_DEVSLEEP, + IOV_DEVCAP, + IOV_VARS, +#ifdef SOFTAP + IOV_FWPATH, +#endif + IOV_TXGLOMSIZE, + IOV_TXGLOMMODE, + IOV_HANGREPORT, + IOV_TXINRX_THRES +}; + +const bcm_iovar_t dhdsdio_iovars[] = { + {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, + {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 }, + {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 }, + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, + {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 }, + {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, + {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 }, + {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 }, + {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 }, + {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 }, + {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 }, + {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, 
sizeof(sdreg_t) }, + {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, + {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 }, + {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, + {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 }, + {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 }, + {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ +#endif /* DHD_DEBUG */ +#ifdef SDTEST + {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 }, + {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) }, +#endif /* SDTEST */ +#if defined(USE_SDIOFIFO_IOVAR) + {"watermark", IOV_WATERMARK, 0, IOVT_UINT32, 0 }, + {"mesbusyctrl", IOV_MESBUSYCTRL, 0, IOVT_UINT32, 0 }, +#endif /* USE_SDIOFIFO_IOVAR */ + {"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, + {"kso", IOV_KSO, 0, IOVT_UINT32, 0 }, + {"devsleep", IOV_DEVSLEEP, 0, IOVT_UINT32, 0 }, +#ifdef SOFTAP + {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 }, +#endif + {"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, + {"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +static void +dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div) +{ + uint q1, q2; + + if (!div) { + bcm_bprintf(strbuf, "%s N/A", desc); + } else { + q1 = num / div; + q2 = (100 * (num - (q1 * div))) / div; + bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2); + } +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_bus_t *bus = dhdp->bus; + + bcm_bprintf(strbuf, "Bus SDIO structure:\n"); + bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", + bus->hostintmask, bus->intstatus, bus->sdpcm_ver); + bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n", + bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip, + bus->rxlen, bus->rx_seq); + bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n", + bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n", + bus->pollrate, bus->pollcnt, bus->regfails); + + bcm_bprintf(strbuf, "\nAdditional counters:\n"); +#ifdef DHDENABLE_TAILPAD + bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n", + bus->tx_tailpad_chain, bus->tx_tailpad_pktget); +#endif /* DHDENABLE_TAILPAD */ + bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n", + bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong, + bus->rxc_errors); + bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n", + bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq); + bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n", + bus->fc_rcvd, bus->fc_xoff, bus->fc_xon); + bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n", + bus->rxglomfail, bus->rxglomframes, bus->rxglompkts); + bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n", + (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata, + bus->f2txdata, bus->f1regdata); + { + dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, 
bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts), + bus->dhd->rx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets, + (bus->f2txdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Total: pkts/f2rw", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount); + bcm_bprintf(strbuf, "\n\n"); + } + +#ifdef SDTEST + if (bus->pktgen_count) { + bcm_bprintf(strbuf, "pktgen config and count:\n"); + bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n", + bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print, + bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen); + bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n", + bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + } +#endif /* SDTEST */ +#ifdef DHD_DEBUG + bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n", + bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not ")); + bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup); +#endif /* DHD_DEBUG */ + bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n", + bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0; + bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0; + bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0; +#ifdef DHDENABLE_TAILPAD + bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0; +#endif /* DHDENABLE_TAILPAD */ + bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0; + bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0; + bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0; +} + +#ifdef SDTEST +static int +dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + + pktgen.version = DHD_PKTGEN_VERSION; + pktgen.freq = bus->pktgen_freq; + pktgen.count = bus->pktgen_count; + pktgen.print = bus->pktgen_print; + pktgen.total = bus->pktgen_total; + pktgen.minlen = bus->pktgen_minlen; + pktgen.maxlen = bus->pktgen_maxlen; + pktgen.numsent = bus->pktgen_sent; + pktgen.numrcvd = bus->pktgen_rcvd; + pktgen.numfail = bus->pktgen_fail; + pktgen.mode = bus->pktgen_mode; + pktgen.stop = bus->pktgen_stop; + + bcopy(&pktgen, arg, sizeof(pktgen)); + + return 0; +} + +static int +dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + uint oldcnt, oldmode; + + bcopy(arg, &pktgen, sizeof(pktgen)); + if (pktgen.version != DHD_PKTGEN_VERSION) + return BCME_BADARG; + + oldcnt = bus->pktgen_count; + oldmode = bus->pktgen_mode; + + bus->pktgen_freq = pktgen.freq; + bus->pktgen_count = pktgen.count; + 
bus->pktgen_print = pktgen.print; + bus->pktgen_total = pktgen.total; + bus->pktgen_minlen = pktgen.minlen; + bus->pktgen_maxlen = pktgen.maxlen; + bus->pktgen_mode = pktgen.mode; + bus->pktgen_stop = pktgen.stop; + + bus->pktgen_tick = bus->pktgen_ptick = 0; + bus->pktgen_prev_time = jiffies; + bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen); + bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen); + + /* Clear counts for a new pktgen (mode change, or was stopped) */ + if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) { + bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0; + bus->pktgen_prev_rcvd = bus->pktgen_fail = 0; + } + + return 0; +} +#endif /* SDTEST */ + +static void +dhdsdio_devram_remap(dhd_bus_t *bus, bool val) +{ + uint8 enable, protect, remap; + + si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); + remap = val ? TRUE : FALSE; + si_socdevram(bus->sih, TRUE, &enable, &protect, &remap); +} + +static int +dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size) +{ + int bcmerror = 0; + uint32 sdaddr; + uint dsize; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) { + address -= bus->orig_ramsize; + address += SOCDEVRAM_BP_ADDR; + } + + /* Determine initial transfer parameters */ + sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; + if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK) + dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr); + else + dsize = size; + + /* Set the backplane window to include the start address */ + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + goto xfer_done; + } + + /* Do the transfer(s) */ + while (size) { + DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n", + __FUNCTION__, (write ? 
"write" : "read"), dsize, sdaddr, + (address & SBSDIO_SBWINDOW_MASK))); + if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) { + DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__)); + break; + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + break; + } + sdaddr = 0; + dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size); + } + + } + +xfer_done: + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) { + DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__, + bcmsdh_cur_sbwad(bus->sdh))); + } + + return bcmerror; +} + +static int +dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) +{ + uint32 addr; + int rv, i; + uint32 shaddr = 0; + + if (bus->sih == NULL) { + if (bus->dhd && bus->dhd->dongle_reset) { + DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__)); + return BCME_NOTREADY; + } else { + ASSERT(bus->dhd); + ASSERT(bus->sih); + DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__)); + return BCME_ERROR; + } + } + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus)) + bus->srmemsize = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + i = 0; + do { + /* Read last word in memory to determine address of sdpcm_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0) + return rv; + + addr = ltoh32(addr); + + DHD_INFO(("sdpcm_shared address 0x%08X\n", addr)); + + /* + * Check if addr is valid. + * NVRAM length at the end of memory should have been overwritten. + */ + if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) { + if ((bus->srmemsize > 0) && (i++ == 0)) { + shaddr -= bus->srmemsize; + } else { + DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", + __FUNCTION__, addr)); + return BCME_ERROR; + } + } else + break; + } while (i < 2); + + /* Read hndrte_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0) + return rv; + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) + return BCME_OK; + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { + DHD_ERROR(("%s: sdpcm_shared version %d in dhd " + "is different than sdpcm_shared version %d in dongle\n", + __FUNCTION__, SDPCM_SHARED_VERSION, + sh->flags & SDPCM_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#define CONSOLE_LINE_MAX 192 + +#ifdef DHD_DEBUG +static int +dhdsdio_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + if (!KSO_ENAB(bus)) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time 
only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); +#ifdef LOG_INTO_TCPDUMP + dhd_sendup_log(bus->dhd, line, n); +#endif /* LOG_INTO_TCPDUMP */ + } + } +break2: + + return BCME_OK; +} +#endif /* DHD_DEBUG */ + +static int +dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + trap_t tr; + sdpcm_shared_t sdpcm_shared; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) + return 0; + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. + */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0) + goto done; + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr); + + if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (sdpcm_shared.assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (sdpcm_shared.assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line); + } + + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + bus->dhd->dongle_trap_occured = TRUE; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.trap_addr, + (uint8*)&tr, sizeof(trap_t))) < 0) + goto done; + + bcm_bprintf(&strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), + ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), + ltoh32(sdpcm_shared.trap_addr), + ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), + ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + + addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) + goto printbuf; + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) + goto printbuf; + + if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) + goto printbuf; + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro + * will truncate a lot of the printfs + */ + + if (dhd_msg_level & DHD_ERROR_VAL) + printf("CONSOLE: %s\n", line); + } + } + } + } + +printbuf: + if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + } + +#if defined(DHD_FW_COREDUMP) + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + /* Mem dump to a file on device */ + dhdsdio_mem_dump(bus); + } +#endif /* #if defined(DHD_FW_COREDUMP) */ + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + + return bcmerror; +} + +#if defined(DHD_FW_COREDUMP) +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = bus->dongle_ram_base; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + + /* Get full mem size */ + size = bus->ramsize; + buf = MALLOC(bus->dhd->osh, size); + if (!buf) { + printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size); + return -1; + } + + /* Read mem content */ + printf("Dump dongle memory"); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) + { + printf("%s: Error membytes %d\n", __FUNCTION__, ret); + if (buf) { + MFREE(bus->dhd->osh, buf, size); + } + return -1; + } + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + printf("Done\n"); + + dhd_save_fwdump(bus->dhd, buf, bus->ramsize); + /* free buf before return !!! */ + if (write_to_file(bus->dhd, buf, bus->ramsize)) + { + printf("%s: Error writing to files\n", __FUNCTION__); + return -1; + } + + /* buf free handled in write_to_file, not here */ + return 0; +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t * bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdsdio_mem_dump(bus)); +#else + return -1; +#endif +} + +int +dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? 
len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); +err: + return bcmerror; +} + +#ifdef DHD_DEBUG + +#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24) +#define CC_CHIPCTRL_JTAG_SEL (1 << 3) +#define CC_CHIPCTRL_GPIO_SEL (0x3) +#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334 (1 << 28) + +static int +dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) +{ + int int_val; + uint32 addr, data, uart_enab = 0; + uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL; + uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL; + + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + *bcmerror = 0; + + bcmsdh_reg_write(bus->sdh, addr, 4, 1); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + int_val = bcmsdh_reg_read(bus->sdh, data, 4); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + if (bus->sih->chip == BCM4330_CHIP_ID) { + uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB; + } + else if (bus->sih->chip == BCM4334_CHIP_ID || + bus->sih->chip == BCM43340_CHIP_ID || + bus->sih->chip == BCM43341_CHIP_ID || + bus->sih->chip == BCM43342_CHIP_ID || + 0) { + if (enable) { + /* Moved to PMU chipcontrol 1 from 4330 */ + int_val &= ~gpio_sel; + int_val |= jtag_sel; + } else { + int_val |= gpio_sel; + int_val &= ~jtag_sel; + } + uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334; + } + + if (!set) + return (int_val & uart_enab); + if (enable) + int_val |= uart_enab; + else + int_val &= ~uart_enab; + bcmsdh_reg_write(bus->sdh, data, 4, int_val); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + if (bus->sih->chip == BCM4330_CHIP_ID) { + uint32 chipcontrol; + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol); + chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4); + chipcontrol &= ~jtag_sel; + if (enable) { + chipcontrol |= jtag_sel; + chipcontrol &= ~gpio_sel; + } + bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol); + } + + return (int_val & uart_enab); +} +#endif + +static int +dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + + /* Some ioctls use the bus */ + dhd_os_sdlock(bus->dhd); + + /* Check if dongle is in reset. 
If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + /* + * Special handling for keepSdioOn: New SDIO Wake-up Mechanism + */ + if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) { + dhdsdio_clk_kso_iovar(bus, bool_val); + goto exit; + } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) { + { + dhdsdio_clk_devsleep_iovar(bus, bool_val); + if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) { + DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n", + bus->dpc_sched)); + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + } + goto exit; + } + + /* Handle sleep stuff before any clock mucking */ + if (vi->varid == IOV_SLEEP) { + if (IOV_ISSET(actionid)) { + bcmerror = dhdsdio_bussleep(bus, bool_val); + } else { + int_val = (int32)bus->sleeping; + bcopy(&int_val, arg, val_size); + } + goto exit; + } + + /* Request clock to allow SDIO accesses */ + if (!bus->dhd->dongle_reset) { + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + + switch (actionid) { + case IOV_GVAL(IOV_INTR): + int_val = (int32)bus->intr; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_INTR): + bus->intr = bool_val; + bus->intdis = FALSE; + if (bus->dhd->up) { + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + } + break; + + case IOV_GVAL(IOV_POLLRATE): + int_val = (int32)bus->pollrate; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POLLRATE): + bus->pollrate = (uint)int_val; + bus->poll = (bus->pollrate != 0); + break; + + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; + + case IOV_GVAL(IOV_IDLECLOCK): + int_val = (int32)bus->idleclock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLECLOCK): + bus->idleclock = int_val; + break; + + case IOV_GVAL(IOV_SD1IDLE): + int_val = (int32)sd1idle; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SD1IDLE): + sd1idle = bool_val; + break; + + + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, + (set ? "write" : "read"), size, address)); + + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* + * If address is start of RAM (i.e. 
a downloaded image), + * store the reset instruction to be written in 0 + */ + if (set && address == bus->dongle_ram_base) { + bus->resetinstr = *(((uint32*)params) + 2); + } + } else { + /* If we know about SOCRAM, check for a fit */ + if ((bus->orig_ramsize) && + ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) + { + uint8 enable, protect, remap; + si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); + if (!enable || protect) { + DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", + __FUNCTION__, bus->orig_ramsize, size, address)); + DHD_ERROR(("%s: socram enable %d, protect %d\n", + __FUNCTION__, enable, protect)); + bcmerror = BCME_BADARG; + break; + } + + if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) { + uint32 devramsize = si_socdevram_size(bus->sih); + if ((address < SOCDEVRAM_ARM_ADDR) || + (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) { + DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", + __FUNCTION__, address, size)); + DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", + __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize)); + bcmerror = BCME_BADARG; + break; + } + /* move it such that address is real now */ + address -= SOCDEVRAM_ARM_ADDR; + address += SOCDEVRAM_BP_ADDR; + DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", + __FUNCTION__, (set ? "write" : "read"), size, address)); + } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) { + /* Can not access remap region while devram remap bit is set + * ROM content would be returned in this case + */ + DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n", + __FUNCTION__, address)); + bcmerror = BCME_ERROR; + break; + } + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdsdio_membytes(bus, set, address, data, size); + + break; + } + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_SDIOD_DRIVE): + int_val = (int32)dhd_sdiod_drive_strength; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIOD_DRIVE): + dhd_sdiod_drive_strength = int_val; + si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength); + break; + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_SOCRAM_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdsdio_downloadvars(bus, arg, len); + break; + + case IOV_GVAL(IOV_READAHEAD): + int_val = (int32)dhd_readahead; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_READAHEAD): + if (bool_val && !dhd_readahead) + bus->nextlen = 0; + dhd_readahead = bool_val; + break; + + case IOV_GVAL(IOV_SDRXCHAIN): + int_val = (int32)bus->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDRXCHAIN): + if (bool_val && !bus->sd_rxchain) + bcmerror = BCME_UNSUPPORTED; + else + bus->use_rxchain = bool_val; + break; + case IOV_GVAL(IOV_ALIGNCTL): + int_val = (int32)dhd_alignctl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ALIGNCTL): + dhd_alignctl = bool_val; + break; + + case IOV_GVAL(IOV_SDALIGN): + int_val = DHD_SDALIGN; + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_VARS): + 
if (bus->varsz < (uint)len) + bcopy(bus->vars, arg, bus->varsz); + else + bcmerror = BCME_BUFTOOSHORT; + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uint32)((ulong)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uint32)((ulong)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + /* Same as above, but offset is not backplane (not SDIO core) */ + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + case IOV_GVAL(IOV_SDCIS): + { + *(char *)arg = 0; + + bcmstrcat(arg, "\nFunc 0\n"); + bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 1\n"); + bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 2\n"); + bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + break; + } + + case IOV_GVAL(IOV_FORCEEVEN): + int_val = (int32)forcealign; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCEEVEN): + forcealign = bool_val; + break; + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TXMINMAX): + int_val = (int32)dhd_txminmax; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXMINMAX): + dhd_txminmax = (uint)int_val; + break; + + case IOV_GVAL(IOV_SERIALCONS): + int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror); + if (bcmerror != 0) + break; + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SERIALCONS): + dhd_serialconsole(bus, TRUE, bool_val, &bcmerror); + break; + + +#endif /* DHD_DEBUG */ + + +#ifdef SDTEST + case IOV_GVAL(IOV_EXTLOOP): + int_val = (int32)bus->ext_loop; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_EXTLOOP): + bus->ext_loop = bool_val; + break; + + case IOV_GVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_get(bus, arg); + break; + + case IOV_SVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_set(bus, arg); + break; +#endif /* SDTEST */ + +#if defined(USE_SDIOFIFO_IOVAR) + case IOV_GVAL(IOV_WATERMARK): + int_val = (int32)watermark; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WATERMARK): + watermark = 
(uint)int_val; + watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark; + DHD_ERROR(("Setting watermark as 0x%x.\n", watermark)); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL); + break; + + case IOV_GVAL(IOV_MESBUSYCTRL): + int_val = (int32)mesbusyctrl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MESBUSYCTRL): + mesbusyctrl = (uint)int_val; + mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK) + ? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl; + DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl)); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, + ((uint8)mesbusyctrl | 0x80), NULL); + break; +#endif + + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case IOV_SVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n", + __FUNCTION__, bool_val, bus->dhd->dongle_reset, + bus->dhd->busstate)); + + ASSERT(bus->dhd->osh); + /* ASSERT(bus->cl_devid); */ + + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + + break; + /* + * softap firmware is updated through module parameter or android private command + */ + + case IOV_GVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__)); + + /* Get its status */ + int_val = (bool) bus->dhd->dongle_reset; + bcopy(&int_val, arg, val_size); + + break; + + case IOV_GVAL(IOV_KSO): + int_val = dhdsdio_sleepcsr_get(bus); + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DEVCAP): + int_val = dhdsdio_devcap_get(bus); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DEVCAP): + dhdsdio_devcap_set(bus, (uint8) int_val); + break; + case IOV_GVAL(IOV_TXGLOMSIZE): + int_val = (int32)bus->txglomsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXGLOMSIZE): + if (int_val > SDPCM_MAXGLOM_SIZE) { + bcmerror = BCME_ERROR; + } else { + bus->txglomsize = (uint)int_val; + } + break; + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_TXINRX_THRES): + int_val = bus->txinrx_thres; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TXINRX_THRES): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->txinrx_thres = int_val; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + return bcmerror; +} + +static int +dhdsdio_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? 
ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) { + if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) { + DHD_ERROR(("PR85623WAR in place\n")); + varsize += 4; + varaddr -= 4; + } + } + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + + /* Write the vars list */ + bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) + return BCME_NOMEM; + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + phys_size, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} + +static int +dhdsdio_download_state(dhd_bus_t *bus, bool enter) +{ + uint retries; + int bcmerror = 0; + int foundcr4 = 0; + + if (!bus->sih) + return BCME_ERROR; + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). 
+ */ + if (enter) { + bus->alp_only = TRUE; + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + foundcr4 = 1; + } else { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + if (!foundcr4) { + si_core_disable(bus->sih, 0); + if (bcmsdh_regfail(bus->sdh)) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", + __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Disable remap for download */ + if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih)) + dhdsdio_devram_remap(bus, FALSE); + + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID) { + /* Disabling Remap for SRAM_3 */ + si_socram_set_bankpda(bus->sih, 0x3, 0x0); + } + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + } + } else { + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* Enable remap before ARM reset but after vars. 
+ * No backplane access in remap mode + */ + if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih)) + dhdsdio_devram_remap(bus, TRUE); + + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + /* cr4 has no socram, but tcm's */ + /* write vars */ + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdsdio_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + /* verify write */ + bcmerror = dhdsdio_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Allow HT Clock now that the ARM is running. 
*/ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to SDIOD core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + return bcmerror; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) { + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Turn on clock in case SD command needs backplane */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set); + + /* Check for bus configuration changes of interest */ + + /* If it was divisor change, read the new one */ + if (set && strcmp(name, "sd_divisor") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_divisor = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_divisor)); + } + } + /* If it was a mode change, read the new one */ + if (set && strcmp(name, "sd_mode") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_mode = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_mode)); + } + } + /* Similar check for blocksize change */ + if (set && strcmp(name, "sd_blocksize") == 0) { + int32 fnum = 2; + if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +void +dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + osl_t *osh; + uint32 local_hostintmask; + uint8 saveclk; + uint retries; + int err; + bool wlfc_enabled = FALSE; + + if (!bus->dhd) + return; + + osh = bus->dhd->osh; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_waitlockfree(bus->sdh); + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) { + /* If the firmware has already hung, disable any interrupt */ + bus->dhd->busstate = DHD_BUS_DOWN; + bus->hostintmask = 0; + bcmsdh_intr_disable(bus->sdh); + } else { + + BUS_WAKE(bus); + + if (KSO_ENAB(bus)) { + + /* Enable clock for device interrupts */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Disable and clear interrupts at the chip level also */ + W_SDREG(0, &bus->regs->hostintmask, retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Change our idea of bus state */ + bus->dhd->busstate = DHD_BUS_DOWN; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", + __FUNCTION__, err)); + } + + /* Turn off the bus (F2), free any pending packets */ + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + /* Clear any pending interrupts now that F2 is disabled */ + W_SDREG(local_hostintmask, &bus->regs->intstatus, retries); + } + + /* Turn off the backplane clock (only) */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled) { +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt + * when a new packet comes in from the network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Clear the data packet queues */ + pktq_flush(osh, &bus->txq, TRUE, NULL, 0); + } + + /* Clear any held glomming stuff */ + if (bus->glomd) + PKTFREE(osh, bus->glomd, FALSE); + + if (bus->glom) + PKTFREE(osh, bus->glom, FALSE); + + bus->glom = bus->glomd = NULL; + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + dhd_os_ioctl_resp_wake(bus->dhd); + + /* Reset some F2 state stuff */ + bus->rxskip = FALSE; + bus->tx_seq = bus->rx_seq = 0; + + bus->tx_max = 4; + + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); +} + +#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD) +extern uint sd_txglom; +#endif +void +dhd_txglom_enable(dhd_pub_t *dhdp, bool enable) +{ + /* can't enable host txglom by default; some platforms have no + * (or crappy) ADMA support and txglom will cause kernel assertions (e.g. 
+ * panda board) + */ + dhd_bus_t *bus = dhdp->bus; +#ifdef BCMSDIOH_TXGLOM + char buf[256]; + uint32 rxglom; + int32 ret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMSDIOH_STD + if (enable) + enable = sd_txglom; +#endif /* BCMSDIOH_STD */ + + if (enable) { + rxglom = 1; + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret >= 0) + bus->txglom_enable = TRUE; + else { +#ifdef BCMSDIOH_STD + sd_txglom = 0; +#endif /* BCMSDIOH_STD */ + bus->txglom_enable = FALSE; + } + } else +#endif /* BCMSDIOH_TXGLOM */ + bus->txglom_enable = FALSE; +} + +int +dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + dhd_timeout_t tmo; + uint retries = 0; + uint8 ready, enable; + int err, ret = 0; + uint8 saveclk; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + /* Make sure backplane clock is on, needed to generate F2 interrupt */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (bus->clkstate != CLK_AVAIL) { + DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate)); + ret = -1; + goto exit; + } + + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + ret = -1; + goto exit; + } + + /* Enable function 2 (frame transfers) */ + W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT), + &bus->regs->tosbmailboxdata, retries); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000); + + ready = 0; + while (ready != enable && !dhd_timeout_expired(&tmo)) + ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL); + + DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n", + __FUNCTION__, enable, ready, tmo.elapsed)); + + + /* If F2 successfully enabled, set core and enable interrupts */ + if (ready == enable) { + /* Make sure we're talking to the core. */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0))) + bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0); + ASSERT(bus->regs != NULL); + + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + /* corerev 4 could use the newer interrupt logic to detect the frames */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) && + (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) { + bus->hostintmask &= ~I_HMB_FRAME_IND; + bus->hostintmask |= I_XMTDATA_AVAIL; + } + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + + if (bus->sih->buscorerev < 15) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, + (uint8)watermark, &err); + } + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + /* Need to set fn2 block size to match fn1 block size. + * Requests to fn2 go thru fn1. * + * faltwig has this code contitioned with #if !BCMSPI_ANDROID. + * It would be cleaner to use the ->sdh->block_sz[fno] instead of + * 64, but this layer has no access to sdh types. 
+ */ + + /* bcmsdh_intr_unmask(bus->sdh); */ + + bus->intdis = FALSE; + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + + } + + + else { + /* Disable F2 again */ + enable = SDIO_FUNC_ENABLE_1; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + } + + if (dhdsdio_sr_cap(bus)) { + dhdsdio_sr_init(bus); + /* Masking the chip active interrupt permanently */ + bus->hostintmask &= ~I_CHIPACTIVE; + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n", + __FUNCTION__, bus->hostintmask)); + } + else + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + + /* If we didn't come up, turn off backplane clock */ + if (dhdp->busstate != DHD_BUS_DATA) + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + +exit: + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); + + return ret; +} + +static void +dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + uint16 lastrbc; + uint8 hi, lo; + int err; + + DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__, + (abort ? "abort command, " : ""), (rtx ? ", send NAK" : ""))); + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return; + } + + if (abort) { + bcmsdh_abort(sdh, SDIO_FUNC_2); + } + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__)); + goto fail; + } + bus->f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_RFRAMEBCLO cmd err\n", __FUNCTION__)); + goto fail; + } + + bus->f1regdata += 2; + + if ((hi == 0) && (lo == 0)) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n", + __FUNCTION__, lastrbc, ((hi << 8) + lo))); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) { + DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc)); + } else { + DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries))); + } + + if (rtx) { + bus->rxrtx++; + W_SDREG(SMB_NAK, &regs->tosbmailbox, retries); + bus->f1regdata++; + if (retries <= retry_limit) { + bus->rxskip = TRUE; + } + } + + /* Clear partial in any case */ + bus->nextlen = 0; + +fail: + /* If we can't reach the device, signal failure */ + if (err || bcmsdh_regfail(sdh)) + bus->dhd->busstate = DHD_BUS_DOWN; +} + +static void +dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff) +{ + bcmsdh_info_t *sdh = bus->sdh; + uint rdlen, pad; + + int sdret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Control data already received in aligned rxctl */ + if ((bus->bus == SPI_BUS) && (!bus->usebufpool)) + goto gotpkt; + + ASSERT(bus->rxbuf); + /* Set rxctl for frame (w/optional alignment) */ + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl 
>= bus->rxbuf); + + /* Copy the already-read portion over */ + bcopy(hdr, bus->rxctl, firstread); + if (len <= firstread) + goto gotpkt; + + /* Copy the full data pkt in gSPI case and process ioctl. */ + if (bus->bus == SPI_BUS) { + bcopy(hdr, bus->rxctl, len); + goto gotpkt; + } + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - firstread; + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((len + pad) < bus->dhd->maxctl)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + firstread) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n", + __FUNCTION__, rdlen, bus->dhd->maxctl)); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + if ((len - doff) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + __FUNCTION__, len, (len - doff), bus->dhd->maxctl)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + + /* Read remainder of frame body into the rxctl buffer */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (bus->rxctl + firstread), rdlen, NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret)); + bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */ + dhdsdio_rxfail(bus, TRUE, TRUE); + goto done; + } + +gotpkt: + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("RxCtrl", bus->rxctl, len); + } +#endif + + /* Point to valid data and indicate its length */ + bus->rxctl += doff; + bus->rxlen = len - doff; + +done: + /* Awake any waiters */ + dhd_os_ioctl_resp_wake(bus->dhd); +} +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count); + +static uint8 +dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) +{ + uint16 dlen, totlen; + uint8 *dptr, num = 0; + + uint16 sublen, check; + void *pfirst, *plast, *pnext; + void * list_tail[DHD_MAX_IFS] = { NULL }; + void * list_head[DHD_MAX_IFS] = { NULL }; + uint8 idx; + osl_t *osh = bus->dhd->osh; + + int errcode; + uint8 chan, seq, doff, sfdoff; + uint8 txmax; + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + + int ifidx = 0; + bool usechain = bus->use_rxchain; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? 
*/ + + DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + dhd_os_sdlock_rxq(bus->dhd); + + pfirst = plast = pnext = NULL; + dlen = (uint16)PKTLEN(osh, bus->glomd); + dptr = PKTDATA(osh, bus->glomd); + if (!dlen || (dlen & 1)) { + DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n", + __FUNCTION__, dlen)); + dlen = 0; + } + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = ltoh16_ua(dptr); + dlen -= sizeof(uint16); + dptr += sizeof(uint16); + if ((sublen < SDPCM_HDRLEN) || + ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { + DHD_ERROR(("%s: descriptor len %d bad: %d\n", + __FUNCTION__, num, sublen)); + pnext = NULL; + break; + } + if (sublen % DHD_SDALIGN) { + DHD_ERROR(("%s: sublen %d not a multiple of %d\n", + __FUNCTION__, sublen, DHD_SDALIGN)); + usechain = FALSE; + } + totlen += sublen; + + /* For last frame, adjust read len so total is a block multiple */ + if (!dlen) { + sublen += (ROUNDUP(totlen, bus->blocksize) - totlen); + totlen = ROUNDUP(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) { + DHD_ERROR(("%s: PKTGET failed, num %d len %d\n", + __FUNCTION__, num, sublen)); + break; + } + ASSERT(!PKTLINK(pnext)); + if (!pfirst) { + ASSERT(!plast); + pfirst = plast = pnext; + } else { + ASSERT(plast); + PKTSETNEXT(osh, plast, pnext); + plast = pnext; + } + + /* Adhere to start alignment requirements */ + PKTALIGN(osh, pnext, sublen, DHD_SDALIGN); + } + + /* If all allocations succeeded, save packet chain in bus structure */ + if (pnext) { + DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n", + __FUNCTION__, totlen, num)); + if (DHD_GLOM_ON() && bus->nextlen) { + if (totlen != bus->nextlen) { + DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " + "rxseq %d\n", __FUNCTION__, bus->nextlen, + totlen, rxseq)); + } + } + bus->glom = pfirst; + pfirst = pnext = NULL; + } else { + if (pfirst) + PKTFREE(osh, pfirst, FALSE); + bus->glom = NULL; + num = 0; + } + + /* Done with descriptor packet */ + PKTFREE(osh, bus->glomd, FALSE); + bus->glomd = NULL; + bus->nextlen = 0; + + dhd_os_sdunlock_rxq(bus->dhd); + } + + /* Ok -- either we just generated a packet chain, or had one from before */ + if (bus->glom) { + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__)); + for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) { + DHD_GLOM((" %p: %p len 0x%04x (%d)\n", + pnext, (uint8*)PKTDATA(osh, pnext), + PKTLEN(osh, pnext), PKTLEN(osh, pnext))); + } + } + + pfirst = bus->glom; + dlen = (uint16)pkttotlen(osh, pfirst); + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and and copy into the chain. 
+ */ + if (usechain) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, (uint8*)PKTDATA(osh, pfirst), + dlen, pfirst, NULL, NULL); + } else if (bus->dataptr) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, bus->dataptr, + dlen, NULL, NULL, NULL); + sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr); + if (sublen != dlen) { + DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n", + __FUNCTION__, dlen, sublen)); + errcode = -1; + } + pnext = NULL; + } else { + DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); + errcode = -1; + } + bus->f2rxdata++; + ASSERT(errcode != BCME_PENDING); + + /* On failure, kill the superframe, allow a couple retries */ + if (errcode < 0) { + DHD_ERROR(("%s: glom read of %d bytes failed: %d\n", + __FUNCTION__, dlen, errcode)); + bus->dhd->rx_errors++; + + if (bus->glomerr++ < 3) { + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + return 0; + } + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("SUPERFRAME", PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 48)); + } +#endif + + + /* Validate the superframe header */ + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + errcode = 0; + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, sublen, check)); + errcode = -1; + } else if (ROUNDUP(sublen, bus->blocksize) != dlen) { + DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n", + __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen)); + errcode = -1; + } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) { + DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__, + SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]))); + errcode = -1; + } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) { + DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || + (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) { + DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n", + __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), + SDPCM_HDRLEN)); + errcode = -1; + } + + /* Check sequence number of superframe SW header */ + if (rxseq != seq) { + DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Remove superframe header, remember offset */ + PKTPULL(osh, pfirst, doff); + sfdoff = doff; + + /* Validate all the subframe headers */ + for (num = 0, pnext = 
pfirst; pnext && !errcode; + num++, pnext = PKTNEXT(osh, pnext)) { + dptr = (uint8 *)PKTDATA(osh, pnext); + dlen = (uint16)PKTLEN(osh, pnext); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("subframe", dptr, 32); + } +#endif + + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (subframe %d): HW hdr error: " + "len/check 0x%04x/0x%04x\n", + __FUNCTION__, num, sublen, check)); + errcode = -1; + } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) { + DHD_ERROR(("%s (subframe %d): length mismatch: " + "len 0x%04x, expect 0x%04x\n", + __FUNCTION__, num, sublen, dlen)); + errcode = -1; + } else if ((chan != SDPCM_DATA_CHANNEL) && + (chan != SDPCM_EVENT_CHANNEL)) { + DHD_ERROR(("%s (subframe %d): bad channel %d\n", + __FUNCTION__, num, chan)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) { + DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n", + __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN)); + errcode = -1; + } + } + + if (errcode) { + /* Terminate frame on error, request a couple retries */ + if (bus->glomerr++ < 3) { + /* Restore superframe header space */ + PKTPUSH(osh, pfirst, sfdoff); + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + bus->nextlen = 0; + return 0; + } + + /* Basic SD framing looks ok - process each packet (header) */ + bus->glom = NULL; + plast = NULL; + + dhd_os_sdlock_rxq(bus->dhd); + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = PKTNEXT(osh, pfirst); + PKTSETNEXT(osh, pfirst, NULL); + + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n", + __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst), + PKTLEN(osh, pfirst), sublen, chan, seq)); + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL)); + + if (rxseq != seq) { + DHD_GLOM(("%s: rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Subframe Data", dptr, dlen); + } +#endif + + PKTSETLEN(osh, pfirst, sublen); + PKTPULL(osh, pfirst, doff); + + reorder_info_len = sizeof(reorder_info_buf); + + if (PKTLEN(osh, pfirst) == 0) { + PKTFREE(bus->dhd->osh, pfirst, FALSE); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + bus->dhd->rx_errors++; + PKTFREE(osh, pfirst, FALSE); + continue; + } + if (reorder_info_len) { + uint32 free_buf_count; + void *ppfirst; + + ppfirst = pfirst; + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, + reorder_info_len, &ppfirst, &free_buf_count); + + if (free_buf_count == 0) { + continue; + } + else { + void *temp; + + /* go to the end of the chain and attach the pnext there */ + temp = ppfirst; + while (PKTNEXT(osh, temp) != NULL) { + temp = PKTNEXT(osh, temp); + } + pfirst = temp; + if (list_tail[ifidx] == NULL) + 
list_head[ifidx] = ppfirst; + else + PKTSETNEXT(osh, list_tail[ifidx], ppfirst); + list_tail[ifidx] = pfirst; + } + + num += (uint8)free_buf_count; + } + else { + /* this packet will go up, link back into chain and count it */ + + if (list_tail[ifidx] == NULL) { + list_head[ifidx] = list_tail[ifidx] = pfirst; + } + else { + PKTSETNEXT(osh, list_tail[ifidx], pfirst); + list_tail[ifidx] = pfirst; + } + num++; + } +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n", + __FUNCTION__, num, pfirst, + PKTDATA(osh, pfirst), PKTLEN(osh, pfirst), + PKTNEXT(osh, pfirst), PKTLINK(pfirst))); + prhex("", (uint8 *)PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 32)); + } +#endif /* DHD_DEBUG */ + } + dhd_os_sdunlock_rxq(bus->dhd); + + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (list_head[idx]) { + void *temp; + uint8 cnt = 0; + temp = list_head[idx]; + do { + temp = PKTNEXT(osh, temp); + cnt++; + } while (temp); + if (cnt) { + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0); + dhd_os_sdlock(bus->dhd); + } + } + } + bus->rxglomframes++; + bus->rxglompkts += num; + } + return num; +} + + +/* Return TRUE if there may be more frames to read */ +static uint +dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) +{ + osl_t *osh = bus->dhd->osh; + bcmsdh_info_t *sdh = bus->sdh; + + uint16 len, check; /* Extracted hardware header fields */ + uint8 chan, seq, doff; /* Extracted software header fields */ + uint8 fcbits; /* Extracted fcbits from software header */ + uint8 delta; + + void *pkt; /* Packet for event or data frames */ + uint16 pad; /* Number of pad bytes to read */ + uint16 rdlen; /* Total number of bytes to read */ + uint8 rxseq; /* Next sequence number to expect */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int sdret; /* Return code from bcmsdh calls */ + uint8 txmax; /* Maximum tx sequence offered */ + bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */ + uint8 *rxbuf; + int ifidx = 0; + uint rxcount = 0; /* Total frames read */ + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + uint pkt_count; + +#if defined(DHD_DEBUG) || defined(SDTEST) + bool sdtest = FALSE; /* To limit message spew from test mode */ +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->readframes = TRUE; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: KSO off\n", __FUNCTION__)); + bus->readframes = FALSE; + return 0; + } + + ASSERT(maxframes); + +#ifdef SDTEST + /* Allow pktgen to override maxframes */ + if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) { + maxframes = bus->pktgen_count; + sdtest = TRUE; + } +#endif + + /* Not finished unless we encounter no more frames indication */ + *finished = FALSE; + + + for (rxseq = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN; + rxseq++, rxleft--) { +#ifdef DHDTCPACK_SUP_DBG + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) { + if (bus->dotxinrx == FALSE) + DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n", + __FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode)); + } +#ifdef DEBUG_COUNTER + else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) { + tack_tbl.cnt[bus->dotxinrx ? 
6 : 7]++; + } +#endif /* DEBUG_COUNTER */ +#endif /* DHDTCPACK_SUP_DBG */ + /* tx more to improve rx performance */ + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { + dhdsdio_sendpendctl(bus); + } else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) && + !bus->fcstate && DATAOK(bus) && + (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) { + dhdsdio_sendfromq(bus, dhd_txbound); +#ifdef DHDTCPACK_SUPPRESS + /* In TCPACK_SUP_DELAYTX mode, do txinrx only if + * 1. Any DATA packet to TX + * 2. TCPACK to TCPDATA PSH packets. + * in bus txq. + */ + bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ? + FALSE : TRUE; +#endif + } + + /* Handle glomming separately */ + if (bus->glom || bus->glomd) { + uint8 cnt; + DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n", + __FUNCTION__, bus->glomd, bus->glom)); + cnt = dhdsdio_rxglom(bus, rxseq); + DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt)); + rxseq += cnt - 1; + rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; + continue; + } + + /* Try doing single read if we can */ + if (dhd_readahead && bus->nextlen) { + uint16 nextlen = bus->nextlen; + bus->nextlen = 0; + + if (bus->bus == SPI_BUS) { + rdlen = len = nextlen; + } + else { + rdlen = len = nextlen << 4; + + /* Pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + } + + /* We use bus->rxctl buffer in WinXP for initial control pkt receives. + * Later we use buffer-poll for data as well as control packets. + * This is required because dhd receives full frame in gSPI unlike SDIO. + * After the frame is received we have to distinguish whether it is data + * or non-data frame. + */ + /* Allocate a packet buffer */ + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) { + if (bus->bus == SPI_BUS) { + bus->usebufpool = FALSE; + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + rxbuf = bus->rxctl; + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } else { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " + "expected rxseq %d\n", + __FUNCTION__, len, rdlen, rxseq)); + /* Just go try again w/normal header read */ + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } else { + if (bus->bus == SPI_BUS) + bus->usebufpool = TRUE; + + ASSERT(!PKTLINK(pkt)); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + rxbuf = (uint8 *)PKTDATA(osh, pkt); + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + /* Force retry w/normal header read. Don't attempt NAK for + * gSPI + */ + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? FALSE : TRUE); + continue; + } + } + dhd_os_sdunlock_rxq(bus->dhd); + + /* Now check the header */ + bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN); + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means readahead info was bad */ + if (!(len|check)) { + DHD_INFO(("%s (nextlen): read zeros in HW header???\n", + __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check" + " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen, + len, check)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n", + __FUNCTION__, len)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Check for consistency with readahead info */ + len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4)); + if (len_consistent) { + /* Mismatch, force retry w/normal header (may be >4K) */ + DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; " + "expected rxseq %d\n", + __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + GSPI_PR55150_BAILOUT; + continue; + } + + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + bus->nextlen = + bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large" + " (%d), seq %d\n", __FUNCTION__, bus->nextlen, + seq)); + bus->nextlen = 0; + } + + bus->dhd->rx_readahead_cnt ++; + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", rxbuf, len); + } else if (DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + if (chan == SDPCM_CONTROL_CHANNEL) { + if (bus->bus == SPI_BUS) { + dhdsdio_read_control(bus, rxbuf, len, doff); + if (bus->usebufpool) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + } + continue; + } else { + DHD_ERROR(("%s (nextlen): readahead on control" + " packet %d?\n", __FUNCTION__, seq)); + /* Force retry w/normal header read */ + bus->nextlen = 0; + dhdsdio_rxfail(bus, FALSE, TRUE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } + + if ((bus->bus == SPI_BUS) && !bus->usebufpool) { + DHD_ERROR(("Received %d bytes on %d channel. 
Running out of " + "rx pktbuf's or not yet malloced.\n", len, chan)); + continue; + } + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* All done with this one -- now deliver the packet */ + goto deliver; + } + /* gSPI frames should not be handled in fractions */ + if (bus->bus == SPI_BUS) { + break; + } + + /* Read frame header (hardware and software) */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + bus->rxhdr, firstread, NULL, NULL, NULL); + bus->f2rxhdrs++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret)); + bus->rx_hdrfail++; + dhdsdio_rxfail(bus, TRUE, TRUE); + continue; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() || DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means no more frames */ + if (!(len|check)) { + *finished = TRUE; + break; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, len, check)); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len)); + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN, seq)); + bus->rx_badhdr++; + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Save the readahead length if there is one */ + bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Call a separate function for control frames */ + if (chan == SDPCM_CONTROL_CHANNEL) { + dhdsdio_read_control(bus, bus->rxhdr, len, doff); + continue; + } + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) || + (chan == SDPCM_TEST_CHANNEL) || 
(chan == SDPCM_GLOM_CHANNEL)); + + /* Length to read */ + rdlen = (len > firstread) ? (len - firstread) : 0; + + /* May pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + if ((rdlen + firstread) > MAX_RX_DATASZ) { + /* Too long -- skip this frame */ + DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n", + __FUNCTION__, rdlen, chan)); + bus->dhd->rx_dropped++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan)); + continue; + } + dhd_os_sdunlock_rxq(bus->dhd); + + ASSERT(!PKTLINK(pkt)); + + /* Leave room for what we already read, and align remainder */ + ASSERT(firstread < (PKTLEN(osh, pkt))); + PKTPULL(osh, pkt, firstread); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + + /* Read the remaining frame data */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen, + ((chan == SDPCM_EVENT_CHANNEL) ? "event" : + ((chan == SDPCM_DATA_CHANNEL) ? 
"data" : "test")), sdret)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan)); + continue; + } + + /* Copy the already-read portion */ + PKTPUSH(osh, pkt, firstread); + bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", PKTDATA(osh, pkt), len); + } +#endif + +deliver: + /* Save superframe descriptor and allocate packet frame */ + if (chan == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + DHD_GLOM(("%s: got glom descriptor, %d bytes:\n", + __FUNCTION__, len)); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("Glom Data", PKTDATA(osh, pkt), len); + } +#endif + PKTSETLEN(osh, pkt, len); + ASSERT(doff == SDPCM_HDRLEN); + PKTPULL(osh, pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__)); + dhdsdio_rxfail(bus, FALSE, FALSE); + } + continue; + } + + /* Fill in packet len and prio, deliver upward */ + PKTSETLEN(osh, pkt, len); + PKTPULL(osh, pkt, doff); + +#ifdef SDTEST + /* Test channel packets are processed separately */ + if (chan == SDPCM_TEST_CHANNEL) { + dhdsdio_testrcv(bus, pkt, seq); + continue; + } +#endif /* SDTEST */ + + if (PKTLEN(osh, pkt) == 0) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + continue; + } + if (reorder_info_len) { + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len, + &pkt, &pkt_count); + if (pkt_count == 0) + continue; + } + else + pkt_count = 1; + + /* Unlock during rx call */ + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan); + dhd_os_sdlock(bus->dhd); + } + rxcount = maxframes - rxleft; +#ifdef DHD_DEBUG + /* Message if we hit the limit */ + if (!rxleft && !sdtest) + DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes)); + else +#endif /* DHD_DEBUG */ + DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount)); + /* Back off rxseq if awaiting rtx, update rx_seq */ + if (bus->rxskip) + rxseq--; + bus->rx_seq = rxseq; + + if (bus->reqbussleep) + { + dhdsdio_bussleep(bus, TRUE); + bus->reqbussleep = FALSE; + } + bus->readframes = FALSE; + + return rxcount; +} + +static uint32 +dhdsdio_hostmail(dhd_bus_t *bus) +{ + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus = 0; + uint32 hmb_data; + uint8 fcbits; + uint retries = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Read mailbox data and ack that we did so */ + R_SDREG(hmb_data, ®s->tohostmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_INT_ACK, ®s->tosbmailbox, retries); + bus->f1regdata += 2; + + /* Dongle recomposed rx frames, accept them again */ + if (hmb_data & HMB_DATA_NAKHANDLED) { + DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq)); + if (!bus->rxskip) { + DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__)); + } + bus->rxskip = FALSE; + intstatus |= FRAME_AVAIL_MASK(bus); + } + + /* + * DEVREADY does not occur with gSPI. 
+ */ + if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) { + bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT; + if (bus->sdpcm_ver != SDPCM_PROT_VERSION) + DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n", + bus->sdpcm_ver, SDPCM_PROT_VERSION)); + else + DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver)); + /* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) { + uint32 val; + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(bus->dhd->osh, &bus->regs->corecontrol, val); + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + } + +#ifdef DHD_DEBUG + /* Retrieve console state address now that firmware should have updated it */ + { + sdpcm_shared_t shared; + if (dhdsdio_readshared(bus, &shared) == 0) + bus->console_addr = shared.console_addr; + } +#endif /* DHD_DEBUG */ + } + + /* + * Flow Control has been moved into the RX headers and this out of band + * method isn't used any more. Leave this here for possibly remaining backward + * compatible with older dongles + */ + if (hmb_data & HMB_DATA_FC) { + fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT; + + if (fcbits & ~bus->flowcontrol) + bus->fc_xoff++; + if (bus->flowcontrol & ~fcbits) + bus->fc_xon++; + + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* At least print a message if FW halted */ + if (hmb_data & HMB_DATA_FWHALT) { + DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n")); + dhdsdio_checkdied(bus, NULL, 0); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + /* Shouldn't be any others */ + if (hmb_data & ~(HMB_DATA_DEVREADY | + HMB_DATA_FWHALT | + HMB_DATA_NAKHANDLED | + HMB_DATA_FC | + HMB_DATA_FWREADY | + HMB_DATA_FCDATA_MASK | + HMB_DATA_VERSION_MASK)) { + DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data)); + } + + return intstatus; +} + +static bool +dhdsdio_dpc(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus, newstatus = 0; + uint retries = 0; + uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */ + uint txlimit = dhd_txbound; /* Tx frames to send before resched */ + uint framecnt = 0; /* Temporary counter of tx/rx frames */ + bool rxdone = TRUE; /* Flag for no more read data */ + bool resched = FALSE; /* Flag indicating resched wanted */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool is_resched_by_readframe = FALSE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_sdlock(bus->dhd); + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + dhd_os_sdunlock(bus->dhd); + return 0; + } + + /* Start with leftover status bits */ + intstatus = bus->intstatus; + + if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + goto exit; + } + + /* If waiting for HTAVAIL, check status */ + if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) { + int err; + uint8 clkctl, devctl = 0; + +#ifdef DHD_DEBUG + /* Check for inconsistent device control */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } else { + ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY); + 
} +#endif /* DHD_DEBUG */ + + /* Read CSR, if clock on switch to AVAIL, else ignore */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl)); + + if (SBSDIO_HTAV(clkctl)) { + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + if (err) { + DHD_ERROR(("%s: error writing DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + bus->clkstate = CLK_AVAIL; + } else { + goto clkwait; + } + } + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + if (bus->clkstate != CLK_AVAIL) + goto clkwait; + + /* Pending interrupt indicates new device status */ + if (bus->ipend) { + bus->ipend = FALSE; + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata++; + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + newstatus &= bus->hostintmask; + bus->fcstate = !!(newstatus & I_HMB_FC_STATE); + if (newstatus) { + bus->f1regdata++; + if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) && + (newstatus == I_XMTDATA_AVAIL)) { + } + else + W_SDREG(newstatus, ®s->intstatus, retries); + } + } + + /* Merge new bits with previous */ + intstatus |= newstatus; + bus->intstatus = 0; + + /* Handle flow-control change: read new state in case our ack + * crossed another change interrupt. If change still set, assume + * FC ON for safety, let next loop through do the debounce. + */ + if (intstatus & I_HMB_FC_CHANGE) { + intstatus &= ~I_HMB_FC_CHANGE; + W_SDREG(I_HMB_FC_CHANGE, ®s->intstatus, retries); + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata += 2; + bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); + intstatus |= (newstatus & bus->hostintmask); + } + + /* Just being here means nothing more to do for chipactive */ + if (intstatus & I_CHIPACTIVE) { + /* ASSERT(bus->clkstate == CLK_AVAIL); */ + intstatus &= ~I_CHIPACTIVE; + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus); + } + + /* Generally don't ask for these, can get CRC errors... 
*/ + if (intstatus & I_WR_OOSYNC) { + DHD_ERROR(("Dongle reports WR_OOSYNC\n")); + intstatus &= ~I_WR_OOSYNC; + } + + if (intstatus & I_RD_OOSYNC) { + DHD_ERROR(("Dongle reports RD_OOSYNC\n")); + intstatus &= ~I_RD_OOSYNC; + } + + if (intstatus & I_SBINT) { + DHD_ERROR(("Dongle reports SBINT\n")); + intstatus &= ~I_SBINT; + } + + /* Would be active due to wake-wlan in gSPI */ + if (intstatus & I_CHIPACTIVE) { + DHD_INFO(("Dongle reports CHIPACTIVE\n")); + intstatus &= ~I_CHIPACTIVE; + } + + if (intstatus & I_HMB_FC_STATE) { + DHD_INFO(("Dongle reports HMB_FC_STATE\n")); + intstatus &= ~I_HMB_FC_STATE; + } + + /* Ignore frame indications if rxskip is set */ + if (bus->rxskip) { + intstatus &= ~FRAME_AVAIL_MASK(bus); + } + + /* On frame indication, read available frames */ + if (PKT_AVAILABLE(bus, intstatus)) { + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); + if (rxdone || bus->rxskip) + intstatus &= ~FRAME_AVAIL_MASK(bus); + rxlimit -= MIN(framecnt, rxlimit); + } + + /* Keep still-pending events for next scheduling */ + bus->intstatus = intstatus; + +clkwait: + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) + */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + +#if defined(OOB_INTR_ONLY) && !defined(HW_OOB) + /* In case of SW-OOB(using edge trigger), + * Check interrupt status in the dongle again after enable irq on the host. + * and rechedule dpc if interrupt is pended in the dongle. + * There is a chance to miss OOB interrupt while irq is disabled on the host. + * No need to do this with HW-OOB(level trigger) + */ + R_SDREG(newstatus, ®s->intstatus, retries); + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + if (newstatus & bus->hostintmask) { + bus->ipend = TRUE; + resched = TRUE; + } +#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */ + +#ifdef PROP_TXSTATUS + dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE); +#endif + + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) + dhdsdio_sendpendctl(bus); + + /* Send queued frames (limit 1 if rx may still be pending) */ + else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { + framecnt = rxdone ? 
txlimit : MIN(txlimit, dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; + } + /* Resched the DPC if ctrl cmd is pending on bus credit */ + if (bus->ctrl_frame_stat) + resched = TRUE; + + /* Resched if events or tx frames are pending, else await next interrupt */ + /* On failed register access, all bets are off: no resched or interrupts */ + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) { + if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) & + SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + /* Bus failed because of KSO */ + DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__)); + bus->kso = FALSE; + } else { + DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n", + __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->intstatus = 0; + } + } else if (bus->clkstate == CLK_PENDING) { + /* Awaiting I_CHIPACTIVE; don't resched */ + } else if (bus->intstatus || bus->ipend || + (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) || + PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */ + resched = TRUE; + } + + bus->dpc_sched = resched; + + /* If we're done for now, turn off clock request. */ + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + +exit: + + if (!resched && dhd_dpcpoll) { + if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) { + resched = TRUE; +#ifdef DEBUG_DPC_THREAD_WATCHDOG + is_resched_by_readframe = TRUE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + } + + dhd_os_sdunlock(bus->dhd); +#ifdef DEBUG_DPC_THREAD_WATCHDOG + if (bus->dhd->dhd_bug_on) { + DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x" + " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n", + __FUNCTION__, resched, bus->ctrl_frame_stat, + bus->intstatus, bus->ipend, + pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe)); + + bus->dhd->dhd_bug_on = FALSE; + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + return resched; +} + +bool +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched; + + /* Call the DPC directly. */ + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + resched = dhdsdio_dpc(bus); + + return resched; +} + +void +dhdsdio_isr(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus) { + DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); + return; + } + sdh = bus->sdh; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Count the interrupt call */ + bus->intrcount++; + bus->ipend = TRUE; + + /* Shouldn't get this interrupt if we're sleeping? */ + if (!SLPAUTO_ENAB(bus)) { + if (bus->sleeping) { + DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n")); + return; + } else if (!KSO_ENAB(bus)) { + DHD_ERROR(("ISR in devsleep 1\n")); + } + } + + /* Disable additional interrupts (is this needed now)? 
*/ + if (bus->intr) { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + } else { + DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n")); + } + + bcmsdh_intr_disable(sdh); + bus->intdis = TRUE; + +#if defined(SDIO_ISR_THREAD) + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + dhdsdio_dpc(bus); + DHD_OS_WAKE_UNLOCK(bus->dhd); +#else + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + +#endif /* defined(SDIO_ISR_THREAD) */ + +} + +#ifdef SDTEST +static void +dhdsdio_pktgen_init(dhd_bus_t *bus) +{ + /* Default to specified length, or full range */ + if (dhd_pktgen_len) { + bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN); + bus->pktgen_minlen = bus->pktgen_maxlen; + } else { + bus->pktgen_maxlen = MAX_PKTGEN_LEN; + bus->pktgen_minlen = 0; + } + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Default to per-watchdog burst with 10s print time */ + bus->pktgen_freq = 1; + bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0; + bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000; + + /* Default to echo mode */ + bus->pktgen_mode = DHD_PKTGEN_ECHO; + bus->pktgen_stop = 1; +} + +static void +dhdsdio_pktgen(dhd_bus_t *bus) +{ + void *pkt; + uint8 *data; + uint pktcount; + uint fillbyte; + osl_t *osh = bus->dhd->osh; + uint16 len; + ulong time_lapse; + uint sent_pkts; + uint rcvd_pkts; + + /* Display current count if appropriate */ + if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) { + bus->pktgen_ptick = 0; + printf("%s: send attempts %d, rcvd %d, errors %d\n", + __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + + /* Print throughput stats only for constant length packet runs */ + if (bus->pktgen_minlen == bus->pktgen_maxlen) { + time_lapse = jiffies - bus->pktgen_prev_time; + bus->pktgen_prev_time = jiffies; + sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent; + bus->pktgen_prev_sent = bus->pktgen_sent; + rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd; + bus->pktgen_prev_rcvd = bus->pktgen_rcvd; + + printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n", + __FUNCTION__, + (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8, + (rcvd_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8); + } + } + + /* For recv mode, just make sure dongle has started sending */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) { + bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING; + dhdsdio_sdtest_set(bus, bus->pktgen_total); + } + return; + } + + /* Otherwise, generate or request the specified number of packets */ + for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) { + /* Stop if total has been reached */ + if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) { + bus->pktgen_count = 0; + break; + } + + /* Allocate an appropriate-sized packet */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) { + len = SDPCM_TEST_PKT_CNT_FLD_LEN; + } else { + len = bus->pktgen_len; + } + if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN), + TRUE))) {; + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + break; + } + PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Write test header cmd and extra based on mode */ + switch (bus->pktgen_mode) { + case DHD_PKTGEN_ECHO: + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_SEND: + *data++ = 
SDPCM_TEST_DISCARD; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_RXBURST: + *data++ = SDPCM_TEST_BURST; + *data++ = (uint8)bus->pktgen_count; /* Just for backward compatability */ + break; + + default: + DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode)); + PKTFREE(osh, pkt, TRUE); + bus->pktgen_count = 0; + return; + } + + /* Write test header length field */ + *data++ = (bus->pktgen_len >> 0); + *data++ = (bus->pktgen_len >> 8); + + /* Write frame count in a 4 byte field adjucent to SDPCM test header for + * burst mode + */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) { + *data++ = (uint8)(bus->pktgen_count >> 0); + *data++ = (uint8)(bus->pktgen_count >> 8); + *data++ = (uint8)(bus->pktgen_count >> 16); + *data++ = (uint8)(bus->pktgen_count >> 24); + } else { + + /* Then fill in the remainder -- N/A for burst */ + for (fillbyte = 0; fillbyte < len; fillbyte++) + *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent); + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN); + } +#endif + + /* Send it */ + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) { + bus->pktgen_fail++; + if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail) + bus->pktgen_count = 0; + } + bus->pktgen_sent++; + + /* Bump length if not fixed, wrap at max */ + if (++bus->pktgen_len > bus->pktgen_maxlen) + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Special case for burst mode: just send one request! */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) + break; + } +} + +static void +dhdsdio_sdtest_set(dhd_bus_t *bus, uint count) +{ + void *pkt; + uint8 *data; + osl_t *osh = bus->dhd->osh; + + /* Allocate the packet */ + if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + + SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) { + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + return; + } + PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + + SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Fill in the test header */ + *data++ = SDPCM_TEST_SEND; + *data++ = (count > 0)?TRUE:FALSE; + *data++ = (bus->pktgen_maxlen >> 0); + *data++ = (bus->pktgen_maxlen >> 8); + *data++ = (uint8)(count >> 0); + *data++ = (uint8)(count >> 8); + *data++ = (uint8)(count >> 16); + *data++ = (uint8)(count >> 24); + + /* Send it */ + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) + bus->pktgen_fail++; +} + + +static void +dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq) +{ + osl_t *osh = bus->dhd->osh; + uint8 *data; + uint pktlen; + + uint8 cmd; + uint8 extra; + uint16 len; + uint16 offset; + + /* Check for min length */ + if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen)); + PKTFREE(osh, pkt, FALSE); + return; + } + + /* Extract header fields */ + data = PKTDATA(osh, pkt); + cmd = *data++; + extra = *data++; + len = *data++; len += *data++ << 8; + DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len)); + /* Check length for relevant commands */ + if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) { + if (pktlen != len + SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + return; + } + } 
+ + /* Process as per command */ + switch (cmd) { + case SDPCM_TEST_ECHOREQ: + /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */ + *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP; + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) { + bus->pktgen_sent++; + } else { + bus->pktgen_fail++; + PKTFREE(osh, pkt, FALSE); + } + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_ECHORSP: + if (bus->ext_loop) { + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + } + + for (offset = 0; offset < len; offset++, data++) { + if (*data != SDPCM_TEST_FILL(offset, extra)) { + DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: " + "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n", + offset, len, SDPCM_TEST_FILL(offset, extra), *data)); + break; + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_DISCARD: + { + int i = 0; + uint8 *prn = data; + uint8 testval = extra; + for (i = 0; i < len; i++) { + if (*prn != testval) { + DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n", + i, bus->pktgen_rcvd_rcvsession, testval, *prn)); + prn++; testval++; + } + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_BURST: + case SDPCM_TEST_SEND: + default: + DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + break; + } + + /* For recv mode, stop at limit (and tell dongle to stop sending) */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) { + bus->pktgen_rcvd_rcvsession++; + + if (bus->pktgen_total && + (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) { + bus->pktgen_count = 0; + DHD_ERROR(("Pktgen:rcv test complete!\n")); + bus->pktgen_rcv_state = PKTGEN_RCV_IDLE; + dhdsdio_sdtest_set(bus, FALSE); + bus->pktgen_rcvd_rcvsession = 0; + } + } + } +} +#endif /* SDTEST */ + +int dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +{ + int err = 0; + +#if defined(OOB_INTR_ONLY) + err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus); +#endif + return err; +} + +void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +{ +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_unregister(dhdp->bus->sdh); +#endif +} + +void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +{ +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(dhdp->bus->sdh, enable); +#endif +} + +void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub) +{ + bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh); +} + +void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub) +{ + bcmsdh_dev_relax(dhdpub->bus->sdh); +} + +bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub) +{ + bool enabled = FALSE; + + enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh); + return enabled; +} + +extern bool +dhd_bus_watchdog(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus; + + DHD_TIMER(("%s: Enter\n", __FUNCTION__)); + + bus = dhdp->bus; + + if (bus->dhd->dongle_reset) + return FALSE; + + if (bus->dhd->hang_was_sent) { + dhd_os_wd_timer(bus->dhd, 0); + return FALSE; + } + + /* Ignore the timer if simulating bus down */ + if (!SLPAUTO_ENAB(bus) && bus->sleeping) + return FALSE; + + if (dhdp->busstate == DHD_BUS_DOWN) + return FALSE; + + dhd_os_sdlock(bus->dhd); + + /* Poll period: check device if appropriate. 
*/ + if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) { + uint32 intstatus = 0; + + /* Reset poll tick */ + bus->polltick = 0; + + /* Check device if no interrupts */ + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { + + if (!bus->dpc_sched) { + uint8 devpend; + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, + SDIOD_CCCR_INTPEND, NULL); + intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); + } + + /* If there is something, make like the ISR and schedule the DPC */ + if (intstatus) { + bus->pollcnt++; + bus->ipend = TRUE; + if (bus->intr) { + bcmsdh_intr_disable(bus->sdh); + } + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + + /* Update interrupt tracking */ + bus->lastintrs = bus->intrcount; + } + +#ifdef DHD_DEBUG + /* Poll for console output periodically */ + if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd_console_ms) { + bus->console.count -= dhd_console_ms; + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (dhdsdio_readconsole(bus) < 0) + dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + /* Generate packets if configured */ + if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) { + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + bus->pktgen_tick = 0; + dhdsdio_pktgen(bus); + } +#endif + + /* On idle timeout clear activity flag and/or turn off clock */ +#ifdef DHD_USE_IDLECOUNT + if (bus->activity) + bus->activity = FALSE; + else { + bus->idlecount++; + + if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { + DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__)); + if (SLPAUTO_ENAB(bus)) { + if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY) + dhd_os_wd_timer(bus->dhd, 0); + } else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + + bus->idlecount = 0; + } + } +#else + if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) { + if (++bus->idlecount >= bus->idletime) { + bus->idlecount = 0; + if (bus->activity) { + bus->activity = FALSE; + if (SLPAUTO_ENAB(bus)) { + if (!bus->readframes) + dhdsdio_bussleep(bus, TRUE); + else + bus->reqbussleep = TRUE; + } + else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + } + } +#endif /* DHD_USE_IDLECOUNT */ + + dhd_os_sdunlock(bus->dhd); + + return bus->ipend; +} + +#ifdef DHD_DEBUG +extern int +dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhdp->bus; + uint32 addr, val; + int rv; + void *pkt; + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Exclusive bus access */ + dhd_os_sdlock(bus->dhd); + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Request clock to allow SDIO accesses */ + BUS_WAKE(bus); + /* No pend allowed since txpkt is called later, ht clk has to be on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, 
(uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Bump dongle by sending an empty packet on the event channel. + * sdpcm_sendup (RX) checks for virtual console input. + */ + if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) + rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE); + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + return rv; +} +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG +static void +dhd_dump_cis(uint fn, uint8 *cis) +{ + uint byte, tag, tdata; + DHD_INFO(("Function %d CIS:\n", fn)); + + for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) { + if ((byte % 16) == 0) + DHD_INFO((" ")); + DHD_INFO(("%02x ", cis[byte])); + if ((byte % 16) == 15) + DHD_INFO(("\n")); + if (!tdata--) { + tag = cis[byte]; + if (tag == 0xff) + break; + else if (!tag) + tdata = 0; + else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT) + tdata = cis[byte + 1] + 1; + else + DHD_INFO(("]")); + } + } + if ((byte % 16) != 15) + DHD_INFO(("\n")); +} +#endif /* DHD_DEBUG */ + +static bool +dhdsdio_chipmatch(uint16 chipid) +{ + if (chipid == BCM4325_CHIP_ID) + return TRUE; + if (chipid == BCM4329_CHIP_ID) + return TRUE; + if (chipid == BCM4315_CHIP_ID) + return TRUE; + if (chipid == BCM4319_CHIP_ID) + return TRUE; + if (chipid == BCM4336_CHIP_ID) + return TRUE; + if (chipid == BCM4330_CHIP_ID) + return TRUE; + if (chipid == BCM43237_CHIP_ID) + return TRUE; + if (chipid == BCM43362_CHIP_ID) + return TRUE; + if (chipid == BCM4314_CHIP_ID) + return TRUE; + if (chipid == BCM43242_CHIP_ID) + return TRUE; + if (chipid == BCM43340_CHIP_ID) + return TRUE; + if (chipid == BCM43341_CHIP_ID) + return TRUE; + if (chipid == BCM43143_CHIP_ID) + return TRUE; + if (chipid == BCM43342_CHIP_ID) + return TRUE; + if (chipid == BCM4334_CHIP_ID) + return TRUE; + if (chipid == BCM43239_CHIP_ID) + return TRUE; + if (chipid == BCM4324_CHIP_ID) + return TRUE; + if (chipid == BCM4335_CHIP_ID) + return TRUE; + if (chipid == BCM4339_CHIP_ID) + return TRUE; + if (chipid == BCM43349_CHIP_ID) + return TRUE; + if (chipid == BCM4345_CHIP_ID || chipid == BCM43454_CHIP_ID) + return TRUE; + if (chipid == BCM4350_CHIP_ID) + return TRUE; + if (chipid == BCM4354_CHIP_ID) + return TRUE; + if (chipid == BCM4356_CHIP_ID) + return TRUE; + if (chipid == BCM4358_CHIP_ID) + return TRUE; + if (chipid == BCM43430_CHIP_ID) + return TRUE; + if (BCM4349_CHIP(chipid)) + return TRUE; + return FALSE; +} + +static void * +dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, + uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh) +{ + int ret; + dhd_bus_t *bus; + + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. 
+ */ + dhd_txbound = DHD_TXBOUND; + dhd_rxbound = DHD_RXBOUND; + dhd_alignctl = TRUE; + sd1idle = TRUE; + dhd_readahead = TRUE; + retrydata = FALSE; + +#ifdef DISABLE_FLOW_CONTROL + dhd_doflow = FALSE; +#endif /* DISABLE_FLOW_CONTROL */ + dhd_dongle_ramsize = 0; + dhd_txminmax = DHD_TXMINMAX; + + forcealign = TRUE; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid)); + + /* We make assumptions about address window mappings */ + ASSERT((uintptr)regsva == SI_ENUM_BASE); + + /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start + * means early parse could fail, so here we should get either an ID + * we recognize OR (-1) indicating we must request power first. + */ + /* Check the Vendor ID */ + switch (venid) { + case 0x0000: + case VENDOR_BROADCOM: + break; + default: + DHD_ERROR(("%s: unknown vendor: 0x%04x\n", + __FUNCTION__, venid)); + goto forcereturn; + } + + /* Check the Device ID and make sure it's one that we support */ + switch (devid) { + case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */ + case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */ + case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */ + DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__)); + break; + case BCM4329_D11N_ID: /* 4329 802.11n dualband device */ + case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */ + case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */ + case 0x4329: + DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__)); + break; + case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */ + case BCM4315_D11G_ID: /* 4315 802.11g id */ + case BCM4315_D11A_ID: /* 4315 802.11a id */ + DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__)); + break; + case BCM4319_D11N_ID: /* 4319 802.11n id */ + case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */ + case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */ + DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__)); + break; + case 0: + DHD_INFO(("%s: allow device id 0, will check chip internals\n", + __FUNCTION__)); + break; + + default: + DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n", + __FUNCTION__, venid, devid)); + goto forcereturn; + } + + if (osh == NULL) { + DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__)); + goto forcereturn; + } + + /* Allocate private bus interface state */ + if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + goto fail; + } + bzero(bus, sizeof(dhd_bus_t)); + bus->sdh = sdh; + bus->cl_devid = (uint16)devid; + bus->bus = DHD_BUS; + bus->bus_num = bus_no; + bus->slot_num = slot; + bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ + +#if defined(SUPPORT_P2P_GO_PS) + init_waitqueue_head(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + + /* attempt to attach to the dongle */ + if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { + DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS/network interface */ + if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Allocate buffers */ + if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__)); + goto fail; + } + + if (!(dhdsdio_probe_init(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__)); + goto fail; + } + + if (bus->intr) { + /* Register 
interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__)); + bcmsdh_intr_disable(sdh); + if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) { + DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n", + __FUNCTION__, ret)); + goto fail; + } + DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__)); + } else { + DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n", + __FUNCTION__)); + } + + DHD_INFO(("%s: completed!!\n", __FUNCTION__)); + + /* if firmware path present try to download and bring up bus */ + bus->dhd->hang_report = TRUE; + if (dhd_download_fw_on_driverload) { + if ((ret = dhd_bus_start(bus->dhd)) != 0) { + DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__)); + goto fail; + } + } + else { + /* Set random MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_register_if(bus->dhd, 0, TRUE) != 0) { + DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMHOST_XTAL_PU_TIME_MOD + bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11); +#ifdef BCM4330_CHIP + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x0000F801); +#else + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001); +#endif /* BCM4330_CHIP */ +#endif /* BCMHOST_XTAL_PU_TIME_MOD */ + + + return bus; + +fail: + dhdsdio_release(bus, osh); + +forcereturn: + + return NULL; +} + +static bool +dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, + uint16 devid) +{ + int err = 0; + uint8 clkctl = 0; + + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) { + DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); + } + +#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG) + DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n", + bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4))); +#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */ + + + /* Force PLL off until si_attach() programs PLL control regs */ + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err); + if (!err) + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { + DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, DHD_INIT_CLKCTL1, clkctl)); + goto fail; + } + +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + uint fn, numfn; + uint8 *cis[SDIOD_MAX_IOFUNCS]; + int err = 0; + + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &err); + OSL_DELAY(65); + + for (fn = 0; fn <= numfn; fn++) { + if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn)); + break; + } + bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: 
fn %d cis read err %d\n", fn, err)); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + break; + } + dhd_dump_cis(fn, cis[fn]); + } + + while (fn-- > 0) { + ASSERT(cis[fn]); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + } + + if (err) { + DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } + } +#endif /* DHD_DEBUG */ + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + +#ifdef DHD_DEBUG + DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n", + bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg)); +#endif /* DHD_DEBUG */ + + + bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); + + if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: unsupported chip: 0x%04x\n", + __FUNCTION__, bus->sih->chip)); + goto fail; + } + + if (bus->sih->buscorerev >= 12) + dhdsdio_clk_kso_init(bus); + else + bus->kso = TRUE; + + if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) { + } + + si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength); + + + /* Get info on the ARM and SOCRAM cores... */ + if (!DHD_NOPMU(bus)) { + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4335_CHIP_ID: + case BCM4339_CHIP_ID: + case BCM43349_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM4358_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */ + ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? 
+ CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_ramsize) + dhd_dongle_setramsize(bus, dhd_dongle_ramsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + } + + /* ...but normally deal with the SDPCMDEV core */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) && + !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__)); + goto fail; + } + bus->sdpcmrev = si_corerev(bus->sih); + + /* Set core control so an SDIO reset does a backplane reset */ + OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN); + bus->rxint_mode = SDIO_DEVICE_HMB_RXINT; + + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) + { + uint32 val; + + val = R_REG(osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(osh, &bus->regs->corecontrol, val); + } + + + pktq_init(&bus->txq, (PRIOMASK + 1), QLEN); + + /* Locate an appropriately-aligned portion of hdrbuf */ + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN); + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; + + /* Setting default Glom size */ + bus->txglomsize = SDPCM_DEFGLOM_SIZE; + + return TRUE; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + return FALSE; +} + +static bool +dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen); + goto fail; + } + + /* Align the buffer */ + if ((uintptr)bus->databuf % DHD_SDALIGN) + bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN)); + else + bus->dataptr = bus->databuf; + + return TRUE; + +fail: + return FALSE; +} + +static bool +dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + int32 fnum; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->_srenab = FALSE; + +#ifdef SDTEST + dhdsdio_pktgen_init(bus); +#endif /* SDTEST */ + + /* Disable F2 to clear any intermediate frame state on the dongle */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + bus->dhd->busstate = DHD_BUS_DOWN; + bus->sleeping = FALSE; + bus->rxflow = FALSE; + bus->prev_rxlim_hit = 0; + + /* Done with backplane-dependent accesses, can drop clock... 
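+	   (the write below clears SBSDIO_FUNC1_CHIPCLKCSR so no ALP/HT request is left pending; only the SD clock remains).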
*/ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + bus->idletime = (int32)dhd_idletime; + bus->idleclock = DHD_IDLE_ACTIVE; + + /* Query the SD clock speed */ + if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor")); + bus->sd_divisor = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_divisor", bus->sd_divisor)); + } + + /* Query the SD bus mode */ + if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode")); + bus->sd_mode = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_mode", bus->sd_mode)); + } + + /* Query the F2 block size, set roundup accordingly */ + fnum = 2; + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + bus->roundup = MIN(max_roundup, bus->blocksize); + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); + bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE); + if (bus->pad_pkt == NULL) + DHD_ERROR(("failed to allocate padding packet\n")); + else { + int alignment_offset = 0; + uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt); + if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN))) + PKTPUSH(osh, bus->pad_pkt, alignment_offset); + PKTSETNEXT(osh, bus->pad_pkt, NULL); + } +#endif /* DHDENABLE_TAILPAD */ + + /* Query if bus module supports packet chaining, default to use if supported */ + if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0, + &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_rxchain = FALSE; + } else { + DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n", + __FUNCTION__, (bus->sd_rxchain ? 
"supports" : "does not support"))); + } + bus->use_rxchain = (bool)bus->sd_rxchain; + bus->txinrx_thres = CUSTOM_TXINRX_THRES; + /* TX first in dhdsdio_readframes() */ + bus->dotxinrx = TRUE; + + return TRUE; +} + +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + + ret = dhdsdio_download_firmware(bus, osh, bus->sdh); + + + return ret; +} + +static int +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + + + DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + + /* Download the firmware */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + ret = _dhdsdio_download_firmware(bus); + + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +/* Detach and free everything */ +static void +dhdsdio_release(dhd_bus_t *bus, osl_t *osh) +{ + bool dongle_isolation = FALSE; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(osh); + + if (bus->dhd) { + dongle_isolation = bus->dhd->dongle_isolation; + dhd_detach(bus->dhd); + } + + /* De-register interrupt handler */ + bcmsdh_intr_disable(bus->sdh); + bcmsdh_intr_dereg(bus->sdh); + + if (bus->dhd) { + dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd = NULL; + } + + dhdsdio_release_malloc(bus, osh); + +#ifdef DHD_DEBUG + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); +#endif /* DHDENABLE_TAILPAD */ + + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->rxbuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->rxbuf, bus->rxblen); +#endif + bus->rxctl = bus->rxbuf = NULL; + bus->rxlen = 0; + } + + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + bus->databuf = NULL; + } + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + +} + + +static void +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) + return; + + if (bus->sih) { +#if !defined(BCMLXSDMMC) + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + if (KSO_ENAB(bus) && (dongle_isolation == FALSE)) + si_watchdog(bus->sih, 4); +#endif /* !defined(BCMLXSDMMC) */ + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + si_detach(bus->sih); + bus->sih = NULL; + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_disconnect(void *ptr) +{ + dhd_bus_t *bus = (dhd_bus_t *)ptr; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + + + if (bus) { + ASSERT(bus->dhd); + dhdsdio_release(bus, bus->dhd->osh); + } + + + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static int +dhdsdio_suspend(void *context) +{ + int ret = 0; + + dhd_bus_t *bus = (dhd_bus_t*)context; +#ifdef 
SUPPORT_P2P_GO_PS + int wait_time = 0; + + if (bus->idletime > 0) { + wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms); + } +#endif /* SUPPORT_P2P_GO_PS */ + ret = dhd_os_check_wakelock(bus->dhd); +#ifdef SUPPORT_P2P_GO_PS + if ((!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) { + if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) { + if (!bus->sleeping) { + return 1; + } + } + } +#endif /* SUPPORT_P2P_GO_PS */ + return ret; +} + +static int +dhdsdio_resume(void *context) +{ +#if defined(OOB_INTR_ONLY) + dhd_bus_t *bus = (dhd_bus_t*)context; + + if (dhd_os_check_if_up(bus->dhd)) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif + return 0; +} + + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. + */ + +static bcmsdh_driver_t dhd_sdio = { + dhdsdio_probe, + dhdsdio_disconnect, + dhdsdio_suspend, + dhdsdio_resume +}; + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return bcmsdh_register(&dhd_sdio); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_unregister(); +} + +#if defined(BCMLXSDMMC) +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +int dhd_bus_reg_sdio_notify(void* semaphore) +{ + return bcmsdh_reg_sdio_notify(semaphore); +} + +void dhd_bus_unreg_sdio_notify(void) +{ + bcmsdh_unreg_sdio_notify(); +} +#endif /* defined(BCMLXSDMMC) */ + +#ifdef BCMEMBEDIMAGE +static int +dhdsdio_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + unsigned char *ularray = NULL; + + DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__)); + + /* Download image */ + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + + if (offset == 0) { + bus->resetinstr = *(((uint32*)dlarray)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + ularray = MALLOC(bus->dhd->osh, bus->ramsize); + /* Upload image to verify downloaded contents. 
*/ + offset = 0; + memset(ularray, 0xaa, bus->ramsize); + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, + ularray + offset, sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + + if (memcmp(dlarray, ularray, sizeof(dlarray))) { + DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n", + __FUNCTION__, dlimagename, dlimagever, dlimagedate)); + goto err; + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n", + __FUNCTION__, dlimagename, dlimagever, dlimagedate)); + + } +#endif /* DHD_DEBUG */ + +err: + if (ularray) + MFREE(bus->dhd->osh, ularray, bus->ramsize); + return bcmerror; +} +#endif /* BCMEMBEDIMAGE */ + +static int +dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = -1; + int offset = 0; + int len; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + image = dhd_os_open_image(pfw_path); + if (image == NULL) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +static int +dhdsdio_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = -1; + uint len; + void * image = NULL; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* For Get nvram from UEFI */ + if (nvram_file_exists) + image = dhd_os_open_image(pnv_path); + + memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + + /* For Get nvram from image or UEFI (when image == NULL ) */ + len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); + + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)memblock; + bufp[len] = 0; + len = 
process_nvram_vars(bufp, len); + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + else { + DHD_ERROR(("%s: error reading nvram file: %d\n", + __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +static int +_dhdsdio_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + return 0; +#endif + } + + /* Keep arm in reset */ + if (dhdsdio_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdsdio_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } + else { + embed = FALSE; + dlok = TRUE; + } + } + +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdsdio_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } + else { + dlok = TRUE; + } + } +#else + BCM_REFERENCE(embed); +#endif + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* External nvram takes precedence if specified */ + if (dhdsdio_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdsdio_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} + +static int +dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) +{ + int status; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle); + + return status; +} + +static int +dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry) +{ + int ret; + int i = 0; + int retries = 0; + bcmsdh_info_t *sdh; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + sdh = bus->sdh; + do { + ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, + pkt, complete, handle); + + bus->f2txdata++; + ASSERT(ret != BCME_PENDING); + + if (ret == BCME_NODEVICE) { + DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__)); + } else if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + bus->f1regdata++; + bus->dhd->tx_errors++; + bcmsdh_abort(sdh, SDIO_FUNC_2); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + 
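+			/* Drain the aborted frame: poll the F1 write-frame byte counters until both read zero (or READ_FRM_CNT_RETRIES attempts expire) so the next transfer starts clean. */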
for (i = 0; i < READ_FRM_CNT_RETRIES; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI, + NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO, + NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + } + } while ((ret < 0) && retrydata && ++retries < max_retry); + + return ret; +} + +uint8 +dhd_bus_is_ioready(struct dhd_bus *bus) +{ + uint8 enable; + bcmsdh_info_t *sdh; + ASSERT(bus); + ASSERT(bus->sih != NULL); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + sdh = bus->sdh; + return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL)); +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +uint +dhd_bus_hdrlen(struct dhd_bus *bus) +{ + return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; +} + +void +dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val) +{ + bus->dotxinrx = val; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + dhd_bus_t *bus; + + bus = dhdp->bus; + + if (flag == TRUE) { + if (!bus->dhd->dongle_reset) { + dhd_os_sdlock(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + /* Expect app to have torn down any connection before calling */ + /* Stop the bus, disable F2 */ + dhd_bus_stop(bus, FALSE); + +#if defined(OOB_INTR_ONLY) + /* Clean up any pending IRQ */ + dhd_enable_oob_intr(bus, FALSE); + bcmsdh_oob_intr_set(bus->sdh, FALSE); + bcmsdh_oob_intr_unregister(bus->sdh); +#endif + + /* Clean tx/rx buffer pointers, detach from the dongle */ + dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); + + bus->dhd->dongle_reset = TRUE; + bus->dhd->up = FALSE; + dhd_txglom_enable(dhdp, FALSE); + dhd_os_sdunlock(dhdp); + + DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__)); + /* App can now remove power from device */ + } else + bcmerror = BCME_SDIO_ERROR; + } else { + /* App must have restored power to device before calling */ + + DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) { + /* Turn on WLAN */ + dhd_os_sdlock(dhdp); + /* Reset SD client */ + bcmsdh_reset(bus->sdh); + + /* Attempt to re-attach & download */ + if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, + (uint32 *)SI_ENUM_BASE, + bus->cl_devid)) { + /* Attempt to download binary to the dongle */ + if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && + dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { +#if defined(OOB_INTR_ONLY) + dhd_enable_oob_intr(bus, TRUE); + bcmsdh_oob_intr_register(bus->sdh, + dhdsdio_isr, bus); + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif + + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; + +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); +#endif + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DHD_TRACE(("%s: WLAN ON DONE\n", 
__FUNCTION__)); + } else { + dhd_bus_stop(bus, FALSE); + dhdsdio_release_dongle(bus, bus->dhd->osh, + TRUE, FALSE); + } + } else { + DHD_ERROR(("%s Failed to download binary to the dongle\n", + __FUNCTION__)); + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + bcmerror = BCME_SDIO_ERROR; + } + } else + bcmerror = BCME_SDIO_ERROR; + + dhd_os_sdunlock(dhdp); + } else { + bcmerror = BCME_SDIO_ERROR; + DHD_INFO(("%s called when dongle is not in reset\n", + __FUNCTION__)); + DHD_INFO(("Will call dhd_bus_start instead\n")); + dhd_bus_resume(dhdp, 1); + if ((bcmerror = dhd_bus_start(dhdp)) != 0) + DHD_ERROR(("%s: dhd_bus_start fail with %d\n", + __FUNCTION__, bcmerror)); + } + } + return bcmerror; +} + +int dhd_bus_suspend(dhd_pub_t *dhdpub) +{ + return bcmsdh_stop(dhdpub->bus->sdh); +} + +int dhd_bus_resume(dhd_pub_t *dhdpub, int stage) +{ + return bcmsdh_start(dhdpub->bus->sdh, stage); +} + +/* Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chip; +} + +/* Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chiprev; +} + +/* Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chippkg; +} + +int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num) +{ + *bus_type = bus->bus; + *bus_num = bus->bus_num; + *slot_num = bus->slot_num; + return 0; +} + +int +dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size) +{ + dhd_bus_t *bus; + + bus = dhdp->bus; + return dhdsdio_membytes(bus, set, address, data, size); +} + + +void +dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path) +{ + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; +} + +int +dhd_enableOOB(dhd_pub_t *dhd, bool sleep) +{ + dhd_bus_t *bus = dhd->bus; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + if (sleep) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) { + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + return BCME_BUSY; + } + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } else { + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + return BCME_OK; +} + +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bool wlfc_enabled = FALSE; + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled) { +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. 
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Clear the data packet queues */ + pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0); + } +} + +#ifdef BCMSDIO +int +dhd_sr_config(dhd_pub_t *dhd, bool on) +{ + dhd_bus_t *bus = dhd->bus; + + if (!bus->_srenab) + return -1; + + return dhdsdio_clk_devsleep_iovar(bus, on); +} + +uint16 +dhd_get_chipid(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + + if (bus && bus->sih) + return (uint16)bus->sih->chip; + else + return 0; +} +#endif /* BCMSDIO */ + +#ifdef DEBUGGER +uint32 dhd_sdio_reg_read(void *h, uint32 addr) +{ + uint32 rval; + struct dhd_bus *bus = (struct dhd_bus *) h; + + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + rval = bcmsdh_reg_read(bus->sdh, addr, 4); + + dhd_os_sdunlock(bus->dhd); + + return rval; +} + +void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val) +{ + struct dhd_bus *bus = (struct dhd_bus *) h; + + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmsdh_reg_write(bus->sdh, addr, 4, val); + + dhd_os_sdunlock(bus->dhd); +} +#endif /* DEBUGGER */ diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/drivers/net/wireless/bcmdhd/dhd_wlfc.c new file mode 100644 index 000000000000..0e74318d173a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.c @@ -0,0 +1,4507 @@ +/* + * DHD PROP_TXSTATUS Module. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.c 579277 2015-08-14 04:49:50Z $ + * + */ + + +#include +#include + +#include +#include + +#include +#include + +#include + +#include + +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ +#include +#include +#endif + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + + +/* + * wlfc naming and lock rules: + * + * 1. Private functions name like _dhd_wlfc_XXX, declared as static and avoid wlfc lock operation. + * 2. Public functions name like dhd_wlfc_XXX, use wlfc lock if needed. + * 3. Non-Proptxstatus module call public functions only and avoid wlfc lock operation. 
+ * + */ + +#if defined(DHD_WLFC_THREAD) +#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */ +#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */ +#endif /* defined (DHD_WLFC_THREAD) */ + + +#ifdef PROP_TXSTATUS + +#define DHD_WLFC_QMON_COMPLETE(entry) + +#define LIMIT_BORROW + + +/** reordering related */ + +#if defined(DHD_WLFC_THREAD) +static void +_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp) +{ + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); +} +#endif /* DHD_WLFC_THREAD */ + +static uint16 +_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq) +{ + uint16 seq; + + if (!p) { + return 0xffff; + } + + seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + if (seq < current_seq) { + /* wrap around */ + seq += 256; + } + + return seq; +} + +/** + * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder + * suppressed packets. + * @param[in] pq caller supplied packet queue to enqueue the packet on + * @param[in] prec precedence of the to-be-queued packet + * @param[in] p transmit packet to enqueue + * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order. + * @param[in] current_seq + * @param[in] reOrder reOrder on odd precedence (=suppress queue) + */ +static void +_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, + uint8 current_seq, bool reOrder) +{ + struct pktq_prec *q; + uint16 seq, seq2; + void *p2, *p2_prev; + + if (!p) + return; + + ASSERT(prec >= 0 && prec < pq->num_prec); + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + PKTSETLINK(p, NULL); + if (q->head == NULL) { + /* empty queue */ + q->head = p; + q->tail = p; + } else { + if (reOrder && (prec & 1)) { + seq = _dhd_wlfc_adjusted_seq(p, current_seq); + p2 = qHead ? q->head : q->tail; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) { + /* need reorder */ + p2 = q->head; + p2_prev = NULL; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + while (seq > seq2) { + p2_prev = p2; + p2 = PKTLINK(p2); + if (!p2) { + break; + } + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + } + + if (p2_prev == NULL) { + /* insert head */ + PKTSETLINK(p, q->head); + q->head = p; + } else if (p2 == NULL) { + /* insert tail */ + PKTSETLINK(p2_prev, p); + q->tail = p; + } else { + /* insert after p2_prev */ + PKTSETLINK(p, PKTLINK(p2_prev)); + PKTSETLINK(p2_prev, p); + } + goto exit; + } + } + + if (qHead) { + PKTSETLINK(p, q->head); + q->head = p; + } else { + PKTSETLINK(q->tail, p); + q->tail = p; + } + } + +exit: + + q->len++; + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; +} /* _dhd_wlfc_prec_enque */ + +/** + * Create a place to store all packet pointers submitted to the firmware until a status comes back, + * suppress or otherwise. + * + * hang-er: noun, a contrivance on which things are hung, as a hook. 
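+ * Each hanger item records the packet pointer plus its state and generation; the slot index is carried in the packet's host-to-dongle tag so a later txstatus can locate the item.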
+ */ +/** @deprecated soon */ +static void* +_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items) +{ + int i; + wlfc_hanger_t* hanger; + + /* allow only up to a specific size for now */ + ASSERT(max_items == WLFC_HANGER_MAXITEMS); + + if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER, + WLFC_HANGER_SIZE(max_items))) == NULL) { + return NULL; + } + memset(hanger, 0, WLFC_HANGER_SIZE(max_items)); + hanger->max_items = max_items; + + for (i = 0; i < hanger->max_items; i++) { + hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + } + return hanger; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger) +{ + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items)); + return BCME_OK; + } + return BCME_BADARG; +} + +/** @deprecated soon */ +static uint16 +_dhd_wlfc_hanger_get_free_slot(void* hanger) +{ + uint32 i; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + i = h->slot_pos + 1; + if (i == h->max_items) { + i = 0; + } + while (i != h->slot_pos) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->slot_pos = i; + return (uint16)i; + } + i++; + if (i == h->max_items) + i = 0; + } + h->failed_slotfind++; + } + return WLFC_HANGER_MAXITEMS; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *gen = 0xff; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *gen = h->items[slot_id].gen; + } + else { + DHD_ERROR(("Error: %s():%d item not used\n", + __FUNCTION__, __LINE__)); + rc = BCME_NOTFOUND; + } + + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h && (slot_id < WLFC_HANGER_MAXITEMS)) { + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE; + h->items[slot_id].pkt = pkt; + h->items[slot_id].pkt_state = 0; + h->items[slot_id].pkt_txstatus = 0; + h->pushed++; + } else { + h->failed_to_push++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *pktout = NULL; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *pktout = h->items[slot_id].pkt; + if (remove_from_hanger) { + h->items[slot_id].state = + WLFC_HANGER_ITEM_STATE_FREE; + h->items[slot_id].pkt = NULL; + h->items[slot_id].gen = 0xff; + h->items[slot_id].identifier = 0; + h->popped++; + } + } else { + h->failed_to_pop++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + /* this packet was not pushed at the time it went to the firmware */ + if 
(slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + if (h) { + h->items[slot_id].gen = gen; + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED; + } else { + rc = BCME_BADARG; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** remove reference of specific packet in hanger */ +/** @deprecated soon */ +static bool +_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt) +{ + int i; + + if (!h || !pkt) { + return FALSE; + } + + i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt))); + + if ((i < h->max_items) && (pkt == h->items[i].pkt)) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + h->items[i].pkt = NULL; + h->items[i].gen = 0xff; + h->items[i].identifier = 0; + return TRUE; + } else { + DHD_ERROR(("Error: %s():%d item not suppressed\n", + __FUNCTION__, __LINE__)); + } + } + + return FALSE; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p) +{ + wlfc_mac_descriptor_t* entry; + uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + + if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[entry_idx]; + else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pktq_penq(&entry->afq, prec, p); + + return BCME_OK; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec, + void **pktout) +{ + wlfc_mac_descriptor_t *entry; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!ctx) { + DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout)); + return BCME_BADARG; + } + + if (pktout) { + *pktout = NULL; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->afq; + + ASSERT(prec < pq->num_prec); + + q = &pq->q[prec]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) + { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + bcm_pkt_validate_chk(p); + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt, + WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head))))); + ctx->stats.ooo_pkts[prec]++; + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->len--; + pq->len--; + + PKTSETLINK(p, NULL); + + if (pktout) { + *pktout = p; + } + + return BCME_OK; +} /* _dhd_wlfc_deque_afq */ + +/** + * Flow control information piggy backs on packets, in the 
form of one or more TLVs. This function + * pushes one or more TLVs onto a packet that is going to be sent towards the dongle. + * + * @param[in] ctx + * @param[in/out] packet + * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid + * @param[in] tim_bmp + * @param[in] mac_handle + * @param[in] htodtag + * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon + * earlier between host and firmware. + * @param[in] skip_wlfc_hdr + */ +static int +_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal, + uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr) +{ + uint32 wl_pktinfo = 0; + uint8* wlh; + uint8 dataOffset = 0; + uint8 fillers; + uint8 tim_signal_len = 0; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + struct bdc_header *h; + void *p = *packet; + + if (skip_wlfc_hdr) + goto push_bdc_hdr; + + if (tim_signal) { + tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + } + + /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ + dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len; + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + dataOffset += WLFC_CTL_VALUE_LEN_SEQ; + } + + fillers = ROUNDUP(dataOffset, 4) - dataOffset; + dataOffset += fillers; + + PKTPUSH(ctx->osh, p, dataOffset); + wlh = (uint8*) PKTDATA(ctx->osh, p); + + wl_pktinfo = htol32(htodtag); + + wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG; + wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG; + memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32)); + + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + uint16 wl_seqinfo = htol16(htodseq); + wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ; + memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo, + WLFC_CTL_VALUE_LEN_SEQ); + } + + if (tim_signal_len) { + wlh[dataOffset - fillers - tim_signal_len ] = + WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 1] = + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle; + wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp; + } + if (fillers) + memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers); + +push_bdc_hdr: + PKTPUSH(ctx->osh, p, BDC_HEADER_LEN); + h = (struct bdc_header *)PKTDATA(ctx->osh, p); + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(p)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = dataOffset >> 2; + BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p))); + *packet = p; + return BCME_OK; +} /* _dhd_wlfc_pushheader */ + +/** + * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg + * when a packet is about to be freed. 
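+ * The BDC header is pulled first, then the wlfc header whose length is taken from bdc_header::dataOffset (stored in 4-byte units).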
+ */ +static int +_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) +{ + struct bdc_header *h; + + if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf); + + /* pull BDC header */ + PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN); + + if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2))); + return BCME_ERROR; + } + + /* pull wl-header */ + PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2)); + return BCME_OK; +} + +/** + * @param[in/out] p packet + */ +static wlfc_mac_descriptor_t* +_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) +{ + int i; + wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes; + uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p)); + uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p)); + wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p)); + int iftype = ctx->destination_entries.interfaces[ifid].iftype; + + /* saved one exists, return it */ + if (entry) + return entry; + + /* Multicast destination, STA and P2P clients get the interface entry. + * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations + * have their own entry. + */ + if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) || + iftype == WLC_E_IF_ROLE_P2P_CLIENT) && + (ctx->destination_entries.interfaces[ifid].occupied)) { + entry = &ctx->destination_entries.interfaces[ifid]; + } + + if (entry && ETHER_ISMULTI(dstn)) { + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + return entry; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (table[i].occupied) { + if (table[i].interface_id == ifid) { + if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) { + entry = &table[i]; + break; + } + } + } + } + + if (entry == NULL) + entry = &ctx->destination_entries.other; + + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + + return entry; +} /* _dhd_wlfc_find_table_entry */ + +/** + * In case a packet must be dropped (because eg the queues are full), various tallies have to be + * be updated. Called from several other functions. + * @param[in] dhdp pointer to public DHD structure + * @param[in] prec precedence of the packet + * @param[in] p the packet to be dropped + * @param[in] bPktInQ TRUE if packet is part of a queue + */ +static int +_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) +{ + athost_wl_status_info_t* ctx; + void *pout = NULL; + + ASSERT(dhdp && p); + ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT); + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + /* suppressed queue, need pop from hanger */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG + (PKTTAG(p))), &pout, TRUE); + ASSERT(p == pout); + } + + if (!(prec & 1)) { +#ifdef DHDTCPACK_SUPPRESS + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE); + + /* This packet is about to be freed, so remove it from tcp_ack_info_tbl + * This must be one of... + * 1. A pkt already in delayQ is evicted by another pkt with higher precedence + * in _dhd_wlfc_prec_enq_with_drop() + * 2. A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_enque_delayq(). + * 3. 
A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_rollback_packet_toq(). + */ + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" + " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + + if (bPktInQ) { + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + } + + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--; + ctx->stats.pktout++; + ctx->stats.drop_pkts[prec]++; + + dhd_txcomplete(dhdp, p, FALSE); + PKTFREE(ctx->osh, p, TRUE); + + return 0; +} /* _dhd_wlfc_prec_drop */ + +/** + * Called when eg the host handed a new packet over to the driver, or when the dongle reported + * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit + * packet in the host driver to be (re)transmitted at a later opportunity. + * @param[in] dhdp pointer to public DHD structure + * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence + */ +static bool +_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead, + uint8 current_seq) +{ + void *p = NULL; + int eprec = -1; /* precedence to evict from */ + athost_wl_status_info_t* ctx; + + ASSERT(dhdp && pq && pkt); + ASSERT(prec >= 0 && prec < pq->num_prec); + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(pq, prec) && !pktq_full(pq)) { + goto exit; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(pq, prec)) { + eprec = prec; + } else if (pktq_full(pq)) { + p = pktq_peek_tail(pq, &eprec); + if (!p) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + if ((eprec > prec) || (eprec < 0)) { + if (!pktq_pempty(pq, prec)) { + eprec = prec; + } else { + return FALSE; + } + } + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktq_pempty(pq, eprec)); + /* Evict all fragmented frames */ + dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop); + } + +exit: + /* Enqueue */ + _dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq, + WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)); + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++; + ctx->pkt_cnt_per_ac[prec>>1]++; + ctx->pkt_cnt_in_psq++; + + return TRUE; +} /* _dhd_wlfc_prec_enq_with_drop */ + +/** + * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in + * the event that this 'commit' failed. + */ +static int +_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, + void* p, ewlfc_packet_state_t pkt_type, uint32 hslot) +{ + /* + * put the packet back to the head of queue + * - suppressed packet goes back to suppress sub-queue + * - pull out the header, if new or delayed packet + * + * Note: hslot is used only when header removal is done. + */ + wlfc_mac_descriptor_t* entry; + int rc = BCME_OK; + int prec, fifo_id; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + fifo_id = prec << 1; + if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) + fifo_id += 1; + if (entry != NULL) { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the firmware (for pspoll etc.) 
+ */ + if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) + entry->requested_credit++; + + if (pkt_type == eWLFC_PKTTYPE_DELAYED) { + /* decrement sequence count */ + WLFC_DECR_SEQCOUNT(entry, prec); + /* remove header first */ + rc = _dhd_wlfc_pullheader(ctx, p); + if (rc != BCME_OK) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + goto exit; + } + } + + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE, + WLFC_SEQCOUNT(entry, fifo_id>>1)) + == FALSE) { + /* enque failed */ + DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n", + __FUNCTION__, __LINE__, fifo_id)); + rc = BCME_ERROR; + } + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + +exit: + if (rc != BCME_OK) { + ctx->stats.rollback_failed++; + _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE); + } else { + ctx->stats.rollback++; + } + + return rc; +} /* _dhd_wlfc_rollback_packet_toq */ + +/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */ +static bool +_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) +{ + int prec, ac_traffic = WLFC_NO_TRAFFIC; + + for (prec = 0; prec < AC_COUNT; prec++) { + if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) { + if (ac_traffic == WLFC_NO_TRAFFIC) + ac_traffic = prec + 1; + else if (ac_traffic != (prec + 1)) + ac_traffic = WLFC_MULTI_TRAFFIC; + } + } + + if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) { + /* single AC (BE/BK/VI/VO) in queue */ + if (ctx->allow_fc) { + return TRUE; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (ctx->fc_defer_timestamp == 0) { + /* first single ac scenario */ + ctx->fc_defer_timestamp = curr_t; + return FALSE; + } + + /* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */ + delta = curr_t - ctx->fc_defer_timestamp; + if (delta >= WLFC_FC_DEFER_PERIOD_MS) { + ctx->allow_fc = TRUE; + } + } + } else { + /* multiple ACs or BCMC in queue */ + ctx->allow_fc = FALSE; + ctx->fc_defer_timestamp = 0; + } + + return ctx->allow_fc; +} /* _dhd_wlfc_allow_fc */ + +/** + * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on + * low/high watermarks. 
+ */ +static void +_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id) +{ + dhd_pub_t *dhdp; + + ASSERT(ctx); + + dhdp = (dhd_pub_t *)ctx->dhdp; + ASSERT(dhdp); + + if (dhdp->skip_fc && dhdp->skip_fc()) + return; + + if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id)) + return; + + if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) { + /* start traffic */ + ctx->hostif_flow_state[if_id] = OFF; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n", + pq->len, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("F")); + + dhd_txflowcontrol(dhdp, if_id, OFF); + + ctx->toggle_host_if = 0; + } + + if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) { + /* stop traffic */ + ctx->hostif_flow_state[if_id] = ON; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n", + pq->len, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("N")); + + dhd_txflowcontrol(dhdp, if_id, ON); + + ctx->host_ifidx = if_id; + ctx->toggle_host_if = 1; + } + + return; +} /* _dhd_wlfc_flow_control_check */ + +static int +_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 ta_bmp) +{ + int rc = BCME_OK; + void* p = NULL; + int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + if (dhdp->proptxstatus_txoff) { + rc = BCME_NORESOURCE; + return rc; + } + + /* allocate a dummy packet */ + p = PKTGET(ctx->osh, dummylen, TRUE); + if (p) { + PKTPULL(ctx->osh, p, dummylen); + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0); + _dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE); + DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1); +#ifdef PROP_TXSTATUS_DEBUG + ctx->stats.signal_only_pkts_sent++; +#endif + +#if defined(BCMPCIE) + rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx); +#else + rc = dhd_bus_txdata(dhdp->bus, p); +#endif + if (rc != BCME_OK) { + _dhd_wlfc_pullheader(ctx, p); + PKTFREE(ctx->osh, p, TRUE); + } + } else { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, dummylen)); + rc = BCME_NOMEM; + dhdp->tx_pktgetfail++; + } + + return rc; +} /* _dhd_wlfc_send_signalonly_packet */ + +/** + * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration + * maintained in caller supplied parameter 'entry'. 
+ * + * @param[in/out] entry administration about a remote MAC entity + * @param[in] prec precedence queue for this remote MAC entity + * + * Return value: TRUE if traffic availability changed + */ +static bool +_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + int prec) +{ + bool rc = FALSE; + + if (entry->state == WLFC_STATE_CLOSE) { + if ((pktq_plen(&entry->psq, (prec << 1)) == 0) && + (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) { + /* no packets in both 'normal' and 'suspended' queues */ + if (entry->traffic_pending_bmp & NBITVAL(prec)) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp & ~ NBITVAL(prec); + } + } else { + /* packets are queued in host for transmission to dongle */ + if (!(entry->traffic_pending_bmp & NBITVAL(prec))) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp | NBITVAL(prec); + } + } + } + + if (rc) { + /* request a TIM update to firmware at the next piggyback opportunity */ + if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) { + entry->send_tim_signal = 1; + _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp); + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + entry->send_tim_signal = 0; + } else { + rc = FALSE; + } + } + + return rc; +} /* _dhd_wlfc_traffic_pending_check */ + +/** + * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues + * the packet to transmit to firmware again at a later opportunity. + */ +static int +_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) +{ + wlfc_mac_descriptor_t* entry; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_NOTFOUND; + } + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. + */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE, + WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + ctx->stats.delayq_full_error++; + /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */ + WLFC_DBGMESG(("s")); + return BCME_ERROR; + } + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); + return BCME_OK; +} + +/** + * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer + * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet. + * + * @param[in/out] ctx driver specific flow control administration + * @param[in/out] entry The remote MAC entity for which the packet is destined. + * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet.
+ * @param[in] header_needed True if packet is 'new' to flow control + * @param[out] slot Handle to container in which the packet was 'parked' + */ +static int +_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot) +{ + int rc = BCME_OK; + int hslot = WLFC_HANGER_MAXITEMS; + bool send_tim_update = FALSE; + uint32 htod = 0; + uint16 htodseq = 0; + uint8 free_ctr; + int gen = 0xff; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + void * p = *packet; + + *slot = hslot; + + if (entry == NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, p); + } + + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + if (entry->send_tim_signal) { + /* sends a traffic indication bitmap to the dongle */ + send_tim_update = TRUE; + entry->send_tim_signal = 0; + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + } + + if (header_needed) { + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + hslot = (uint)(entry - &ctx->destination_entries.nodes[0]); + } else { + hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger); + } + gen = entry->generation; + free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } else { + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p)); + } + + hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + + if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) { + gen = entry->generation; + } else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + } else { + _dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen); + } + + free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + /* remove old header */ + _dhd_wlfc_pullheader(ctx, p); + } + + if (hslot >= WLFC_HANGER_MAXITEMS) { + DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__)); + return BCME_ERROR; + } + + WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); + WL_TXSTATUS_SET_HSLOT(htod, hslot); + WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + WL_TXSTATUS_SET_GENERATION(htod, gen); + DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); + + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { + /* + Indicate that this packet is being sent in response to an + explicit request from the firmware side. + */ + WLFC_PKTFLAG_SET_PKTREQUESTED(htod); + } else { + WLFC_PKTFLAG_CLR_PKTREQUESTED(htod); + } + + rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update, + entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE); + if (rc == BCME_OK) { + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger); + if (header_needed) { + /* + a new header was created for this packet. + push to hanger slot and scrub q. Since bus + send succeeded, increment seq number as well. 
+ */ + rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + if (rc == BCME_OK) { +#ifdef PROP_TXSTATUS_DEBUG + h->items[hslot].push_time = + OSL_SYSUPTIME(); +#endif + } else { + DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n", + __FUNCTION__, rc)); + } + } else { + /* clear hanger state */ + if (((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt != p) + DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n", + __FUNCTION__, p, h->items[hslot].pkt)); + ASSERT(h->items[hslot].pkt == p); + bcm_object_feature_set(h->items[hslot].pkt, + BCM_OBJECT_FEATURE_PKT_STATE, 0); + h->items[hslot].pkt_state = 0; + h->items[hslot].pkt_txstatus = 0; + h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE; + } + } else if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + /* clear hanger state */ + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_state = 0; + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_txstatus = 0; + } + + if ((rc == BCME_OK) && header_needed) { + /* increment free running sequence count */ + WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } + } + *slot = hslot; + *packet = p; + return rc; +} /* _dhd_wlfc_pretx_pktprocess */ + +/** + * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote + * mac is in the 'open' state, otherwise '0'. + */ +static int +_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, int prec) +{ + if (entry->interface_id >= WLFC_MAX_IFNUM) { + ASSERT(&ctx->destination_entries.other == entry); + return 1; + } + + if (ctx->destination_entries.interfaces[entry->interface_id].iftype == + WLC_E_IF_ROLE_P2P_GO) { + /* - destination interface is of type p2p GO. + For a p2pGO interface, if the destination is OPEN but the interface is + CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is + destination-specific-credit left send packets. This is because the + firmware storing the destination-specific-requested packet in queue. + */ + if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) { + return 0; + } + } + + /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */ + if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) || + (!(entry->ac_bitmap & (1 << prec)))) { + return 0; + } + + return 1; +} /* _dhd_wlfc_is_destination_open */ + +/** + * Dequeues a suppressed or delayed packet from a queue + * @param[in/out] ctx Driver specific flow control administration + * @param[in] prec Precedence of queue to dequeue from + * @param[out] ac_credit_spent Boolean, returns 0 or 1 + * @param[out] needs_hdr Boolean, returns 0 or 1 + * @param[out] entry_out The remote MAC for which the packet is destined + * @param[in] only_no_credit If TRUE, searches all entries instead of just the active ones + * + * Return value: the dequeued packet + */ +static void* +_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, + uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out, + bool only_no_credit) +{ + wlfc_mac_descriptor_t* entry; + int total_entries; + void* p = NULL; + int i; + uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 
0 : 1; + + *entry_out = NULL; + /* most cases a packet will count against FIFO credit */ + *ac_credit_spent = credit_spent; + + /* search all entries, include nodes as well as interfaces */ + if (only_no_credit) { + total_entries = ctx->requested_entry_count; + } else { + total_entries = ctx->active_entry_count; + } + + for (i = 0; i < total_entries; i++) { + if (only_no_credit) { + entry = ctx->requested_entry[i]; + } else { + entry = ctx->active_entry_head; + /* move head to ensure fair round-robin */ + ctx->active_entry_head = ctx->active_entry_head->next; + } + ASSERT(entry); + + if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) && + (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) && + (!entry->suppressed)) { + *ac_credit_spent = credit_spent; + if (entry->state == WLFC_STATE_CLOSE) { + *ac_credit_spent = 0; + } + + /* higher precedence will be picked up first, + * i.e. suppressed packets before delayed ones + */ + p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec)); + *needs_hdr = 0; + if (p == NULL) { + /* De-Q from delay Q */ + p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec)); + *needs_hdr = 1; + } + + if (p != NULL) { + bcm_pkt_validate_chk(p); + /* did the packet come from suppress sub-queue? */ + if (entry->requested_credit > 0) { + entry->requested_credit--; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_sent_packets++; +#endif + } else if (entry->requested_packet > 0) { + entry->requested_packet--; + DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p)); + } + + *entry_out = entry; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + ctx->pkt_cnt_in_psq--; + _dhd_wlfc_flow_control_check(ctx, &entry->psq, + DHD_PKTTAG_IF(PKTTAG(p))); + /* + * A packet has been picked up, update traffic availability bitmap, + * if applicable. + */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + return p; + } + } + } + return NULL; +} /* _dhd_wlfc_deque_delayedq */ + +/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */ +static int +_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec) +{ + wlfc_mac_descriptor_t* entry; + + if (pktbuf != NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, pktbuf); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. 
+ */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1), + FALSE, WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + WLFC_DBGMESG(("D")); + ctx->stats.delayq_full_error++; + return BCME_ERROR; + } + + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + } + + return BCME_OK; +} /* _dhd_wlfc_enque_delayq */ + +/** Returns TRUE if caller supplied packet is destined for caller supplied interface */ +static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid) +{ + if (!p || !p_ifid) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p)))); +} + +/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */ +static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry) +{ + if (!p || !entry) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p)))); +} + +static void +_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) +{ + dhd_pub_t *dhdp; + bool credit_return = FALSE; + + if (!wlfc || !pkt) { + return; + } + + dhdp = (dhd_pub_t *)(wlfc->dhdp); + if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) && + DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + int lender, credit_returned = 0; + uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt)); + + credit_return = TRUE; + + /* Note that borrower is fifo_id */ + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + + BCM_REFERENCE(credit_return); +#if defined(DHD_WLFC_THREAD) + if (credit_return) { + _dhd_wlfc_thread_wakeup(dhdp); + } +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** Removes and frees a packet from the hanger. Called during eg tx complete. 
*/ +static void +_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state, + int pkt_txstatus) +{ + wlfc_hanger_t* hanger; + wlfc_hanger_item_t* item; + + if (!wlfc) + return; + + hanger = (wlfc_hanger_t*)wlfc->hanger; + if (!hanger) + return; + + if (slot_id == WLFC_HANGER_MAXITEMS) + return; + + item = &hanger->items[slot_id]; + + if (item->pkt) { + item->pkt_state |= pkt_state; + if (pkt_txstatus != -1) + item->pkt_txstatus = (uint8)pkt_txstatus; + bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state); + if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) { + void *p = NULL; + void *pkt = item->pkt; + uint8 old_state = item->state; + int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE); + BCM_REFERENCE(ret); + BCM_REFERENCE(pkt); + ASSERT((ret == BCME_OK) && p && (pkt == p)); + if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n", + pkt, old_state, item->pkt_state); + } + ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED); + + /* free packet */ + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))] + [DHD_PKTTAG_FIFO(PKTTAG(p))]--; + wlfc->stats.pktout++; + dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus); + PKTFREE(wlfc->osh, p, TRUE); + } + } else { + /* free slot */ + if (item->state == WLFC_HANGER_ITEM_STATE_FREE) + DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n", + __FUNCTION__, __LINE__, item->pkt_state, pkt_state)); + item->state = WLFC_HANGER_ITEM_STATE_FREE; + } +} /* _dhd_wlfc_hanger_free_pkt */ + +/** Called during eg detach() */ +static void +_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, + bool dir, f_processpkt_t fn, void *arg, q_type_t q_type) +{ + int prec; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + ASSERT(dhdp); + + /* Optimize flush, if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->len == 0) { + return; + } + + for (prec = 0; prec < pq->num_prec; prec++) { + struct pktq_prec *q; + void *p, *prev = NULL; + + q = &pq->q[prec]; + p = q->head; + while (p) { + bcm_pkt_validate_chk(p); + if (fn == NULL || (*fn)(p, arg)) { + bool head = (p == q->head); + if (head) + q->head = PKTLINK(p); + else + PKTSETLINK(prev, PKTLINK(p)); + if (q_type == Q_TYPE_PSQ) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + _dhd_wlfc_hanger_remove_reference(ctx->hanger, p); + } + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + ctx->stats.cleanup_psq_cnt++; + if (!(prec & 1)) { + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, + 0, 0, TRUE); +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" 
+ " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, + TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + } else if (q_type == Q_TYPE_AFQ) { + wlfc_mac_descriptor_t* entry = + _dhd_wlfc_find_table_entry(ctx, p); + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + _dhd_wlfc_return_implied_credit(ctx, p); + ctx->stats.cleanup_fw_cnt++; + } + PKTSETLINK(p, NULL); + if (dir) { + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->stats.pktout++; + dhd_txcomplete(dhdp, p, FALSE); + } + PKTFREE(ctx->osh, p, dir); + + q->len--; + pq->len--; + p = (head ? q->head : PKTLINK(prev)); + } else { + prev = p; + p = PKTLINK(p); + } + } + + if (q->head == NULL) { + ASSERT(q->len == 0); + q->tail = NULL; + } + + } + + if (fn == NULL) + ASSERT(pq->len == 0); +} /* _dhd_wlfc_pktq_flush */ + + +/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */ +static void* +_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + return NULL; + + bcm_pkt_validate_chk(p); + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + + return p; +} + +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + wlfc_mac_descriptor_t* entry; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! 
Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode) && + !_dhd_wlfc_hanger_remove_reference(h, pkt)) { + DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n", + __FUNCTION__, pkt)); + } + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + _dhd_wlfc_return_implied_credit(wlfc, pkt); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--; + wlfc->stats.pktout++; + wlfc->stats.cleanup_txq_cnt++; + dhd_txcomplete(dhd, pkt, FALSE); + PKTFREE(wlfc->osh, pkt, TRUE); + } +} /* _dhd_wlfc_cleanup_txq */ + +/** called during eg detach */ +void +_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int i; + int total_entries; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + + wlfc->stats.cleanup_txq_cnt = 0; + wlfc->stats.cleanup_psq_cnt = 0; + wlfc->stats.cleanup_fw_cnt = 0; + + /* + * flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one + */ + /* flush bus->txq */ + _dhd_wlfc_cleanup_txq(dhd, fn, arg); + + /* flush psq, search all entries, include nodes as well as interfaces */ + total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t); + table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries; + + for (i = 0; i < total_entries; i++) { + if (table[i].occupied) { + /* release packets held in PSQ (both delayed and suppressed) */ + if (table[i].psq.len) { + WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n", + __FUNCTION__, i, table[i].psq.len)); + _dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE, + fn, arg, Q_TYPE_PSQ); + } + + /* free packets held in AFQ */ + if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) { + _dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE, + fn, arg, Q_TYPE_AFQ); + } + + if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) { + table[i].occupied = 0; + if (table[i].transit_count || table[i].suppr_transit_count) { + DHD_ERROR(("%s: table[%d] transit(%d), suppr_tansit(%d)\n", + __FUNCTION__, i, + table[i].transit_count, + table[i].suppr_transit_count)); + } + } + } + } + + /* + . flush remained pkt in hanger queue, not in bus->txq nor psq. + . the remained pkt was successfully downloaded to dongle already. + . hanger slot state cannot be set to free until receive txstatus update. 
+ */ + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + for (i = 0; i < h->max_items; i++) { + if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) || + (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { + if (fn == NULL || (*fn)(h->items[i].pkt, arg)) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED; + } + } + } + } + + return; +} /* _dhd_wlfc_cleanup */ + +/** Called after eg the dongle signalled to the DHD that it connected with a new remote MAC */ +static int +_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea, + f_processpkt_t fn, void *arg) +{ + int rc = BCME_OK; + + + if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) { + entry->occupied = 1; + entry->state = WLFC_STATE_OPEN; + entry->requested_credit = 0; + entry->interface_id = ifid; + entry->iftype = iftype; + entry->ac_bitmap = 0xff; /* update this when handling APSD */ + + /* for an interface entry we may not care about the MAC address */ + if (ea != NULL) + memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN); + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + entry->suppressed = FALSE; + entry->transit_count = 0; + entry->suppr_transit_count = 0; + entry->onbus_pkts_count = 0; + } + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN); + } + + if (entry->next == NULL) { + /* not linked to anywhere, add to tail */ + if (ctx->active_entry_head) { + entry->prev = ctx->active_entry_head->prev; + ctx->active_entry_head->prev->next = entry; + ctx->active_entry_head->prev = entry; + entry->next = ctx->active_entry_head; + } else { + ASSERT(ctx->active_entry_count == 0); + entry->prev = entry->next = entry; + ctx->active_entry_head = entry; + } + ctx->active_entry_count++; + } else { + DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__, + (int)(entry - &ctx->destination_entries.nodes[0]))); + } + } + } else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) { + /* When the entry is deleted, the packets that are queued in the entry must be + cleaned up. The cleanup action should take place before 'occupied' is set to 0.
+ */ + _dhd_wlfc_cleanup(ctx->dhdp, fn, arg); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); + + entry->occupied = 0; + entry->state = WLFC_STATE_CLOSE; + memset(&entry->ea[0], 0, ETHER_ADDR_LEN); + + if (entry->next) { + /* not floating, remove from Q */ + if (ctx->active_entry_count <= 1) { + /* last item */ + ctx->active_entry_head = NULL; + ctx->active_entry_count = 0; + } else { + entry->prev->next = entry->next; + entry->next->prev = entry->prev; + if (entry == ctx->active_entry_head) { + ctx->active_entry_head = entry->next; + } + ctx->active_entry_count--; + } + entry->next = entry->prev = NULL; + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + } + } + return rc; +} /* _dhd_wlfc_mac_entry_update */ + + +#ifdef LIMIT_BORROW + +/** LIMIT_BORROW specific function */ +static int +_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac, + bool bBorrowAll) +{ + int lender_ac, borrow_limit = 0; + int rc = -1; + + if (ctx == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return -1; + } + + /* Borrow from lowest priority available AC (including BC/MC credits) */ + for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) { + if (!bBorrowAll) { + borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO; + } else { + borrow_limit = 0; + } + + if (ctx->FIFO_credit[lender_ac] > borrow_limit) { + ctx->credits_borrowed[borrower_ac][lender_ac]++; + ctx->FIFO_credit[lender_ac]--; + rc = lender_ac; + break; + } + } + + return rc; +} + +/** LIMIT_BORROW specific function */ +static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac) +{ + if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) || + (borrower_ac < 0) || (borrower_ac > AC_COUNT)) { + DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n", + __FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac)); + + return BCME_BADARG; + } + + ctx->credits_borrowed[borrower_ac][lender_ac]--; + ctx->FIFO_credit[lender_ac]++; + + return BCME_OK; +} + +#endif /* LIMIT_BORROW */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware. 
+ * + * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +static int +_dhd_wlfc_interface_entry_update(void* state, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + wlfc_mac_descriptor_t* entry; + + if (ifid >= WLFC_MAX_IFNUM) + return BCME_BADARG; + + entry = &ctx->destination_entries.interfaces[ifid]; + + return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea, + _dhd_wlfc_ifpkt_fn, &ifid); +} + +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +static int +_dhd_wlfc_BCMCCredit_support_update(void* state) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + + ctx->bcmc_credit_supported = TRUE; + return BCME_OK; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +static int +_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + int i; + + for (i = 0; i <= 4; i++) { + if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) { + DHD_ERROR(("%s: credit[i] is not returned, (%d %d)\n", + __FUNCTION__, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i])); + } + } + + /* update the AC FIFO credit map */ + ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]); + ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]); + ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]); + ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]); + ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]); + + ctx->Init_FIFO_credit[0] = credits[0]; + ctx->Init_FIFO_credit[1] = credits[1]; + ctx->Init_FIFO_credit[2] = credits[2]; + ctx->Init_FIFO_credit[3] = credits[3]; + ctx->Init_FIFO_credit[4] = credits[4]; + + /* credit for ATIM FIFO is not used yet. */ + ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0; + + return BCME_OK; +} + +/** + * Called during committing of a transmit packet from the OS DHD layer to the next layer towards + * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer. + * + * @param[in/out] ctx Driver specific flow control administration + * @param[in] ac Access Category (QoS) of caller supplied packet + * @param[in] commit_info Contains eg the packet to send + * @param[in] fcommit Function pointer to transmit function of next software layer + * @param[in] commit_ctx Opaque context used when calling next layer + */ +static int +_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, + dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) +{ + uint32 hslot; + int rc; + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + /* + if ac_fifo_credit_spent = 0 + + This packet will not count against the FIFO credit. + To ensure the txstatus corresponding to this packet + does not provide an implied credit (default behavior) + mark the packet accordingly. + + if ac_fifo_credit_spent = 1 + + This is a normal packet and it counts against the FIFO + credit count.
+ */ + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent); + rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p, + commit_info->needs_hdr, &hslot); + + if (rc == BCME_OK) { + rc = fcommit(commit_ctx, commit_info->p); + if (rc == BCME_OK) { + uint8 gen = WL_TXSTATUS_GET_GENERATION( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))); + ctx->stats.pkt2bus++; + if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) { + ctx->stats.send_pkts[ac]++; + WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); + } + + if (gen != commit_info->mac_entry->generation) { + /* will be suppressed back by design */ + if (!commit_info->mac_entry->suppressed) { + commit_info->mac_entry->suppressed = TRUE; + } + commit_info->mac_entry->suppr_transit_count++; + } + commit_info->mac_entry->transit_count++; + commit_info->mac_entry->onbus_pkts_count++; + } else if (commit_info->needs_hdr) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + void *pout = NULL; + /* pop hanger for delayed packet */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE); + ASSERT(commit_info->p == pout); + } + } + } else { + ctx->stats.generic_error++; + } + + if (rc != BCME_OK) { + /* + pretx pkt process or bus commit has failed, rollback. + - remove wl-header for a delayed packet + - save wl-header header for suppressed packets + - reset credit check flag + */ + _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot); + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0); + } + + return rc; +} /* _dhd_wlfc_handle_packet_commit */ + +/** Returns remote MAC descriptor for caller supplied MAC address */ +static uint8 +_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea) +{ + wlfc_mac_descriptor_t* table = + ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes; + uint8 table_index; + + if (ea != NULL) { + for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) { + if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) && + table[table_index].occupied) + return table_index; + } + } + return WLFC_MAC_DESC_ID_INVALID; +} + +/** + * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the + * status of a frame that the dongle attempted to transmit over the wireless medium. 
+ */ +static int +dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt) +{ + athost_wl_status_info_t* ctx; + wlfc_mac_descriptor_t* entry = NULL; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!dhd) { + DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd)); + return BCME_BADARG; + } + ctx = (athost_wl_status_info_t*)dhd->wlfc_state; + if (!ctx) { + DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx)); + return BCME_ERROR; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->psq; + + ASSERT(((prec << 1) + 1) < pq->num_prec); + + q = &pq->q[((prec << 1) + 1)]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->len--; + pq->len--; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + + PKTSETLINK(p, NULL); + + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(ctx, p); + } else { + _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + } + + entry->transit_count++; + + return BCME_OK; +} + +static int +_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac) +{ + uint8 status_flag_ori, status_flag; + uint32 status; + int ret = BCME_OK; + int remove_from_hanger_ori, remove_from_hanger = 1; + void* pktbuf = NULL; + uint8 fifo_id = 0, gen = 0, count = 0, hcnt; + uint16 hslot; + wlfc_mac_descriptor_t* entry = NULL; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + uint16 seq = 0, seq_fromfw = 0, seq_num = 0; + + memcpy(&status, pkt_info, sizeof(uint32)); + status = ltoh32(status); + status_flag = WL_TXSTATUS_GET_FLAGS(status); + hcnt = WL_TXSTATUS_GET_FREERUNCTR(status); + hslot = WL_TXSTATUS_GET_HSLOT(status); + fifo_id = WL_TXSTATUS_GET_FIFO(status); + gen = WL_TXSTATUS_GET_GENERATION(status); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ); + seq = ltoh16(seq); + seq_fromfw = WL_SEQ_GET_FROMFW(seq); + seq_num = WL_SEQ_GET_NUM(seq); + } + + wlfc->stats.txstatus_in += len; + + if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { + wlfc->stats.d11_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { + wlfc->stats.wl_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { + wlfc->stats.wlc_tossed_pkts += len; + } + + else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + wlfc->stats.pkt_freed += len; + } + + if 
(dhd->proptxstatus_txstatus_ignore) { + if (!remove_from_hanger) { + DHD_ERROR(("suppress txstatus: %d\n", status_flag)); + } + return BCME_OK; + } + + status_flag_ori = status_flag; + remove_from_hanger_ori = remove_from_hanger; + + while (count < len) { + if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf); + } else { + status_flag = status_flag_ori; + remove_from_hanger = remove_from_hanger_ori; + ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE); + if (!pktbuf) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, -1); + goto cont; + } else { + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) { + status_flag = WLFC_CTL_PKTFLAG_DISCARD; + remove_from_hanger = 1; + } + } + } + + if ((ret != BCME_OK) || !pktbuf) { + goto cont; + } + + bcm_pkt_validate_chk(pktbuf); + + /* set fifo_id to correct value because not all FW does that */ + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + + entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf); + + if (!remove_from_hanger) { + /* this packet was suppressed */ + if (!entry->suppressed || (entry->generation != gen)) { + if (!entry->suppressed) { + entry->suppr_transit_count = entry->transit_count; + if (p_mac) { + *p_mac = entry; + } + } else { + DHD_ERROR(("gen(%d), entry->generation(%d)\n", + gen, entry->generation)); + } + entry->suppressed = TRUE; + + } + entry->generation = gen; + } + +#ifdef PROP_TXSTATUS_DEBUG + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) + { + uint32 new_t = OSL_SYSUPTIME(); + uint32 old_t; + uint32 delta; + old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time; + + + wlfc->stats.latency_sample_count++; + if (new_t > old_t) + delta = new_t - old_t; + else + delta = 0xffffffff + new_t - old_t; + wlfc->stats.total_status_latency += delta; + wlfc->stats.latency_most_recent = delta; + + wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta; + if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32)) + wlfc->stats.idx_delta = 0; + } +#endif /* PROP_TXSTATUS_DEBUG */ + + /* pick up the implicit credit from this packet */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) { + _dhd_wlfc_return_implied_credit(wlfc, pktbuf); + } else { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the destination entry (for pspoll etc.) 
+ */ + if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) { + entry->requested_credit++; +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* DHD_WLFC_THREAD */ + } +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_acks++; +#endif + } + + if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) || + (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) { + /* save generation bit inside packet */ + WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw); + WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num); + } + + ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf); + if (ret != BCME_OK) { + /* delay q is full, drop this packet */ + DHD_WLFC_QMON_COMPLETE(entry); + _dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE); + } else { + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + /* Mark suppressed to avoid a double free + during wlfc cleanup + */ + _dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen); + } + } + } else { + + DHD_WLFC_QMON_COMPLETE(entry); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE); + } else { + dhd_txcomplete(dhd, pktbuf, TRUE); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))] + [DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--; + wlfc->stats.pktout++; + /* free the packet */ + PKTFREE(wlfc->osh, pktbuf, TRUE); + } + } + /* pkt back from firmware side */ + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + +cont: + hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK; + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK; + } + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) { + seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK; + } + + count++; + } + + return BCME_OK; +} /* _dhd_wlfc_compressed_txstatus_update */ + +/** + * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle. + * @param[in] credits caller supplied credit that will be added to the host credit. 
+ */ +static int +_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) +{ + int i; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.fifo_credits_back[i] += credits[i]; +#endif + + /* update FIFO credits */ + if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT) + { + int lender; /* Note that borrower is i */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) { + if (wlfc->credits_borrowed[i][lender] > 0) { + if (credits[i] >= wlfc->credits_borrowed[i][lender]) { + credits[i] -= + (uint8)wlfc->credits_borrowed[i][lender]; + wlfc->FIFO_credit[lender] += + wlfc->credits_borrowed[i][lender]; + wlfc->credits_borrowed[i][lender] = 0; + } else { + wlfc->credits_borrowed[i][lender] -= credits[i]; + wlfc->FIFO_credit[lender] += credits[i]; + credits[i] = 0; + } + } + } + + /* If we have more credits left over, these must belong to the AC */ + if (credits[i] > 0) { + wlfc->FIFO_credit[i] += credits[i]; + } + + if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) { + wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i]; + } + } + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + + return BCME_OK; +} /* _dhd_wlfc_fifocreditback_indicate */ + + +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* entry; + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + uint8 results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ]; + uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0}; + uint32 htod = 0; + uint16 htodseq = 0; + bool bCreditUpdate = FALSE; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + if (entry) { + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + + /* fake a suppression txstatus */ + htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt)); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS); + WL_TXSTATUS_SET_GENERATION(htod, entry->generation); + htod = htol32(htod); + memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS); + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt)); + if (WL_SEQ_GET_FROMDRV(htodseq)) { + WL_SEQ_SET_FROMFW(htodseq, 1); + WL_SEQ_SET_FROMDRV(htodseq, 0); + } + htodseq = htol16(htodseq); + memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq, + WLFC_CTL_VALUE_LEN_SEQ); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, pkt); + } + _dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL); + + /* fake a fifo credit back */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++; + bCreditUpdate = TRUE; + } + } + + if (bCreditUpdate) { + _dhd_wlfc_fifocreditback_indicate(dhd, credits); + } +} /* 
_dhd_wlfc_suppress_txq */ + +static int +_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value) +{ + uint32 timestamp; + + (void)dhd; + + bcopy(&value[2], &timestamp, sizeof(uint32)); + timestamp = ltoh32(timestamp); + DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp)); + return BCME_OK; +} + +static int +_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi) +{ + (void)dhd; + (void)rssi; + return BCME_OK; +} + +static void +_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i == wlfc->requested_entry_count) { + /* no match entry found */ + ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1)); + wlfc->requested_entry[wlfc->requested_entry_count++] = entry; + } +} + +/** called on eg receiving 'mac open' event from the dongle. */ +static void +_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i < wlfc->requested_entry_count) { + /* found */ + ASSERT(wlfc->requested_entry_count > 0); + wlfc->requested_entry_count--; + if (i != wlfc->requested_entry_count) { + wlfc->requested_entry[i] = + wlfc->requested_entry[wlfc->requested_entry_count]; + } + wlfc->requested_entry[wlfc->requested_entry_count] = NULL; + } +} + +/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */ +static int +_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + int rc; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 existing_index; + uint8 table_index; + uint8 ifid; + uint8* ea; + + WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n", + __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7], + ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"), + WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0])); + + table = wlfc->destination_entries.nodes; + table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]); + ifid = value[1]; + ea = &value[2]; + + _dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]); + if (type == WLFC_CTL_TYPE_MACDESC_ADD) { + existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]); + if ((existing_index != WLFC_MAC_DESC_ID_INVALID) && + (existing_index != table_index) && table[existing_index].occupied) { + /* + there is an existing different entry, free the old one + and move it to new index if necessary.
+ */ + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index], + eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id, + table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn, + &table[existing_index]); + } + + if (!table[table_index].occupied) { + /* this new MAC entry does not exist, create one */ + table[table_index].mac_handle = value[0]; + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_ADD, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, NULL, NULL); + } else { + /* the space should have been empty, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + + if (type == WLFC_CTL_TYPE_MACDESC_DEL) { + if (table[table_index].occupied) { + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_DEL, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, _dhd_wlfc_entrypkt_fn, &table[table_index]); + } else { + /* the space should have been occupied, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + BCM_REFERENCE(rc); + return BCME_OK; +} /* _dhd_wlfc_mac_table_update */ + +/** Called on a 'mac open' or 'mac close' event indicated by the dongle */ +static int +_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */ + uint8 mac_handle = value[0]; + int i; + + table = wlfc->destination_entries.nodes; + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + if (type == WLFC_CTL_TYPE_MAC_OPEN) { + desc->state = WLFC_STATE_OPEN; + desc->ac_bitmap = 0xff; + DHD_WLFC_CTRINC_MAC_OPEN(desc); + desc->requested_credit = 0; + desc->requested_packet = 0; + _dhd_wlfc_remove_requested_entry(wlfc, desc); + } else { + desc->state = WLFC_STATE_CLOSE; + DHD_WLFC_CTRINC_MAC_CLOSE(desc); + /* Indicate to firmware if there is any traffic pending. 
*/ + for (i = 0; i < AC_COUNT; i++) { + _dhd_wlfc_traffic_pending_check(wlfc, desc, i); + } + } + } else { + wlfc->stats.psmode_update_failed++; + } + + return BCME_OK; +} /* _dhd_wlfc_psmode_update */ + +/** called upon receiving 'interface open' or 'interface close' event from the dongle */ +static int +_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 if_id = value[0]; + + if (if_id < WLFC_MAX_IFNUM) { + table = wlfc->destination_entries.interfaces; + if (table[if_id].occupied) { + if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) { + table[if_id].state = WLFC_STATE_OPEN; + /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */ + } else { + table[if_id].state = WLFC_STATE_CLOSE; + /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */ + } + return BCME_OK; + } + } + wlfc->stats.interface_update_failed++; + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */ +static int +_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 credit; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + credit = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_credit = credit; + + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + _dhd_wlfc_add_requested_entry(wlfc, desc); + } else { + wlfc->stats.credit_request_failed++; + } + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */ +static int +_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 packet_count; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + packet_count = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_packet = packet_count; + + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + _dhd_wlfc_add_requested_entry(wlfc, desc); + } else { + wlfc->stats.packet_request_failed++; + } + + return BCME_OK; +} + +/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */ +static void +_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len) +{ + if (info_len) { + if (info_buf) { + bcopy(val, info_buf, len); + *info_len = len; + } else { + *info_len = 0; + } + } +} + +/* + * public functions + */ + +bool dhd_wlfc_is_supported(dhd_pub_t *dhd) +{ + bool rc = TRUE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rc = FALSE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_enable(dhd_pub_t *dhd) +{ + int i, rc = BCME_OK; + athost_wl_status_info_t* wlfc; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_enabled || dhd->wlfc_state) { + rc = BCME_OK; + goto exit; + } + + /* allocate space to track txstatus propagated from firmware */ + dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO, + sizeof(athost_wl_status_info_t)); + if (dhd->wlfc_state == NULL) { + rc = BCME_NOMEM; + goto exit; + } 
+ + /* initialize state space */ + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + memset(wlfc, 0, sizeof(athost_wl_status_info_t)); + + /* remember osh & dhdp */ + wlfc->osh = dhd->osh; + wlfc->dhdp = dhd; + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS); + if (wlfc->hanger == NULL) { + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + rc = BCME_NOMEM; + goto exit; + } + } + + dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; + /* default to check rx pkt */ + dhd->wlfc_rxpkt_chk = TRUE; + if (dhd->op_mode & DHD_FLAG_IBSS_MODE) { + dhd->wlfc_rxpkt_chk = FALSE; + } + + /* initialize all interfaces to accept traffic */ + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + wlfc->hostif_flow_state[i] = OFF; + } + + _dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other, + eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL); + + wlfc->allow_credit_borrow = 0; + wlfc->single_ac = 0; + wlfc->single_ac_timestamp = 0; + + +exit: + dhd_os_wlfc_unblock(dhd); + + return rc; +} /* dhd_wlfc_enable */ + +#ifdef SUPPORT_P2P_GO_PS + +/** + * Called when the host platform enters a lower power mode, eg right before a system hibernate. + * SUPPORT_P2P_GO_PS specific function. + */ +int +dhd_wlfc_suspend(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0) + return 0; + tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +/** + * Called when the host platform resumes from a power management operation, eg resume after a + * system hibernate. SUPPORT_P2P_GO_PS specific function. 
+ */ +int +dhd_wlfc_resume(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == + (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) + return 0; + tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +#endif /* SUPPORT_P2P_GO_PS */ + +/** A flow control header was received from firmware, containing one or more TLVs */ +int +dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf, + uint *reorder_info_len) +{ + uint8 type, len; + uint8* value; + uint8* tmpbuf; + uint16 remainder = (uint16)tlv_hdr_len; + uint16 processed = 0; + athost_wl_status_info_t* wlfc = NULL; + void* entry; + + if ((dhd == NULL) || (pktbuf == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) { + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + } + + tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf); + + if (remainder) { + while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) { + type = tmpbuf[processed]; + if (type == WLFC_CTL_TYPE_FILLER) { + remainder -= 1; + processed += 1; + continue; + } + + len = tmpbuf[processed + 1]; + value = &tmpbuf[processed + 2]; + + if (remainder < (2 + len)) + break; + + remainder -= 2 + len; + processed += 2 + len; + entry = NULL; + + DHD_INFO(("%s():%d type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + + if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS) + _dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf, + reorder_info_len); + + if (wlfc == NULL) { + ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER); + + if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS && + type != WLFC_CTL_TYPE_TRANS_ID) + DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!" 
+ " type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + continue; + } + + if (type == WLFC_CTL_TYPE_TXSTATUS) { + _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry); + } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) { + uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS; + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ; + } + _dhd_wlfc_compressed_txstatus_update(dhd, value, + value[compcnt_offset], &entry); + } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) { + _dhd_wlfc_fifocreditback_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_RSSI) { + _dhd_wlfc_rssi_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) { + _dhd_wlfc_credit_request(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) { + _dhd_wlfc_packet_request(dhd, value); + } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || + (type == WLFC_CTL_TYPE_MAC_CLOSE)) { + _dhd_wlfc_psmode_update(dhd, value, type); + } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || + (type == WLFC_CTL_TYPE_MACDESC_DEL)) { + _dhd_wlfc_mac_table_update(dhd, value, type); + } else if (type == WLFC_CTL_TYPE_TRANS_ID) { + _dhd_wlfc_dbg_senum_check(dhd, value); + } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || + (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) { + _dhd_wlfc_interface_update(dhd, value, type); + } + + if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) { + /* suppress all packets for this mac entry from bus->txq */ + _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry); + } + } /* while */ + + if (remainder != 0 && wlfc) { + /* trouble..., something is not right */ + wlfc->stats.tlv_parse_failed++; + } + } /* if */ + + if (wlfc) + wlfc->stats.dhd_hdrpulls++; + + dhd_os_wlfc_unblock(dhd); + return BCME_OK; +} /* dhd_wlfc_parse_header_info */ + +KERNEL_THREAD_RETURN_TYPE +dhd_wlfc_transfer_packets(void *data) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)data; + int ac, single_ac = 0, rc = BCME_OK; + dhd_wlfc_commit_info_t commit_info; + athost_wl_status_info_t* ctx; + int bus_retry_count = 0; + int pkt_send = 0; + + uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */ + uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */ + uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */ + bool no_credit = FALSE; + + int lender; + +#if defined(DHD_WLFC_THREAD) + /* wait till someone wakeup me up, will change it at running time */ + int wait_msec = msecs_to_jiffies(0xFFFFFFFF); +#endif /* defined(DHD_WLFC_THREAD) */ + +#if defined(DHD_WLFC_THREAD) + while (1) { + bus_retry_count = 0; + pkt_send = 0; + tx_map = 0; + rx_map = 0; + packets_map = 0; + wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead, + dhdp->wlfc_thread_go, wait_msec); + if (kthread_should_stop()) { + break; + } + dhdp->wlfc_thread_go = FALSE; + + dhd_os_wlfc_block(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; +#if defined(DHD_WLFC_THREAD) + if (!ctx) + goto exit; +#endif /* defined(DHD_WLFC_THREAD) */ + + memset(&commit_info, 0, sizeof(commit_info)); + + /* + Commit packets for regular AC traffic. Higher priority first. + First, use up FIFO credits available to each AC. Based on distribution + and credits left, borrow from other ACs as applicable + + -NOTE: + If the bus between the host and firmware is overwhelmed by the + traffic from host, it is possible that higher priority traffic + starves the lower priority queue. 
If that occurs often, we may + have to employ weighted round-robin or ucode scheme to avoid + low priority packet starvation. + */ + + for (ac = AC_COUNT; ac >= 0; ac--) { + if (dhdp->wlfc_rxpkt_chk) { + /* check rx packet */ + uint32 curr_t = OSL_SYSUPTIME(), delta; + + delta = curr_t - ctx->rx_timestamp[ac]; + if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) { + rx_map |= (1 << ac); + } + } + + if (ctx->pkt_cnt_per_ac[ac] == 0) { + continue; + } + + tx_map |= (1 << ac); + single_ac = ac + 1; + while (FALSE == dhdp->proptxstatus_txoff) { + /* packets from delayQ with less priority are fresh and + * they'd need header and have no MAC entry + */ + no_credit = (ctx->FIFO_credit[ac] < 1); + if (dhdp->proptxstatus_credit_ignore || + ((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) { + no_credit = FALSE; + } + + lender = -1; +#ifdef LIMIT_BORROW + if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map)) { + /* try borrow from lower priority */ + lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE); + if (lender != -1) { + no_credit = FALSE; + } + } +#endif + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + no_credit); + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + if (commit_info.p == NULL) { +#ifdef LIMIT_BORROW + if (lender != -1) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + break; + } + + if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) { + ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent); + } + /* here we can ensure have credit or no credit needed */ + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + if (commit_info.ac_fifo_credit_spent && (lender == -1)) { + ctx->FIFO_credit[ac]--; + } +#ifdef LIMIT_BORROW + else if (!commit_info.ac_fifo_credit_spent && (lender != -1)) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + } else { +#ifdef LIMIT_BORROW + if (lender != -1) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + if (ctx->pkt_cnt_per_ac[ac]) { + packets_map |= (1 << ac); + } + } + + if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) { + /* nothing send out or remain in queue */ + rc = BCME_OK; + goto exit; + } + + if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) { + /* only one tx ac exist and no higher rx ac */ + if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) { + ac = single_ac - 1; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (single_ac != ctx->single_ac) { + /* new single ac traffic (first single ac or different single ac) */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = curr_t; + ctx->single_ac = (uint8)single_ac; + rc = BCME_OK; + goto exit; + } + /* same ac traffic, check if it lasts enough time */ + delta = curr_t - ctx->single_ac_timestamp; + + if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) { + /* wait enough time, can borrow now */ + ctx->allow_credit_borrow = 1; + ac = single_ac - 1; + } else { + rc = BCME_OK; + goto exit; + } + } + } else { + /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = 0; + ctx->single_ac = 0; + rc = BCME_OK; + goto exit; + } + + if (packets_map == 0) { + /* nothing to send, skip borrow */ + rc = BCME_OK; + goto exit; + } + + /* At this point, borrow all credits only for ac */ + while (FALSE == dhdp->proptxstatus_txoff) { +#ifdef LIMIT_BORROW + if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) { + break; + } +#endif + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + FALSE); + if (commit_info.p == NULL) { + /* before borrow only one ac exists and now this only ac is empty */ +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + break; + } + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + if (commit_info.ac_fifo_credit_spent) { +#ifndef LIMIT_BORROW + ctx->FIFO_credit[ac]--; +#endif + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + } + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + BCM_REFERENCE(pkt_send); + +exit: +#if defined(DHD_WLFC_THREAD) + dhd_os_wlfc_unblock(dhdp); + if (ctx && ctx->pkt_cnt_in_psq && pkt_send) { + wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS); + } else { + wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS); + } + } + return 0; +#else + return rc; +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** + * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by + * eg dhd_sendpkt(). + * @param[in] dhdp Pointer to public DHD structure + * @param[in] fcommit Pointer to transmit function of next layer + * @param[in] commit_ctx Opaque context used when calling next layer + * @param[in] pktbuf Packet to send + * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if + */ +int +dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf, + bool need_toggle_host_if) +{ + int rc = BCME_OK; + athost_wl_status_info_t* ctx; + +#if defined(DHD_WLFC_THREAD) + if (!pktbuf) + return BCME_OK; +#endif /* defined(DHD_WLFC_THREAD) */ + + if ((dhdp == NULL) || (fcommit == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + if (pktbuf) { + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0); + } + rc = WLFC_UNSUPPORTED; + goto exit; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + + if (dhdp->proptxstatus_module_ignore) { + if (pktbuf) { + uint32 htod = 0; + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE); + if (fcommit(commit_ctx, pktbuf)) { + /* free it if failed, otherwise do it in tx complete cb */ + PKTFREE(ctx->osh, pktbuf, TRUE); + } + rc = BCME_OK; + } + goto exit; + } + + if (pktbuf) { + int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + ASSERT(ac <= AC_COUNT); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1); + /* en-queue the packets to respective queue. 
*/ + rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac); + if (rc) { + _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE); + } else { + ctx->stats.pktin++; + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++; + } + } + + if (!ctx->fcommit) { + ctx->fcommit = fcommit; + } else { + ASSERT(ctx->fcommit == fcommit); + } + if (!ctx->commit_ctx) { + ctx->commit_ctx = commit_ctx; + } else { + ASSERT(ctx->commit_ctx == commit_ctx); + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhdp); +#else + dhd_wlfc_transfer_packets(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + +exit: + dhd_os_wlfc_unblock(dhdp); + return rc; +} /* dhd_wlfc_commit_packets */ + +/** + * Called when the (lower) DBUS layer indicates completion (succesfull or not) of a transmit packet + */ +int +dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) +{ + athost_wl_status_info_t* wlfc; + wlfc_mac_descriptor_t *entry; + void* pout = NULL; + int rtn = BCME_OK; + if ((dhd == NULL) || (txp == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + bcm_pkt_validate_chk(txp); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rtn = WLFC_UNSUPPORTED; + goto EXIT; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.signal_only_pkts_freed++; +#endif + /* is this a signal-only packet? */ + _dhd_wlfc_pullheader(wlfc, txp); + PKTFREE(wlfc->osh, txp, TRUE); + goto EXIT; + } + + entry = _dhd_wlfc_find_table_entry(wlfc, txp); + ASSERT(entry); + + if (!success || dhd->proptxstatus_txstatus_ignore) { + WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n", + __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp)))); + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE); + ASSERT(txp == pout); + } + + /* indicate failure and free the packet */ + dhd_txcomplete(dhd, txp, success); + + /* return the credit, if necessary */ + _dhd_wlfc_return_implied_credit(wlfc, txp); + + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) + entry->suppr_transit_count--; + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--; + wlfc->stats.pktout++; + PKTFREE(wlfc->osh, txp, TRUE); + } else { + /* bus confirmed pkt went to firmware side */ + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, txp); + } else { + int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp))); + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_BUSRETURNED, -1); + } + } + + ASSERT(entry->onbus_pkts_count > 0); + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +EXIT: + dhd_os_wlfc_unblock(dhd); + return rtn; +} /* dhd_wlfc_txcomplete */ + +int +dhd_wlfc_init(dhd_pub_t *dhd) +{ + /* enable all signals & indicate host proptxstatus logic is active */ + uint32 tlv, mode, fw_caps; + int ret = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (dhd->wlfc_enabled) { + DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + dhd->wlfc_enabled = TRUE; + dhd_os_wlfc_unblock(dhd); + + tlv = 
WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | + WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + + /* + try to enable/disable signaling by sending "tlv" iovar. if that fails, + fallback to no flow control? Print a message for now. + */ + + /* enable proptxtstatus signaling by default */ + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. + */ + DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", + dhd->wlfc_enabled?"enabled":"disabled", tlv)); + } + + mode = 0; + + /* query caps */ + ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0); + + if (!ret) { + DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); + + if (WLFC_IS_OLD_DEF(fw_caps)) { + /* enable proptxtstatus v2 by default */ + mode = WLFC_MODE_AFQ; + } else { + WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps)); + WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps)); + WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps)); + } + ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0); + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_mode = 0; + if (ret >= 0) { + if (WLFC_IS_OLD_DEF(mode)) { + WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ)); + } else { + dhd->wlfc_mode = mode; + } + } + + DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); + + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_init) + dhd->plat_init((void *)dhd); + + return BCME_OK; +} /* dhd_wlfc_init */ + +/** AMPDU host reorder specific function */ +int +dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) +{ + /* enable only ampdu hostreorder here */ + uint32 tlv; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__)); + + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + /* enable proptxtstatus signaling by default */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n", + __FUNCTION__)); + } else { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. 
+ */ + DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n", + __FUNCTION__, tlv)); + } + + dhd_os_wlfc_block(dhd); + dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER; + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +int +dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + _dhd_wlfc_cleanup_txq(dhd, fn, arg); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** release all packet resources */ +int +dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + _dhd_wlfc_cleanup(dhd, fn, arg); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +int +dhd_wlfc_deinit(dhd_pub_t *dhd) +{ + /* cleanup all psq related resources */ + athost_wl_status_info_t* wlfc; + uint32 tlv = 0; + uint32 hostreorder = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (!dhd->wlfc_enabled) { + DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + dhd->wlfc_enabled = FALSE; + dhd_os_wlfc_unblock(dhd); + + /* query ampdu hostreorder */ + (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder", + &hostreorder, WLC_GET_VAR, FALSE, 0); + + if (hostreorder) { + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tvl\n", + __FUNCTION__, __LINE__)); + } + + /* Disable proptxtstatus signaling for deinit */ + (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + _dhd_wlfc_cleanup(dhd, NULL, NULL); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + int i; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) { + _dhd_wlfc_hanger_free_pkt(wlfc, i, + WLFC_HANGER_PKT_STATE_COMPLETE, TRUE); + } + } + + /* delete hanger */ + _dhd_wlfc_hanger_delete(dhd, h); + } + + + /* free top structure */ + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + dhd->proptxstatus_mode = hostreorder ? 
+ WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE; + + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_deinit) + dhd->plat_deinit((void *)dhd); + return BCME_OK; +} /* dhd_wlfc_init */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware + * @param[in] dhdp Pointer to public DHD structure + * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea); + + dhd_os_wlfc_unblock(dhdp); + return rc; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data); + + dhd_os_wlfc_unblock(dhdp); + + return rc; +} + +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state); + + dhd_os_wlfc_unblock(dhdp); + return rc; +} + +/** debug specific function */ +int +dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + int i; + uint8* ea; + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* h; + wlfc_mac_descriptor_t* mac_table; + wlfc_mac_descriptor_t* interfaces; + char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"}; + + if (!dhdp || !strbuf) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state; + + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } + + mac_table = wlfc->destination_entries.nodes; + interfaces = wlfc->destination_entries.interfaces; + bcm_bprintf(strbuf, "---- wlfc stats ----\n"); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } else { + bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push," + "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n", + h->pushed, + h->popped, + h->failed_to_push, + h->failed_to_pop, + h->failed_slotfind, + (h->pushed - h->popped)); + } + } + + bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), " + "(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n", + wlfc->stats.tlv_parse_failed, + 
wlfc->stats.credit_request_failed, + wlfc->stats.mac_update_failed, + wlfc->stats.psmode_update_failed, + wlfc->stats.delayq_full_error, + wlfc->stats.rollback_failed); + + bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) " + "(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d]," + "AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n", + wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0], + wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0], + wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1], + wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1], + wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2], + wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2], + wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3], + wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3], + wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4], + wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]); + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (interfaces[i].occupied) { + char* iftype_desc; + + if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT) + iftype_desc = "hostif_flow_state[i] == OFF) + ? " OFF":" ON")); + + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", + i, + interfaces[i].psq.len, + ((interfaces[i].state == + WLFC_STATE_OPEN) ? "OPEN":"CLOSE"), + interfaces[i].requested_credit, + interfaces[i].transit_count, + interfaces[i].suppr_transit_count, + interfaces[i].onbus_pkts_count); + + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + interfaces[i].psq.q[0].len, + interfaces[i].psq.q[1].len, + interfaces[i].afq.q[0].len, + interfaces[i].psq.q[2].len, + interfaces[i].psq.q[3].len, + interfaces[i].afq.q[1].len, + interfaces[i].psq.q[4].len, + interfaces[i].psq.q[5].len, + interfaces[i].afq.q[2].len, + interfaces[i].psq.q[6].len, + interfaces[i].psq.q[7].len, + interfaces[i].afq.q[3].len, + interfaces[i].psq.q[8].len, + interfaces[i].psq.q[9].len, + interfaces[i].afq.q[4].len); + } + } + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (mac_table[i].occupied) { + ea = mac_table[i].ea; + bcm_bprintf(strbuf, "MAC_table[%d].ea = " + "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i, + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5], + mac_table[i].interface_id); + + bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.len, + ((mac_table[i].state == + WLFC_STATE_OPEN) ? 
" OPEN":"CLOSE"), + mac_table[i].requested_credit, + mac_table[i].transit_count, + mac_table[i].suppr_transit_count, + mac_table[i].onbus_pkts_count); +#ifdef PROP_TXSTATUS_DEBUG + bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n", + i, mac_table[i].opened_ct, mac_table[i].closed_ct); +#endif + bcm_bprintf(strbuf, "MAC_table[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.q[0].len, + mac_table[i].psq.q[1].len, + mac_table[i].afq.q[0].len, + mac_table[i].psq.q[2].len, + mac_table[i].psq.q[3].len, + mac_table[i].afq.q[1].len, + mac_table[i].psq.q[4].len, + mac_table[i].psq.q[5].len, + mac_table[i].afq.q[2].len, + mac_table[i].psq.q[6].len, + mac_table[i].psq.q[7].len, + mac_table[i].afq.q[3].len, + mac_table[i].psq.q[8].len, + mac_table[i].psq.q[9].len, + mac_table[i].afq.q[4].len); + + } + } + +#ifdef PROP_TXSTATUS_DEBUG + { + int avg; + int moving_avg = 0; + int moving_samples; + + if (wlfc->stats.latency_sample_count) { + moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32); + + for (i = 0; i < moving_samples; i++) + moving_avg += wlfc->stats.deltas[i]; + moving_avg /= moving_samples; + + avg = (100 * wlfc->stats.total_status_latency) / + wlfc->stats.latency_sample_count; + bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = " + "(%d.%d, %03d, %03d)\n", + moving_samples, avg/100, (avg - (avg/100)*100), + wlfc->stats.latency_most_recent, + moving_avg); + } + } + + bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), " + "back = (%d,%d,%d,%d,%d,%d)\n", + wlfc->stats.fifo_credits_sent[0], + wlfc->stats.fifo_credits_sent[1], + wlfc->stats.fifo_credits_sent[2], + wlfc->stats.fifo_credits_sent[3], + wlfc->stats.fifo_credits_sent[4], + wlfc->stats.fifo_credits_sent[5], + + wlfc->stats.fifo_credits_back[0], + wlfc->stats.fifo_credits_back[1], + wlfc->stats.fifo_credits_back[2], + wlfc->stats.fifo_credits_back[3], + wlfc->stats.fifo_credits_back[4], + wlfc->stats.fifo_credits_back[5]); + { + uint32 fifo_cr_sent = 0; + uint32 fifo_cr_acked = 0; + uint32 request_cr_sent = 0; + uint32 request_cr_ack = 0; + uint32 bc_mc_cr_ack = 0; + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) { + fifo_cr_sent += wlfc->stats.fifo_credits_sent[i]; + } + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) { + fifo_cr_acked += wlfc->stats.fifo_credits_back[i]; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_sent += + wlfc->destination_entries.nodes[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_sent += + wlfc->destination_entries.interfaces[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_ack += + wlfc->destination_entries.nodes[i].dstncredit_acks; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_ack += + wlfc->destination_entries.interfaces[i].dstncredit_acks; + } + } + bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d)," + "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)", + fifo_cr_sent, fifo_cr_acked, + request_cr_sent, request_cr_ack, + 
wlfc->destination_entries.other.dstncredit_acks, + bc_mc_cr_ack, + wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed); + } +#endif /* PROP_TXSTATUS_DEBUG */ + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)" + "(freed,free_err,rollback)) = " + "((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.pktin, + wlfc->stats.pkt2bus, + wlfc->stats.txstatus_in, + wlfc->stats.dhd_hdrpulls, + wlfc->stats.pktout, + + wlfc->stats.pktdropped, + wlfc->stats.wlfc_header_only_pkt, + wlfc->stats.wlc_tossed_pkts, + + wlfc->stats.pkt_freed, + wlfc->stats.pkt_free_err, wlfc->stats.rollback); + + bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = " + "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.d11_suppress, + wlfc->stats.wl_suppress, + wlfc->stats.bad_suppress, + + wlfc->stats.psq_d11sup_enq, + wlfc->stats.psq_wlsup_enq, + wlfc->stats.psq_hostq_enq, + wlfc->stats.mac_handle_notfound, + + wlfc->stats.psq_d11sup_retx, + wlfc->stats.psq_wlsup_retx, + wlfc->stats.psq_hostq_retx); + + bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n", + wlfc->stats.cleanup_txq_cnt, + wlfc->stats.cleanup_psq_cnt, + wlfc->stats.cleanup_fw_cnt); + + bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error); + + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i, + wlfc->pkt_cnt_in_q[i][0], + wlfc->pkt_cnt_in_q[i][1], + wlfc->pkt_cnt_in_q[i][2], + wlfc->pkt_cnt_in_q[i][3], + wlfc->pkt_cnt_in_q[i][4]); + } + bcm_bprintf(strbuf, "\n"); + + dhd_os_wlfc_unblock(dhdp); + return BCME_OK; +} /* dhd_wlfc_dump */ + +int dhd_wlfc_clear_counts(dhd_pub_t *dhd) +{ + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* hanger; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t)); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hanger = (wlfc_hanger_t*)wlfc->hanger; + + hanger->pushed = 0; + hanger->popped = 0; + hanger->failed_slotfind = 0; + hanger->failed_to_pop = 0; + hanger->failed_to_push = 0; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** returns TRUE if flow control is enabled */ +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_enabled; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_state ? 
dhd->proptxstatus_mode : 0; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->wlfc_state) { + dhd->proptxstatus_mode = val & 0xff; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called when rx frame is received from the dongle */ +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf) +{ + athost_wl_status_info_t* wlfc; + bool rc = FALSE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return FALSE; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + if (PKTLEN(wlfc->osh, pktbuf) == 0) { + wlfc->stats.wlfc_header_only_pkt++; + rc = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock) +{ + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + if (bAcquireLock) { + dhd_os_wlfc_block(dhdp); + } + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) || + dhdp->proptxstatus_module_ignore) { + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + return WLFC_UNSUPPORTED; + } + + if (state != dhdp->proptxstatus_txoff) { + dhdp->proptxstatus_txoff = state; + } + + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + + return BCME_OK; +} + +/** Called when eg an rx frame is received from the dongle */ +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio) +{ + athost_wl_status_info_t* wlfc; + int rx_path_ac = -1; + + if ((dhd == NULL) || (prio >= NUMPRIO)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_rxpkt_chk) { + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + rx_path_ac = prio2fifo[prio]; + wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME(); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_module_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) +{ + uint32 tlv = 0; + bool bChanged = FALSE; + + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if ((bool)val != dhd->proptxstatus_module_ignore) { + dhd->proptxstatus_module_ignore = (val != 0); + /* force txstatus_ignore sync with proptxstatus_module_ignore */ + dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore; + if (FALSE == dhd->proptxstatus_module_ignore) { + tlv = WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE; + } + /* always enable 
host reorder */ + tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + bChanged = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + if (bChanged) { + /* select enable proptxtstatus signaling */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n", + __FUNCTION__, tlv)); + } else { + DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n", + __FUNCTION__, tlv)); + } + } + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_credit_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->proptxstatus_credit_ignore = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_txstatus_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->proptxstatus_txstatus_ignore = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_rxpkt_chk; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_rxpkt_chk = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +#endif /* PROP_TXSTATUS */ diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h new file mode 100644 index 000000000000..a6fd465e35fd --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h @@ -0,0 +1,554 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.h 557035 2015-05-15 18:48:57Z $ + * + */ +#ifndef __wlfc_host_driver_definitions_h__ +#define __wlfc_host_driver_definitions_h__ + + +/* #define OOO_DEBUG */ + +#define KERNEL_THREAD_RETURN_TYPE int + +typedef int (*f_commitpkt_t)(void* ctx, void* p); +typedef bool (*f_processpkt_t)(void* p, void* arg); + +#define WLFC_UNSUPPORTED -9999 + +#define WLFC_NO_TRAFFIC -1 +#define WLFC_MULTI_TRAFFIC 0 + +#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ + +/** 16 bits will provide an absolute max of 65536 slots */ +#define WLFC_HANGER_MAXITEMS 3072 + +#define WLFC_HANGER_ITEM_STATE_FREE 1 +#define WLFC_HANGER_ITEM_STATE_INUSE 2 +#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3 +#define WLFC_HANGER_ITEM_STATE_FLUSHED 4 + +#define WLFC_HANGER_PKT_STATE_TXSTATUS 1 +#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2 +#define WLFC_HANGER_PKT_STATE_COMPLETE \ + (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED) + +typedef enum { + Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */ + Q_TYPE_AFQ /**< At Firmware Queue */ +} q_type_t; + +typedef enum ewlfc_packet_state { + eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */ + eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */ + eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */ + eWLFC_PKTTYPE_MAX +} ewlfc_packet_state_t; + +typedef enum ewlfc_mac_entry_action { + eWLFC_MAC_ENTRY_ACTION_ADD, + eWLFC_MAC_ENTRY_ACTION_DEL, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + eWLFC_MAC_ENTRY_ACTION_MAX +} ewlfc_mac_entry_action_t; + +typedef struct wlfc_hanger_item { + uint8 state; + uint8 gen; + uint8 pkt_state; /**< bitmask containing eg WLFC_HANGER_PKT_STATE_TXCOMPLETE */ + uint8 pkt_txstatus; + uint32 identifier; + void* pkt; +#ifdef PROP_TXSTATUS_DEBUG + uint32 push_time; +#endif + struct wlfc_hanger_item *next; +} wlfc_hanger_item_t; + +/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */ +typedef struct wlfc_hanger { + int max_items; + uint32 pushed; + uint32 popped; + uint32 failed_to_push; + uint32 failed_to_pop; + uint32 failed_slotfind; + uint32 slot_pos; + wlfc_hanger_item_t items[1]; +} wlfc_hanger_t; + +#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \ + sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t))) + +#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */ +#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */ + +#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */ +#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1) + +#define WLFC_PSQ_LEN 2048 + +#define WLFC_FLOWCONTROL_HIWATER (2048 - 256) +#define WLFC_FLOWCONTROL_LOWATER 256 + +#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256)) +#undef WLFC_FLOWCONTROL_HIWATER +#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256) +#undef WLFC_FLOWCONTROL_LOWATER +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#endif + +#define WLFC_LOG_BUF_SIZE (1024*1024) + +/** Properties related to a remote MAC entity */ +typedef struct wlfc_mac_descriptor { + uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */ + uint8 interface_id; + uint8 iftype; /**< eg 
WLC_E_IF_ROLE_STA */ + uint8 state; /**< eg WLFC_STATE_OPEN */ + uint8 ac_bitmap; /**< automatic power save delivery (APSD) */ + uint8 requested_credit; + uint8 requested_packet; /**< unit: [number of packets] */ + uint8 ea[ETHER_ADDR_LEN]; + + /** maintain (MAC,AC) based seq count for packets going to the device. As well as bc/mc. */ + uint8 seq[AC_COUNT + 1]; + uint8 generation; /**< toggles between 0 and 1 */ + struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */ + /** packets at firmware queue */ + struct pktq afq; + /** The AC pending bitmap that was reported to the fw at last change */ + uint8 traffic_lastreported_bmp; + /** The new AC pending bitmap */ + uint8 traffic_pending_bmp; + /** 1= send on next opportunity */ + uint8 send_tim_signal; + uint8 mac_handle; /**< mac handles are assigned by the dongle */ + /** Number of packets at dongle for this entry. */ + int transit_count; + /** Number of suppression to wait before evict from delayQ */ + int suppr_transit_count; + /** pkt sent to bus but no bus TX complete yet */ + int onbus_pkts_count; + /** flag. TRUE when remote MAC is in suppressed state */ + uint8 suppressed; + + +#ifdef PROP_TXSTATUS_DEBUG + uint32 dstncredit_sent_packets; + uint32 dstncredit_acks; + uint32 opened_ct; + uint32 closed_ct; +#endif + struct wlfc_mac_descriptor* prev; + struct wlfc_mac_descriptor* next; +} wlfc_mac_descriptor_t; + +/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */ +typedef struct dhd_wlfc_commit_info { + uint8 needs_hdr; + uint8 ac_fifo_credit_spent; + ewlfc_packet_state_t pkt_type; + wlfc_mac_descriptor_t* mac_entry; + void* p; +} dhd_wlfc_commit_info_t; + +#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\ + entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0) + +#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++ +#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)] + +typedef struct athost_wl_stat_counters { + uint32 pktin; + uint32 pktout; + uint32 pkt2bus; + uint32 pktdropped; + uint32 tlv_parse_failed; + uint32 rollback; + uint32 rollback_failed; + uint32 delayq_full_error; + uint32 credit_request_failed; + uint32 packet_request_failed; + uint32 mac_update_failed; + uint32 psmode_update_failed; + uint32 interface_update_failed; + uint32 wlfc_header_only_pkt; + uint32 txstatus_in; + uint32 d11_suppress; + uint32 wl_suppress; + uint32 bad_suppress; + uint32 pkt_freed; + uint32 pkt_free_err; + uint32 psq_wlsup_retx; + uint32 psq_wlsup_enq; + uint32 psq_d11sup_retx; + uint32 psq_d11sup_enq; + uint32 psq_hostq_retx; + uint32 psq_hostq_enq; + uint32 mac_handle_notfound; + uint32 wlc_tossed_pkts; + uint32 dhd_hdrpulls; + uint32 generic_error; + /* an extra one for bc/mc traffic */ + uint32 send_pkts[AC_COUNT + 1]; + uint32 drop_pkts[WLFC_PSQ_PREC_COUNT]; + uint32 ooo_pkts[AC_COUNT + 1]; +#ifdef PROP_TXSTATUS_DEBUG + /** all pkt2bus -> txstatus latency accumulated */ + uint32 latency_sample_count; + uint32 total_status_latency; + uint32 latency_most_recent; + int idx_delta; + uint32 deltas[10]; + uint32 fifo_credits_sent[6]; + uint32 fifo_credits_back[6]; + uint32 dropped_qfull[6]; + uint32 signal_only_pkts_sent; + uint32 signal_only_pkts_freed; +#endif + uint32 cleanup_txq_cnt; + uint32 cleanup_psq_cnt; + uint32 cleanup_fw_cnt; +} athost_wl_stat_counters_t; + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0) +#define 
WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_back[(ac)]++;} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \ + (ctx)->stats.dropped_qfull[(ac)]++;} while (0) +#else +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0) +#endif + +#define WLFC_FCMODE_NONE 0 +#define WLFC_FCMODE_IMPLIED_CREDIT 1 +#define WLFC_FCMODE_EXPLICIT_CREDIT 2 +#define WLFC_ONLY_AMPDU_HOSTREORDER 3 + +/** Reserved credits ratio when borrowed by hihger priority */ +#define WLFC_BORROW_LIMIT_RATIO 4 + +/** How long to defer borrowing in milliseconds */ +#define WLFC_BORROW_DEFER_PERIOD_MS 100 + +/** How long to defer flow control in milliseconds */ +#define WLFC_FC_DEFER_PERIOD_MS 200 + +/** How long to detect occurance per AC in miliseconds */ +#define WLFC_RX_DETECTION_THRESHOLD_MS 100 + +/** Mask to represent available ACs (note: BC/MC is ignored) */ +#define WLFC_AC_MASK 0xF + +/** flow control specific information, only 1 instance during driver lifetime */ +typedef struct athost_wl_status_info { + uint8 last_seqid_to_wlc; + + /** OSL handle */ + osl_t *osh; + /** dhd public struct pointer */ + void *dhdp; + + f_commitpkt_t fcommit; + void* commit_ctx; + + /** statistics */ + athost_wl_stat_counters_t stats; + + /** incremented on eg receiving a credit map event from the dongle */ + int Init_FIFO_credit[AC_COUNT + 2]; + /** the additional ones are for bc/mc and ATIM FIFO */ + int FIFO_credit[AC_COUNT + 2]; + /** Credit borrow counts for each FIFO from each of the other FIFOs */ + int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; + + /** packet hanger and MAC->handle lookup table */ + void *hanger; + + struct { + /** table for individual nodes */ + wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE]; + /** table for interfaces */ + wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM]; + /* OS may send packets to unknown (unassociated) destinations */ + /** A place holder for bc/mc and packets to unknown destinations */ + wlfc_mac_descriptor_t other; + } destination_entries; + + wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */ + int active_entry_count; + + wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE]; + int requested_entry_count; + + /* pkt counts for each interface and ac */ + int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_per_ac[AC_COUNT+1]; + int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_in_psq; + uint8 allow_fc; /**< Boolean */ + uint32 fc_defer_timestamp; + uint32 rx_timestamp[AC_COUNT+1]; + + /** ON/OFF state for flow control to the host network interface */ + uint8 hostif_flow_state[WLFC_MAX_IFNUM]; + uint8 host_ifidx; + + /** to flow control an OS interface */ + uint8 toggle_host_if; + + /** To borrow credits */ + uint8 allow_credit_borrow; + + /** ac number for the first single ac traffic */ + uint8 single_ac; + + /** Timestamp for the first single ac traffic */ + uint32 single_ac_timestamp; + + bool bcmc_credit_supported; + +} athost_wl_status_info_t; + +/** Please be mindful that total pkttag space is 32 octets only */ +typedef struct dhd_pkttag { + +#ifdef BCM_OBJECT_TRACE + /* if use this field, keep it at the first 4 bytes */ + uint32 sn; +#endif /* BCM_OBJECT_TRACE */ + + /** + b[15] - 1 = wlfc packet + b[14:13] - encryption exemption + b[12 ] - 1 = event channel + b[11 ] - 1 = this packet was sent in response to one time packet 
request, + do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET]. + b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on] + b[9 ] - 1 = packet is host->firmware (transmit direction) + - 0 = packet received from firmware (firmware->host) + b[8 ] - 1 = packet was sent due to credit_request (pspoll), + packet does not count against FIFO credit. + - 0 = normal transaction, packet counts against FIFO credit + b[7 ] - 1 = AP, 0 = STA + b[6:4] - AC FIFO number + b[3:0] - interface index + */ + uint16 if_flags; + + /** + * destination MAC address for this packet so that not every module needs to open the packet + * to find this + */ + uint8 dstn_ether[ETHER_ADDR_LEN]; + + /** This 32-bit goes from host to device for every packet. */ + uint32 htod_tag; + + /** This 16-bit is original seq number for every suppress packet. */ + uint16 htod_seq; + + /** This address is mac entry for every packet. */ + void *entry; + + /** bus specific stuff */ + union { + struct { + void *stuff; + uint32 thing1; + uint32 thing2; + } sd; + + struct { + void *bus; + void *urb; + } usb; + } bus_specific; +} dhd_pkttag_t; + +#define DHD_PKTTAG_WLFCPKT_MASK 0x1 +#define DHD_PKTTAG_WLFCPKT_SHIFT 15 +#define DHD_PKTTAG_WLFCPKT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \ + (((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT) +#define DHD_PKTTAG_WLFCPKT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK) + +#define DHD_PKTTAG_EXEMPT_MASK 0x3 +#define DHD_PKTTAG_EXEMPT_SHIFT 13 +#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \ + (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT) +#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK) + +#define DHD_PKTTAG_EVENT_MASK 0x1 +#define DHD_PKTTAG_EVENT_SHIFT 12 +#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \ + (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT) +#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK) + +#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1 +#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11 +#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \ + (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) +#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK) + +#define DHD_PKTTAG_SIGNALONLY_MASK 0x1 +#define DHD_PKTTAG_SIGNALONLY_SHIFT 10 +#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \ + (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT) +#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK) + +#define DHD_PKTTAG_PKTDIR_MASK 0x1 +#define DHD_PKTTAG_PKTDIR_SHIFT 9 +#define 
DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \ + (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT) +#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK) + +#define DHD_PKTTAG_CREDITCHECK_MASK 0x1 +#define DHD_PKTTAG_CREDITCHECK_SHIFT 8 +#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \ + (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT) +#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK) + +#define DHD_PKTTAG_IFTYPE_MASK 0x1 +#define DHD_PKTTAG_IFTYPE_SHIFT 7 +#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \ + (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT) +#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK) + +#define DHD_PKTTAG_FIFO_MASK 0x7 +#define DHD_PKTTAG_FIFO_SHIFT 4 +#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \ + (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT) +#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK) + +#define DHD_PKTTAG_IF_MASK 0xf +#define DHD_PKTTAG_IF_SHIFT 0 +#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \ + (((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT) +#define DHD_PKTTAG_IF(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK) + +#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \ + (dstn_MAC_ea), ETHER_ADDR_LEN) +#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether + +#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue) +#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag) + +#define DHD_PKTTAG_SET_H2DSEQ(tag, seq) ((dhd_pkttag_t*)(tag))->htod_seq = (seq) +#define DHD_PKTTAG_H2DSEQ(tag) (((dhd_pkttag_t*)(tag))->htod_seq) + +#define DHD_PKTTAG_SET_ENTRY(tag, entry) ((dhd_pkttag_t*)(tag))->entry = (entry) +#define DHD_PKTTAG_ENTRY(tag) (((dhd_pkttag_t*)(tag))->entry) + +#define PSQ_SUP_IDX(x) (x * 2 + 1) +#define PSQ_DLY_IDX(x) (x * 2) + +#ifdef PROP_TXSTATUS_DEBUG +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0) +#else +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0) +#endif + +#ifdef BCM_OBJECT_TRACE +#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val) +#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn) +#endif /* BCM_OBJECT_TRACE */ + +/* public functions */ +int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, + uchar *reorder_info_buf, uint *reorder_info_len); +KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data); 
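[Illustrative aside, not part of the patch: a minimal sketch of how the if_flags bit-packing macros above compose. The bit layout documented in the dhd_pkttag comment (b[15] wlfc packet, b[9] direction, b[6:4] AC FIFO, b[3:0] interface index) is written and read back through the SET/GET macro pairs. The helper name example_tag_usage and the stack-allocated tag are hypothetical; in the driver the tag would normally be obtained via PKTTAG(pkt), and this header is assumed to be included.]

/* Illustrative only -- sketch of the if_flags accessor macros, assuming this header is included */
static void example_tag_usage(void)
{
	dhd_pkttag_t tag = {0};			/* hypothetical local tag; driver code uses PKTTAG(pkt) */

	DHD_PKTTAG_WLFCPKT_SET(&tag, 1);	/* b[15]: mark as a wlfc-managed packet */
	DHD_PKTTAG_SETPKTDIR(&tag, 1);		/* b[9]:  host -> firmware (transmit) direction */
	DHD_PKTTAG_SETFIFO(&tag, 2);		/* b[6:4]: AC FIFO number */
	DHD_PKTTAG_SETIF(&tag, 0);		/* b[3:0]: interface index */

	/* each field is recovered independently from the packed 16-bit if_flags word */
	if (DHD_PKTTAG_WLFCPKT(&tag) && DHD_PKTTAG_PKTDIR(&tag))
		(void)DHD_PKTTAG_FIFO(&tag);	/* evaluates to 2 */
}

[End of illustrative aside.]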
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, + void* commit_ctx, void *pktbuf, bool need_toggle_host_if); +int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); +int dhd_wlfc_init(dhd_pub_t *dhd); +#ifdef SUPPORT_P2P_GO_PS +int dhd_wlfc_suspend(dhd_pub_t *dhd); +int dhd_wlfc_resume(dhd_pub_t *dhd); +#endif /* SUPPORT_P2P_GO_PS */ +int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd); +int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg); +int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg); +int dhd_wlfc_deinit(dhd_pub_t *dhd); +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea); +int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data); +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp); +int dhd_wlfc_enable(dhd_pub_t *dhdp); +int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +int dhd_wlfc_clear_counts(dhd_pub_t *dhd); +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val); +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val); +bool dhd_wlfc_is_supported(dhd_pub_t *dhd); +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf); +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock); +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio); + +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val); + +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val); + +#endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h new file mode 100644 index 000000000000..66e4f4528f7d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dngl_stats.h @@ -0,0 +1,283 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dngl_stats.h 523030 2014-12-25 17:28:07Z $ + */ + +#ifndef _dngl_stats_h_ +#define _dngl_stats_h_ + +#include +#include + +typedef struct { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +} dngl_stats_t; + +typedef int32 wifi_radio; +typedef int32 wifi_channel; +typedef int32 wifi_rssi; +typedef struct { uint16 version; uint16 length; } ver_len; + +typedef enum wifi_channel_width { + WIFI_CHAN_WIDTH_20 = 0, + WIFI_CHAN_WIDTH_40 = 1, + WIFI_CHAN_WIDTH_80 = 2, + WIFI_CHAN_WIDTH_160 = 3, + WIFI_CHAN_WIDTH_80P80 = 4, + WIFI_CHAN_WIDTH_5 = 5, + WIFI_CHAN_WIDTH_10 = 6, + WIFI_CHAN_WIDTH_INVALID = -1 +} wifi_channel_width_t; + +typedef enum { + WIFI_DISCONNECTED = 0, + WIFI_AUTHENTICATING = 1, + WIFI_ASSOCIATING = 2, + WIFI_ASSOCIATED = 3, + WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */ + WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */ +} wifi_connection_state; + +typedef enum { + WIFI_ROAMING_IDLE = 0, + WIFI_ROAMING_ACTIVE = 1 +} wifi_roam_state; + +typedef enum { + WIFI_INTERFACE_STA = 0, + WIFI_INTERFACE_SOFTAP = 1, + WIFI_INTERFACE_IBSS = 2, + WIFI_INTERFACE_P2P_CLIENT = 3, + WIFI_INTERFACE_P2P_GO = 4, + WIFI_INTERFACE_NAN = 5, + WIFI_INTERFACE_MESH = 6 +} wifi_interface_mode; + +#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */ +#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11 + * beacon frame control protected bit set) + */ +#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended Capabilities + * element interworking bit is set + */ +#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */ +#define WIFI_CAPABILITY_SSID_UTF8 0x00000010 /* set is 802.11 Extended Capabilities + * element UTF-8 SSID bit is set + */ +#define WIFI_CAPABILITY_COUNTRY 0x00000020 /* set is 802.11 Country Element is present */ + +typedef struct { + wifi_interface_mode mode; /* interface mode */ + uint8 mac_addr[6]; /* interface mac address (self) */ + wifi_connection_state state; /* connection state (valid for STA, CLI only) */ + wifi_roam_state roaming; /* roaming state */ + uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */ + uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */ + uint8 bssid[ETHER_ADDR_LEN]; /* bssid */ + uint8 ap_country_str[3]; /* country string advertised by AP */ + uint8 country_str[3]; /* country string for this association */ +} wifi_interface_info; + +typedef wifi_interface_info *wifi_interface_handle; + +/* channel information */ +typedef struct { + wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */ + wifi_channel center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center frequency (MHz) first segment */ + wifi_channel center_freq1; /* center frequency (MHz) second segment */ +} wifi_channel_info; + +/* wifi rate */ +typedef struct { + uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */ + uint32 nss; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */ + uint32 bw; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */ + uint32 rateMcsIdx; /* OFDM/CCK rate 
code would be as per ieee std + * in the units of 0.5mbps + */ + /* HT/VHT it would be mcs index */ + uint32 reserved; /* reserved */ + uint32 bitrate; /* units of 100 Kbps */ +} wifi_rate; + +/* channel statistics */ +typedef struct { + wifi_channel_info channel; /* channel */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number + * accruing over time) + */ +} wifi_channel_stat; + +/* radio statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + wifi_radio radio; /* wifi radio (if multiple radio supported) */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 tx_time; /* msecs the radio is transmitting (32 bits + * number accruing over time) + */ + uint32 rx_time; /* msecs the radio is in active receive (32 bits + * number accruing over time) + */ + uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits + * number accruing over time) + */ + uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits + * number accruing over time) + */ + uint32 on_time_gscan; /* msecs the radio is awake due to G?scan (32 bits + * number accruing over time) + */ + uint32 on_time_roam_scan; /* msecs the radio is awake due to roam?scan (32 bits + * number accruing over time) + */ + uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits + * number accruing over time) + */ + uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and + * GAS exchange (32 bits number accruing over time) + */ + uint32 num_channels; /* number of channels */ + wifi_channel_stat channels[1]; /* channel statistics */ +} wifi_radio_stat; + +/* per rate statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ + uint32 rx_mpdu; /* number of received data pkts */ + uint32 mpdu_lost; /* number of data packet losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + wifi_rate rate; /* rate information */ +} wifi_rate_stat; + +/* access categories */ +typedef enum { + WIFI_AC_VO = 0, + WIFI_AC_VI = 1, + WIFI_AC_BE = 2, + WIFI_AC_BK = 3, + WIFI_AC_MAX = 4 +} wifi_traffic_ac; + +/* wifi peer type */ +typedef enum +{ + WIFI_PEER_STA, + WIFI_PEER_AP, + WIFI_PEER_P2P_GO, + WIFI_PEER_P2P_CLIENT, + WIFI_PEER_NAN, + WIFI_PEER_TDLS, + WIFI_PEER_INVALID +} wifi_peer_type; + +/* per peer statistics */ +typedef struct { + wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) 
*/ + uint8 peer_mac_address[6]; /* mac address */ + uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */ + uint32 num_rate; /* number of rates */ + wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */ +} wifi_peer_info; + +/* per access category statistics */ +typedef struct { + wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */ + uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts + * (ACK rcvd) + */ + uint32 rx_mpdu; /* number of received unicast mpdus */ + uint32 tx_mcast; /* number of succesfully transmitted multicast + * data packets + */ + /* STA case: implies ACK received from AP for the + * unicast packet in which mcast pkt was sent + */ + uint32 rx_mcast; /* number of received multicast data packets */ + uint32 rx_ampdu; /* number of received unicast a-mpdus */ + uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */ + uint32 mpdu_lost; /* number of data pkt losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + uint32 contention_time_min; /* data pkt min contention time (usecs) */ + uint32 contention_time_max; /* data pkt max contention time (usecs) */ + uint32 contention_time_avg; /* data pkt avg contention time (usecs) */ + uint32 contention_num_samples; /* num of data pkts used for contention statistics */ +} wifi_wmm_ac_stat; + +/* interface statistics */ +typedef struct { + wifi_interface_handle iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon received count from + * connected AP + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} wifi_iface_stat; + +#endif /* _dngl_stats_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h new file mode 100644 index 000000000000..93e0b5a5b69d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h @@ -0,0 +1,43 @@ +/* + * Dongle WL Header definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dngl_wlhdr.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _dngl_wlhdr_h_ +#define _dngl_wlhdr_h_ + +typedef struct wl_header { + uint8 type; /* Header type */ + uint8 version; /* Header version */ + int8 rssi; /* RSSI */ + uint8 pad; /* Unused */ +} wl_header_t; + +#define WL_HEADER_LEN sizeof(wl_header_t) +#define WL_HEADER_TYPE 0 +#define WL_HEADER_VER 1 +#endif /* _dngl_wlhdr_h_ */ diff --git a/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/drivers/net/wireless/bcmdhd/hnd_pktpool.c new file mode 100644 index 000000000000..f3555e40ce91 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hnd_pktpool.c @@ -0,0 +1,1131 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktpool.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include +#include +#include +#include + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + +/* Registry size is one larger than max pools, as slot #0 is reserved */ +#define PKTPOOLREG_RSVD_ID (0U) +#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead)) +#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL)) + +#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp))) +#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp))) + +/* Tag a registry entry as free for use */ +#define PKTPOOL_REGISTRY_CLR(id) \ + PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR) +#define PKTPOOL_REGISTRY_ISCLR(id) \ + (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR)) + +/* Tag registry entry 0 as reserved */ +#define PKTPOOL_REGISTRY_RSV() \ + PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR) +#define PKTPOOL_REGISTRY_ISRSVD() \ + (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)) + +/* Walk all un-reserved entries in registry */ +#define PKTPOOL_REGISTRY_FOREACH(id) \ + for ((id) = 1U; (id) <= pktpools_max; (id)++) + +enum pktpool_empty_cb_state { + EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */ + EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */ + EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */ +}; + +uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */ +pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */ + +/* Register/Deregister a pktpool with registry during pktpool_init/deinit */ +static int pktpool_register(pktpool_t * poolptr); +static int pktpool_deregister(pktpool_t * poolptr); + +/** add declaration */ +static int pktpool_avail_notify(pktpool_t *pktp); + +/** accessor functions required when ROMming this file, forced into RAM */ + + +pktpool_t * +BCMRAMFN(get_pktpools_registry)(int id) +{ + return pktpools_registry[id]; +} + +static void +BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp) +{ + pktpools_registry[id] = pp; +} + +static bool +BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp) +{ + return pktpools_registry[id] == pp; +} + +int /* Construct a pool registry to serve a maximum of total_pools */ +pktpool_attach(osl_t *osh, uint32 total_pools) +{ + uint32 poolid; + + if (pktpools_max != 0U) { + return BCME_ERROR; + } + + ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID); + + /* Initialize registry: reserve slot#0 and tag others as free */ + PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */ + + PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */ + PKTPOOL_REGISTRY_CLR(poolid); + } + + pktpools_max = total_pools; + + return (int)pktpools_max; +} + +int /* Destruct the pool registry. 
Ascertain all pools were first de-inited */ +pktpool_dettach(osl_t *osh) +{ + uint32 poolid; + + if (pktpools_max == 0U) { + return BCME_OK; + } + + /* Ascertain that no pools are still registered */ + ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */ + + PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */ + ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid)); + } + + pktpools_max = 0U; /* restore boot state */ + + return BCME_OK; +} + +static int /* Register a pool in a free slot; return the registry slot index */ +pktpool_register(pktpool_t * poolptr) +{ + uint32 poolid; + + if (pktpools_max == 0U) { + return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */ + } + + ASSERT(pktpools_max != 0U); + + /* find an empty slot in pktpools_registry */ + PKTPOOL_REGISTRY_FOREACH(poolid) { + if (PKTPOOL_REGISTRY_ISCLR(poolid)) { + PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */ + return (int)poolid; /* return pool ID */ + } + } /* FOREACH */ + + return PKTPOOL_INVALID_ID; /* error: registry is full */ +} + +static int /* Deregister a pktpool, given the pool pointer; tag slot as free */ +pktpool_deregister(pktpool_t * poolptr) +{ + uint32 poolid; + + ASSERT(POOLPTR(poolptr) != POOLPTR(NULL)); + + poolid = POOLID(poolptr); + ASSERT(poolid <= pktpools_max); + + /* Asertain that a previously registered poolptr is being de-registered */ + if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) { + PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */ + } else { + ASSERT(0); + return BCME_ERROR; /* mismatch in registry */ + } + + return BCME_OK; +} + + +/* + * pktpool_init: + * User provides a pktpool_t sturcture and specifies the number of packets to + * be pre-filled into the pool (pplen). The size of all packets in a pool must + * be the same and is specified by plen. + * pktpool_init first attempts to register the pool and fetch a unique poolid. + * If registration fails, it is considered an BCME_ERR, caused by either the + * registry was not pre-created (pktpool_attach) or the registry is full. + * If registration succeeds, then the requested number of packets will be filled + * into the pool as part of initialization. In the event that there is no + * available memory to service the request, then BCME_NOMEM will be returned + * along with the count of how many packets were successfully allocated. + * In dongle builds, prior to memory reclaimation, one should limit the number + * of packets to be allocated during pktpool_init and fill the pool up after + * reclaim stage. + */ +int +pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type) +{ + int i, err = BCME_OK; + int pktplen; + uint8 pktp_id; + + ASSERT(pktp != NULL); + ASSERT(osh != NULL); + ASSERT(pplen != NULL); + + pktplen = *pplen; + + bzero(pktp, sizeof(pktpool_t)); + + /* assign a unique pktpool id */ + if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) { + return BCME_ERROR; + } + POOLSETID(pktp, pktp_id); + + pktp->inited = TRUE; + pktp->istx = istx ? 
TRUE : FALSE; + pktp->plen = (uint16)plen; + pktp->type = type; + + if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + + pktp->maxlen = PKTPOOL_LEN_MAX; + pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen); + + for (i = 0; i < pktplen; i++) { + void *p; + p = PKTGET(osh, plen, TRUE); + + if (p == NULL) { + /* Not able to allocate all requested pkts + * so just return what was actually allocated + * We can add to the pool later + */ + if (pktp->freelist == NULL) /* pktpool free list is empty */ + err = BCME_NOMEM; + + goto exit; + } + + PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */ + + PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */ + pktp->freelist = p; + + pktp->avail++; + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif + } + +exit: + pktp->len = pktp->avail; + + *pplen = pktp->len; + return err; +} + +/* + * pktpool_deinit: + * Prior to freeing a pktpool, all packets must be first freed into the pktpool. + * Upon pktpool_deinit, all packets in the free pool will be freed to the heap. + * An assert is in place to ensure that there are no packets still lingering + * around. Packets freed to a pool after the deinit will cause a memory + * corruption as the pktpool_t structure no longer exists. + */ +int +pktpool_deinit(osl_t *osh, pktpool_t *pktp) +{ + uint16 freed = 0; + + ASSERT(osh != NULL); + ASSERT(pktp != NULL); + +#ifdef BCMDBG_POOL + { + int i; + for (i = 0; i <= pktp->len; i++) { + pktp->dbg_q[i].p = NULL; + } + } +#endif + + while (pktp->freelist != NULL) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + ASSERT(freed <= pktp->len); + } + + pktp->avail -= freed; + ASSERT(pktp->avail == 0); + + pktp->len -= freed; + + pktpool_deregister(pktp); /* release previously acquired unique pool id */ + POOLSETID(pktp, PKTPOOL_INVALID_ID); + + if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + pktp->inited = FALSE; + + /* Are there still pending pkts? */ + ASSERT(pktp->len == 0); + + return 0; +} + +int +pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal) +{ + void *p; + int err = 0; + int len, psize, maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(pktp->plen != 0); + + maxlen = pktp->maxlen; + psize = minimal ? 
(maxlen >> 2) : maxlen; + for (len = (int)pktp->len; len < psize; len++) { + + p = PKTGET(osh, pktp->len, TRUE); + + if (p == NULL) { + err = BCME_NOMEM; + break; + } + + if (pktpool_add(pktp, p) != BCME_OK) { + PKTFREE(osh, p, FALSE); + err = BCME_ERROR; + break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (pktp->cbcnt) { + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } + + return err; +} + +static void * +pktpool_deq(pktpool_t *pktp) +{ + void *p = NULL; + + if (pktp->avail == 0) + return NULL; + + ASSERT(pktp->freelist != NULL); + + p = pktp->freelist; /* dequeue packet from head of pktpool free list */ + pktp->freelist = PKTFREELIST(p); /* free list points to next packet */ + PKTSETFREELIST(p, NULL); + + pktp->avail--; + + return p; +} + +static void +pktpool_enq(pktpool_t *pktp, void *p) +{ + ASSERT(p != NULL); + + PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */ + pktp->freelist = p; /* free list points to newly inserted packet */ + + pktp->avail++; + ASSERT(pktp->avail <= pktp->len); +} + +/* utility for registering host addr fill function called from pciedev */ +int +/* BCMATTACHFN */ +(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + ASSERT(pktp->cbext.cb == NULL); + pktp->cbext.cb = cb; + pktp->cbext.arg = arg; + return 0; +} + +int +pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + if (pktp == NULL) + return BCME_ERROR; + ASSERT(pktp->rxcplidfn.cb == NULL); + pktp->rxcplidfn.cb = cb; + pktp->rxcplidfn.arg = arg; + return 0; +} +/* Callback functions for split rx modes */ +/* when evr host posts rxbuffer, invike dma_rxfill from pciedev layer */ +void +pktpool_invoke_dmarxfill(pktpool_t *pktp) +{ + ASSERT(pktp->dmarxfill.cb); + ASSERT(pktp->dmarxfill.arg); + + if (pktp->dmarxfill.cb) + pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg); +} +int +pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + pktp->dmarxfill.cb = cb; + pktp->dmarxfill.arg = arg; + + return 0; +} +/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */ +int +pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + i = pktp->cbcnt; + if (i == PKTPOOL_CB_MAX_AVL) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->cbs[i].cb == NULL); + pktp->cbs[i].cb = cb; + pktp->cbs[i].arg = arg; + pktp->cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +int +pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + i = pktp->ecbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->ecbs[i].cb == NULL); + pktp->ecbs[i].cb = cb; + pktp->ecbs[i].arg = arg; + pktp->ecbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +static int 
+pktpool_empty_notify(pktpool_t *pktp) +{ + int i; + + pktp->empty = TRUE; + for (i = 0; i < pktp->ecbcnt; i++) { + ASSERT(pktp->ecbs[i].cb != NULL); + pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg); + } + pktp->empty = FALSE; + + return 0; +} + +#ifdef BCMDBG_POOL +int +pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb); + + i = pktp->dbg_cbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->dbg_cbs[i].cb == NULL); + pktp->dbg_cbs[i].cb = cb; + pktp->dbg_cbs[i].arg = arg; + pktp->dbg_cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +int pktpool_dbg_notify(pktpool_t *pktp); + +int +pktpool_dbg_notify(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + for (i = 0; i < pktp->dbg_cbcnt; i++) { + ASSERT(pktp->dbg_cbs[i].cb); + pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_dbg_dump(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p); + printf("%d, p: 0x%x dur:%lu us state:%d\n", i, + pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p)); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) +{ + int i; + int state; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + bzero(stats, sizeof(pktpool_stats_t)); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + state = PKTPOOLSTATE(pktp->dbg_q[i].p); + switch (state) { + case POOL_TXENQ: + stats->enq++; break; + case POOL_TXDH: + stats->txdh++; break; + case POOL_TXD11: + stats->txd11++; break; + case POOL_RXDH: + stats->rxdh++; break; + case POOL_RXD11: + stats->rxd11++; break; + case POOL_RXFILL: + stats->rxfill++; break; + case POOL_IDLE: + stats->idle++; break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_start_trigger(pktpool_t *pktp, void *p) +{ + uint32 cycles, i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (!PKTPOOL(OSH_NULL, p)) + goto done; + + OSL_GETCYCLES(cycles); + + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + if (pktp->dbg_q[i].p == p) { + pktp->dbg_q[i].cycles = cycles; + break; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int pktpool_stop_trigger(pktpool_t *pktp, void *p); +int +pktpool_stop_trigger(pktpool_t 
*pktp, void *p) +{ + uint32 cycles, i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (!PKTPOOL(OSH_NULL, p)) + goto done; + + OSL_GETCYCLES(cycles); + + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + if (pktp->dbg_q[i].p == p) { + if (pktp->dbg_q[i].cycles == 0) + break; + + if (cycles >= pktp->dbg_q[i].cycles) + pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles; + else + pktp->dbg_q[i].dur = + (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1; + + pktp->dbg_q[i].cycles = 0; + break; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} +#endif /* BCMDBG_POOL */ + +int +pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp) +{ + ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + pktp->availcb_excl = NULL; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) +{ + int i; + int err; + + ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(pktp->availcb_excl == NULL); + for (i = 0; i < pktp->cbcnt; i++) { + if (cb == pktp->cbs[i].cb) { + pktp->availcb_excl = &pktp->cbs[i]; + break; + } + } + + if (pktp->availcb_excl == NULL) + err = BCME_ERROR; + else + err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +static int +pktpool_avail_notify(pktpool_t *pktp) +{ + int i, k, idx; + int avail; + + ASSERT(pktp); + if (pktp->availcb_excl != NULL) { + pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg); + return 0; + } + + k = pktp->cbcnt - 1; + for (i = 0; i < pktp->cbcnt; i++) { + avail = pktp->avail; + + if (avail) { + if (pktp->cbtoggle) + idx = i; + else + idx = k--; + + ASSERT(pktp->cbs[idx].cb != NULL); + pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg); + } + } + + /* Alternate between filling from head or tail + */ + pktp->cbtoggle ^= 1; + + return 0; +} + +void * +pktpool_get(pktpool_t *pktp) +{ + void *p; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + + p = pktpool_deq(pktp); + + if (p == NULL) { + /* Notify and try to reclaim tx pkts */ + if (pktp->ecbcnt) + pktpool_empty_notify(pktp); + + p = pktpool_deq(pktp); + if (p == NULL) + goto done; + } + + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktpool_free(pktpool_t *pktp, void *p) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + ASSERT(p != NULL); +#ifdef BCMDBG_POOL + /* pktpool_stop_trigger(pktp, p); */ +#endif + + pktpool_enq(pktp, p); + + /** + * Feed critical DMA with freshly freed packets, to avoid DMA starvation. + * If any avail callback functions are registered, send a notification + * that a new packet is available in the pool. + */ + if (pktp->cbcnt) { + /* To more efficiently use the cpu cycles, callbacks can be temporarily disabled. 
+ * This allows to feed on burst basis as opposed to inefficient per-packet basis. + */ + if (pktp->emptycb_disable == EMPTYCB_ENABLED) { + /** + * If the call originated from pktpool_empty_notify, the just freed packet + * is needed in pktpool_get. + * Therefore don't call pktpool_avail_notify. + */ + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } else { + /** + * The callback is temporarily disabled, log that a packet has been freed. + */ + pktp->emptycb_disable = EMPTYCB_SKIPPED; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return; +} + +int +pktpool_add(pktpool_t *pktp, void *p) +{ + int err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(p != NULL); + + if (pktp->len == pktp->maxlen) { + err = BCME_RANGE; + goto done; + } + + /* pkts in pool have same length */ + ASSERT(pktp->plen == PKTLEN(OSH_NULL, p)); + PKTSETPOOL(OSH_NULL, p, TRUE, pktp); + + pktp->len++; + pktpool_enq(pktp, p); + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/* Force pktpool_setmaxlen () into RAM as it uses a constant + * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips. + */ +int +BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (maxlen > PKTPOOL_LEN_MAX) + maxlen = PKTPOOL_LEN_MAX; + + /* if pool is already beyond maxlen, then just cap it + * since we currently do not reduce the pool len + * already allocated + */ + pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return pktp->maxlen; +} + +void +pktpool_emptycb_disable(pktpool_t *pktp, bool disable) +{ + ASSERT(pktp); + + /** + * To more efficiently use the cpu cycles, callbacks can be temporarily disabled. + * If callback is going to be re-enabled, check if any packet got + * freed and added back to the pool while callback was disabled. + * When this is the case do the callback now, provided that callback functions + * are registered and this call did not originate from pktpool_empty_notify. + */ + if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) && + (pktp->emptycb_disable == EMPTYCB_SKIPPED)) { + pktpool_avail_notify(pktp); + } + + /* Enable or temporarily disable callback when packet becomes available. */ + pktp->emptycb_disable = disable ? 
EMPTYCB_DISABLED : EMPTYCB_ENABLED; +} + +bool +pktpool_emptycb_disabled(pktpool_t *pktp) +{ + ASSERT(pktp); + return pktp->emptycb_disable != EMPTYCB_ENABLED; +} + +#ifdef BCMPKTPOOL +#include + +pktpool_t *pktpool_shared = NULL; + +#ifdef BCMFRAGPOOL +pktpool_t *pktpool_shared_lfrag = NULL; +#endif /* BCMFRAGPOOL */ + +pktpool_t *pktpool_shared_rxlfrag = NULL; + +static osl_t *pktpool_osh = NULL; + +void +hnd_pktpool_init(osl_t *osh) +{ + int n; + + /* Construct a packet pool registry before initializing packet pools */ + n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID); + if (n != PKTPOOL_MAXIMUM_ID) { + ASSERT(0); + return; + } + + pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared == NULL) { + ASSERT(0); + goto error1; + } + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_lfrag == NULL) { + ASSERT(0); + goto error2; + } +#endif + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_rxlfrag == NULL) { + ASSERT(0); + goto error3; + } +#endif + + + /* + * At this early stage, there's not enough memory to allocate all + * requested pkts in the shared pool. Need to add to the pool + * after reclaim + * + * n = NRXBUFPOST + SDPCMD_RXBUFS; + * + * Initialization of packet pools may fail (BCME_ERROR), if the packet pool + * registry is not initialized or the registry is depleted. + * + * A BCME_NOMEM error only indicates that the requested number of packets + * were not filled into the pool. + */ + n = 1; + if (pktpool_init(osh, pktpool_shared, + &n, PKTBUFSZ, FALSE, lbuf_basic) == BCME_ERROR) { + ASSERT(0); + goto error4; + } + pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN); + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + n = 1; + if (pktpool_init(osh, pktpool_shared_lfrag, + &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) { + ASSERT(0); + goto error5; + } + pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN); +#endif +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + n = 1; + if (pktpool_init(osh, pktpool_shared_rxlfrag, + &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag) == BCME_ERROR) { + ASSERT(0); + goto error6; + } + pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN); +#endif + + pktpool_osh = osh; + + return; + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) +error6: +#endif + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_deinit(osh, pktpool_shared_lfrag); +error5: +#endif + +#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \ + (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)) + pktpool_deinit(osh, pktpool_shared); +#endif + +error4: +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_rxlfrag); + pktpool_shared_rxlfrag = (pktpool_t *)NULL; +error3: +#endif /* BCMRXFRAGPOOL */ + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_lfrag); + pktpool_shared_lfrag = (pktpool_t *)NULL; +error2: +#endif /* BCMFRAGPOOL */ + + hnd_free(pktpool_shared); + pktpool_shared = (pktpool_t *)NULL; + +error1: + pktpool_dettach(osh); +} + +void +hnd_pktpool_fill(pktpool_t *pktpool, bool minimal) +{ + pktpool_fill(pktpool_osh, pktpool, minimal); +} + +/* refill pktpools after reclaim */ +void +hnd_pktpool_refill(bool minimal) +{ + if (POOL_ENAB(pktpool_shared)) { + pktpool_fill(pktpool_osh, pktpool_shared, minimal); + } +/* 
fragpool reclaim */ +#ifdef BCMFRAGPOOL + if (POOL_ENAB(pktpool_shared_lfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal); + } +#endif /* BCMFRAGPOOL */ +/* rx fragpool reclaim */ +#ifdef BCMRXFRAGPOOL + if (POOL_ENAB(pktpool_shared_rxlfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal); + } +#endif +} +#endif /* BCMPKTPOOL */ diff --git a/drivers/net/wireless/bcmdhd/hnd_pktq.c b/drivers/net/wireless/bcmdhd/hnd_pktq.c new file mode 100644 index 000000000000..4d1a7804d092 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hnd_pktq.c @@ -0,0 +1,888 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_pktq.c 605726 2015-12-11 07:08:16Z $ + */ + +#include +#include +#include +#include +#include + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +void * BCMFASTPATH +pktq_penq(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_penq_head(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if 
(HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +/* + * Append spktq 'list' to the tail of pktq 'pq' + */ +void BCMFASTPATH +pktq_append(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q[0]; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, list_q->head); + else + q->head = list_q->head; + + q->tail = list_q->tail; + q->len += list_q->len; + pq->len += list_q->len; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + list_q->head = NULL; + list_q->tail = NULL; + list_q->len = 0; + list->len = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Prepend spktq 'list' to the head of pktq 'pq' + */ +void BCMFASTPATH +pktq_prepend(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q[0]; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + /* set the tail packet of list to point at the former pq head */ + PKTSETLINK(list_q->tail, q->head); + /* the new q head is the head of list */ + q->head = list_q->head; + + /* If the q tail was non-null, then it stays as is. 
+ * If the q tail was null, it is now the tail of list + */ + if (q->tail == NULL) { + q->tail = list_q->tail; + } + + q->len += list_q->len; + pq->len += list_q->len; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + list_q->head = NULL; + list_q->tail = NULL; + list_q->len = 0; + list->len = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * BCMFASTPATH +pktq_pdeq(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p) +{ + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if (prev_p == NULL) + goto done; + + if ((p = PKTLINK(prev_p)) == NULL) + goto done; + + q->len--; + + pq->len--; + + PKTSETLINK(prev_p, PKTLINK(p)); + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + goto done; + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_tail(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p, *prev; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *next, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + q = &pq->q[prec]; + p = q->head; + while (p) { + next = PKTLINK(p); + if (fn == NULL || (*fn)(p, 
arg)) { + bool head = (p == q->head); + if (head) + q->head = next; + else + PKTSETLINK(prev, next); + PKTSETLINK(p, NULL); + PKTFREE(osh, p, dir); + q->len--; + pq->len--; + } else { + prev = p; + } + p = next; + } + + q->tail = prev; + + if (q->head == NULL) { + ASSERT(q->len == 0); + ASSERT(q->tail == NULL); + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +bool BCMFASTPATH +pktq_pdel(struct pktq *pq, void *pktbuf, int prec) +{ + bool ret = FALSE; + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* Should this just assert pktbuf? */ + if (!pktbuf) + goto done; + + q = &pq->q[prec]; + + if (q->head == pktbuf) { + if ((q->head = PKTLINK(pktbuf)) == NULL) + q->tail = NULL; + } else { + for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) + ; + if (p == NULL) + goto done; + + PKTSETLINK(p, PKTLINK(pktbuf)); + if (q->tail == pktbuf) + q->tail = p; + } + + q->len--; + pq->len--; + PKTSETLINK(pktbuf, NULL); + ret = TRUE; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +bool +pktq_init(struct pktq *pq, int num_prec, int max_len) +{ + int prec; + + if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); + + /* pq is variable size; only zero out what's requested */ + bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + pq->num_prec = (uint16)num_prec; + + pq->max = (uint16)max_len; + + for (prec = 0; prec < num_prec; prec++) + pq->q[prec].max = pq->max; + + return TRUE; +} + +bool +pktq_deinit(struct pktq *pq) +{ + if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; +} + +void +pktq_set_max_plen(struct pktq *pq, int prec, int max_len) +{ + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + if (prec < pq->num_prec) + pq->q[prec].max = (uint16)max_len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * BCMFASTPATH +pktq_deq(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_deq_tail(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL, *prev; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + 
+ for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].tail; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) +{ + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize flush, if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->len == 0) + goto done; + + for (prec = 0; prec < pq->num_prec; prec++) + pktq_pflush(osh, pq, prec, dir, fn, arg); + if (fn == NULL) + ASSERT(pq->len == 0); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* Return sum of lengths of a specific set of precedences */ +int +pktq_mlen(struct pktq *pq, uint prec_bmp) +{ + int prec, len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return len; +} + +/* Priority peek from a specific set of precedences */ +void * BCMFASTPATH +pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if (prec_out) + *prec_out = prec; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} +/* Priority dequeue from a specific set of precedences */ +void * BCMFASTPATH +pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect 
shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0)) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + if (prec_out) + *prec_out = prec; + + pq->len--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +#ifdef HND_PKTQ_THREAD_SAFE +int +pktq_pavail(struct pktq *pq, int prec) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].max - pq->q[prec].len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_pfull(struct pktq *pq, int prec) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].len >= pq->q[prec].max; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +int +pktq_avail(struct pktq *pq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = pq->max - pq->len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_full(struct pktq *pq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = pq->len >= pq->max; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} +#endif /* HND_PKTQ_THREAD_SAFE */ diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c new file mode 100644 index 000000000000..c0c658203dda --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hndpmu.c @@ -0,0 +1,292 @@ +/* + * Misc utility routines for accessing PMU corerev specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
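For readers unfamiliar with the hnd_pktq API added above, the following caller-side sketch shows the intended life cycle of a precedence queue. It is illustrative only: 'osh' and 'pkt' are placeholders, and pktq_penq() is assumed to be the plain tail-enqueue defined earlier in this file (outside this hunk).

static void pktq_usage_sketch(osl_t *osh, void *pkt)
{
        struct pktq txq;
        int prec_out;
        void *p;

        /* 4 precedence levels, at most 128 packets across all of them */
        if (!pktq_init(&txq, 4, 128))
                return;

        /* enqueue at precedence 2 unless that precedence is already full */
        if (!pktq_pfull(&txq, 2))
                (void)pktq_penq(&txq, 2, pkt);

        /* dequeue serves the highest non-empty precedence first */
        p = pktq_deq(&txq, &prec_out);
        if (p != NULL)
                PKTFREE(osh, p, TRUE);

        /* free anything left, then release the mutex created by pktq_init() */
        pktq_flush(osh, &txq, TRUE, NULL, 0);
        (void)pktq_deinit(&txq);
}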
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndpmu.c 530092 2015-01-29 04:44:58Z $ + */ + + +/* + * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs. + * However, in the context of this file the baseband ('BB') PLL/FLL is referred to. + * + * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used. + * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012) + * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports + * fractional frequency generation. pmu2_ does not support fractional frequency generation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PMU_ERROR(args) + +#define PMU_MSG(args) + +/* To check in verbose debugging messages not intended + * to be on except on private builds. + */ +#define PMU_NONE(args) + +/** contains resource bit positions for a specific chip */ +struct rsc_per_chip_s { + uint8 ht_avail; + uint8 macphy_clkavail; + uint8 ht_start; + uint8 otp_pu; +}; + +typedef struct rsc_per_chip_s rsc_per_chip_t; + + +/* SDIO Pad drive strength to select value mappings. + * The last strength value in each table must be 0 (the tri-state value). + */ +typedef struct { + uint8 strength; /* Pad Drive Strength in mA */ + uint8 sel; /* Chip-specific select value */ +} sdiod_drive_str_t; + +/* SDIO Drive Strength to sel value table for PMU Rev 1 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = { + {4, 0x2}, + {2, 0x3}, + {1, 0x0}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = { + {12, 0x7}, + {10, 0x6}, + {8, 0x5}, + {6, 0x4}, + {4, 0x2}, + {2, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = { + {32, 0x7}, + {26, 0x6}, + {22, 0x5}, + {16, 0x4}, + {12, 0x3}, + {8, 0x2}, + {4, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = { + {32, 0x6}, + {26, 0x7}, + {22, 0x4}, + {16, 0x5}, + {12, 0x2}, + {8, 0x3}, + {4, 0x0}, + {0, 0x1} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */ + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */ + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = { + {6, 0x7}, + {5, 0x6}, + {4, 0x5}, + {3, 0x4}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */ + +/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = { + {3, 0x3}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} }; + + +/** + * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel + * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture + * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has + * been written '1'. 
+ */ +#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33 + +static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = { + /* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */ + {16, 0x7}, + {12, 0x5}, + {8, 0x3}, + {4, 0x1} }; /* note: 43143 does not support tristate */ + +#else + +static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = { + /* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */ + {8, 0x7}, + {6, 0x5}, + {4, 0x3}, + {2, 0x1} }; /* note: 43143 does not support tristate */ + +#endif /* BCM_SDIO_VDDIO */ + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) + +/** + * Balance between stable SDIO operation and power consumption is achieved using this function. + * Note that each drive strength table is for a specific VDDIO of the SDIO pads, ideally this + * function should read the VDDIO itself to select the correct table. For now it has been solved + * with the 'BCM_SDIO_VDDIO' preprocessor constant. + * + * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if + * hardware supports this), if no hw support drive strength is not programmed. + */ +void +si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) +{ + sdiod_drive_str_t *str_tab = NULL; + uint32 str_mask = 0; /* only alter desired bits in PMU chipcontrol 1 register */ + uint32 str_shift = 0; + uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */ + uint32 str_ovr_pmuval = 0; /* position of bit within this register */ + pmuregs_t *pmu; + uint origidx; + + if (!(sih->cccaps & CC_CAP_PMU)) { + return; + } + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), sih->pmurev)) { + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1; + str_mask = 0x30000000; + str_shift = 28; + break; + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): + case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8): + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11): + if (sih->pmurev == 8) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3; + } + else if (sih->pmurev == 11) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; + } + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8; + str_mask = 0x00001800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17): +#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33 + if (drivestrength >= ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3; + } +#else + if (drivestrength >= ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) { + str_tab = (sdiod_drive_str_t 
*)&sdiod_drive_strength_tab7_1v8; + } +#endif /* BCM_SDIO_VDDIO */ + str_mask = 0x00000007; + str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR; + break; + default: + PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + bcm_chipname( + CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), sih->pmurev)); + break; + } + + if (str_tab != NULL) { + uint32 cc_data_temp; + int i; + + /* Pick the lowest available drive strength equal or greater than the + * requested strength. Drive strength of 0 requests tri-state. + */ + for (i = 0; drivestrength < str_tab[i].strength; i++) + ; + + if (i > 0 && drivestrength > str_tab[i].strength) + i--; + + W_REG(osh, &pmu->chipcontrol_addr, PMU_CHIPCTL1); + cc_data_temp = R_REG(osh, &pmu->chipcontrol_data); + cc_data_temp &= ~str_mask; + cc_data_temp |= str_tab[i].sel << str_shift; + W_REG(osh, &pmu->chipcontrol_data, cc_data_temp); + if (str_ovr_pmuval) { /* enables the selected drive strength */ + W_REG(osh, &pmu->chipcontrol_addr, str_ovr_pmuctl); + OR_REG(osh, &pmu->chipcontrol_data, str_ovr_pmuval); + } + PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n", + drivestrength, str_tab[i].strength)); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} /* si_sdiod_drive_strength_init */ diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h new file mode 100644 index 000000000000..6654364b9103 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/aidmp.h @@ -0,0 +1,402 @@ +/* + * Broadcom AMBA Interconnect definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
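The table walk in si_sdiod_drive_strength_init() above deserves a worked example: each table is sorted by descending strength and terminated by the tri-state entry, the loop stops at the first entry that does not exceed the request, and the step-back ensures the programmed strength is never below what was asked for. A standalone sketch with the PMU rev 8 values from sdiod_drive_strength_tab3 (the function name is hypothetical):

static uint8 sdiod_sel_sketch(uint32 request_ma)
{
        /* same values as sdiod_drive_strength_tab3: descending, 0-terminated */
        static const sdiod_drive_str_t tab[] = {
                {32, 0x7}, {26, 0x6}, {22, 0x5}, {16, 0x4},
                {12, 0x3}, {8, 0x2}, {4, 0x1}, {0, 0x0}
        };
        int i;

        /* walk down until the entry no longer exceeds the request... */
        for (i = 0; request_ma < tab[i].strength; i++)
                ;
        /* ...then step back so the selection is >= the request;
         * e.g. a 10mA request lands on the 12mA entry (sel 0x3).
         */
        if (i > 0 && request_ma > tab[i].strength)
                i--;

        return tab[i].sel;
}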
+ * + * + * <> + * + * $Id: aidmp.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _AIDMP_H +#define _AIDMP_H + +/* Manufacturer Ids */ +#define MFGID_ARM 0x43b +#define MFGID_BRCM 0x4bf +#define MFGID_MIPS 0x4a7 + +/* Component Classes */ +#define CC_SIM 0 +#define CC_EROM 1 +#define CC_CORESIGHT 9 +#define CC_VERIF 0xb +#define CC_OPTIMO 0xd +#define CC_GEN 0xe +#define CC_PRIMECELL 0xf + +/* Enumeration ROM registers */ +#define ER_EROMENTRY 0x000 +#define ER_REMAPCONTROL 0xe00 +#define ER_REMAPSELECT 0xe04 +#define ER_MASTERSELECT 0xe10 +#define ER_ITCR 0xf00 +#define ER_ITIP 0xf04 + +/* Erom entries */ +#define ER_TAG 0xe +#define ER_TAG1 0x6 +#define ER_VALID 1 +#define ER_CI 0 +#define ER_MP 2 +#define ER_ADD 4 +#define ER_END 0xe +#define ER_BAD 0xffffffff +#define ER_SZ_MAX 4096 /* 4KB */ + +/* EROM CompIdentA */ +#define CIA_MFG_MASK 0xfff00000 +#define CIA_MFG_SHIFT 20 +#define CIA_CID_MASK 0x000fff00 +#define CIA_CID_SHIFT 8 +#define CIA_CCL_MASK 0x000000f0 +#define CIA_CCL_SHIFT 4 + +/* EROM CompIdentB */ +#define CIB_REV_MASK 0xff000000 +#define CIB_REV_SHIFT 24 +#define CIB_NSW_MASK 0x00f80000 +#define CIB_NSW_SHIFT 19 +#define CIB_NMW_MASK 0x0007c000 +#define CIB_NMW_SHIFT 14 +#define CIB_NSP_MASK 0x00003e00 +#define CIB_NSP_SHIFT 9 +#define CIB_NMP_MASK 0x000001f0 +#define CIB_NMP_SHIFT 4 + +/* EROM MasterPortDesc */ +#define MPD_MUI_MASK 0x0000ff00 +#define MPD_MUI_SHIFT 8 +#define MPD_MP_MASK 0x000000f0 +#define MPD_MP_SHIFT 4 + +/* EROM AddrDesc */ +#define AD_ADDR_MASK 0xfffff000 +#define AD_SP_MASK 0x00000f00 +#define AD_SP_SHIFT 8 +#define AD_ST_MASK 0x000000c0 +#define AD_ST_SHIFT 6 +#define AD_ST_SLAVE 0x00000000 +#define AD_ST_BRIDGE 0x00000040 +#define AD_ST_SWRAP 0x00000080 +#define AD_ST_MWRAP 0x000000c0 +#define AD_SZ_MASK 0x00000030 +#define AD_SZ_SHIFT 4 +#define AD_SZ_4K 0x00000000 +#define AD_SZ_8K 0x00000010 +#define AD_SZ_16K 0x00000020 +#define AD_SZ_SZD 0x00000030 +#define AD_AG32 0x00000008 +#define AD_ADDR_ALIGN 0x00000fff +#define AD_SZ_BASE 0x00001000 /* 4KB */ + +/* EROM SizeDesc */ +#define SD_SZ_MASK 0xfffff000 +#define SD_SG32 0x00000008 +#define SD_SZ_ALIGN 0x00000fff + + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _aidmp { + uint32 oobselina30; /* 0x000 */ + uint32 oobselina74; /* 0x004 */ + uint32 PAD[6]; + uint32 oobselinb30; /* 0x020 */ + uint32 oobselinb74; /* 0x024 */ + uint32 PAD[6]; + uint32 oobselinc30; /* 0x040 */ + uint32 oobselinc74; /* 0x044 */ + uint32 PAD[6]; + uint32 oobselind30; /* 0x060 */ + uint32 oobselind74; /* 0x064 */ + uint32 PAD[38]; + uint32 oobselouta30; /* 0x100 */ + uint32 oobselouta74; /* 0x104 */ + uint32 PAD[6]; + uint32 oobseloutb30; /* 0x120 */ + uint32 oobseloutb74; /* 0x124 */ + uint32 PAD[6]; + uint32 oobseloutc30; /* 0x140 */ + uint32 oobseloutc74; /* 0x144 */ + uint32 PAD[6]; + uint32 oobseloutd30; /* 0x160 */ + uint32 oobseloutd74; /* 0x164 */ + uint32 PAD[38]; + uint32 oobsynca; /* 0x200 */ + uint32 oobseloutaen; /* 0x204 */ + uint32 PAD[6]; + uint32 oobsyncb; /* 0x220 */ + uint32 oobseloutben; /* 0x224 */ + uint32 PAD[6]; + uint32 oobsyncc; /* 0x240 */ + uint32 oobseloutcen; /* 0x244 */ + uint32 PAD[6]; + uint32 oobsyncd; /* 0x260 */ + uint32 oobseloutden; /* 0x264 */ + uint32 PAD[38]; + uint32 oobaextwidth; /* 0x300 */ + uint32 oobainwidth; /* 0x304 */ + uint32 oobaoutwidth; /* 0x308 */ + uint32 PAD[5]; + uint32 oobbextwidth; /* 0x320 */ + uint32 oobbinwidth; /* 0x324 */ + uint32 oobboutwidth; /* 0x328 */ + uint32 PAD[5]; + uint32 oobcextwidth; /* 0x340 */ + 
uint32 oobcinwidth; /* 0x344 */ + uint32 oobcoutwidth; /* 0x348 */ + uint32 PAD[5]; + uint32 oobdextwidth; /* 0x360 */ + uint32 oobdinwidth; /* 0x364 */ + uint32 oobdoutwidth; /* 0x368 */ + uint32 PAD[37]; + uint32 ioctrlset; /* 0x400 */ + uint32 ioctrlclear; /* 0x404 */ + uint32 ioctrl; /* 0x408 */ + uint32 PAD[61]; + uint32 iostatus; /* 0x500 */ + uint32 PAD[127]; + uint32 ioctrlwidth; /* 0x700 */ + uint32 iostatuswidth; /* 0x704 */ + uint32 PAD[62]; + uint32 resetctrl; /* 0x800 */ + uint32 resetstatus; /* 0x804 */ + uint32 resetreadid; /* 0x808 */ + uint32 resetwriteid; /* 0x80c */ + uint32 PAD[60]; + uint32 errlogctrl; /* 0x900 */ + uint32 errlogdone; /* 0x904 */ + uint32 errlogstatus; /* 0x908 */ + uint32 errlogaddrlo; /* 0x90c */ + uint32 errlogaddrhi; /* 0x910 */ + uint32 errlogid; /* 0x914 */ + uint32 errloguser; /* 0x918 */ + uint32 errlogflags; /* 0x91c */ + uint32 PAD[56]; + uint32 intstatus; /* 0xa00 */ + uint32 PAD[255]; + uint32 config; /* 0xe00 */ + uint32 PAD[63]; + uint32 itcr; /* 0xf00 */ + uint32 PAD[3]; + uint32 itipooba; /* 0xf10 */ + uint32 itipoobb; /* 0xf14 */ + uint32 itipoobc; /* 0xf18 */ + uint32 itipoobd; /* 0xf1c */ + uint32 PAD[4]; + uint32 itipoobaout; /* 0xf30 */ + uint32 itipoobbout; /* 0xf34 */ + uint32 itipoobcout; /* 0xf38 */ + uint32 itipoobdout; /* 0xf3c */ + uint32 PAD[4]; + uint32 itopooba; /* 0xf50 */ + uint32 itopoobb; /* 0xf54 */ + uint32 itopoobc; /* 0xf58 */ + uint32 itopoobd; /* 0xf5c */ + uint32 PAD[4]; + uint32 itopoobain; /* 0xf70 */ + uint32 itopoobbin; /* 0xf74 */ + uint32 itopoobcin; /* 0xf78 */ + uint32 itopoobdin; /* 0xf7c */ + uint32 PAD[4]; + uint32 itopreset; /* 0xf90 */ + uint32 PAD[15]; + uint32 peripherialid4; /* 0xfd0 */ + uint32 peripherialid5; /* 0xfd4 */ + uint32 peripherialid6; /* 0xfd8 */ + uint32 peripherialid7; /* 0xfdc */ + uint32 peripherialid0; /* 0xfe0 */ + uint32 peripherialid1; /* 0xfe4 */ + uint32 peripherialid2; /* 0xfe8 */ + uint32 peripherialid3; /* 0xfec */ + uint32 componentid0; /* 0xff0 */ + uint32 componentid1; /* 0xff4 */ + uint32 componentid2; /* 0xff8 */ + uint32 componentid3; /* 0xffc */ +} aidmp_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* Out-of-band Router registers */ +#define OOB_BUSCONFIG 0x020 +#define OOB_STATUSA 0x100 +#define OOB_STATUSB 0x104 +#define OOB_STATUSC 0x108 +#define OOB_STATUSD 0x10c +#define OOB_ENABLEA0 0x200 +#define OOB_ENABLEA1 0x204 +#define OOB_ENABLEA2 0x208 +#define OOB_ENABLEA3 0x20c +#define OOB_ENABLEB0 0x280 +#define OOB_ENABLEB1 0x284 +#define OOB_ENABLEB2 0x288 +#define OOB_ENABLEB3 0x28c +#define OOB_ENABLEC0 0x300 +#define OOB_ENABLEC1 0x304 +#define OOB_ENABLEC2 0x308 +#define OOB_ENABLEC3 0x30c +#define OOB_ENABLED0 0x380 +#define OOB_ENABLED1 0x384 +#define OOB_ENABLED2 0x388 +#define OOB_ENABLED3 0x38c +#define OOB_ITCR 0xf00 +#define OOB_ITIPOOBA 0xf10 +#define OOB_ITIPOOBB 0xf14 +#define OOB_ITIPOOBC 0xf18 +#define OOB_ITIPOOBD 0xf1c +#define OOB_ITOPOOBA 0xf30 +#define OOB_ITOPOOBB 0xf34 +#define OOB_ITOPOOBC 0xf38 +#define OOB_ITOPOOBD 0xf3c + +/* DMP wrapper registers */ +#define AI_OOBSELINA30 0x000 +#define AI_OOBSELINA74 0x004 +#define AI_OOBSELINB30 0x020 +#define AI_OOBSELINB74 0x024 +#define AI_OOBSELINC30 0x040 +#define AI_OOBSELINC74 0x044 +#define AI_OOBSELIND30 0x060 +#define AI_OOBSELIND74 0x064 +#define AI_OOBSELOUTA30 0x100 +#define AI_OOBSELOUTA74 0x104 +#define AI_OOBSELOUTB30 0x120 +#define AI_OOBSELOUTB74 0x124 +#define AI_OOBSELOUTC30 0x140 +#define AI_OOBSELOUTC74 0x144 +#define AI_OOBSELOUTD30 0x160 +#define 
AI_OOBSELOUTD74 0x164 +#define AI_OOBSYNCA 0x200 +#define AI_OOBSELOUTAEN 0x204 +#define AI_OOBSYNCB 0x220 +#define AI_OOBSELOUTBEN 0x224 +#define AI_OOBSYNCC 0x240 +#define AI_OOBSELOUTCEN 0x244 +#define AI_OOBSYNCD 0x260 +#define AI_OOBSELOUTDEN 0x264 +#define AI_OOBAEXTWIDTH 0x300 +#define AI_OOBAINWIDTH 0x304 +#define AI_OOBAOUTWIDTH 0x308 +#define AI_OOBBEXTWIDTH 0x320 +#define AI_OOBBINWIDTH 0x324 +#define AI_OOBBOUTWIDTH 0x328 +#define AI_OOBCEXTWIDTH 0x340 +#define AI_OOBCINWIDTH 0x344 +#define AI_OOBCOUTWIDTH 0x348 +#define AI_OOBDEXTWIDTH 0x360 +#define AI_OOBDINWIDTH 0x364 +#define AI_OOBDOUTWIDTH 0x368 + + +#define AI_IOCTRLSET 0x400 +#define AI_IOCTRLCLEAR 0x404 +#define AI_IOCTRL 0x408 +#define AI_IOSTATUS 0x500 +#define AI_RESETCTRL 0x800 +#define AI_RESETSTATUS 0x804 + +#define AI_IOCTRLWIDTH 0x700 +#define AI_IOSTATUSWIDTH 0x704 + +#define AI_RESETREADID 0x808 +#define AI_RESETWRITEID 0x80c +#define AI_ERRLOGCTRL 0x900 +#define AI_ERRLOGDONE 0x904 +#define AI_ERRLOGSTATUS 0x908 +#define AI_ERRLOGADDRLO 0x90c +#define AI_ERRLOGADDRHI 0x910 +#define AI_ERRLOGID 0x914 +#define AI_ERRLOGUSER 0x918 +#define AI_ERRLOGFLAGS 0x91c +#define AI_INTSTATUS 0xa00 +#define AI_CONFIG 0xe00 +#define AI_ITCR 0xf00 +#define AI_ITIPOOBA 0xf10 +#define AI_ITIPOOBB 0xf14 +#define AI_ITIPOOBC 0xf18 +#define AI_ITIPOOBD 0xf1c +#define AI_ITIPOOBAOUT 0xf30 +#define AI_ITIPOOBBOUT 0xf34 +#define AI_ITIPOOBCOUT 0xf38 +#define AI_ITIPOOBDOUT 0xf3c +#define AI_ITOPOOBA 0xf50 +#define AI_ITOPOOBB 0xf54 +#define AI_ITOPOOBC 0xf58 +#define AI_ITOPOOBD 0xf5c +#define AI_ITOPOOBAIN 0xf70 +#define AI_ITOPOOBBIN 0xf74 +#define AI_ITOPOOBCIN 0xf78 +#define AI_ITOPOOBDIN 0xf7c +#define AI_ITOPRESET 0xf90 +#define AI_PERIPHERIALID4 0xfd0 +#define AI_PERIPHERIALID5 0xfd4 +#define AI_PERIPHERIALID6 0xfd8 +#define AI_PERIPHERIALID7 0xfdc +#define AI_PERIPHERIALID0 0xfe0 +#define AI_PERIPHERIALID1 0xfe4 +#define AI_PERIPHERIALID2 0xfe8 +#define AI_PERIPHERIALID3 0xfec +#define AI_COMPONENTID0 0xff0 +#define AI_COMPONENTID1 0xff4 +#define AI_COMPONENTID2 0xff8 +#define AI_COMPONENTID3 0xffc + +/* resetctrl */ +#define AIRC_RESET 1 + +/* errlogctrl */ +#define AIELC_TO_EXP_MASK 0x0000001f0 /* backplane timeout exponent */ +#define AIELC_TO_EXP_SHIFT 4 +#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */ + +/* errlogdone */ +#define AIELD_ERRDONE_MASK 0x3 + +/* errlogstatus */ +#define AIELS_TIMEOUT_MASK 0x3 + +/* config */ +#define AICFG_OOB 0x00000020 +#define AICFG_IOS 0x00000010 +#define AICFG_IOC 0x00000008 +#define AICFG_TO 0x00000004 +#define AICFG_ERRL 0x00000002 +#define AICFG_RST 0x00000001 + +/* bit defines for AI_OOBSELOUTB74 reg */ +#define OOB_SEL_OUTEN_B_5 15 +#define OOB_SEL_OUTEN_B_6 23 + +/* AI_OOBSEL for A/B/C/D, 0-7 */ +#define AI_OOBSEL_MASK 0x1F +#define AI_OOBSEL_0_SHIFT 0 +#define AI_OOBSEL_1_SHIFT 8 +#define AI_OOBSEL_2_SHIFT 16 +#define AI_OOBSEL_3_SHIFT 24 +#define AI_OOBSEL_4_SHIFT 0 +#define AI_OOBSEL_5_SHIFT 8 +#define AI_OOBSEL_6_SHIFT 16 +#define AI_OOBSEL_7_SHIFT 24 +#define AI_IOCTRL_ENABLE_D11_PME (1 << 14) + +#endif /* _AIDMP_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h new file mode 100644 index 000000000000..e71f5c82da6c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h @@ -0,0 +1,32 @@ +/* + * BCM common config options + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this 
software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_cfg.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcm_cfg_h_ +#define _bcm_cfg_h_ +#endif /* _bcm_cfg_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h new file mode 100644 index 000000000000..79ae0f5d4a9c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h @@ -0,0 +1,364 @@ +/* + * Memory pools library, Public interface + * + * API Overview + * + * This package provides a memory allocation subsystem based on pools of + * homogenous objects. + * + * Instrumentation is available for reporting memory utilization both + * on a per-data-structure basis and system wide. + * + * There are two main types defined in this API. + * + * pool manager: A singleton object that acts as a factory for + * pool allocators. It also is used for global + * instrumentation, such as reporting all blocks + * in use across all data structures. The pool manager + * creates and provides individual memory pools + * upon request to application code. + * + * memory pool: An object for allocating homogenous memory blocks. + * + * Global identifiers in this module use the following prefixes: + * bcm_mpm_* Memory pool manager + * bcm_mp_* Memory pool + * + * There are two main types of memory pools: + * + * prealloc: The contiguous memory block of objects can either be supplied + * by the client or malloc'ed by the memory manager. The objects are + * allocated out of a block of memory and freed back to the block. + * + * heap: The memory pool allocator uses the heap (malloc/free) for memory. + * In this case, the pool allocator is just providing statistics + * and instrumentation on top of the heap, without modifying the heap + * allocation implementation. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
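The split between the pool manager and the individual pools described in the overview above is easiest to see in a short lifecycle sketch. The calls below use the prototypes declared later in this header; 'osh', the element type and the pool name are placeholders, and BCME_OK comes from the driver's common error codes.

struct sketch_elem { int a, b; };   /* hypothetical fixed-size object */

static int mpool_usage_sketch(struct osl_info *osh)
{
        bcm_mpm_mgr_h mgr = NULL;
        bcm_mp_pool_h pool = NULL;
        struct sketch_elem *e;
        int err;

        /* one manager per subsystem, then one pool per object type */
        err = bcm_mpm_init(osh, 4, &mgr);
        if (err != BCME_OK)
                return err;

        /* NULL memstart / 0 memsize: the manager allocates the backing slab */
        err = bcm_mpm_create_prealloc_pool(mgr, sizeof(*e), 16,
                NULL, 0, "sketch", &pool);
        if (err != BCME_OK)
                goto out;

        e = (struct sketch_elem *)bcm_mp_alloc(pool);
        if (e != NULL)
                bcm_mp_free(pool, e);

        bcm_mpm_delete_prealloc_pool(mgr, &pool);
out:
        bcm_mpm_deinit(&mgr);
        return err;
}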
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_mpool_pub.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _BCM_MPOOL_PUB_H +#define _BCM_MPOOL_PUB_H 1 + +#include /* needed for uint16 */ + + +/* +************************************************************************** +* +* Type definitions, handles +* +************************************************************************** +*/ + +/* Forward declaration of OSL handle. */ +struct osl_info; + +/* Forward declaration of string buffer. */ +struct bcmstrbuf; + +/* + * Opaque type definition for the pool manager handle. This object is used for global + * memory pool operations such as obtaining a new pool, deleting a pool, iterating and + * instrumentation/debugging. + */ +struct bcm_mpm_mgr; +typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h; + +/* + * Opaque type definition for an instance of a pool. This handle is used for allocating + * and freeing memory through the pool, as well as management/instrumentation on this + * specific pool. + */ +struct bcm_mp_pool; +typedef struct bcm_mp_pool *bcm_mp_pool_h; + + +/* + * To make instrumentation more readable, every memory + * pool must have a readable name. Pool names are up to + * 8 bytes including '\0' termination. (7 printable characters.) + */ +#define BCM_MP_NAMELEN 8 + + +/* + * Type definition for pool statistics. + */ +typedef struct bcm_mp_stats { + char name[BCM_MP_NAMELEN]; /* Name of this pool. */ + unsigned int objsz; /* Object size allocated in this pool */ + uint16 nobj; /* Total number of objects in this pool */ + uint16 num_alloc; /* Number of objects currently allocated */ + uint16 high_water; /* Max number of allocated objects. */ + uint16 failed_alloc; /* Failed allocations. */ +} bcm_mp_stats_t; + + +/* +************************************************************************** +* +* API Routines on the pool manager. +* +************************************************************************** +*/ + +/* + * bcm_mpm_init() - initialize the whole memory pool system. + * + * Parameters: + * osh: INPUT Operating system handle. Needed for heap memory allocation. + * max_pools: INPUT Maximum number of mempools supported. + * mgr: OUTPUT The handle is written with the new pools manager object/handle. + * + * Returns: + * BCME_OK Object initialized successfully. May be used. + * BCME_NOMEM Initialization failed due to no memory. Object must not be used. + */ +int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp); + + +/* + * bcm_mpm_deinit() - de-initialize the whole memory pool system. + * + * Parameters: + * mgr: INPUT Pointer to pool manager handle. + * + * Returns: + * BCME_OK Memory pool manager successfully de-initialized. + * other Indicated error occured during de-initialization. + */ +int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp); + +/* + * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The + * pool uses a contiguous block of pre-alloced + * memory. The memory block may either be provided + * by the client or dynamically allocated by the + * pool manager. 
+ * + * Parameters: + * mgr: INPUT The handle to the pool manager + * obj_sz: INPUT Size of objects that will be allocated by the new pool + * Must be >= sizeof(void *). + * nobj: INPUT Maximum number of concurrently existing objects to support + * memstart INPUT Pointer to the memory to use, or NULL to malloc() + * memsize INPUT Number of bytes referenced from memstart (for error checking). + * Must be 0 if 'memstart' is NULL. + * poolname INPUT For instrumentation, the name of the pool + * newp: OUTPUT The handle for the new pool, if creation is successful + * + * Returns: + * BCME_OK Pool created ok. + * other Pool not created due to indicated error. newpoolp set to NULL. + * + * + */ +int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr, + unsigned int obj_sz, + int nobj, + void *memstart, + unsigned int memsize, + const char poolname[BCM_MP_NAMELEN], + bcm_mp_pool_h *newp); + + +/* + * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after + * all memory objects have been freed back to the pool. + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * pool: INPUT The handle of the pool to delete + * + * Returns: + * BCME_OK Pool deleted ok. + * other Pool not deleted due to indicated error. + * + */ +int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); + +/* + * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory + * pool allocator uses the heap (malloc/free) for memory. + * In this case, the pool allocator is just providing + * statistics and instrumentation on top of the heap, + * without modifying the heap allocation implementation. + * + * Parameters: + * mgr: INPUT The handle to the pool manager + * obj_sz: INPUT Size of objects that will be allocated by the new pool + * poolname INPUT For instrumentation, the name of the pool + * newp: OUTPUT The handle for the new pool, if creation is successful + * + * Returns: + * BCME_OK Pool created ok. + * other Pool not created due to indicated error. newpoolp set to NULL. + * + * + */ +int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz, + const char poolname[BCM_MP_NAMELEN], + bcm_mp_pool_h *newp); + + +/* + * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after + * all memory objects have been freed back to the pool. + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * pool: INPUT The handle of the pool to delete + * + * Returns: + * BCME_OK Pool deleted ok. + * other Pool not deleted due to indicated error. + * + */ +int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); + + +/* + * bcm_mpm_stats() - Return stats for all pools + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * stats: OUTPUT Array of pool statistics. + * nentries: MOD Max elements in 'stats' array on INPUT. Actual number + * of array elements copied to 'stats' on OUTPUT. + * + * Returns: + * BCME_OK Ok + * other Error getting stats. + * + */ +int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries); + + +/* + * bcm_mpm_dump() - Display statistics on all pools + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * b: OUTPUT Output buffer. + * + * Returns: + * BCME_OK Ok + * other Error during dump. + * + */ +int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b); + + +/* + * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to + * compensate for alignment requirements of the objects. 
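When the client supplies its own backing slab, the padding mentioned above determines how much memory to reserve. A sketch of that sizing step, assuming bcm_mpm_get_obj_size() as declared just below, and MALLOC()/BCME_NOMEM from the driver's osl and error-code headers (the function name is hypothetical):

static int sized_pool_sketch(bcm_mpm_mgr_h mgr, struct osl_info *osh,
        unsigned int obj_sz, int nobj, bcm_mp_pool_h *poolp)
{
        unsigned int padded_sz;
        void *slab;
        int err;

        /* padded size covers per-object alignment overhead */
        err = bcm_mpm_get_obj_size(mgr, obj_sz, &padded_sz);
        if (err != BCME_OK)
                return err;

        slab = MALLOC(osh, nobj * padded_sz);
        if (slab == NULL)
                return BCME_NOMEM;

        /* hand the correctly sized slab to the prealloc pool constructor */
        return bcm_mpm_create_prealloc_pool(mgr, obj_sz, nobj,
                slab, nobj * padded_sz, "sized", poolp);
}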
+ * This function provides the padded object size. If clients + * pre-allocate a memory slab for a memory pool, the + * padded object size should be used by the client to allocate + * the memory slab (in order to provide sufficent space for + * the maximum number of objects). + * + * Parameters: + * mgr: INPUT The handle to the pools manager. + * obj_sz: INPUT Input object size. + * padded_obj_sz: OUTPUT Padded object size. + * + * Returns: + * BCME_OK Ok + * BCME_BADARG Bad arguments. + * + */ +int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz); + + +/* +*************************************************************************** +* +* API Routines on a specific pool. +* +*************************************************************************** +*/ + + +/* + * bcm_mp_alloc() - Allocate a memory pool object. + * + * Parameters: + * pool: INPUT The handle to the pool. + * + * Returns: + * A pointer to the new object. NULL on error. + * + */ +void* bcm_mp_alloc(bcm_mp_pool_h pool); + +/* + * bcm_mp_free() - Free a memory pool object. + * + * Parameters: + * pool: INPUT The handle to the pool. + * objp: INPUT A pointer to the object to free. + * + * Returns: + * BCME_OK Ok + * other Error during free. + * + */ +int bcm_mp_free(bcm_mp_pool_h pool, void *objp); + +/* + * bcm_mp_stats() - Return stats for this pool + * + * Parameters: + * pool: INPUT The handle to the pool + * stats: OUTPUT Pool statistics + * + * Returns: + * BCME_OK Ok + * other Error getting statistics. + * + */ +int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats); + + +/* + * bcm_mp_dump() - Dump a pool + * + * Parameters: + * pool: INPUT The handle to the pool + * b OUTPUT Output buffer + * + * Returns: + * BCME_OK Ok + * other Error during dump. + * + */ +int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b); + + +#endif /* _BCM_MPOOL_PUB_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_ring.h b/drivers/net/wireless/bcmdhd/include/bcm_ring.h new file mode 100644 index 000000000000..5f1b38c65e3c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcm_ring.h @@ -0,0 +1,616 @@ +#ifndef __bcm_ring_included__ +#define __bcm_ring_included__ + +/* + * +---------------------------------------------------------------------------- + * + * bcm_ring.h : Ring context abstraction + * + * The ring context tracks the WRITE and READ indices where elements may be + * produced and consumed respectively. All elements in the ring need to be + * fixed size. + * + * NOTE: A ring of size N, may only hold N-1 elements. + * + * +---------------------------------------------------------------------------- + * + * API Notes: + * + * Ring manipulation API allows for: + * Pending operations: Often before some work can be completed, it may be + * desired that several resources are available, e.g. space for production in + * a ring. Approaches such as, #1) reserve resources one by one and return them + * if another required resource is not available, or #2) employ a two pass + * algorithm of first testing whether all resources are available, have a + * an impact on performance critical code. The approach taken here is more akin + * to approach #2, where a test for resource availability essentially also + * provides the index for production in an un-committed state. + * The same approach is taken for the consumer side. + * + * - Pending production: Fetch the next index where a ring element may be + * produced. The caller may not commit the WRITE of the element. 
+ * - Pending consumption: Fetch the next index where a ring element may be + * consumed. The caller may not commut the READ of the element. + * + * Producer side API: + * - bcm_ring_is_full : Test whether ring is full + * - bcm_ring_prod : Fetch index where an element may be produced (commit) + * - bcm_ring_prod_pend: Fetch index where an element may be produced (pending) + * - bcm_ring_prod_done: Commit a previous pending produce fetch + * - bcm_ring_prod_avail: Fetch total number free slots eligible for production + * + * Consumer side API: + * - bcm_ring_is_empty : Test whether ring is empty + * - bcm_ring_cons : Fetch index where an element may be consumed (commit) + * - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending) + * - bcm_ring_cons_done: Commit a previous pending consume fetch + * - bcm_ring_cons_avail: Fetch total number elements eligible for consumption + * + * - bcm_ring_sync_read: Sync read offset in peer ring, from local ring + * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring + * + * +---------------------------------------------------------------------------- + * + * Design Notes: + * Following items are not tracked in a ring context (design decision) + * - width of a ring element. + * - depth of the ring. + * - base of the buffer, where the elements are stored. + * - count of number of free slots in the ring + * + * Implementation Notes: + * - When BCM_RING_DEBUG is enabled, need explicit bcm_ring_init(). + * - BCM_RING_EMPTY and BCM_RING_FULL are (-1) + * + * +---------------------------------------------------------------------------- + * + * Usage Notes: + * An application may incarnate a ring of some fixed sized elements, by defining + * - a ring data buffer to store the ring elements. + * - depth of the ring (max number of elements managed by ring context). + * Preferrably, depth may be represented as a constant. + * - width of a ring element: to be used in pointer arithmetic with the ring's + * data buffer base and an index to fetch the ring element. + * + * Use bcm_workq_t to instantiate a pair of workq constructs, one for the + * producer and the other for the consumer, both pointing to the same circular + * buffer. The producer may operate on it's own local workq and flush the write + * index to the consumer. Likewise the consumer may use its local workq and + * flush the read index to the producer. This way we do not repeatedly access + * the peer's context. The two peers may reside on different CPU cores with a + * private L1 data cache. + * +---------------------------------------------------------------------------- + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
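The pending-versus-committed distinction described in the API notes above looks like this from the caller's side. The sketch uses the ring context and inline helpers defined later in this header over a caller-owned element array; the element type and depth are hypothetical, and a ring of depth N holds at most N-1 elements.

#define SKETCH_DEPTH 8
typedef struct { void *data; } sketch_elem_t;

static void ring_usage_sketch(void *item)
{
        bcm_ring_t ring;
        sketch_elem_t elems[SKETCH_DEPTH];
        int idx, pend;

        bcm_ring_init(&ring);

        /* producer: fetch a slot, fill it, and only then commit the write */
        idx = bcm_ring_prod_pend(&ring, &pend, SKETCH_DEPTH);
        if (idx != BCM_RING_FULL) {
                elems[idx].data = item;
                bcm_ring_prod_done(&ring, pend);
        }

        /* consumer: fetch the oldest committed slot, use it, then commit the read */
        idx = bcm_ring_cons_pend(&ring, &pend, SKETCH_DEPTH);
        if (idx != BCM_RING_EMPTY) {
                (void)elems[idx].data;
                bcm_ring_cons_done(&ring, pend);
        }
}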
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * <> + * + * $Id: bcm_ring.h 591283 2015-10-07 11:52:00Z $ + * + * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- + * vim: set ts=4 noet sw=4 tw=80: + * + * +---------------------------------------------------------------------------- + */ + +#ifdef ____cacheline_aligned +#define __ring_aligned ____cacheline_aligned +#else +#define __ring_aligned +#endif + +/* Conditional compile for debug */ +/* #define BCM_RING_DEBUG */ + +#define BCM_RING_EMPTY (-1) +#define BCM_RING_FULL (-1) +#define BCM_RING_NULL ((bcm_ring_t *)NULL) + +#if defined(BCM_RING_DEBUG) +#define RING_ASSERT(exp) ASSERT(exp) +#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \ + ((ring)->self == (ring))) +#else /* ! BCM_RING_DEBUG */ +#define RING_ASSERT(exp) do {} while (0) +#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL) +#endif /* ! BCM_RING_DEBUG */ + +#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0) + +/* + * +---------------------------------------------------------------------------- + * Ring Context + * +---------------------------------------------------------------------------- + */ +typedef struct bcm_ring { /* Ring context */ +#if defined(BCM_RING_DEBUG) + struct bcm_ring *self; /* ptr to self for IS VALID test */ +#endif /* BCM_RING_DEBUG */ + int write __ring_aligned; /* WRITE index in a circular ring */ + int read __ring_aligned; /* READ index in a circular ring */ +} bcm_ring_t; + + +static INLINE void bcm_ring_init(bcm_ring_t *ring); +static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from); +static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring); + +static INLINE int __bcm_ring_next_write(bcm_ring_t *ring, const int ring_size); + +static INLINE bool __bcm_ring_full(bcm_ring_t *ring, int next_write); +static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write); +static INLINE int bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, + const int ring_size); +static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read); +static INLINE int bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, + const int ring_size); +static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self); +static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self); + +static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE void bcm_ring_cons_all(bcm_ring_t *ring); + + +/** + * bcm_ring_init - initialize a ring context. 
+ * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_init(bcm_ring_t *ring) +{ + ASSERT(ring != (bcm_ring_t *)NULL); +#if defined(BCM_RING_DEBUG) + ring->self = ring; +#endif /* BCM_RING_DEBUG */ + ring->write = 0; + ring->read = 0; +} + +/** + * bcm_ring_copy - copy construct a ring + * @to: pointer to the new ring context + * @from: pointer to orig ring context + */ +static INLINE void +bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from) +{ + bcm_ring_init(to); + + to->write = from->write; + to->read = from->read; +} + +/** + * bcm_ring_is_empty - "Boolean" test whether ring is empty. + * @ring: pointer to a ring context + * + * PS. does not return BCM_RING_EMPTY value. + */ +static INLINE bool +bcm_ring_is_empty(bcm_ring_t *ring) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + return (ring->read == ring->write); +} + + +/** + * __bcm_ring_next_write - determine the index where the next write may occur + * (with wrap-around). + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PRIVATE INTERNAL USE ONLY. + */ +static INLINE int +__bcm_ring_next_write(bcm_ring_t *ring, const int ring_size) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + return ((ring->write + 1) % ring_size); +} + + +/** + * __bcm_ring_full - support function for ring full test. + * @ring: pointer to a ring context + * @next_write: next location in ring where an element is to be produced + * + * PRIVATE INTERNAL USE ONLY. + */ +static INLINE bool +__bcm_ring_full(bcm_ring_t *ring, int next_write) +{ + return (next_write == ring->read); +} + + +/** + * bcm_ring_is_full - "Boolean" test whether a ring is full. + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PS. does not return BCM_RING_FULL value. + */ +static INLINE bool +bcm_ring_is_full(bcm_ring_t *ring, const int ring_size) +{ + int next_write; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + next_write = __bcm_ring_next_write(ring, ring_size); + return __bcm_ring_full(ring, next_write); +} + + +/** + * bcm_ring_prod_done - commit a previously pending index where production + * was requested. + * @ring: pointer to a ring context + * @write: index into ring upto where production was done. + * +---------------------------------------------------------------------------- + */ +static INLINE void +bcm_ring_prod_done(bcm_ring_t *ring, int write) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + ring->write = write; +} + + +/** + * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be + * produced. + * @ring: pointer to a ring context + * @pend_write: next index, after the returned index + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, const int ring_size) +{ + int rtn; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + *pend_write = __bcm_ring_next_write(ring, ring_size); + if (__bcm_ring_full(ring, *pend_write)) { + *pend_write = BCM_RING_FULL; + rtn = BCM_RING_FULL; + } else { + /* production is not committed, caller needs to explicitly commit */ + rtn = ring->write; + } + return rtn; +} + + +/** + * bcm_ring_prod - Fetch and "commit" the next index where a ring element may + * be produced. 
+ * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod(bcm_ring_t *ring, const int ring_size) +{ + int next_write, prod_write; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + + next_write = __bcm_ring_next_write(ring, ring_size); + if (__bcm_ring_full(ring, next_write)) { + prod_write = BCM_RING_FULL; + } else { + prod_write = ring->write; + bcm_ring_prod_done(ring, next_write); /* "commit" production */ + } + return prod_write; +} + + +/** + * bcm_ring_cons_done - commit a previously pending read + * @ring: pointer to a ring context + * @read: index upto which elements have been consumed. + */ +static INLINE void +bcm_ring_cons_done(bcm_ring_t *ring, int read) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + ring->read = read; +} + + +/** + * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring + * element may be consumed. + * @ring: pointer to a ring context + * @pend_read: index into ring upto which elements may be consumed. + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, const int ring_size) +{ + int rtn; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + *pend_read = BCM_RING_EMPTY; + rtn = BCM_RING_EMPTY; + } else { + *pend_read = (ring->read + 1) % ring_size; + /* production is not committed, caller needs to explicitly commit */ + rtn = ring->read; + } + return rtn; +} + + +/** + * bcm_ring_cons - fetch and "commit" the next index where a ring element may + * be consumed. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons(bcm_ring_t *ring, const int ring_size) +{ + int cons_read; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + cons_read = BCM_RING_EMPTY; + } else { + cons_read = ring->read; + ring->read = (ring->read + 1) % ring_size; /* read is committed */ + } + return cons_read; +} + + +/** + * bcm_ring_sync_read - on consumption, update peer's read index. + * @peer: pointer to peer's producer ring context + * @self: pointer to consumer's ring context + */ +static INLINE void +bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->read = self->read; /* flush read update to peer producer */ +} + + +/** + * bcm_ring_sync_write - on consumption, update peer's write index. + * @peer: pointer to peer's consumer ring context + * @self: pointer to producer's ring context + */ +static INLINE void +bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->write = self->write; /* flush write update to peer consumer */ +} + + +/** + * bcm_ring_prod_avail - fetch total number of available empty slots in the + * ring for production. 
+ * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size) +{ + int prod_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->write >= ring->read) { + prod_avail = (ring_size - (ring->write - ring->read) - 1); + } else { + prod_avail = (ring->read - (ring->write + 1)); + } + ASSERT(prod_avail < ring_size); + return prod_avail; +} + + +/** + * bcm_ring_cons_avail - fetch total number of available elements for consumption. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size) +{ + int cons_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->read == ring->write) { + cons_avail = 0; + } else if (ring->read > ring->write) { + cons_avail = ((ring_size - ring->read) + ring->write); + } else { + cons_avail = ring->write - ring->read; + } + ASSERT(cons_avail < ring_size); + return cons_avail; +} + + +/** + * bcm_ring_cons_all - set ring in state where all elements are consumed. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_cons_all(bcm_ring_t *ring) +{ + ring->read = ring->write; +} + + +/** + * Work Queue + * A work Queue is composed of a ring of work items, of a specified depth. + * It HAS-A bcm_ring object, comprising of a RD and WR offset, to implement a + * producer/consumer circular ring. + */ + +struct bcm_workq { + bcm_ring_t ring; /* Ring context abstraction */ + struct bcm_workq *peer; /* Peer workq context */ + void *buffer; /* Buffer storage for work items in workQ */ + int ring_size; /* Depth of workQ */ +} __ring_aligned; + +typedef struct bcm_workq bcm_workq_t; + + +/* #define BCM_WORKQ_DEBUG */ +#if defined(BCM_WORKQ_DEBUG) +#define WORKQ_ASSERT(exp) ASSERT(exp) +#else /* ! BCM_WORKQ_DEBUG */ +#define WORKQ_ASSERT(exp) do {} while (0) +#endif /* ! BCM_WORKQ_DEBUG */ + +#define WORKQ_AUDIT(workq) \ + WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \ + WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size); + +#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL) + +#define WORKQ_PEER(workq) ((workq)->peer) +#define WORKQ_RING(workq) (&((workq)->ring)) +#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring)) + +#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \ + WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \ + ((__elem_type *)((__workq)->buffer)) + (__index); \ +}) + + +static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer, + void *buffer, int ring_size); + +static INLINE bool bcm_workq_is_empty(bcm_workq_t *workq_prod); + +static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod); +static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons); + +static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod); +static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons); + +/** + * bcm_workq_init - initialize a workq + * @workq: pointer to a workq context + * @buffer: pointer to a pre-allocated circular buffer to serve as a ring + * @ring_size: size of the ring in terms of max number of elements. 
+ */ +static INLINE void +bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer, + void *buffer, int ring_size) +{ + ASSERT(workq != BCM_WORKQ_NULL); + ASSERT(workq_peer != BCM_WORKQ_NULL); + ASSERT(buffer != NULL); + ASSERT(ring_size > 0); + + WORKQ_PEER(workq) = workq_peer; + WORKQ_PEER(workq_peer) = workq; + + bcm_ring_init(WORKQ_RING(workq)); + bcm_ring_init(WORKQ_RING(workq_peer)); + + workq->buffer = workq_peer->buffer = buffer; + workq->ring_size = workq_peer->ring_size = ring_size; +} + +/** + * bcm_workq_empty - test whether there is work + * @workq_prod: producer's workq + */ +static INLINE bool +bcm_workq_is_empty(bcm_workq_t *workq_prod) +{ + return bcm_ring_is_empty(WORKQ_RING(workq_prod)); +} + +/** + * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring + * @workq_prod: producer's workq whose write index must be synced to peer + */ +static INLINE void +bcm_workq_prod_sync(bcm_workq_t *workq_prod) +{ + WORKQ_AUDIT(workq_prod); + + /* cons::write <--- prod::write */ + bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod)); +} + +/** + * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring + * @workq_cons: consumer's workq whose read index must be synced to peer + */ +static INLINE void +bcm_workq_cons_sync(bcm_workq_t *workq_cons) +{ + WORKQ_AUDIT(workq_cons); + + /* prod::read <--- cons::read */ + bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons)); +} + + +/** + * bcm_workq_prod_refresh - Fetch the updated consumer's read index + * @workq_prod: producer's workq whose read index must be refreshed from peer + */ +static INLINE void +bcm_workq_prod_refresh(bcm_workq_t *workq_prod) +{ + WORKQ_AUDIT(workq_prod); + + /* prod::read <--- cons::read */ + bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod)); +} + +/** + * bcm_workq_cons_refresh - Fetch the updated producer's write index + * @workq_cons: consumer's workq whose write index must be refreshed from peer + */ +static INLINE void +bcm_workq_cons_refresh(bcm_workq_t *workq_cons) +{ + WORKQ_AUDIT(workq_cons); + + /* cons::write <--- prod::write */ + bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons)); +} + + +#endif /* ! __bcm_ring_h_included__ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h new file mode 100644 index 000000000000..a95dc31c27bd --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h @@ -0,0 +1,135 @@ +/* + * CDC network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
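The workq pair above is meant to be used with one context per side, with the sync/refresh helpers exchanging read and write offsets between the peers. The sketch below keeps both sides in one address space purely for illustration; work_item_t, WORKQ_DEPTH and the helper names are made up, and uint32 is assumed to come from the driver's typedefs:

#define WORKQ_DEPTH 64
typedef struct { uint32 opcode; uint32 arg; } work_item_t;  /* illustrative */

static work_item_t workq_buf[WORKQ_DEPTH];
static bcm_workq_t prod_q, cons_q;

static void my_workq_setup(void)
{
	/* One call pairs the two contexts and points both at the same storage. */
	bcm_workq_init(&prod_q, &cons_q, workq_buf, WORKQ_DEPTH);
}

/* Producer side: place an item, then make it visible to the consumer. */
static void my_post_work(uint32 opcode, uint32 arg)
{
	int idx = bcm_ring_prod(WORKQ_RING(&prod_q), prod_q.ring_size);

	if (idx == BCM_RING_FULL)
		return;                              /* caller may retry later */
	WORKQ_ELEMENT(work_item_t, &prod_q, idx)->opcode = opcode;
	WORKQ_ELEMENT(work_item_t, &prod_q, idx)->arg = arg;
	bcm_workq_prod_sync(&prod_q);               /* cons::write <-- prod::write */
}

/* Consumer side: pick up pending items, then acknowledge them. */
static void my_drain_work(void)
{
	int idx;

	bcm_workq_cons_refresh(&cons_q);            /* cons::write <-- prod::write */
	while ((idx = bcm_ring_cons(WORKQ_RING(&cons_q), cons_q.ring_size)) != BCM_RING_EMPTY) {
		/* handle WORKQ_ELEMENT(work_item_t, &cons_q, idx) here */
	}
	bcm_workq_cons_sync(&cons_q);               /* prod::read <-- cons::read */
}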
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmcdc.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _bcmcdc_h_ +#define _bcmcdc_h_ +#include + +typedef struct cdc_ioctl { + uint32 cmd; /* ioctl command value */ + uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */ + uint32 flags; /* flag defns given below */ + uint32 status; /* status code returned from the device */ +} cdc_ioctl_t; + +/* Max valid buffer size that can be sent to the dongle */ +#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN + +/* len field is divided into input and output buffer lengths */ +#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */ + /* excluding IOCTL header */ +#define CDCL_IOC_OUTLEN_SHIFT 0 +#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */ +#define CDCL_IOC_INLEN_SHIFT 16 + +/* CDC flag definitions */ +#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */ +#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */ +#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */ +#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */ +#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */ +#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */ +#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */ +#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */ +#define CDCF_IOC_IF_SHIFT 12 +#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */ +#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */ + +#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT) +#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT) + +#define CDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)) +#define CDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT))) + +/* + * BDC header + * + * The BDC header is used on data packets to convey priority across USB. + */ + +struct bdc_header { + uint8 flags; /* Flags */ + uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */ + uint8 flags2; + uint8 dataOffset; /* Offset from end of BDC header to packet data, in + * 4-byte words. Leaves room for optional headers. 
+ */ +}; + +#define BDC_HEADER_LEN 4 + +/* flags field bitmap */ +#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */ +#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */ +#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */ +#define BDC_FLAG_EVENT_MSG 0x08 /* Payload contains an event msg: device->host */ +#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ +#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ + +/* priority field bitmap */ +#define BDC_PRIORITY_MASK 0x07 +#define BDC_PRIORITY_FC_MASK 0xf0 /* flow control info mask */ +#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */ + +/* flags2 field bitmap */ +#define BDC_FLAG2_IF_MASK 0x0f /* interface index (host <-> dongle) */ +#define BDC_FLAG2_IF_SHIFT 0 +#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */ + /* FLOW CONTROL info only */ + +/* version numbers */ +#define BDC_PROTO_VER_1 1 /* Old Protocol version */ +#define BDC_PROTO_VER 2 /* Protocol version */ + +/* flags2.if field access macros */ +#define BDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) +#define BDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) + +#define BDC_FLAG2_PAD_MASK 0xf0 +#define BDC_FLAG_PAD_MASK 0x03 +#define BDC_FLAG2_PAD_SHIFT 2 +#define BDC_FLAG_PAD_SHIFT 0 +#define BDC_FLAG2_PAD_IDX 0x3c +#define BDC_FLAG_PAD_IDX 0x03 +#define BDC_GET_PAD_LEN(hdr) \ + ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \ + ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT))) +#define BDC_SET_PAD_LEN(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \ + (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \ + ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \ + (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT))) + +#endif /* _bcmcdc_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h new file mode 100644 index 000000000000..a02499996f61 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h @@ -0,0 +1,382 @@ +/* + * Misc system wide definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
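As a rough illustration of how the length and flag encodings in bcmcdc.h are meant to be combined, the sketch below fills a cdc_ioctl_t header for a "set" request and stamps an interface index into a bdc_header. The helper names and argument choices are illustrative only, and uint8/uint16/uint32 are assumed to come from the driver's typedefs:

/* Illustrative only: encode a CDC "set" ioctl header for interface ifidx. */
static void my_cdc_fill_hdr(cdc_ioctl_t *msg, uint32 cmd, uint16 outlen,
	uint16 inlen, uint8 ifidx, uint16 reqid)
{
	msg->cmd = cmd;
	msg->len = ((uint32)inlen << CDCL_IOC_INLEN_SHIFT) |
	           ((uint32)outlen << CDCL_IOC_OUTLEN_SHIFT);
	msg->flags = CDCF_IOC_SET |
	             ((uint32)reqid << CDCF_IOC_ID_SHIFT) |
	             (((uint32)ifidx << CDCF_IOC_IF_SHIFT) & CDCF_IOC_IF_MASK);
	msg->status = 0;
}

/* Illustrative only: tag a data packet's BDC header with the interface index. */
static void my_bdc_fill_hdr(struct bdc_header *h, uint8 prio, uint8 ifidx)
{
	h->flags = BDC_PROTO_VER << BDC_FLAG_VER_SHIFT;
	h->priority = prio & BDC_PRIORITY_MASK;
	h->flags2 = 0;
	h->dataOffset = 0;	/* no optional headers follow the BDC header */
	BDC_SET_IF_IDX(h, ifidx);
}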
+ *
+ *
+ * <>
+ *
+ * $Id: bcmdefs.h 601026 2015-11-20 06:53:19Z $
+ */
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+/*
+ * One doesn't need to include this file explicitly, gets included automatically if
+ * typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data) ((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+	/* Make sure the expression is constant. */ \
+	typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+	/* Make sure the expression is true. */ \
+	typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
+
+/* Reclaiming text and data :
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+
+#define bcmreclaimed 0
+#define BCMATTACHDATA(_data) _data
+#define BCMATTACHFN(_fn) _fn
+#define BCMPREATTACHDATA(_data) _data
+#define BCMPREATTACHFN(_fn) _fn
+#define BCMINITDATA(_data) _data
+#define BCMINITFN(_fn) _fn
+#define BCMUNINITFN(_fn) _fn
+#define BCMNMIATTACHFN(_fn) _fn
+#define BCMNMIATTACHDATA(_data) _data
+#define CONST const
+
+#undef BCM47XX_CA9
+
+#ifndef BCMFASTPATH
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif /* BCMFASTPATH */
+
+
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
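A small illustration of the two helpers defined above: STATIC_ASSERT() breaks the build if its expression is false, and BCM_REFERENCE() silences warnings for arguments that are intentionally unused. The function and argument names are made up for the example, and uint32 is assumed to come from the driver's typedefs:

static void my_handler(void *ctx, int reserved_arg)
{
	/* Fails the build if the assumption about uint32 ever breaks. */
	STATIC_ASSERT(sizeof(uint32) == 4);

	/* Both arguments are intentionally unused in this sketch. */
	BCM_REFERENCE(ctx);
	BCM_REFERENCE(reserved_arg);
}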
+ */ + #define BCMRAMFN(_fn) _fn + +#define STATIC static + +/* Bus types */ +#define SI_BUS 0 /* SOC Interconnect */ +#define PCI_BUS 1 /* PCI target */ +#define PCMCIA_BUS 2 /* PCMCIA target */ +#define SDIO_BUS 3 /* SDIO target */ +#define JTAG_BUS 4 /* JTAG */ +#define USB_BUS 5 /* USB (does not support R/W REG) */ +#define SPI_BUS 6 /* gSPI target */ +#define RPC_BUS 7 /* RPC target */ + +/* Allows size optimization for single-bus image */ +#ifdef BCMBUSTYPE +#define BUSTYPE(bus) (BCMBUSTYPE) +#else +#define BUSTYPE(bus) (bus) +#endif + +/* Allows size optimization for single-backplane image */ +#ifdef BCMCHIPTYPE +#define CHIPTYPE(bus) (BCMCHIPTYPE) +#else +#define CHIPTYPE(bus) (bus) +#endif + + +/* Allows size optimization for SPROM support */ +#if defined(BCMSPROMBUS) +#define SPROMBUS (BCMSPROMBUS) +#elif defined(SI_PCMCIA_SROM) +#define SPROMBUS (PCMCIA_BUS) +#else +#define SPROMBUS (PCI_BUS) +#endif + +/* Allows size optimization for single-chip image */ +#ifdef BCMCHIPID +#define CHIPID(chip) (BCMCHIPID) +#else +#define CHIPID(chip) (chip) +#endif + +#ifdef BCMCHIPREV +#define CHIPREV(rev) (BCMCHIPREV) +#else +#define CHIPREV(rev) (rev) +#endif + +#ifdef BCMPCIEREV +#define PCIECOREREV(rev) (BCMPCIEREV) +#else +#define PCIECOREREV(rev) (rev) +#endif + +/* Defines for DMA Address Width - Shared between OSL and HNDDMA */ +#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */ +#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */ +#define DMADDR_MASK_26 0xFC000000 /* Address maks for 26-bits */ +#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */ + +#define DMADDRWIDTH_26 26 /* 26-bit addressing capability */ +#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */ +#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */ +#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */ +#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */ + +typedef struct { + uint32 loaddr; + uint32 hiaddr; +} dma64addr_t; + +#define PHYSADDR64HI(_pa) ((_pa).hiaddr) +#define PHYSADDR64HISET(_pa, _val) \ + do { \ + (_pa).hiaddr = (_val); \ + } while (0) +#define PHYSADDR64LO(_pa) ((_pa).loaddr) +#define PHYSADDR64LOSET(_pa, _val) \ + do { \ + (_pa).loaddr = (_val); \ + } while (0) + +#ifdef BCMDMA64OSL +typedef dma64addr_t dmaaddr_t; +#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa) +#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val) +#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa) +#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val) +#define PHYSADDRTOULONG(_pa, _ulong) \ + do { \ + _ulong = ((unsigned long)(_pa).hiaddr << 32) | ((_pa).loaddr); \ + } while (0) + +#else +typedef unsigned long dmaaddr_t; +#define PHYSADDRHI(_pa) (0) +#define PHYSADDRHISET(_pa, _val) +#define PHYSADDRLO(_pa) ((_pa)) +#define PHYSADDRLOSET(_pa, _val) \ + do { \ + (_pa) = (_val); \ + } while (0) +#endif /* BCMDMA64OSL */ +#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0) + +/* One physical DMA segment */ +typedef struct { + dmaaddr_t addr; + uint32 length; +} hnddma_seg_t; + +#define MAX_DMA_SEGS 8 + + +typedef struct { + void *oshdmah; /* Opaque handle for OSL to store its information */ + uint origsize; /* Size of the virtual packet */ + uint nsegs; + hnddma_seg_t segs[MAX_DMA_SEGS]; +} hnddma_seg_map_t; + + +/* packet headroom necessary to accommodate the largest header in the system, (i.e TXOFF). + * By doing, we avoid the need to allocate an extra buffer for the header when bridging to WL. 
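The dmaaddr_t abstraction above lets the same descriptor-programming code serve 32-bit and 64-bit DMA builds. A minimal sketch, in which my_prog_descriptor and the register pointers are illustrative names only:

/* Illustrative: split a dmaaddr_t into the low/high words of a DMA descriptor. */
static void my_prog_descriptor(volatile uint32 *lo_reg, volatile uint32 *hi_reg,
	dmaaddr_t pa)
{
	*lo_reg = (uint32)PHYSADDRLO(pa);
	*hi_reg = (uint32)PHYSADDRHI(pa);  /* constant 0 on non-BCMDMA64OSL builds */
}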
+ * There is a compile time check in wlc.c which ensure that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+/* add 40 bytes to allow for extra RPC header and info */
+#define BCMEXTRAHDROOM 260
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+#define BCMEXTRAHDROOM 204
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN 32
+#endif
+
+/* Headroom required for dongle-to-host communication. Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host. (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.).
+*/
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ * #define <NAME>_M BITFIELD_MASK(3)
+ * #define <NAME>_S 4
+ * ...
+ * regval = R_REG(osh, &regs->regfoo);
+ * field = GFIELD(regval, <NAME>);
+ * regval = SFIELD(regval, <NAME>, 1);
+ * W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+	(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+	(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+	(((val) & (~(field ## _M << field ## _S))) | \
+	((unsigned)(bits) << field ## _S))
+
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE /* if (bcmspace) code is discarded */
+#else
+#define BCMSPACE
+#define bcmspace TRUE /* if (bcmspace) code is retained */
+#endif
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#ifdef LARGE_NVRAM_MAXSZ
+#define MAXSZ_NVRAM_VARS LARGE_NVRAM_MAXSZ
+#else
+/* SROM12 changes */
+#define MAXSZ_NVRAM_VARS 6144
+#endif /* LARGE_NVRAM_MAXSZ */
+#endif /* !MAXSZ_NVRAM_VARS */
+
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
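To make the bitfield usage comment above concrete, here is an illustrative three-bit MODE field at bits 4..6; MYREG_MODE and my_set_mode are made-up names, and uint32 is assumed to come from the driver's typedefs:

/* Illustrative field definition: a 3-bit MODE field starting at bit 4. */
#define MYREG_MODE_M	BITFIELD_MASK(3)
#define MYREG_MODE_S	4

static uint32 my_set_mode(uint32 regval, uint32 mode)
{
	/* GFIELD() extracts bits 4..6; SFIELD() replaces them with 'mode'. */
	if (GFIELD(regval, MYREG_MODE) == mode)
		return regval;
	return SFIELD(regval, MYREG_MODE, mode);
}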
+ */ + + +#ifdef BCMLFRAG /* BCMLFRAG support enab macros */ + extern bool _bcmlfrag; + #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMLFRAG_ENAB() (_bcmlfrag) + #elif defined(BCMLFRAG_DISABLED) + #define BCMLFRAG_ENAB() (0) + #else + #define BCMLFRAG_ENAB() (1) + #endif +#else + #define BCMLFRAG_ENAB() (0) +#endif /* BCMLFRAG_ENAB */ +#define RXMODE1 1 /* descriptor split */ +#define RXMODE2 2 /* descriptor split + classification */ +#define RXMODE3 3 /* fifo split + classification */ +#define RXMODE4 4 /* fifo split + classification + hdr conversion */ + +#ifdef BCMSPLITRX /* BCMLFRAG support enab macros */ + extern bool _bcmsplitrx; + extern uint8 _bcmsplitrx_mode; + #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMSPLITRX_ENAB() (_bcmsplitrx) + #define BCMSPLITRX_MODE() (_bcmsplitrx_mode) + #elif defined(BCMSPLITRX_DISABLED) + #define BCMSPLITRX_ENAB() (0) + #define BCMSPLITRX_MODE() (0) + #else + #define BCMSPLITRX_ENAB() (1) + #define BCMSPLITRX_MODE() (_bcmsplitrx_mode) + #endif +#else + #define BCMSPLITRX_ENAB() (0) + #define BCMSPLITRX_MODE() (0) +#endif /* BCMSPLITRX */ + +#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */ +extern bool _pciedevenab; + #if defined(WL_ENAB_RUNTIME_CHECK) + #define BCMPCIEDEV_ENAB() (_pciedevenab) + #elif defined(BCMPCIEDEV_ENABLED) + #define BCMPCIEDEV_ENAB() 1 + #else + #define BCMPCIEDEV_ENAB() 0 + #endif +#else + #define BCMPCIEDEV_ENAB() 0 +#endif /* BCMPCIEDEV */ + +#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1)) +#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2)) +#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3)) +#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4)) + +#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4()) +#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4()) +#define HDR_CONV() (SPLIT_RXMODE4()) + +#define PKT_CLASSIFY_EN(x) ((PKT_CLASSIFY()) && (PKT_CLASSIFY_FIFO == (x))) +#ifdef BCM_SPLITBUF + extern bool _bcmsplitbuf; + #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCM_SPLITBUF_ENAB() (_bcmsplitbuf) + #elif defined(BCM_SPLITBUF_DISABLED) + #define BCM_SPLITBUF_ENAB() (0) + #else + #define BCM_SPLITBUF_ENAB() (1) + #endif +#else + #define BCM_SPLITBUF_ENAB() (0) +#endif /* BCM_SPLITBUF */ + +/* Max size for reclaimable NVRAM array */ +#ifdef DL_NVRAM +#define NVRAM_ARRAY_MAXSIZE DL_NVRAM +#else +#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS +#endif /* DL_NVRAM */ + +extern uint32 gFWID; + + +#endif /* _bcmdefs_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h new file mode 100644 index 000000000000..2b0ec490584b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h @@ -0,0 +1,803 @@ +/* + * Broadcom device-specific manifest constants. 
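The *_ENAB() macros above compile to a constant or to a runtime flag depending on the build, so feature code can be written once without #ifdefs at the call site. An illustrative guard, where my_setup_rx_fifos and the actions inside it are hypothetical:

/* Illustrative: branch on split-RX support using the enab macros above. */
static void my_setup_rx_fifos(void)
{
	if (!BCMSPLITRX_ENAB())
		return;				/* single-FIFO receive path */

	if (PKT_CLASSIFY()) {
		/* hypothetical: program the classification FIFO here */
	}
	if (RXFIFO_SPLIT()) {
		/* hypothetical: program the split receive FIFOs here */
	}
}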
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmdevs.h 582052 2015-08-26 09:30:53Z $ + */ + +#ifndef _BCMDEVS_H +#define _BCMDEVS_H + +/* PCI vendor IDs */ +#define VENDOR_EPIGRAM 0xfeda +#define VENDOR_BROADCOM 0x14e4 +#define VENDOR_3COM 0x10b7 +#define VENDOR_NETGEAR 0x1385 +#define VENDOR_DIAMOND 0x1092 +#define VENDOR_INTEL 0x8086 +#define VENDOR_DELL 0x1028 +#define VENDOR_HP 0x103c +#define VENDOR_HP_COMPAQ 0x0e11 +#define VENDOR_APPLE 0x106b +#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */ +#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */ +#define VENDOR_TI 0x104c /* Texas Instruments */ +#define VENDOR_RICOH 0x1180 /* Ricoh */ +#define VENDOR_JMICRON 0x197b + + +/* PCMCIA vendor IDs */ +#define VENDOR_BROADCOM_PCMCIA 0x02d0 + +/* SDIO vendor IDs */ +#define VENDOR_BROADCOM_SDIO 0x00BF + +/* DONGLE VID/PIDs */ +#define BCM_DNGL_VID 0x0a5c +#define BCM_DNGL_BL_PID_4328 0xbd12 +#define BCM_DNGL_BL_PID_4322 0xbd13 +#define BCM_DNGL_BL_PID_4319 0xbd16 +#define BCM_DNGL_BL_PID_43236 0xbd17 +#define BCM_DNGL_BL_PID_4332 0xbd18 +#define BCM_DNGL_BL_PID_4330 0xbd19 +#define BCM_DNGL_BL_PID_4334 0xbd1a +#define BCM_DNGL_BL_PID_43239 0xbd1b +#define BCM_DNGL_BL_PID_4324 0xbd1c +#define BCM_DNGL_BL_PID_4360 0xbd1d +#define BCM_DNGL_BL_PID_43143 0xbd1e +#define BCM_DNGL_BL_PID_43242 0xbd1f +#define BCM_DNGL_BL_PID_43342 0xbd21 +#define BCM_DNGL_BL_PID_4335 0xbd20 +#define BCM_DNGL_BL_PID_43341 0xbd22 +#define BCM_DNGL_BL_PID_4350 0xbd23 +#define BCM_DNGL_BL_PID_4345 0xbd24 +#define BCM_DNGL_BL_PID_4349 0xbd25 +#define BCM_DNGL_BL_PID_4354 0xbd26 +#define BCM_DNGL_BL_PID_43569 0xbd27 +#define BCM_DNGL_BL_PID_43909 0xbd28 + +#define BCM_DNGL_BDC_PID 0x0bdc +#define BCM_DNGL_JTAG_PID 0x4a44 + +/* HW USB BLOCK [CPULESS USB] PIDs */ +#define BCM_HWUSB_PID_43239 43239 + +/* PCI Device IDs */ +#define BCM4210_DEVICE_ID 0x1072 /* never used */ +#define BCM4230_DEVICE_ID 0x1086 /* never used */ +#define BCM4401_ENET_ID 0x170c /* 4401b0 production enet cards */ +#define BCM3352_DEVICE_ID 0x3352 /* bcm3352 device id */ +#define BCM3360_DEVICE_ID 0x3360 /* bcm3360 device id */ +#define BCM4211_DEVICE_ID 0x4211 +#define BCM4231_DEVICE_ID 0x4231 +#define BCM4303_D11B_ID 0x4303 /* 4303 802.11b */ +#define BCM4311_D11G_ID 0x4311 /* 4311 802.11b/g id */ +#define BCM4311_D11DUAL_ID 0x4312 /* 4311 802.11a/b/g id */ +#define BCM4311_D11A_ID 0x4313 /* 4311 802.11a id */ 
+#define BCM4328_D11DUAL_ID 0x4314 /* 4328/4312 802.11a/g id */ +#define BCM4328_D11G_ID 0x4315 /* 4328/4312 802.11g id */ +#define BCM4328_D11A_ID 0x4316 /* 4328/4312 802.11a id */ +#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ +#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */ +#define BCM4318_D11A_ID 0x431a /* 4318 802.11a id */ +#define BCM4325_D11DUAL_ID 0x431b /* 4325 802.11a/g id */ +#define BCM4325_D11G_ID 0x431c /* 4325 802.11g id */ +#define BCM4325_D11A_ID 0x431d /* 4325 802.11a id */ +#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */ +#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */ +#define BCM4306_UART_ID 0x4322 /* 4306 uart */ +#define BCM4306_V90_ID 0x4323 /* 4306 v90 codec */ +#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */ +#define BCM4306_D11G_ID2 0x4325 /* BCM4306_D11G_ID; INF w/loose binding war */ +#define BCM4321_D11N_ID 0x4328 /* 4321 802.11n dualband id */ +#define BCM4321_D11N2G_ID 0x4329 /* 4321 802.11n 2.4Ghz band id */ +#define BCM4321_D11N5G_ID 0x432a /* 4321 802.11n 5Ghz band id */ +#define BCM4322_D11N_ID 0x432b /* 4322 802.11n dualband device */ +#define BCM4322_D11N2G_ID 0x432c /* 4322 802.11n 2.4GHz device */ +#define BCM4322_D11N5G_ID 0x432d /* 4322 802.11n 5GHz device */ +#define BCM4329_D11N_ID 0x432e /* 4329 802.11n dualband device */ +#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */ +#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */ +#define BCM4315_D11DUAL_ID 0x4334 /* 4315 802.11a/g id */ +#define BCM4315_D11G_ID 0x4335 /* 4315 802.11g id */ +#define BCM4315_D11A_ID 0x4336 /* 4315 802.11a id */ +#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */ +#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */ +#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */ +#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */ +#define BCM43221_D11N2G_ID 0x4341 /* 43221 802.11n 2.4GHz device */ +#define BCM43222_D11N_ID 0x4350 /* 43222 802.11n dualband device */ +#define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */ +#define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */ +#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ +#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */ +#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */ +#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ +#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ +#define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */ +#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */ +#define BCM43421_D11N_ID 0xA99D /* 43421 802.11n dualband device */ +#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ +#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */ +#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */ +#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */ +#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */ +#define BCM6362_D11N_ID 0x435f /* 6362 802.11n dualband device */ +#define BCM6362_D11N2G_ID 0x433f /* 6362 802.11n 2.4Ghz band id */ +#define BCM6362_D11N5G_ID 0x434f /* 6362 802.11n 5Ghz band id */ +#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */ +#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */ +#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */ +#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */ +#define BCM43237_D11N5G_ID 0x4356 /* 43237 
802.11n 5GHz device */ +#define BCM43227_D11N2G_ID 0x4358 /* 43228 802.11n 2.4GHz device */ +#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */ +#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */ +#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */ +#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */ +#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */ +#define BCM43217_D11N2G_ID 0x43a9 /* 43217 802.11n 2.4GHz device */ +#define BCM43131_D11N2G_ID 0x43aa /* 43131 802.11n 2.4GHz device */ +#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */ +#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */ +#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */ +#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */ +#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */ +#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */ +#define BCM43342_D11N_ID 0x4383 /* 43342 802.11n dualband device */ +#define BCM43342_D11N2G_ID 0x4384 /* 43342 802.11n 2.4G device */ +#define BCM43342_D11N5G_ID 0x4385 /* 43342 802.11n 5G device */ +#define BCM43341_D11N_ID 0x4386 /* 43341 802.11n dualband device */ +#define BCM43341_D11N2G_ID 0x4387 /* 43341 802.11n 2.4G device */ +#define BCM43341_D11N5G_ID 0x4388 /* 43341 802.11n 5G device */ +#define BCM4360_D11AC_ID 0x43a0 +#define BCM4360_D11AC2G_ID 0x43a1 +#define BCM4360_D11AC5G_ID 0x43a2 +#define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */ +#define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */ +#define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */ +#define BCM4335_D11AC_ID 0x43ae +#define BCM4335_D11AC2G_ID 0x43af +#define BCM4335_D11AC5G_ID 0x43b0 +#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */ +#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */ +#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */ +#define BCM43602_D11AC_ID 0x43ba /* ac dualband PCI devid SPROM programmed */ +#define BCM43602_D11AC2G_ID 0x43bb /* 43602 802.11ac 2.4G device */ +#define BCM43602_D11AC5G_ID 0x43bc /* 43602 802.11ac 5G device */ +#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */ +#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */ +#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */ +#define BCM53573_D11AC_ID 0x43b4 /* 53573 802.11ac dualband device */ +#define BCM53573_D11AC2G_ID 0x43b5 /* 53573 802.11ac 2.4G device */ +#define BCM53573_D11AC5G_ID 0x43b6 /* 53573 802.11ac 5G device */ +#define BCM47189_D11AC_ID 0x43c6 /* 47189 802.11ac dualband device */ +#define BCM47189_D11AC2G_ID 0x43c7 /* 47189 802.11ac 2.4G device */ +#define BCM47189_D11AC5G_ID 0x43c8 /* 47189 802.11ac 5G device */ +#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */ +#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */ +#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */ +#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */ +#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */ +#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */ +#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */ +#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */ +#define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */ +#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */ +#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */ 
+#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */ + +/* PCI Subsystem ID */ +#define BCM943228HMB_SSID_VEN1 0x0607 +#define BCM94313HMGBL_SSID_VEN1 0x0608 +#define BCM94313HMG_SSID_VEN1 0x0609 +#define BCM943142HM_SSID_VEN1 0x0611 + +#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */ + +#define BCM43242_D11N_ID 0x4367 /* 43242 802.11n dualband device */ +#define BCM43242_D11N2G_ID 0x4368 /* 43242 802.11n 2.4G device */ +#define BCM43242_D11N5G_ID 0x4369 /* 43242 802.11n 5G device */ + +#define BCM4350_D11AC_ID 0x43a3 +#define BCM4350_D11AC2G_ID 0x43a4 +#define BCM4350_D11AC5G_ID 0x43a5 + +#define BCM43556_D11AC_ID 0x43b7 +#define BCM43556_D11AC2G_ID 0x43b8 +#define BCM43556_D11AC5G_ID 0x43b9 + +#define BCM43558_D11AC_ID 0x43c0 +#define BCM43558_D11AC2G_ID 0x43c1 +#define BCM43558_D11AC5G_ID 0x43c2 + +#define BCM43566_D11AC_ID 0x43d3 +#define BCM43566_D11AC2G_ID 0x43d4 +#define BCM43566_D11AC5G_ID 0x43d5 + +#define BCM43568_D11AC_ID 0x43d6 +#define BCM43568_D11AC2G_ID 0x43d7 +#define BCM43568_D11AC5G_ID 0x43d8 + +#define BCM43569_D11AC_ID 0x43d9 +#define BCM43569_D11AC2G_ID 0x43da +#define BCM43569_D11AC5G_ID 0x43db + +#define BCM43570_D11AC_ID 0x43d9 +#define BCM43570_D11AC2G_ID 0x43da +#define BCM43570_D11AC5G_ID 0x43db + +#define BCM4354_D11AC_ID 0x43df /* 4354 802.11ac dualband device */ +#define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */ +#define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */ +#define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */ + + +#define BCM4365_D11AC_ID 0x43ca +#define BCM4365_D11AC2G_ID 0x43cb +#define BCM4365_D11AC5G_ID 0x43cc + +#define BCM4366_D11AC_ID 0x43c3 +#define BCM4366_D11AC2G_ID 0x43c4 +#define BCM4366_D11AC5G_ID 0x43c5 + +#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */ +#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */ +#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */ + +#define BCM4358_D11AC_ID 0x43e9 /* 4358 802.11ac dualband device */ +#define BCM4358_D11AC2G_ID 0x43ea /* 4358 802.11ac 2.4G device */ +#define BCM4358_D11AC5G_ID 0x43eb /* 4358 802.11ac 5G device */ + +#define BCM4356_D11AC_ID 0x43ec /* 4356 802.11ac dualband device */ +#define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */ +#define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */ + +#define BCMGPRS_UART_ID 0x4333 /* Uart id used by 4306/gprs card */ +#define BCMGPRS2_UART_ID 0x4344 /* Uart id used by 4306/gprs card */ +#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */ +#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */ +#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */ +#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */ +#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */ +#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */ +#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */ +#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */ +#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */ +#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */ +#define BCM4402_ENET_ID 0x4402 /* 4402 enet */ +#define BCM4402_V90_ID 0x4403 /* 4402 v90 codec */ +#define BCM4410_DEVICE_ID 0x4410 /* bcm44xx family pci iline */ +#define BCM4412_DEVICE_ID 0x4412 /* bcm44xx family pci enet */ +#define BCM4430_DEVICE_ID 0x4430 /* bcm44xx family cardbus iline */ +#define BCM4432_DEVICE_ID 0x4432 /* bcm44xx family cardbus enet */ +#define BCM4704_ENET_ID 0x4706 /* 4704 enet (Use 47XX_ENET_ID instead!) 
*/ +#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */ +#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */ +#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */ +#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */ +#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */ +#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */ +#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */ +#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */ +#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */ +#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */ +#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */ +#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */ +#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */ +#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */ +#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */ +#define BCM4712_MIPS_ID 0x4720 /* 4712 base devid */ +#define BCM4716_DEVICE_ID 0x4722 /* 4716 base devid */ +#define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */ +#define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */ +#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */ +#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */ +#define EPI41210_DEVICE_ID 0xa0fa /* bcm4210 */ +#define EPI41230_DEVICE_ID 0xa10e /* bcm4230 */ +#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */ +#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */ +#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */ +#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */ +#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */ + +/* Chip IDs */ +#define BCM4306_CHIP_ID 0x4306 /* 4306 chipcommon chipid */ +#define BCM4311_CHIP_ID 0x4311 /* 4311 PCIe 802.11a/b/g */ +#define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */ +#define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */ +#define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */ +#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ +#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */ +#define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */ +#define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */ +#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ +#define BCM4320_CHIP_ID 0x4320 /* 4320 chipcommon chipid */ +#define BCM4321_CHIP_ID 0x4321 /* 4321 chipcommon chipid */ +#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */ +#define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */ +#define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */ +#define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */ +#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ +#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ +#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */ +#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */ +#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */ +#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */ +#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */ +#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ +#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ +#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */ +#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */ +#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */ +#define 
BCM43420_CHIP_ID 43420 /* 43222 chipcommon chipid (OTP, RBBU) */ +#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */ +#define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */ +#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */ +#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */ +#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */ +#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */ +#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ +#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */ +#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */ +#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */ +#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */ +#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */ +#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */ +#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */ +#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */ +#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */ +#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */ +#define BCM43243_CHIP_ID 43243 /* 43243 chipcommon chipid */ +#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */ +#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */ +#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */ +#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */ +#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */ +#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */ +#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */ +#define BCM43526_CHIP_ID 0xAA06 +#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */ +#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */ +#define BCM43342_CHIP_ID 43342 /* 43342 chipcommon chipid */ +#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */ +#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */ +#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */ +#define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */ +#define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */ +#define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */ +#define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */ +#define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */ +#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */ +#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */ +#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ +#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */ +#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \ + (CHIPID(chipid) == BCM4354_CHIP_ID) || \ + (CHIPID(chipid) == BCM4356_CHIP_ID) || \ + (CHIPID(chipid) == BCM43556_CHIP_ID) || \ + (CHIPID(chipid) == BCM43558_CHIP_ID) || \ + (CHIPID(chipid) == BCM43566_CHIP_ID) || \ + (CHIPID(chipid) == BCM43567_CHIP_ID) || \ + (CHIPID(chipid) == BCM43568_CHIP_ID) || \ + (CHIPID(chipid) == BCM43569_CHIP_ID) || \ + (CHIPID(chipid) == BCM43570_CHIP_ID) || \ + (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */ +#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ +#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ +#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ +#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ +#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ +#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ +#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon 
chipid */ +#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */ +#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */ +#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \ + (CHIPID(chipid) == BCM4355_CHIP_ID) || \ + (CHIPID(chipid) == BCM4359_CHIP_ID)) + +#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ + CHIPID(chipid) == BCM43454_CHIP_ID || \ + CHIPID(chipid) == BCM43455_CHIP_ID || \ + CHIPID(chipid) == BCM43457_CHIP_ID || \ + CHIPID(chipid) == BCM43458_CHIP_ID) + +#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ + case BCM43454_CHIP_ID: /* fallthrough */ \ + case BCM43455_CHIP_ID: /* fallthrough */ \ + case BCM43457_CHIP_ID: /* fallthrough */ \ + case BCM43458_CHIP_ID + +#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \ + case BCM4355_CHIP_ID: \ + case BCM4359_CHIP_ID + +#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */ +#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */ + +#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */ + +#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */ +#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */ +#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */ +#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \ + (CHIPID(chipid) == BCM43462_CHIP_ID) || \ + (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */ +#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \ + case BCM43462_CHIP_ID: /* fallthrough */ \ + case BCM43522_CHIP_ID + +#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ +#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */ +#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */ +#define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */ +#define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */ +#define BCM47094_CHIP_ID 53030 /* 47094 chipcommon chipid */ +#define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */ +#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || \ + ((chipid) == BCM53018_CHIP_ID) || \ + ((chipid) == BCM47094_CHIP_ID)) +#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */ +#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ +#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ +#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */ +#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */ +#define BCM4749_CHIP_ID 0x4749 /* 5357 chipcommon chipid (OTP, RBBU) */ +#define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */ +#define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */ +#define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */ +#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */ +#define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */ +#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */ +#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */ +#define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */ +#define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */ +#define BCM53573_CHIP(chipid) (CHIPID(chipid) == BCM53573_CHIP_ID) +#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID + +/* Package IDs */ +#define BCM4303_PKG_ID 2 /* 4303 package id */ +#define BCM4309_PKG_ID 1 /* 4309 package id */ +#define BCM4712LARGE_PKG_ID 0 /* 340pin 4712 package id */ +#define BCM4712SMALL_PKG_ID 1 /* 200pin 4712 package id */ +#define BCM4712MID_PKG_ID 2 /* 225pin 4712 package id */ +#define BCM4328USBD11G_PKG_ID 2 /* 4328 802.11g USB package id */ +#define 
BCM4328USBDUAL_PKG_ID 3 /* 4328 802.11a/g USB package id */ +#define BCM4328SDIOD11G_PKG_ID 4 /* 4328 802.11g SDIO package id */ +#define BCM4328SDIODUAL_PKG_ID 5 /* 4328 802.11a/g SDIO package id */ +#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */ +#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */ +#define BCM5354E_PKG_ID 1 /* 5354E package id */ +#define BCM4716_PKG_ID 8 /* 4716 package id */ +#define BCM4717_PKG_ID 9 /* 4717 package id */ +#define BCM4718_PKG_ID 10 /* 4718 package id */ +#define BCM5356_PKG_NONMODE 1 /* 5356 package without nmode suppport */ +#define BCM5358U_PKG_ID 8 /* 5358U package id */ +#define BCM5358_PKG_ID 9 /* 5358 package id */ +#define BCM47186_PKG_ID 10 /* 47186 package id */ +#define BCM5357_PKG_ID 11 /* 5357 package id */ +#define BCM5356U_PKG_ID 12 /* 5356U package id */ +#define BCM53572_PKG_ID 8 /* 53572 package id */ +#define BCM5357C0_PKG_ID 8 /* 5357c0 package id (the same as 53572) */ +#define BCM47188_PKG_ID 9 /* 47188 package id */ +#define BCM5358C0_PKG_ID 0xa /* 5358c0 package id */ +#define BCM5356C0_PKG_ID 0xb /* 5356c0 package id */ +#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */ +#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */ +#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */ +#define BCM47189_PKG_ID 1 /* 47189 package id */ +#define BCM53573_PKG_ID 0 /* 53573 package id */ +#define BCM4706L_PKG_ID 1 /* 4706L package id */ + +#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */ +#define HDLSIM_PKG_ID 14 /* HDL simulator package id */ +#define HWSIM_PKG_ID 15 /* Hardware simulator package id */ +#define BCM43224_FAB_CSM 0x8 /* the chip is manufactured by CSM */ +#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */ +#define BCM4336_WLBGA_PKG_ID 0x8 +#define BCM4330_WLBGA_PKG_ID 0x0 +#define BCM4314PCIE_ARM_PKG_ID (8 | 0) /* 4314 QFN PCI package id, bit 3 tie high */ +#define BCM4314SDIO_PKG_ID (8 | 1) /* 4314 QFN SDIO package id */ +#define BCM4314PCIE_PKG_ID (8 | 2) /* 4314 QFN PCI (ARM-less) package id */ +#define BCM4314SDIO_ARM_PKG_ID (8 | 3) /* 4314 QFN SDIO (ARM-less) package id */ +#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4) /* 4314 FpBGA SDIO package id */ +#define BCM4314DEV_PKG_ID (8 | 6) /* 4314 Developement package id */ + +#define BCM4707_PKG_ID 1 /* 4707 package id */ +#define BCM4708_PKG_ID 2 /* 4708 package id */ +#define BCM4709_PKG_ID 0 /* 4709 package id */ + +#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */ + +#define BCM4335_WLCSP_PKG_ID (0x0) /* WLCSP Module/Mobile SDIO/HSIC. */ +#define BCM4335_FCBGA_PKG_ID (0x1) /* FCBGA PC/Embeded/Media PCIE/SDIO */ +#define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */ +#define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug Debug/Dev All if's. */ +#define BCM4335_PKG_MASK (0x3) +#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. 
router designs */ + +/* boardflags */ +#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ +#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */ +#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio 13 radio disable indication, UNUSED */ +#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */ +#define BFL_DIS_256QAM 0x00000008 +#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */ +#define BFL_TSSIAVG 0x00000010 /* TSSI averaging for ACPHY chips */ +#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */ +#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */ +#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */ +#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */ +#define BFL_LTECOEX 0x00000200 /* LTE Coex enabled */ +#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ +#define BFL_FEM 0x00000800 /* Board supports the Front End Module */ +#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_HGPA 0x00002000 /* Board has a high gain PA */ +#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */ +#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */ +#define BFL_NOPA 0x00010000 /* Board has no PA */ +#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope(not TSSI) */ +#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */ +#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */ +#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */ +#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */ +#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */ +#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */ +#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */ +#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */ +#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */ +#define BFL_FASTPWR 0x08000000 +#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */ +#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */ +#define BFL_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */ +#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field + * when this flag is set + */ +#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */ + +/* boardflags2 */ +#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */ +#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */ +#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */ +#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */ +#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */ +#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */ +#define BFL2_BTC3WIRE 0x00000080 /* Board support legacy 3 wire or 4 wire */ +#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace + * BFL2_BTC3WIRE 
+ */ +#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */ +#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */ +#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */ +#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */ +#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */ +#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */ +#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */ +#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000 /* Reducing DAC Spurs */ +#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */ +#define BFL2_REDUCED_PA_TURNONTIME 0x00010000 /* Flag to reduce PA turn on Time */ +#define BFL2_IPALVLSHIFT_3P3 0x00020000 +#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */ +#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */ + /* Most drivers will turn it off without this flag */ + /* to save power. */ + +#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */ +#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */ +#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain higher value + * than programmed. The exact delta is decided by + * driver per chip/boardtype. This can be used + * when tempsense qualification happens after shipment + */ +#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 
4 wire not supported */ +#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */ +#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */ + /* ucode control of eLNA during Tx */ +#define BFL2_4313_RADIOREG 0x10000000 + /* board rework */ +#define BFL2_DYNAMIC_VMID 0x10000000 /* enable dynamic Vmid in idle TSSI CAL for 4331 */ + +#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */ +#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */ +#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */ +#define BFL2_LNA1BYPFORTR5G 0x80000000 /* acphy, enable lna1 bypass for clip gain, 5g */ + +/* SROM 11 - 11ac boardflag definitions */ +#define BFL_SROM11_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002 /* bluetooth and wlan share same crystal */ +#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */ +#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15 +#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */ + /* per subband, as derived from 43602A1 MCH5 */ +#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. 
PA's on in TX IQLO CAL */ + +/* boardflags3 */ +#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */ +#define BFL3_RCAL_WAR 0x00000008 /* acphy, rcal war active on this board (4335a0) */ +#define BFL3_TXGAINTBLID 0x00000070 /* acphy, txgain table id */ +#define BFL3_TXGAINTBLID_SHIFT 0x4 /* acphy, txgain table id shift bit */ +#define BFL3_TSSI_DIV_WAR 0x00000080 /* acphy, Separate paparam for 20/40/80 */ +#define BFL3_TSSI_DIV_WAR_SHIFT 0x7 /* acphy, Separate paparam for 20/40/80 shift bit */ +#define BFL3_FEMTBL_FROM_NVRAM 0x00000100 /* acphy, femctrl table is read from nvram */ +#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8 /* acphy, femctrl table is read from nvram */ +#define BFL3_AGC_CFG_2G 0x00000200 /* acphy, gain control configuration for 2G */ +#define BFL3_AGC_CFG_5G 0x00000400 /* acphy, gain control configuration for 5G */ +#define BFL3_PPR_BIT_EXT 0x00000800 /* acphy, bit position for 1bit extension for ppr */ +#define BFL3_PPR_BIT_EXT_SHIFT 11 /* acphy, bit shift for 1bit extension for ppr */ +#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000 /* acphy, disables bbpll spur modes */ +#define BFL3_RCAL_OTP_VAL_EN 0x00002000 /* acphy, to read rcal_trim value from otp */ +#define BFL3_2GTXGAINTBL_BLANK 0x00004000 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK 0x00008000 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000 /* acphy, to max out alpha,beta to 511 */ +#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16 /* acphy, to max out alpha,beta to 511 */ +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN 0x00060000 +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17 +#define BFL3_5G_SPUR_WAR 0x00080000 /* acphy, enable spur WAR in 5G band */ +#define BFL3_1X1_RSDB_ANT 0x01000000 /* to find if 2-ant RSDB board or 1-ant RSDB board */ +#define BFL3_1X1_RSDB_ANT_SHIFT 24 + +/* acphy: lpmode2g and lpmode_5g related boardflags */ +#define BFL3_ACPHY_LPMODE_2G 0x00300000 /* bits 20:21 for lpmode_2g choice */ +#define BFL3_ACPHY_LPMODE_2G_SHIFT 20 + +#define BFL3_ACPHY_LPMODE_5G 0x00C00000 /* bits 22:23 for lpmode_5g choice */ +#define BFL3_ACPHY_LPMODE_5G_SHIFT 22 + +#define BFL3_EXT_LPO_ISCLOCK 0x02000000 /* External LPO is clock, not x-tal */ +#define BFL3_FORCE_INT_LPO_SEL 0x04000000 /* Force internal lpo */ +#define BFL3_FORCE_EXT_LPO_SEL 0x08000000 /* Force external lpo */ + +#define BFL3_EN_BRCM_IMPBF 0x10000000 /* acphy, Allow BRCM Implicit TxBF */ +#define BFL3_AVVMID_FROM_NVRAM 0x40000000 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM 0x80000000 /* Read Vlin En from NVRAM */ + +#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */ + +/* boardflags4 for SROM12 */ +#define BFL4_SROM12_4dBPAD (1 << 0) /* To distinguish between normal and 4dB pad board */ + + +/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ +#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */ +#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */ +#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */ +#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5
is the alternate BT Coexistence Out */ +#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */ +#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */ +#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */ +#define BOARD_GPIO_12 0x1000 /* gpio 12 */ +#define BOARD_GPIO_13 0x2000 /* gpio 13 */ +#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */ +#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */ +#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */ +#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */ +#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */ +#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */ +#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */ +#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */ +#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */ + +#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0 /* bit 5 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_43225 0x0e0 /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */ +#define GPIO_BTC4W_OUT_43421 0x020 /* bit 5 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_4313 0x060 /* bit 5 SW_BT, bit 6 SW_WL */ +#define GPIO_BTC4W_OUT_4331_SHARED 0x010 /* GPIO 4 */ + +#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */ +#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */ + +/* power control defines */ +#define PLL_DELAY 150 /* us pll on delay */ +#define FREF_DELAY 200 /* us fref change delay */ +#define MIN_SLOW_CLK 32 /* us Slow clock period */ +#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */ + + +/* 43341 Boards */ +#define BCM943341WLABGS_SSID 0x062d + +/* 43342 Boards */ +#define BCM943342FCAGBI_SSID 0x0641 + +/* 43602 Boards, unclear yet what boards will be created. */ +#define BCM943602RSVD1_SSID 0x06a5 +#define BCM943602RSVD2_SSID 0x06a6 +#define BCM943602X87 0X0133 +#define BCM943602X87P2 0X0143 +#define BCM943602X238 0X0132 +#define BCM943602X238D 0X014A + +/* # of GPIO pins */ +#define GPIO_NUMPINS 32 + +/* These values are used by dhd host driver. */ +#define RDL_RAM_BASE_4319 0x60000000 +#define RDL_RAM_BASE_4329 0x60000000 +#define RDL_RAM_SIZE_4319 0x48000 +#define RDL_RAM_SIZE_4329 0x48000 +#define RDL_RAM_SIZE_43236 0x70000 +#define RDL_RAM_BASE_43236 0x60000000 +#define RDL_RAM_SIZE_4328 0x60000 +#define RDL_RAM_BASE_4328 0x80000000 +#define RDL_RAM_SIZE_4322 0x60000 +#define RDL_RAM_BASE_4322 0x60000000 +#define RDL_RAM_SIZE_4360 0xA0000 +#define RDL_RAM_BASE_4360 0x60000000 +#define RDL_RAM_SIZE_43242 0x90000 +#define RDL_RAM_BASE_43242 0x60000000 +#define RDL_RAM_SIZE_43143 0x70000 +#define RDL_RAM_BASE_43143 0x60000000 +#define RDL_RAM_SIZE_4350 0xC0000 +#define RDL_RAM_BASE_4350 0x180800 + +/* generic defs for nvram "muxenab" bits +* Note: these differ for 4335a0. refer bcmchipc.h for specific mux options. 
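+* Example (hypothetical nvram contents): a line such as "muxenab=0x13" would
+* request MUXENAB_UART | MUXENAB_GPIO | MUXENAB_HOST_WAKE
+* (0x00000001 | 0x00000002 | 0x00000010 = 0x13).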
+*/ +#define MUXENAB_UART 0x00000001 +#define MUXENAB_GPIO 0x00000002 +#define MUXENAB_ERCX 0x00000004 /* External Radio BT coex */ +#define MUXENAB_JTAG 0x00000008 +#define MUXENAB_HOST_WAKE 0x00000010 /* configure GPIO for SDIO host_wake */ +#define MUXENAB_I2S_EN 0x00000020 +#define MUXENAB_I2S_MASTER 0x00000040 +#define MUXENAB_I2S_FULL 0x00000080 +#define MUXENAB_SFLASH 0x00000100 +#define MUXENAB_RFSWCTRL0 0x00000200 +#define MUXENAB_RFSWCTRL1 0x00000400 +#define MUXENAB_RFSWCTRL2 0x00000800 +#define MUXENAB_SECI 0x00001000 +#define MUXENAB_BT_LEGACY 0x00002000 +#define MUXENAB_HOST_WAKE1 0x00004000 /* configure alternative GPIO for SDIO host_wake */ + +/* Boot flags */ +#define FLASH_KERNEL_NFLASH 0x00000001 +#define FLASH_BOOT_NFLASH 0x00000002 + +#endif /* _BCMDEVS_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h new file mode 100644 index 000000000000..27f237947324 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h @@ -0,0 +1,332 @@ +/* + * Byte order utilities + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmendian.h 514727 2014-11-12 03:02:48Z $ + * + * This file by default provides proper behavior on little-endian architectures. + * On big-endian architectures, IL_BIGENDIAN should be defined. 
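+ *
+ * As an illustration of the default (little-endian) behavior of the macros
+ * below: hton16(0x1234) evaluates to 0x3412, while ltoh16(0x1234) and
+ * htol16(0x1234) are identity mappings.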
+ */ + +#ifndef _BCMENDIAN_H_ +#define _BCMENDIAN_H_ + +#include + +/* Reverse the bytes in a 16-bit value */ +#define BCMSWAP16(val) \ + ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ + (((uint16)(val) & (uint16)0xff00U) >> 8))) + +/* Reverse the bytes in a 32-bit value */ +#define BCMSWAP32(val) \ + ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ + (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ + (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ + (((uint32)(val) & (uint32)0xff000000U) >> 24))) + +/* Reverse the two 16-bit halves of a 32-bit value */ +#define BCMSWAP32BY16(val) \ + ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ + (((uint32)(val) & (uint32)0xffff0000U) >> 16))) + +/* Reverse the bytes in a 64-bit value */ +#define BCMSWAP64(val) \ + ((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \ + (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \ + (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \ + (((uint64)(val) & 0x00000000ff000000ULL) << 8) | \ + (((uint64)(val) & 0x000000ff00000000ULL) >> 8) | \ + (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \ + (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \ + (((uint64)(val) & 0xff00000000000000ULL) >> 56))) + +/* Reverse the two 32-bit halves of a 64-bit value */ +#define BCMSWAP64BY32(val) \ + ((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \ + (((uint64)(val) & 0xffffffff00000000ULL) >> 32))) + + +/* Byte swapping macros + * Host <=> Network (Big Endian) for 16- and 32-bit values + * Host <=> Little-Endian for 16- and 32-bit values + */ +#ifndef hton16 +#define HTON16(i) BCMSWAP16(i) +#define hton16(i) bcmswap16(i) +#define HTON32(i) BCMSWAP32(i) +#define hton32(i) bcmswap32(i) +#define NTOH16(i) BCMSWAP16(i) +#define ntoh16(i) bcmswap16(i) +#define NTOH32(i) BCMSWAP32(i) +#define ntoh32(i) bcmswap32(i) +#define LTOH16(i) (i) +#define ltoh16(i) (i) +#define LTOH32(i) (i) +#define ltoh32(i) (i) +#define HTOL16(i) (i) +#define htol16(i) (i) +#define HTOL32(i) (i) +#define htol32(i) (i) +#define HTOL64(i) (i) +#define htol64(i) (i) +#endif /* hton16 */ + +#define ltoh16_buf(buf, i) +#define htol16_buf(buf, i) + +/* Unaligned loads and stores in host byte order */ +#define load32_ua(a) ltoh32_ua(a) +#define store32_ua(a, v) htol32_ua_store(v, a) +#define load16_ua(a) ltoh16_ua(a) +#define store16_ua(a, v) htol16_ua_store(v, a) + +#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) +#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) +#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) +#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) + +#define ltoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#define ntoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#ifdef __GNUC__ + +/* GNU macro versions avoid referencing the argument multiple times, while also + * avoiding the -fno-inline used in ROM builds. 
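+ * For illustration, bcmswap16(*p++) below advances p exactly once, whereas the
+ * plain BCMSWAP16(*p++) macro above would expand (and evaluate) its argument
+ * more than once.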
+ */ + +#define bcmswap16(val) ({ \ + uint16 _val = (val); \ + BCMSWAP16(_val); \ +}) + +#define bcmswap32(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32(_val); \ +}) + +#define bcmswap64(val) ({ \ + uint64 _val = (val); \ + BCMSWAP64(_val); \ +}) + +#define bcmswap32by16(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32BY16(_val); \ +}) + +#define bcmswap16_buf(buf, len) ({ \ + uint16 *_buf = (uint16 *)(buf); \ + uint _wds = (len) / 2; \ + while (_wds--) { \ + *_buf = bcmswap16(*_buf); \ + _buf++; \ + } \ +}) + +#define htol16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = _val >> 8; \ +}) + +#define htol32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = (_val >> 8) & 0xff; \ + _bytes[2] = (_val >> 16) & 0xff; \ + _bytes[3] = _val >> 24; \ +}) + +#define hton16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 8; \ + _bytes[1] = _val & 0xff; \ +}) + +#define hton32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 24; \ + _bytes[1] = (_val >> 16) & 0xff; \ + _bytes[2] = (_val >> 8) & 0xff; \ + _bytes[3] = _val & 0xff; \ +}) + +#define ltoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH16_UA(_bytes); \ +}) + +#define ltoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH32_UA(_bytes); \ +}) + +#define ntoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH16_UA(_bytes); \ +}) + +#define ntoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH32_UA(_bytes); \ +}) + +#else /* !__GNUC__ */ + +/* Inline versions avoid referencing the argument multiple times */ +static INLINE uint16 +bcmswap16(uint16 val) +{ + return BCMSWAP16(val); +} + +static INLINE uint32 +bcmswap32(uint32 val) +{ + return BCMSWAP32(val); +} + +static INLINE uint64 +bcmswap64(uint64 val) +{ + return BCMSWAP64(val); +} + +static INLINE uint32 +bcmswap32by16(uint32 val) +{ + return BCMSWAP32BY16(val); +} + +/* Reverse pairs of bytes in a buffer (not for high-performance use) */ +/* buf - start of buffer of shorts to swap */ +/* len - byte length of buffer */ +static INLINE void +bcmswap16_buf(uint16 *buf, uint len) +{ + len = len / 2; + + while (len--) { + *buf = bcmswap16(*buf); + buf++; + } +} + +/* + * Store 16-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = val >> 8; +} + +/* + * Store 32-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = (val >> 8) & 0xff; + bytes[2] = (val >> 16) & 0xff; + bytes[3] = val >> 24; +} + +/* + * Store 16-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val >> 8; + bytes[1] = val & 0xff; +} + +/* + * Store 32-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val >> 24; + bytes[1] = (val >> 16) & 0xff; + bytes[2] = (val >> 8) & 0xff; + bytes[3] = val & 0xff; +} + +/* + * Load 16-bit value from unaligned little-endian byte array. 
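+ * For example, given uint8 b[2] = {0x34, 0x12}, ltoh16_ua(b) returns 0x1234;
+ * the inverse htol16_ua_store(0x1234, b) above produces that same byte layout.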
+ */ +static INLINE uint16 +ltoh16_ua(const void *bytes) +{ + return _LTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned little-endian byte array. + */ +static INLINE uint32 +ltoh32_ua(const void *bytes) +{ + return _LTOH32_UA((const uint8 *)bytes); +} + +/* + * Load 16-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint16 +ntoh16_ua(const void *bytes) +{ + return _NTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint32 +ntoh32_ua(const void *bytes) +{ + return _NTOH32_UA((const uint8 *)bytes); +} + +#endif /* !__GNUC__ */ +#endif /* !_BCMENDIAN_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h new file mode 100644 index 000000000000..ab1375ea854d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h @@ -0,0 +1,863 @@ +/* + * MSGBUF network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmmsgbuf.h 541060 2015-03-13 23:28:01Z $ + */ +#ifndef _bcmmsgbuf_h_ +#define _bcmmsgbuf_h_ + +#include +#include +#include + +#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN + +#define D2H_EPOCH_MODULO 253 /* sequence number wrap */ +#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1) + +#define H2D_EPOCH_MODULO 253 /* sequence number wrap */ +#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1) + +#define H2DRING_TXPOST_ITEMSIZE 48 +#define H2DRING_RXPOST_ITEMSIZE 32 +#define H2DRING_CTRL_SUB_ITEMSIZE 40 +#define D2HRING_TXCMPLT_ITEMSIZE 16 +#define D2HRING_RXCMPLT_ITEMSIZE 32 +#define D2HRING_CTRL_CMPLT_ITEMSIZE 24 + +#define H2DRING_TXPOST_MAX_ITEM 512 +#define H2DRING_RXPOST_MAX_ITEM 512 +#define H2DRING_CTRL_SUB_MAX_ITEM 64 +#define D2HRING_TXCMPLT_MAX_ITEM 1024 +#define D2HRING_RXCMPLT_MAX_ITEM 512 + +#define D2HRING_CTRL_CMPLT_MAX_ITEM 64 + +enum { + DNGL_TO_HOST_MSGBUF, + HOST_TO_DNGL_MSGBUF +}; + +enum { + HOST_TO_DNGL_TXP_DATA, + HOST_TO_DNGL_RXP_DATA, + HOST_TO_DNGL_CTRL, + DNGL_TO_HOST_DATA, + DNGL_TO_HOST_CTRL +}; + +#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? 
TRUE : FALSE + +#ifdef PCIE_API_REV1 + +#define BCMMSGBUF_DUMMY_REF(a, b) do {BCM_REFERENCE((a));BCM_REFERENCE((b));} while (0) + +#define BCMMSGBUF_API_IFIDX(a) 0 +#define BCMMSGBUF_API_SEQNUM(a) 0 +#define BCMMSGBUF_IOCTL_XTID(a) 0 +#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->cmd_id) + +#define BCMMSGBUF_SET_API_IFIDX(a, b) BCMMSGBUF_DUMMY_REF(a, b) +#define BCMMSGBUF_SET_API_SEQNUM(a, b) BCMMSGBUF_DUMMY_REF(a, b) +#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID(a) = (b)) +#define BCMMSGBUF_IOCTL_SET_XTID(a, b) BCMMSGBUF_DUMMY_REF(a, b) + +#else /* PCIE_API_REV1 */ + +#define BCMMSGBUF_API_IFIDX(a) ((a)->if_id) +#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->pkt_id) +#define BCMMSGBUF_API_SEQNUM(a) ((a)->u.seq.seq_no) +#define BCMMSGBUF_IOCTL_XTID(a) ((a)->xt_id) + +#define BCMMSGBUF_SET_API_IFIDX(a, b) (BCMMSGBUF_API_IFIDX((a)) = (b)) +#define BCMMSGBUF_SET_API_SEQNUM(a, b) (BCMMSGBUF_API_SEQNUM((a)) = (b)) +#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID((a)) = (b)) +#define BCMMSGBUF_IOCTL_SET_XTID(a, b) (BCMMSGBUF_IOCTL_XTID((a)) = (b)) + +#endif /* PCIE_API_REV1 */ + +/* utility data structures */ + +union addr64 { + struct { + uint32 low; + uint32 high; + }; + struct { + uint32 low_addr; + uint32 high_addr; + }; + uint64 u64; +} DECLSPEC_ALIGN(8); + +typedef union addr64 bcm_addr64_t; + +/* IOCTL req Hdr */ +/* cmn Msg Hdr */ +typedef struct cmn_msg_hdr { + /** message type */ + uint8 msg_type; + /** interface index this is valid for */ + uint8 if_id; + /* flags */ + uint8 flags; + /** sequence number */ + uint8 epoch; + /** packet Identifier for the associated host buffer */ + uint32 request_id; +} cmn_msg_hdr_t; + +/** message type */ +typedef enum bcmpcie_msgtype { + MSG_TYPE_GEN_STATUS = 0x1, + MSG_TYPE_RING_STATUS = 0x2, + MSG_TYPE_FLOW_RING_CREATE = 0x3, + MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4, + MSG_TYPE_FLOW_RING_DELETE = 0x5, + MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6, + MSG_TYPE_FLOW_RING_FLUSH = 0x7, + MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8, + MSG_TYPE_IOCTLPTR_REQ = 0x9, + MSG_TYPE_IOCTLPTR_REQ_ACK = 0xA, + MSG_TYPE_IOCTLRESP_BUF_POST = 0xB, + MSG_TYPE_IOCTL_CMPLT = 0xC, + MSG_TYPE_EVENT_BUF_POST = 0xD, + MSG_TYPE_WL_EVENT = 0xE, + MSG_TYPE_TX_POST = 0xF, + MSG_TYPE_TX_STATUS = 0x10, + MSG_TYPE_RXBUF_POST = 0x11, + MSG_TYPE_RX_CMPLT = 0x12, + MSG_TYPE_LPBK_DMAXFER = 0x13, + MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14, + MSG_TYPE_FLOW_RING_RESUME = 0x15, + MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16, + MSG_TYPE_FLOW_RING_SUSPEND = 0x17, + MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18, + MSG_TYPE_INFO_BUF_POST = 0x19, + MSG_TYPE_INFO_BUF_CMPLT = 0x1A, + MSG_TYPE_H2D_RING_CREATE = 0x1B, + MSG_TYPE_D2H_RING_CREATE = 0x1C, + MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D, + MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E, + MSG_TYPE_H2D_RING_CONFIG = 0x1F, + MSG_TYPE_D2H_RING_CONFIG = 0x20, + MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21, + MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22, + MSG_TYPE_H2D_MAILBOX_DATA = 0x23, + MSG_TYPE_D2H_MAILBOX_DATA = 0x24, + + MSG_TYPE_API_MAX_RSVD = 0x3F +} bcmpcie_msg_type_t; + +typedef enum bcmpcie_msgtype_int { + MSG_TYPE_INTERNAL_USE_START = 0x40, + MSG_TYPE_EVENT_PYLD = 0x41, + MSG_TYPE_IOCT_PYLD = 0x42, + MSG_TYPE_RX_PYLD = 0x43, + MSG_TYPE_HOST_FETCH = 0x44, + MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, + MSG_TYPE_TXMETADATA_PYLD = 0x46, + MSG_TYPE_INDX_UPDATE = 0x47 +} bcmpcie_msgtype_int_t; + +typedef enum bcmpcie_msgtype_u { + MSG_TYPE_TX_BATCH_POST = 0x80, + MSG_TYPE_IOCTL_REQ = 0x81, + MSG_TYPE_HOST_EVNT = 0x82, /* console related */ + MSG_TYPE_LOOPBACK = 0x83 +} 
bcmpcie_msgtype_u_t; + +/** + * D2H ring host wakeup soft doorbell, override the PCIE doorbell. + * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE + * Transl0 to write specified value to host address. + * + * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register + * and value is Core/Thread context. Host will ensure routing the 32bit address + * offerred to PCIE to the mapped register. + * + * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL + */ +typedef struct bcmpcie_soft_doorbell { + uint32 value; /* host defined value to be written, eg HW threadid */ + bcm_addr64_t haddr; /* host address, eg thread wakeup register address */ + uint16 items; /* interrupt coalescing: item count before wakeup */ + uint16 msecs; /* interrupt coalescing: timeout in millisecs */ +} bcmpcie_soft_doorbell_t; + + +/* if_id */ +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX 0x7 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT 0 +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX 0x1F +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) + +/* flags */ +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1 +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2 +#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80 + + +/* IOCTL request message */ +typedef struct ioctl_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** ioctl command type */ + uint32 cmd; + /** ioctl transaction ID, to pair with a ioctl response */ + uint16 trans_id; + /** input arguments buffer len */ + uint16 input_buf_len; + /** expected output len */ + uint16 output_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 rsvd[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + /* rsvd */ + uint32 rsvd1[2]; +} ioctl_req_msg_t; + +/** buffer post messages for device to use to return IOCTL responses, Events */ +typedef struct ioctl_resp_evt_buf_post_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** length of the host buffer supplied */ + uint16 host_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 reserved[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + uint32 rsvd[4]; +} ioctl_resp_evt_buf_post_msg_t; + + +typedef struct pcie_dma_xfer_params { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + + /** always align on 8 byte boundary */ + bcm_addr64_t host_ouput_buf_addr; + + /** length of transfer */ + uint32 xfer_len; + /** delay before doing the src txfer */ + uint32 srcdelay; + /** delay before doing the dest txfer */ + uint32 destdelay; + uint32 rsvd; +} pcie_dma_xfer_params_t; + +/** Complete msgbuf hdr for flow ring update from host to dongle */ +typedef struct tx_flowring_create_request { + cmn_msg_hdr_t msg; + uint8 da[ETHER_ADDR_LEN]; + uint8 sa[ETHER_ADDR_LEN]; + uint8 tid; + uint8 if_flags; + uint16 flow_ring_id; + uint8 tc; + uint8 priority; + uint16 int_vector; + uint16 max_items; + uint16 len_item; + bcm_addr64_t flow_ring_ptr; +} tx_flowring_create_request_t; + +typedef struct tx_flowring_delete_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_flowring_delete_request_t; + +typedef struct tx_flowring_flush_request { + cmn_msg_hdr_t msg; + uint16 
flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_flowring_flush_request_t; + +/** Subtypes for ring_config_req control message */ +typedef enum ring_config_subtype { + /** Default D2H PCIE doorbell override using ring_config_req msg */ + D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */ + D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */ +} ring_config_subtype_t; + +typedef struct ring_config_req { + cmn_msg_hdr_t msg; + uint16 subtype; + uint16 ring_id; + uint32 rsvd; + union { + uint32 data[6]; + /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */ + bcmpcie_soft_doorbell_t soft_doorbell; + }; +} ring_config_req_t; + +typedef union ctrl_submit_item { + ioctl_req_msg_t ioctl_req; + ioctl_resp_evt_buf_post_msg_t resp_buf_post; + pcie_dma_xfer_params_t dma_xfer; + tx_flowring_create_request_t flow_create; + tx_flowring_delete_request_t flow_delete; + tx_flowring_flush_request_t flow_flush; + ring_config_req_t ring_config_req; + unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE]; +} ctrl_submit_item_t; + +/** Control Completion messages (20 bytes) */ +typedef struct compl_msg_hdr { + /** status for the completion */ + int16 status; + /** submisison flow ring id which generated this status */ + uint16 flow_ring_id; +} compl_msg_hdr_t; + +/** XOR checksum or a magic number to audit DMA done */ +typedef uint32 dma_done_t; + +/* completion header status codes */ +#define BCMPCIE_SUCCESS 0 +#define BCMPCIE_NOTFOUND 1 +#define BCMPCIE_NOMEM 2 +#define BCMPCIE_BADOPTION 3 +#define BCMPCIE_RING_IN_USE 4 +#define BCMPCIE_RING_ID_INVALID 5 +#define BCMPCIE_PKT_FLUSH 6 +#define BCMPCIE_NO_EVENT_BUF 7 +#define BCMPCIE_NO_RX_BUF 8 +#define BCMPCIE_NO_IOCTLRESP_BUF 9 +#define BCMPCIE_MAX_IOCTLRESP_BUF 10 +#define BCMPCIE_MAX_EVENT_BUF 11 + +/** IOCTL completion response */ +typedef struct ioctl_compl_resp_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** response buffer len where a host buffer is involved */ + uint16 resp_len; + /** transaction id to pair with a request */ + uint16 trans_id; + /** cmd id */ + uint32 cmd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_comp_resp_msg_t; + +/** IOCTL request acknowledgement */ +typedef struct ioctl_req_ack_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** cmd id */ + uint32 cmd; + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_req_ack_msg_t; + +/** WL event message: send from device to host */ +typedef struct wlevent_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** event data len valid with the event buffer */ + uint16 event_data_len; + /** sequence number */ + uint16 seqnum; + /** rsvd */ + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} wlevent_req_msg_t; + +/** dma xfer complete message */ +typedef struct pcie_dmaxfer_cmplt { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_dmaxfer_cmplt_t; + +/** general status message */ +typedef struct pcie_gen_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t 
compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_gen_status_t; + +/** ring status message */ +typedef struct pcie_ring_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** message which firmware couldn't decode */ + uint16 write_idx; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_ring_status_t; + +typedef struct tx_flowring_create_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_create_response_t; + +typedef struct tx_flowring_delete_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_delete_response_t; + +typedef struct tx_flowring_flush_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_flush_response_t; + +/** Common layout of all d2h control messages */ +typedef struct ctrl_compl_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ctrl_compl_msg_t; + +typedef struct ring_config_resp { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_config_resp_t; + +typedef union ctrl_completion_item { + ioctl_comp_resp_msg_t ioctl_resp; + wlevent_req_msg_t event; + ioctl_req_ack_msg_t ioct_ack; + pcie_dmaxfer_cmplt_t pcie_xfer_cmplt; + pcie_gen_status_t pcie_gen_status; + pcie_ring_status_t pcie_ring_status; + tx_flowring_create_response_t txfl_create_resp; + tx_flowring_delete_response_t txfl_delete_resp; + tx_flowring_flush_response_t txfl_flush_resp; + ctrl_compl_msg_t ctrl_compl; + ring_config_resp_t ring_config_resp; + unsigned char check[D2HRING_CTRL_CMPLT_ITEMSIZE]; +} ctrl_completion_item_t; + +/** H2D Rxpost ring work items */ +typedef struct host_rxbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len to receive data */ + uint16 data_buf_len; + /** alignment to make the host buffers start on 8 byte boundary */ + uint32 rsvd; + /** provided meta data buffer */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; +} host_rxbuf_post_t; + +typedef union rxbuf_submit_item { + host_rxbuf_post_t rxpost; + unsigned char check[H2DRING_RXPOST_ITEMSIZE]; +} rxbuf_submit_item_t; + + +/** D2H Rxcompletion ring work items */ +typedef struct host_rxbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** filled up meta data len */ + uint16 metadata_len; + /** filled up buffer len to receive data */ + uint16 data_len; + /** offset in the host rx buffer where the data starts */ + uint16 data_offset; + /** offset in the host rx buffer where the data starts */ + uint16 flags; + /** rx status */ + uint32 rx_status_0; + uint32 rx_status_1; + /** XOR checksum or a magic 
number to audit DMA done */ + dma_done_t marker; +} host_rxbuf_cmpl_t; + +typedef union rxbuf_complete_item { + host_rxbuf_cmpl_t rxcmpl; + unsigned char check[D2HRING_RXCMPLT_ITEMSIZE]; +} rxbuf_complete_item_t; + + +typedef struct host_txbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** eth header */ + uint8 txhdr[ETHER_HDR_LEN]; + /** flags */ + uint8 flags; + /** number of segments */ + uint8 seg_cnt; + + /** provided meta data buffer for txstatus */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len to receive data */ + uint16 data_len; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} host_txbuf_post_t; + +#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01 +#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02 + +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */ +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */ + + +#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 +#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) + +/* These are added to fix up compile issues */ +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3 +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11 +#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT +#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK + +/** H2D Txpost ring work items */ +typedef union txbuf_submit_item { + host_txbuf_post_t txpost; + unsigned char check[H2DRING_TXPOST_ITEMSIZE]; +} txbuf_submit_item_t; + +/** D2H Txcompletion ring work items */ +typedef struct host_txbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + union { + struct { + /** provided meta data len */ + uint16 metadata_len; + /** WLAN side txstatus */ + uint16 tx_status; + }; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; + }; +} host_txbuf_cmpl_t; + +typedef union txbuf_complete_item { + host_txbuf_cmpl_t txcmpl; + unsigned char check[D2HRING_TXCMPLT_ITEMSIZE]; +} txbuf_complete_item_t; + +#define BCMPCIE_D2H_METADATA_HDRLEN 4 +#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4) + +/** ret buf struct */ +typedef struct ret_buf_ptr { + uint32 low_addr; + uint32 high_addr; +} ret_buf_t; + + +#ifdef PCIE_API_REV1 + +/* ioctl specific hdr */ +typedef struct ioctl_hdr { + uint16 cmd; + uint16 retbuf_len; + uint32 cmd_id; +} ioctl_hdr_t; + +typedef struct ioctlptr_hdr { + uint16 cmd; + uint16 retbuf_len; + uint16 buflen; + uint16 rsvd; + uint32 cmd_id; +} ioctlptr_hdr_t; + +#else /* PCIE_API_REV1 */ + +typedef struct ioctl_req_hdr { + uint32 pkt_id; /**< Packet ID */ + uint32 cmd; /**< IOCTL ID */ + uint16 retbuf_len; + uint16 buflen; + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +} ioctl_req_hdr_t; + +#endif /* PCIE_API_REV1 */ + + +/** Complete msgbuf hdr for ioctl from host to dongle */ +typedef struct ioct_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctl_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif + ret_buf_t ret_buf; +} ioct_reqst_hdr_t; + +typedef struct ioctptr_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctlptr_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif + ret_buf_t ret_buf; + ret_buf_t ioct_buf; +} ioctptr_reqst_hdr_t; + +/** ioctl response 
header */ +typedef struct ioct_resp_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + uint32 cmd_id; +#else + uint32 pkt_id; +#endif + uint32 status; + uint32 ret_len; + uint32 inline_data; +#ifdef PCIE_API_REV1 +#else + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +#endif +} ioct_resp_hdr_t; + +/* ioct resp header used in dongle */ +/* ret buf hdr will be stripped off inside dongle itself */ +typedef struct msgbuf_ioctl_resp { + ioct_resp_hdr_t ioct_hdr; + ret_buf_t ret_buf; /**< ret buf pointers */ +} msgbuf_ioct_resp_t; + +/** WL event hdr info */ +typedef struct wl_event_hdr { + cmn_msg_hdr_t msg; + uint16 event; + uint8 flags; + uint8 rsvd; + uint16 retbuf_len; + uint16 rsvd1; + uint32 rxbufid; +} wl_event_hdr_t; + +#define TXDESCR_FLOWID_PCIELPBK_1 0xFF +#define TXDESCR_FLOWID_PCIELPBK_2 0xFE + +typedef struct txbatch_lenptr_tup { + uint32 pktid; + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} txbatch_lenptr_tup_t; + +typedef struct txbatch_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 pktcnt; + uint8 flowid; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; +} txbatch_cmn_msghdr_t; + +typedef struct txbatch_msghdr { + txbatch_cmn_msghdr_t txcmn; + txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */ +} txbatch_msghdr_t; + +/* TX desc posting header */ +typedef struct tx_lenptr_tup { + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} tx_lenptr_tup_t; + +typedef struct txdescr_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 descrcnt; + uint8 flowid; + uint32 pktid; +} txdescr_cmn_msghdr_t; + +typedef struct txdescr_msghdr { + txdescr_cmn_msghdr_t txcmn; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; + tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */ +} txdescr_msghdr_t; + +/** Tx status header info */ +typedef struct txstatus_hdr { + cmn_msg_hdr_t msg; + uint32 pktid; +} txstatus_hdr_t; + +/** RX bufid-len-ptr tuple */ +typedef struct rx_lenptr_tup { + uint32 rxbufid; + uint16 len; + uint16 rsvd2; + ret_buf_t ret_buf; /**< ret buf pointers */ +} rx_lenptr_tup_t; + +/** Rx descr Post hdr info */ +typedef struct rxdesc_msghdr { + cmn_msg_hdr_t msg; + uint16 rsvd0; + uint8 rsvd1; + uint8 descnt; + rx_lenptr_tup_t rx_tup[0]; +} rxdesc_msghdr_t; + +/** RX complete tuples */ +typedef struct rxcmplt_tup { + uint16 retbuf_len; + uint16 data_offset; + uint32 rxstatus0; + uint32 rxstatus1; + uint32 rxbufid; +} rxcmplt_tup_t; + +/** RX complete messge hdr */ +typedef struct rxcmplt_hdr { + cmn_msg_hdr_t msg; + uint16 rsvd0; + uint16 rxcmpltcnt; + rxcmplt_tup_t rx_tup[0]; +} rxcmplt_hdr_t; + +typedef struct hostevent_hdr { + cmn_msg_hdr_t msg; + uint32 evnt_pyld; +} hostevent_hdr_t; + +typedef struct dma_xfer_params { + uint32 src_physaddr_hi; + uint32 src_physaddr_lo; + uint32 dest_physaddr_hi; + uint32 dest_physaddr_lo; + uint32 len; + uint32 srcdelay; + uint32 destdelay; +} dma_xfer_params_t; + +enum { + HOST_EVENT_CONS_CMD = 1 +}; + +/* defines for flags */ +#define MSGBUF_IOC_ACTION_MASK 0x1 + +#define MAX_SUSPEND_REQ 15 + +typedef struct tx_idle_flowring_suspend_request { + cmn_msg_hdr_t msg; + uint16 ring_id[MAX_SUSPEND_REQ]; /**< ring Id's */ + uint16 num; /**< number of flowid's to suspend */ +} tx_idle_flowring_suspend_request_t; + +typedef struct tx_idle_flowring_suspend_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_suspend_response_t; + +typedef struct 
tx_idle_flowring_resume_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_idle_flowring_resume_request_t; + +typedef struct tx_idle_flowring_resume_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_resume_response_t; + +#endif /* _bcmmsgbuf_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/bcmdhd/include/bcmnvram.h new file mode 100644 index 000000000000..e3ba9b4166fb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmnvram.h @@ -0,0 +1,310 @@ +/* + * NVRAM variable manipulation + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmnvram.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcmnvram_h_ +#define _bcmnvram_h_ + +#ifndef _LANGUAGE_ASSEMBLY + +#include +#include + +struct nvram_header { + uint32 magic; + uint32 len; + uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ + uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ + uint32 config_ncdl; /* ncdl values for memc */ +}; + +struct nvram_tuple { + char *name; + char *value; + struct nvram_tuple *next; +}; + +/* + * Get default value for an NVRAM variable + */ +extern char *nvram_default_get(const char *name); +/* + * validate/restore all per-interface related variables + */ +extern void nvram_validate_all(char *prefix, bool restore); + +/* + * restore specific per-interface variable + */ +extern void nvram_restore_var(char *prefix, char *name); + +/* + * Initialize NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern int nvram_init(void *sih); +extern int nvram_deinit(void *sih); + + +/* + * Append a chunk of nvram variables to the global list + */ +extern int nvram_append(void *si, char *vars, uint varsz); + +extern void nvram_get_global_vars(char **varlst, uint *varsz); + + +/* + * Check for reset button press for restoring factory defaults. + */ +extern int nvram_reset(void *sih); + +/* + * Disable NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern void nvram_exit(void *sih); + +/* + * Get the value of an NVRAM variable. The pointer returned may be + * invalid after a set. + * @param name name of variable to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get(const char *name); + +/* + * Get the value of an NVRAM variable. 
The pointer returned may be + * invalid after a set. + * @param name name of variable to get + * @param bit bit value to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get_bitflag(const char *name, const int bit); + +/* + * Read the reset GPIO value from the nvram and set the GPIO + * as input + */ +extern int nvram_resetgpio_init(void *sih); + +/* + * Get the value of an NVRAM variable. + * @param name name of variable to get + * @return value of variable or NUL if undefined + */ +static INLINE char * +nvram_safe_get(const char *name) +{ + char *p = nvram_get(name); + return p ? p : ""; +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match(const char *name, const char *match) +{ + const char *value = nvram_get(name); + return (value && !strcmp(value, match)); +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param bit bit value to get + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match_bitflag(const char *name, const int bit, const char *match) +{ + const char *value = nvram_get_bitflag(name, bit); + return (value && !strcmp(value, match)); +} + +/* + * Inversely match an NVRAM variable. + * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is not string + * equal to invmatch or FALSE otherwise + */ +static INLINE int +nvram_invmatch(const char *name, const char *invmatch) +{ + const char *value = nvram_get(name); + return (value && strcmp(value, invmatch)); +} + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set(const char *name, const char *value); + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param bit bit value to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set_bitflag(const char *name, const int bit, const int value); +/* + * Unset an NVRAM variable. Pointers to previously set values + * remain valid until a set. + * @param name name of variable to unset + * @return 0 on success and errno on failure + * NOTE: use nvram_commit to commit this change to flash. + */ +extern int nvram_unset(const char *name); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @param nvram_corrupt true to corrupt nvram, false otherwise. 
+ * @return 0 on success and errno on failure + */ +extern int nvram_commit_internal(bool nvram_corrupt); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @return 0 on success and errno on failure + */ +extern int nvram_commit(void); + +/* + * Get all NVRAM variables (format name=value\0 ... \0\0). + * @param buf buffer to store variables + * @param count size of buffer in bytes + * @return 0 on success and errno on failure + */ +extern int nvram_getall(char *nvram_buf, int count); + +/* + * returns the crc value of the nvram + * @param nvh nvram header pointer + */ +uint8 nvram_calc_crc(struct nvram_header * nvh); + +extern int nvram_space; +#endif /* _LANGUAGE_ASSEMBLY */ + +/* The NVRAM version number stored as an NVRAM variable */ +#define NVRAM_SOFTWARE_VERSION "1" + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_CLEAR_MAGIC 0x0 +#define NVRAM_INVALID_MAGIC 0xFFFFFFFF +#define NVRAM_VERSION 1 +#define NVRAM_HEADER_SIZE 20 +/* This definition is for precommit staging, and will be removed */ +#define NVRAM_SPACE 0x8000 +/* For CFE builds this gets passed in thru the makefile */ +#ifndef MAX_NVRAM_SPACE +#define MAX_NVRAM_SPACE 0x10000 +#endif +#define DEF_NVRAM_SPACE 0x8000 +#define ROM_ENVRAM_SPACE 0x1000 +#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */ + +#define NVRAM_MAX_VALUE_LEN 255 +#define NVRAM_MAX_PARAM_LEN 64 + +#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */ +#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */ + +/* Offsets to embedded nvram area */ +#define NVRAM_START_COMPRESSED 0x400 +#define NVRAM_START 0x1000 + +#define BCM_JUMBO_NVRAM_DELIMIT '\n' +#define BCM_JUMBO_START "Broadcom Jumbo Nvram file" + + +#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \ + defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__)) +#define IMAGE_SIZE "image_size" +#define BOOTPARTITION "bootpartition" +#define IMAGE_BOOT BOOTPARTITION +#define PARTIALBOOTS "partialboots" +#define MAXPARTIALBOOTS "maxpartialboots" +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#endif + +#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \ + defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__)) +/* Shared by all: CFE, Linux Kernel, and Ap */ +#define IMAGE_BOOT "image_boot" +#define BOOTPARTITION IMAGE_BOOT +/* CFE variables */ +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_SIZE "image_size" + +/* CFE and Linux Kernel shared variables */ +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" + +/* Linux application variables */ +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#define POLICY_TOGGLE "toggle" +#define LINUX_PART_TO_FLASH "linux_to_flash" +#define LINUX_FLASH_POLICY "linux_flash_policy" + +#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */ + +#endif /* _bcmnvram_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcie.h b/drivers/net/wireless/bcmdhd/include/bcmpcie.h new file mode 100644 index 000000000000..0c15055a0353 
--- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmpcie.h @@ -0,0 +1,318 @@ +/* + * Broadcom PCIE + * Software-specific definitions shared between device and host side + * Explains the shared area between host and dongle + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmpcie.h 604490 2015-12-07 15:48:45Z $ + */ + + +#ifndef _bcmpcie_h_ +#define _bcmpcie_h_ + +#include + +#define ADDR_64(x) (x.addr) +#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr)) +#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr)) + +typedef struct { + uint32 low_addr; + uint32 high_addr; +} sh_addr_t; + + +/* May be overridden by 43xxxxx-roml.mk */ +#if !defined(BCMPCIE_MAX_TX_FLOWS) +#define BCMPCIE_MAX_TX_FLOWS 40 +#endif /* ! BCMPCIE_MAX_TX_FLOWS */ + +/** + * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that + * is located in device memory. + */ +#define PCIE_SHARED_VERSION 0x00005 +#define PCIE_SHARED_VERSION_MASK 0x000FF +#define PCIE_SHARED_ASSERT_BUILT 0x00100 +#define PCIE_SHARED_ASSERT 0x00200 +#define PCIE_SHARED_TRAP 0x00400 +#define PCIE_SHARED_IN_BRPT 0x00800 +#define PCIE_SHARED_SET_BRPT 0x01000 +#define PCIE_SHARED_PENDING_BRPT 0x02000 +#define PCIE_SHARED_TXPUSH_SPRT 0x04000 +#define PCIE_SHARED_EVT_SEQNUM 0x08000 +#define PCIE_SHARED_DMA_INDEX 0x10000 + +/** + * There are host types where a device interrupt can 'race ahead' of data written by the device into + * host memory. The dongle can avoid this condition using a variety of techniques (read barrier, + * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately + * these techniques have drawbacks on router platforms. For these platforms, it was decided to not + * avoid the condition, but to detect the condition instead and act on it. + * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM + */ +#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000 +#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000 +#define PCIE_SHARED_D2H_SYNC_MODE_MASK \ + (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM) +#define PCIE_SHARED_IDLE_FLOW_RING 0x80000 +#define PCIE_SHARED_2BYTE_INDICES 0x100000 + + +#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 +#define PCIE_SHARED_H2D_MAGIC 0x12345678 + +/** + * Message rings convey messages between host and device. They are unidirectional, and are located + * in host memory. 
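+ * Each active TX flow additionally gets its own H2D submit ring (bounded by
+ * BCMPCIE_MAX_TX_FLOWS above), so with the default of 40 flows the host ends up
+ * with BCMPCIE_H2D_MSGRINGS(40) = 2 + 40 = 42 H2D rings alongside the 3 common
+ * D2H rings.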
+ * + * This is the minimal set of message rings, known as 'common message rings': + */ +#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0 +#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1 +#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2 +#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3 +#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4 +#define BCMPCIE_COMMON_MSGRING_MAX_ID 4 + +#define BCMPCIE_H2D_COMMON_MSGRINGS 2 +#define BCMPCIE_D2H_COMMON_MSGRINGS 3 +#define BCMPCIE_COMMON_MSGRINGS 5 + +#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows)) + +/** + * H2D and D2H, WR and RD index, are maintained in the following arrays: + * - Array of all H2D WR Indices + * - Array of all H2D RD Indices + * - Array of all D2H WR Indices + * - Array of all D2H RD Indices + * + * The offset of the WR or RD indexes (for common rings) in these arrays are + * listed below. Arrays ARE NOT indexed by a ring's id. + * + * D2H common rings WR and RD index start from 0, even though their ringids + * start from BCMPCIE_H2D_COMMON_MSGRINGS + */ + +#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id) + +enum h2dring_idx { + /* H2D common rings */ + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT), + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT), + + /* First TxPost's WR or RD index starts after all H2D common rings */ + BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS) +}; + +#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \ + ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS) + +enum d2hring_idx { + /* D2H Common Rings */ + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE), + BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE), + BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE) +}; + +/** + * Macros for managing arrays of RD WR indices: + * rw_index_sz: + * - in dongle, rw_index_sz is known at compile time + * - in host/DHD, rw_index_sz is derived from advertized pci_shared flags + * + * ring_idx: See h2dring_idx and d2hring_idx + */ + +/** Offset of a RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \ + ((rw_index_sz) * (ring_idx)) + +/** Fetch the address of RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \ + (void *)((uint32)(indices_array_base) + \ + BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx))) + +/** H2D DMA Indices array size: given max flow rings */ +#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \ + ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows)) + +/** D2H DMA Indices array size */ +#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \ + ((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS) + +/** + * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used + * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated + * both in host as well as device memory. + */ +typedef struct ring_mem { + uint16 idx; /* ring id */ + uint8 type; + uint8 rsvd; + uint16 max_item; /* Max number of items in flow ring */ + uint16 len_items; /* Items are fixed size. 
Length in bytes of one item */ + sh_addr_t base_addr; /* 64 bits address, either in host or device memory */ +} ring_mem_t; + + +/** + * Per flow ring, information is maintained in device memory, e.g. at what address the ringmem and + * ringstate are located. The flow ring itself can be instantiated in either host or device memory. + * + * Perhaps this type should be renamed to make clear that it resides in device memory only. + */ +typedef struct ring_info { + uint32 ringmem_ptr; /* ring mem location in dongle memory */ + + /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not + * by a ringid. + */ + + /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */ + uint32 h2d_w_idx_ptr; /* Array of all H2D ring's WR indices */ + uint32 h2d_r_idx_ptr; /* Array of all H2D ring's RD indices */ + uint32 d2h_w_idx_ptr; /* Array of all D2H ring's WR indices */ + uint32 d2h_r_idx_ptr; /* Array of all D2H ring's RD indices */ + + /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host. + * Host may directly fetch WR and RD indices from these host-side arrays. + * + * 64bit ptr to arrays of WR or RD indices for all rings in host memory. + */ + sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */ + sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */ + sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */ + sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */ + + uint16 max_sub_queues; /* maximum number of H2D rings: common + flow */ + uint16 rsvd; +} ring_info_t; + +/** + * A structure located in TCM that is shared between host and device, primarily used during + * initialization. + */ +typedef struct { + /** shared area version captured at flags 7:0 */ + uint32 flags; + + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /**< Address of hnd_cons_t */ + + uint32 msgtrace_addr; + + uint32 fwid; + + /* Used for debug/flow control */ + uint16 total_lfrag_pkt_cnt; + uint16 max_host_rxbufs; /* rsvd in spec */ + + uint32 dma_rxoffset; /* rsvd in spec */ + + /** these will be used for sleep request/ack, d3 req/ack */ + uint32 h2d_mb_data_ptr; + uint32 d2h_mb_data_ptr; + + /* information pertinent to host IPC/msgbuf channels */ + /** location in the TCM memory which has the ring_info */ + uint32 rings_info_ptr; + + /** block of host memory for the scratch buffer */ + uint32 host_dma_scratch_buffer_len; + sh_addr_t host_dma_scratch_buffer; + + /** block of host memory for the dongle to push the status into */ + uint32 device_rings_stsblk_len; + sh_addr_t device_rings_stsblk; + + uint32 buzzz; /* BUZZZ state format strings and trace buffer */ + +} pciedev_shared_t; + +extern pciedev_shared_t pciedev_shared; + +/** + * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware + * support. 
+ */ + +/* H2D mail box Data */ +#define H2D_HOST_D3_INFORM 0x00000001 +#define H2D_HOST_DS_ACK 0x00000002 +#define H2D_HOST_DS_NAK 0x00000004 +#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */ +#define H2D_FW_TRAP 0x20000000 /**< dump HW reg info for Livelock issue */ +#define H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define H2D_HOST_D0_INFORM 0x00000010 + +/* D2H mail box Data */ +#define D2H_DEV_D3_ACK 0x00000001 +#define D2H_DEV_DS_ENTER_REQ 0x00000002 +#define D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define D2H_DEV_FWHALT 0x10000000 +#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_FWHALT) +#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + +/** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */ +#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) +#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w))) +#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1) + +/* Function can be used to notify host of FW halt */ +#define READ_AVAIL_SPACE(w, r, d) \ + ((w >= r) ? (w - r) : (d - r)) + +#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w)) +#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) +#define CHECK_WRITE_SPACE(r, w, d) \ + MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)) + + +#define WRT_PEND(x) ((x)->wr_pending) +#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) +#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) + +#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) +#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) + +#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) +#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) +#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) + +#endif /* _bcmpcie_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h new file mode 100644 index 000000000000..66c783c4aeff --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h @@ -0,0 +1,184 @@ +/* + * Broadcom PCI-SPI Host Controller Register Definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmpcispi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_PCI_SPI_H +#define _BCM_PCI_SPI_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + + +typedef volatile struct { + uint32 spih_ctrl; /* 0x00 SPI Control Register */ + uint32 spih_stat; /* 0x04 SPI Status Register */ + uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */ + uint32 spih_ext; /* 0x0C SPI Extension Register */ + uint32 PAD[4]; /* 0x10-0x1F PADDING */ + + uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */ + uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */ + uint32 PAD[6]; /* 0x28-0x3F PADDING */ + + uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */ + uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */ + /* 1=Active High) */ + uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */ + uint32 spih_int_status; /* 0x4C SPI Interrupt Status */ + uint32 PAD[4]; /* 0x50-0x5F PADDING */ + + uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */ + uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */ + uint32 PAD[1]; /* 0x68 PADDING */ + uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */ + uint32 PAD[4]; /* 0x70-0x7F PADDING */ + uint32 PAD[8]; /* 0x80-0x9F PADDING */ + uint32 PAD[8]; /* 0xA0-0xBF PADDING */ + uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */ + uint32 spih_pll_status; /* 0xC4 PLL Status Register */ + uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */ + uint32 spih_clk_count; /* 0xCC External Clock Count Register */ + +} spih_regs_t; + +typedef volatile struct { + uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */ + uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */ + + uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */ + uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */ + uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */ + uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */ + uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */ + uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */ + uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */ + uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */ + uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */ + uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */ + uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */ + uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */ + uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */ + uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */ + uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */ + uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */ + uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */ + uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */ + uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */ + uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */ + uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */ + uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */ + uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */ + 
uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */ + uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */ + uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */ + + uint32 PAD[5]; /* 0x16C-0x17F PADDING */ + + uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */ + uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */ + uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */ + uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */ + uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */ + uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */ + uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */ + uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */ + uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */ + uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */ + uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */ + uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */ + uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */ + uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */ + uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */ + uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */ + uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */ + uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */ + uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */ + uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */ + uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */ + uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */ + uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */ + uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */ + uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */ + uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */ + + uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */ + uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */ + uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */ +} spih_pciregs_t; + +/* + * PCI Core interrupt enable and status bit definitions. + */ + +/* PCI Core ICR Register bit definitions */ +#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */ +#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */ +#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */ +#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */ +#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */ +#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. 
*/ + + +/* PCI Core ISR Register bit definitions */ +#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */ +#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */ +#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */ +#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */ +#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */ + + +/* Registers on the Wishbone bus */ +#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */ +#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */ +#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */ + +/* GPIO Bit definitions */ +#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */ +#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */ +#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */ + +/* SPI Status Register Bit definitions */ +#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */ +#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */ +#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */ +#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */ +#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */ +#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */ + +#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */ + +#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */ +#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */ + +/* Spin bit loop bound check */ +#define SPI_SPIN_BOUND 0xf4240 /* 1 million */ + +#endif /* _BCM_PCI_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h new file mode 100644 index 000000000000..823c3b62f09a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h @@ -0,0 +1,39 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmperf.h 514727 2014-11-12 03:02:48Z $ + */ +/* essai */ +#ifndef _BCMPERF_H_ +#define _BCMPERF_H_ +/* get cache hits and misses */ +#define BCMPERF_ENABLE_INSTRCOUNT() +#define BCMPERF_ENABLE_ICACHE_MISS() +#define BCMPERF_ENABLE_ICACHE_HIT() +#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) +#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) +#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) +#endif /* _BCMPERF_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h new file mode 100644 index 000000000000..ce75ffa367c1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h @@ -0,0 +1,146 @@ +/* + * Definitions for API from sdio common code (bcmsdh) to individual + * host controller drivers. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdbus.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _sdio_api_h_ +#define _sdio_api_h_ + + +#define SDIOH_API_RC_SUCCESS (0x00) +#define SDIOH_API_RC_FAIL (0x01) +#define SDIOH_API_SUCCESS(status) (status == 0) + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */ +#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */ +#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */ + +#define SDIOH_DATA_PIO 0 /* PIO mode */ +#define SDIOH_DATA_DMA 1 /* DMA mode */ + +/* Max number of glommed pkts */ +#ifdef CUSTOM_MAX_TXGLOM_SIZE +#define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE +#else +#define SDPCM_MAXGLOM_SIZE 40 +#endif /* CUSTOM_MAX_TXGLOM_SIZE */ + +#define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */ +#define SDPCM_TXGLOM_MDESC 1 /* SDIO 3.0 should use multi-desc mode */ + +#ifdef CUSTOM_DEF_TXGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE CUSTOM_DEF_TXGLOM_SIZE +#else +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif /* CUSTOM_DEF_TXGLOM_SIZE */ + +#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE +#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!" 
+#undef SDPCM_DEFGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif + +typedef int SDIOH_API_RC; + +/* SDio Host structure */ +typedef struct sdioh_info sdioh_info_t; + +/* callback function, taking one arg */ +typedef void (*sdioh_cb_fn_t)(void *); + +extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); +extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); + +/* query whether SD interrupt is enabled or not */ +extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff); + +/* enable or disable SD interrupt */ +extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable); + +#if defined(DHD_DEBUG) +extern bool sdioh_interrupt_pending(sdioh_info_t *si); +#endif + +/* read or write one byte using cmd52 */ +extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte); + +/* read or write 2/4 bytes using cmd53 */ +extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc, + uint addr, uint32 *word, uint nbyte); + +/* read or write any buffer using cmd53 */ +extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc, + uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, + void *pkt); + +/* get cis data */ +extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length); + +extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); +extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); + +/* query number of io functions */ +extern uint sdioh_query_iofnum(sdioh_info_t *si); + +/* handle iovars */ +extern int sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Issue abort to the specified function and clear controller as needed */ +extern int sdioh_abort(sdioh_info_t *si, uint fnc); + +/* Start and Stop SDIO without re-enumerating the SD card. 
*/ +extern int sdioh_start(sdioh_info_t *si, int stage); +extern int sdioh_stop(sdioh_info_t *si); + +/* Wait system lock free */ +extern int sdioh_waitlockfree(sdioh_info_t *si); + +/* Reset and re-initialize the device */ +extern int sdioh_sdio_reset(sdioh_info_t *si); + + + +#if defined(BCMSDIOH_STD) + #define SDIOH_SLEEP_ENABLED +#endif +extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab); + +/* GPIO support */ +extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd); +extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab); + +#endif /* _sdio_api_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h new file mode 100644 index 000000000000..3b3e6b6ab45d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h @@ -0,0 +1,255 @@ +/* + * SDIO host client driver interface of Broadcom HNBU + * export functions to client drivers + * abstract OS and BUS specific details of SDIO + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdh.h 514727 2014-11-12 03:02:48Z $ + */ + +/** + * @file bcmsdh.h + */ + +#ifndef _bcmsdh_h_ +#define _bcmsdh_h_ + +#define BCMSDH_ERROR_VAL 0x0001 /* Error */ +#define BCMSDH_INFO_VAL 0x0002 /* Info */ +extern const uint bcmsdh_msglevel; + +#define BCMSDH_ERROR(x) +#define BCMSDH_INFO(x) + +#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \ + defined(BCMSDIOH_SPI)) +#define BCMSDH_ADAPTER +#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */ + +/* forward declarations */ +typedef struct bcmsdh_info bcmsdh_info_t; +typedef void (*bcmsdh_cb_fn_t)(void *); + +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva); +/** + * BCMSDH API context + */ +struct bcmsdh_info +{ + bool init_success; /* underlying driver successfully attached */ + void *sdioh; /* handler for sdioh */ + uint32 vendevid; /* Target Vendor and Device ID on SD bus */ + osl_t *osh; + bool regfail; /* Save status of last reg_read/reg_write call */ + uint32 sbwad; /* Save backplane window address */ + void *os_cxt; /* Pointer to per-OS private data */ +}; + +/* Detach - freeup resources allocated in attach */ +extern int bcmsdh_detach(osl_t *osh, void *sdh); + +/* Query if SD device interrupts are enabled */ +extern bool bcmsdh_intr_query(void *sdh); + +/* Enable/disable SD interrupt */ +extern int bcmsdh_intr_enable(void *sdh); +extern int bcmsdh_intr_disable(void *sdh); + +/* Register/deregister device interrupt handler. */ +extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); +extern int bcmsdh_intr_dereg(void *sdh); +/* Enable/disable SD card interrupt forward */ +extern void bcmsdh_intr_forward(void *sdh, bool pass); + +#if defined(DHD_DEBUG) +/* Query pending interrupt status from the host controller */ +extern bool bcmsdh_intr_pending(void *sdh); +#endif + +/* Register a callback to be called if and when bcmsdh detects + * device removal. No-op in the case of non-removable/hardwired devices. + */ +extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); + +/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). + * fn: function number + * addr: unmodified SDIO-space address + * data: data byte to write + * err: pointer to error code (or NULL) + */ +extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err); +extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err); + +/* Read/Write 4bytes from/to cfg space */ +extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err); +extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err); + +/* Read CIS content for specified function. + * fn: function whose CIS is being requested (0 is common CIS) + * cis: pointer to memory location to place results + * length: number of bytes to read + * Internally, this routine uses the values from the cis base regs (0x9-0xB) + * to form an SDIO-space address to read the data from. + */ +extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); + +/* Synchronous access to device (client) core registers via CMD53 to F1. + * addr: backplane address (i.e. 
>= regsva from attach) + * size: register width in bytes (2 or 4) + * data: data for register write + */ +extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data); + +/* set sb address window */ +extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set); + +/* Indicate if last reg read/write failed */ +extern bool bcmsdh_regfail(void *sdh); + +/* Buffer transfer to/from device (client) core via cmd53. + * fn: function number + * addr: backplane address (i.e. >= regsva from attach) + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. + */ +typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting); +extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); +extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); + +extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len); +extern void bcmsdh_glom_clear(void *sdh); +extern uint bcmsdh_set_mode(void *sdh, uint mode); +extern bool bcmsdh_glom_enabled(void); +/* Flags bits */ +#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */ +#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */ + +/* Pending (non-error) return code */ +#define BCME_PENDING 1 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. + */ +extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes); + +/* Issue an abort to the specified function */ +extern int bcmsdh_abort(void *sdh, uint fn); + +/* Start SDIO Host Controller communication */ +extern int bcmsdh_start(void *sdh, int stage); + +/* Stop SDIO Host Controller communication */ +extern int bcmsdh_stop(void *sdh); + +/* Wait system lock free */ +extern int bcmsdh_waitlockfree(void *sdh); + +/* Returns the "Device ID" of target device on the SDIO bus. */ +extern int bcmsdh_query_device(void *sdh); + +/* Returns the number of IO functions reported by the device */ +extern uint bcmsdh_query_iofnum(void *sdh); + +/* Miscellaneous knob tweaker. 
*/ +extern int bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Reset and reinitialize the device */ +extern int bcmsdh_reset(bcmsdh_info_t *sdh); + +/* helper functions */ + +/* callback functions */ +typedef struct { + /* probe the device */ + void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot, + uint16 func, uint bustype, void * regsva, osl_t * osh, + void * param); + /* remove the device */ + void (*remove)(void *context); + /* can we suspend now */ + int (*suspend)(void *context); + /* resume from suspend */ + int (*resume)(void *context); +} bcmsdh_driver_t; + +/* platform specific/high level functions */ +extern int bcmsdh_register(bcmsdh_driver_t *driver); +extern void bcmsdh_unregister(void); +extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device); +extern void bcmsdh_device_remove(void * sdh); + +extern int bcmsdh_reg_sdio_notify(void* semaphore); +extern void bcmsdh_unreg_sdio_notify(void); + +#if defined(OOB_INTR_ONLY) +extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context); +extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh); +extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable); +#endif +extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh); +extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh); +extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh); + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh); +int bcmsdh_resume(bcmsdh_info_t *bcmsdh); + +/* Function to pass device-status bits to DHD. */ +extern uint32 bcmsdh_get_dstatus(void *sdh); + +/* Function to return current window addr */ +extern uint32 bcmsdh_cur_sbwad(void *sdh); + +/* Function to pass chipid and rev to lower layers for controlling pr's */ +extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); + + +extern int bcmsdh_sleep(void *sdh, bool enab); + +/* GPIO support */ +extern int bcmsdh_gpio_init(void *sd); +extern bool bcmsdh_gpioin(void *sd, uint32 gpio); +extern int bcmsdh_gpioouten(void *sd, uint32 gpio); +extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab); + +#endif /* _bcmsdh_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h new file mode 100644 index 000000000000..07903f99fce3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h @@ -0,0 +1,119 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_sdmmc.h 591160 2015-10-07 06:01:58Z $ + */ + +#ifndef __BCMSDH_SDMMC_H__ +#define __BCMSDH_SDMMC_H__ + +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SD4 2 +#define CLIENT_INTR 0x100 /* Get rid of this! */ +#define SDIOH_SDMMC_MAX_SG_ENTRIES 32 + +struct sdioh_info { + osl_t *osh; /* osh handler */ + void *bcmsdh; /* upper layer handle */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + uint16 intmask; /* Current active interrupts */ + + int intrcount; /* Client interrupts */ + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + bool use_rxchain; + struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; + struct sdio_func fake_func0; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; + +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdh_sdmmc.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); + + +/************************************************************** + * Internal interfaces: bcmsdh_sdmmc.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size); +extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq); +extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif /* __BCMSDH_SDMMC_H__ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h new file mode 100644 index 000000000000..5c0adff8e8ad --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h @@ -0,0 +1,281 @@ +/* + * Broadcom SDIO/PCMCIA + * 
Software-specific definitions shared between device and host side + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdpcm.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcmsdpcm_h_ +#define _bcmsdpcm_h_ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* intstatus bits */ +#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ +#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ +#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ +#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ + +#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT) + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ +#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ +#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ +#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ +#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ + +/* tosbmailboxdata */ +#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ + +#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT) + +/* tohostmailbox bits corresponding to intstatus bits */ +#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */ +#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */ +#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ +#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ +#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */ +#define HMB_DATA_DEVREADY 0x02 /* we're ready to to talk to host after enable */ +#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */ +#define HMB_DATA_FWREADY 0x08 /* firmware 
is ready for protocol activity */
+#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET 6 /* Version # */
+#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET 7 /* Spare */
+#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL 15
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0 0x01
+#define SDPCM_FLAG_RESVD1 0x02
+#define SDPCM_FLAG_GSPI_TXENAB 0x04
+#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on command.
+ * Len is current or requested frame length, not
+ * including test header; sent little-endian.
+ */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN 4 /* Packet count field length */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count
+ * (Backward compatibility) Set frame count in a
+ * 4 byte field adjacent to the HDR
+ */
+#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off
+ * Set frame count in a 4 byte field adjacent to
+ * the HDR
+ */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+ uint32 wroutofsync; /* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
+ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
+ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
+ uint32 rxdescuflo; /* receive descriptor underflows */
+ uint32 rxfifooflo; /* receive fifo overflows */
+ uint32 txfifouflo; /* transmit fifo underflows */
+ uint32 runt; /* runt (too short) frames recv'd from bus */
+ uint32 badlen; /* frame's rxh len does not match its hw tag len */
+ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
+ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */
+ uint32 rxfcrc; /* frame rx header indicates crc error */
+ uint32 rxfwoos; /* frame rx header indicates write out of sync */
+ uint32 rxfwft; /* frame rx header indicates write frame termination */
+ uint32 rxfabort; /* frame rx header indicates frame aborted */
+ uint32 woosint; /* write out of sync interrupt */
+ uint32 roosint; /* read out of sync interrupt */
+ uint32 rftermint; /* read frame terminate interrupt */
+ uint32 wftermint; /* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val) ((var) == (val))
+#define SDIODREV_GE(var, val) ((var) >= (val))
+#define SDIODREV_GT(var, val) ((var) > (val))
+#define SDIODREV_LT(var, val) ((var) < (val))
+#define SDIODREV_LE(var, val) ((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+ (SDIODREV_LT((h)->corerev, 1) ? \
+ SDIODDMAREG32((h), (dir), (chnl)) : \
+ SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+ ((coreid) == SDIOD_CORE_ID ?
\ + SDIODDMAREG(h, dir, chnl) : \ + PCMDDMAREG(h, dir, chnl)) + +#define SDIODFIFOREG(h, corerev) \ + (SDIODREV_LT((corerev), 1) ? \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) + +#define PCMDFIFOREG(h) \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) + +#define SDPCMFIFOREG(h, coreid, corerev) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODFIFOREG(h, corerev) : \ + PCMDFIFOREG(h)) + +/* + * Shared structure between dongle and the host. + * The structure contains pointers to trap or assert information. + */ +#define SDPCM_SHARED_VERSION 0x0001 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 +#define SDPCM_SHARED_IN_BRPT 0x0800 +#define SDPCM_SHARED_SET_BRPT 0x1000 +#define SDPCM_SHARED_PENDING_BRPT 0x2000 + +typedef struct { + uint32 flags; + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /* Address of hnd_cons_t */ + uint32 msgtrace_addr; + uint32 fwid; +} sdpcm_shared_t; + +extern sdpcm_shared_t sdpcm_shared; + +#endif /* _bcmsdpcm_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h new file mode 100644 index 000000000000..b1831db8b19b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h @@ -0,0 +1,138 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SD_SPI_H +#define _BCM_SD_SPI_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + + uint lockcount; /* nest count of sdspi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + bool got_hcint; /* Host Controller interrupt. 
*/ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current register transfer size */ + uint32 cmd53_wr_data; /* Used to pass CMD53 write data */ + uint32 card_response; /* Used to pass back response status byte */ + uint32 card_rsp_data; /* Used to pass back response data word */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdspi.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmsdspi.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size); +extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size); + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#endif /* _BCM_SD_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h new file mode 100644 index 000000000000..24df8de685d7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h @@ -0,0 +1,285 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdstd.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SD_STD_H +#define _BCM_SD_STD_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_dma(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); +/* Allocate/init/free per-OS private data */ +extern int sdstd_osinit(sdioh_info_t *sd); +extern void sdstd_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 +#define SDIOH_MODE_SD1 1 +#define SDIOH_MODE_SD4 2 + +#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */ +#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */ + +#define SDIOH_TYPE_ARASAN_HDK 1 +#define SDIOH_TYPE_BCM27XX 2 +#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */ +#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */ +#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */ + +/* For linux, allow yielding for dongle */ +#define BCMSDYIELD + +/* Expected card status value for CMD7 */ +#define SDIOH_CMD7_EXP_STATUS 0x00001E00 + +#define RETRIES_LARGE 100000 +#define sdstd_os_yield(sd) do {} while (0) +#define RETRIES_SMALL 100 + + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +#define USE_FIFO 0x8 /* Fifo vs non-fifo */ + +#define CLIENT_INTR 0x100 /* Get rid of this! 
*/ + +#define HC_INTR_RETUNING 0x1000 + + +#ifdef BCMSDIOH_TXGLOM +/* Total glom pkt can not exceed 64K + * need one more slot for glom padding packet + */ +#define SDIOH_MAXGLOM_SIZE (40+1) + +typedef struct glom_buf { + uint32 count; /* Total number of pkts queued */ + void *dma_buf_arr[SDIOH_MAXGLOM_SIZE]; /* Frame address */ + ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */ + uint16 nbytes[SDIOH_MAXGLOM_SIZE]; /* Size of each frame */ +} glom_buf_t; +#endif + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint32 curr_caps; /* max current capabilities reg */ + + osl_t *osh; /* osh handler */ + volatile char *mem_space; /* pci device memory va */ + uint lockcount; /* nest count of sdstd_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint target_dev; /* Target device ID */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + void *bcmsdh; /* handler to upper layer stack (bcmsdh) */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + int local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; /* DMA Buffer virtual address */ + ulong dma_phys; /* DMA Buffer physical address */ + void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */ + ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */ + + /* adjustments needed to make the dma align properly */ + void *dma_start_buf; + ulong dma_start_phys; + uint alloced_dma_size; + void *adma2_dscr_start_buf; + ulong adma2_dscr_start_phys; + uint alloced_adma2_dscr_size; + + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + bool got_hcint; /* local interrupt flag */ + uint16 last_intrstatus; /* to cache intrstatus */ + int host_UHSISupported; /* whether UHSI is supported for HC. */ + int card_UHSI_voltage_Supported; /* whether UHSI is supported for + * Card in terms of Voltage [1.8 or 3.3]. + */ + int global_UHSI_Supp; /* type of UHSI support in both host and card. 
+ * HOST_SDR_UNSUPP: capabilities not supported/matched + * HOST_SDR_12_25: SDR12 and SDR25 supported + * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd + */ + volatile int sd3_dat_state; /* data transfer state used for retuning check */ + volatile int sd3_tun_state; /* tuning state used for retuning check */ + bool sd3_tuning_reqd; /* tuning requirement parameter */ + uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */ +#ifdef BCMSDIOH_TXGLOM + glom_buf_t glom_info; /* pkt information used for glomming */ + uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ +#endif +}; + +#define DMA_MODE_NONE 0 +#define DMA_MODE_SDMA 1 +#define DMA_MODE_ADMA1 2 +#define DMA_MODE_ADMA2 3 +#define DMA_MODE_ADMA2_64 4 +#define DMA_MODE_AUTO -1 + +#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE)) + +/* States for Tuning and corr data */ +#define TUNING_IDLE 0 +#define TUNING_START 1 +#define TUNING_START_AFTER_DAT 2 +#define TUNING_ONGOING 3 + +#define DATA_TRANSFER_IDLE 0 +#define DATA_TRANSFER_ONGOING 1 + +#define CHECK_TUNING_PRE_DATA 1 +#define CHECK_TUNING_POST_DATA 2 + + +#ifdef DHD_DEBUG +#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01 +#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00 +#endif + + +/************************************************************ + * Internal interfaces: per-port references into bcmsdstd.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdstd_devintr_on(sdioh_info_t *sd); +extern void sdstd_devintr_off(sdioh_info_t *sd); + +/* Enable/disable interrupts for local controller events */ +extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err); +extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err); + +/* Wait for specified interrupt and error bits to be set */ +extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err); + + +/************************************************************** + * Internal interfaces: bcmsdstd.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size); +extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdstd_register_irq(sdioh_info_t *sd, uint irq); +extern void sdstd_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void sdstd_lock(sdioh_info_t *sd); +extern void sdstd_unlock(sdioh_info_t *sd); +extern void sdstd_waitlockfree(sdioh_info_t *sd); + +/* OS-specific wrappers for safe concurrent register access */ +extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags); +extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags); + +/* OS-specific wait-for-interrupt-or-status */ +extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits); + +/* used by bcmsdstd_linux [implemented in sdstd] */ +extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd); +extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd); +extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd); +extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param); +extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd); +extern int sdstd_3_get_tune_state(sdioh_info_t *sd); +extern int sdstd_3_get_data_state(sdioh_info_t *sd); 
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state); +extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state); +extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd); +extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd); +extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode); + +/* used by sdstd [implemented in bcmsdstd_linux/ndis] */ +extern void sdstd_3_start_tuning(sdioh_info_t *sd); +extern void sdstd_3_osinit_tuning(sdioh_info_t *sd); +extern void sdstd_3_osclean_tuning(sdioh_info_t *sd); + +extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif /* _BCM_SD_STD_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h new file mode 100644 index 000000000000..e9a906e79734 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h @@ -0,0 +1,43 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_H +#define _BCM_SPI_H + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); + +#endif /* _BCM_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h new file mode 100644 index 000000000000..7c2bfc4653c1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h @@ -0,0 +1,165 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspibrcm.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_BRCM_H +#define _BCM_SPI_BRCM_H + +#ifndef SPI_MAX_IOFUNCS +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 +#endif +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#if defined(DHD_DEBUG) +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#else +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#endif + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_F1 64 +#define BLOCK_SIZE_F2 2048 +#define BLOCK_SIZE_F3 2048 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 +#define ERROR_UF 2 +#define ERROR_OF 3 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + void *bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + uint lockcount; /* nest count of spi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 card_dstatus; /* 32bit device status */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SPI_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + uint32 wordlen; /* host processor 16/32bits */ + uint32 prev_fun; + uint32 chip; + uint32 chiprev; + bool resp_delay_all; + bool dwordmode; + bool resp_delay_new; + + struct spierrstats_t spierrstats; +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmspibrcm.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmspibrcm.c references to per-port code + */ + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */ +#define SPI_RW_FLAG_S 31 +#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */ +#define SPI_ACCESS_S 30 +#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */ +#define SPI_FUNCTION_S 28 +#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */ +#define SPI_REG_ADDR_S 11 +#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */ +#define SPI_LEN_S 0 + +#endif /* _BCM_SPI_BRCM_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h new file mode 100644 index 000000000000..a40bd569da34 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h @@ -0,0 +1,965 @@ +/* + * SROM format definition. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsrom_fmt.h 553280 2015-04-29 07:55:29Z $ + */ + +#ifndef _bcmsrom_fmt_h_ +#define _bcmsrom_fmt_h_ + +#define SROM_MAXREV 13 /* max revision supported by driver */ + +/* Maximum srom: 12 Kilobits == 1536 bytes */ + +#define SROM_MAX 1536 +#define SROM_MAXW 594 + +#ifdef LARGE_NVRAM_MAXSZ +#define VARS_MAX LARGE_NVRAM_MAXSZ +#else +#define VARS_MAX 4096 +#endif /* LARGE_NVRAM_MAXSZ */ + +/* PCI fields */ +#define PCI_F0DEVID 48 + + +#define SROM_WORDS 64 + +#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */ + +#define SROM_SSID 2 +#define SROM_SVID 3 + +#define SROM_WL1LHMAXP 29 + +#define SROM_WL1LPAB0 30 +#define SROM_WL1LPAB1 31 +#define SROM_WL1LPAB2 32 + +#define SROM_WL1HPAB0 33 +#define SROM_WL1HPAB1 34 +#define SROM_WL1HPAB2 35 + +#define SROM_MACHI_IL0 36 +#define SROM_MACMID_IL0 37 +#define SROM_MACLO_IL0 38 +#define SROM_MACHI_ET0 39 +#define SROM_MACMID_ET0 40 +#define SROM_MACLO_ET0 41 +#define SROM_MACHI_ET1 42 +#define SROM_MACMID_ET1 43 +#define SROM_MACLO_ET1 44 +#define SROM3_MACHI 37 +#define SROM3_MACMID 38 +#define SROM3_MACLO 39 + +#define SROM_BXARSSI2G 40 +#define SROM_BXARSSI5G 41 + +#define SROM_TRI52G 42 +#define SROM_TRI5GHL 43 + +#define SROM_RXPO52G 45 + +#define SROM2_ENETPHY 45 + +#define SROM_AABREV 46 +/* Fields in AABREV */ +#define SROM_BR_MASK 0x00ff +#define SROM_CC_MASK 0x0f00 +#define SROM_CC_SHIFT 8 +#define SROM_AA0_MASK 0x3000 +#define SROM_AA0_SHIFT 12 +#define SROM_AA1_MASK 0xc000 +#define SROM_AA1_SHIFT 14 + +#define SROM_WL0PAB0 47 +#define SROM_WL0PAB1 48 +#define SROM_WL0PAB2 49 + +#define SROM_LEDBH10 50 +#define SROM_LEDBH32 51 + +#define SROM_WL10MAXP 52 + +#define SROM_WL1PAB0 53 +#define SROM_WL1PAB1 54 +#define SROM_WL1PAB2 55 + +#define SROM_ITT 56 + +#define SROM_BFL 57 +#define SROM_BFL2 28 +#define SROM3_BFL2 61 + +#define SROM_AG10 58 + +#define SROM_CCODE 59 + +#define SROM_OPO 60 + +#define SROM3_LEDDC 62 + +#define SROM_CRCREV 63 + +/* SROM Rev 4: Reallocate the software part of the srom to accomodate + * MIMO features. It assumes up to two PCIE functions and 440 bytes + * of useable srom i.e. the useable storage in chips with OTP that + * implements hardware redundancy. 
+ */ + +#define SROM4_WORDS 220 + +#define SROM4_SIGN 32 +#define SROM4_SIGNATURE 0x5372 + +#define SROM4_BREV 33 + +#define SROM4_BFL0 34 +#define SROM4_BFL1 35 +#define SROM4_BFL2 36 +#define SROM4_BFL3 37 +#define SROM5_BFL0 37 +#define SROM5_BFL1 38 +#define SROM5_BFL2 39 +#define SROM5_BFL3 40 + +#define SROM4_MACHI 38 +#define SROM4_MACMID 39 +#define SROM4_MACLO 40 +#define SROM5_MACHI 41 +#define SROM5_MACMID 42 +#define SROM5_MACLO 43 + +#define SROM4_CCODE 41 +#define SROM4_REGREV 42 +#define SROM5_CCODE 34 +#define SROM5_REGREV 35 + +#define SROM4_LEDBH10 43 +#define SROM4_LEDBH32 44 +#define SROM5_LEDBH10 59 +#define SROM5_LEDBH32 60 + +#define SROM4_LEDDC 45 +#define SROM5_LEDDC 45 + +#define SROM4_AA 46 +#define SROM4_AA2G_MASK 0x00ff +#define SROM4_AA2G_SHIFT 0 +#define SROM4_AA5G_MASK 0xff00 +#define SROM4_AA5G_SHIFT 8 + +#define SROM4_AG10 47 +#define SROM4_AG32 48 + +#define SROM4_TXPID2G 49 +#define SROM4_TXPID5G 51 +#define SROM4_TXPID5GL 53 +#define SROM4_TXPID5GH 55 + +#define SROM4_TXRXC 61 +#define SROM4_TXCHAIN_MASK 0x000f +#define SROM4_TXCHAIN_SHIFT 0 +#define SROM4_RXCHAIN_MASK 0x00f0 +#define SROM4_RXCHAIN_SHIFT 4 +#define SROM4_SWITCH_MASK 0xff00 +#define SROM4_SWITCH_SHIFT 8 + + +/* Per-path fields */ +#define MAX_PATH_SROM 4 +#define SROM4_PATH0 64 +#define SROM4_PATH1 87 +#define SROM4_PATH2 110 +#define SROM4_PATH3 133 + +#define SROM4_2G_ITT_MAXP 0 +#define SROM4_2G_PA 1 +#define SROM4_5G_ITT_MAXP 5 +#define SROM4_5GLH_MAXP 6 +#define SROM4_5G_PA 7 +#define SROM4_5GL_PA 11 +#define SROM4_5GH_PA 15 + +/* Fields in the ITT_MAXP and 5GLH_MAXP words */ +#define B2G_MAXP_MASK 0xff +#define B2G_ITT_SHIFT 8 +#define B5G_MAXP_MASK 0xff +#define B5G_ITT_SHIFT 8 +#define B5GH_MAXP_MASK 0xff +#define B5GL_MAXP_SHIFT 8 + +/* All the miriad power offsets */ +#define SROM4_2G_CCKPO 156 +#define SROM4_2G_OFDMPO 157 +#define SROM4_5G_OFDMPO 159 +#define SROM4_5GL_OFDMPO 161 +#define SROM4_5GH_OFDMPO 163 +#define SROM4_2G_MCSPO 165 +#define SROM4_5G_MCSPO 173 +#define SROM4_5GL_MCSPO 181 +#define SROM4_5GH_MCSPO 189 +#define SROM4_CDDPO 197 +#define SROM4_STBCPO 198 +#define SROM4_BW40PO 199 +#define SROM4_BWDUPPO 200 + +#define SROM4_CRCREV 219 + + +/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6. + * This is acombined srom for both MIMO and SISO boards, usable in + * the .130 4Kilobit OTP with hardware redundancy. 
+ */ + +#define SROM8_SIGN 64 + +#define SROM8_BREV 65 + +#define SROM8_BFL0 66 +#define SROM8_BFL1 67 +#define SROM8_BFL2 68 +#define SROM8_BFL3 69 + +#define SROM8_MACHI 70 +#define SROM8_MACMID 71 +#define SROM8_MACLO 72 + +#define SROM8_CCODE 73 +#define SROM8_REGREV 74 + +#define SROM8_LEDBH10 75 +#define SROM8_LEDBH32 76 + +#define SROM8_LEDDC 77 + +#define SROM8_AA 78 + +#define SROM8_AG10 79 +#define SROM8_AG32 80 + +#define SROM8_TXRXC 81 + +#define SROM8_BXARSSI2G 82 +#define SROM8_BXARSSI5G 83 +#define SROM8_TRI52G 84 +#define SROM8_TRI5GHL 85 +#define SROM8_RXPO52G 86 + +#define SROM8_FEM2G 87 +#define SROM8_FEM5G 88 +#define SROM8_FEM_ANTSWLUT_MASK 0xf800 +#define SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SROM8_FEM_TR_ISO_MASK 0x0700 +#define SROM8_FEM_TR_ISO_SHIFT 8 +#define SROM8_FEM_PDET_RANGE_MASK 0x00f8 +#define SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006 +#define SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SROM8_FEM_TSSIPOS_MASK 0x0001 +#define SROM8_FEM_TSSIPOS_SHIFT 0 + +#define SROM8_THERMAL 89 + +/* Temp sense related entries */ +#define SROM8_MPWR_RAWTS 90 +#define SROM8_TS_SLP_OPT_CORRX 91 +/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */ +#define SROM8_FOC_HWIQ_IQSWP 92 + +#define SROM8_EXTLNAGAIN 93 + +/* Temperature delta for PHY calibration */ +#define SROM8_PHYCAL_TEMPDELTA 94 + +/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */ +#define SROM8_MPWR_1_AND_2 95 + + +/* Per-path offsets & fields */ +#define SROM8_PATH0 96 +#define SROM8_PATH1 112 +#define SROM8_PATH2 128 +#define SROM8_PATH3 144 + +#define SROM8_2G_ITT_MAXP 0 +#define SROM8_2G_PA 1 +#define SROM8_5G_ITT_MAXP 4 +#define SROM8_5GLH_MAXP 5 +#define SROM8_5G_PA 6 +#define SROM8_5GL_PA 9 +#define SROM8_5GH_PA 12 + +/* All the miriad power offsets */ +#define SROM8_2G_CCKPO 160 + +#define SROM8_2G_OFDMPO 161 +#define SROM8_5G_OFDMPO 163 +#define SROM8_5GL_OFDMPO 165 +#define SROM8_5GH_OFDMPO 167 + +#define SROM8_2G_MCSPO 169 +#define SROM8_5G_MCSPO 177 +#define SROM8_5GL_MCSPO 185 +#define SROM8_5GH_MCSPO 193 + +#define SROM8_CDDPO 201 +#define SROM8_STBCPO 202 +#define SROM8_BW40PO 203 +#define SROM8_BWDUPPO 204 + +/* SISO PA parameters are in the path0 spaces */ +#define SROM8_SISO 96 + +/* Legacy names for SISO PA paramters */ +#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP) +#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA) +#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1) +#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2) +#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP) +#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP) +#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA) +#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1) +#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2) +#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA) +#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1) +#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2) +#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA) +#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1) +#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2) + +#define SROM8_CRCREV 219 + +/* SROM REV 9 */ +#define SROM9_2GPO_CCKBW20 160 +#define SROM9_2GPO_CCKBW20UL 161 +#define SROM9_2GPO_LOFDMBW20 162 +#define SROM9_2GPO_LOFDMBW20UL 164 + +#define SROM9_5GLPO_LOFDMBW20 166 +#define SROM9_5GLPO_LOFDMBW20UL 168 +#define SROM9_5GMPO_LOFDMBW20 170 +#define SROM9_5GMPO_LOFDMBW20UL 172 +#define SROM9_5GHPO_LOFDMBW20 174 
+#define SROM9_5GHPO_LOFDMBW20UL 176 + +#define SROM9_2GPO_MCSBW20 178 +#define SROM9_2GPO_MCSBW20UL 180 +#define SROM9_2GPO_MCSBW40 182 + +#define SROM9_5GLPO_MCSBW20 184 +#define SROM9_5GLPO_MCSBW20UL 186 +#define SROM9_5GLPO_MCSBW40 188 +#define SROM9_5GMPO_MCSBW20 190 +#define SROM9_5GMPO_MCSBW20UL 192 +#define SROM9_5GMPO_MCSBW40 194 +#define SROM9_5GHPO_MCSBW20 196 +#define SROM9_5GHPO_MCSBW20UL 198 +#define SROM9_5GHPO_MCSBW40 200 + +#define SROM9_PO_MCS32 202 +#define SROM9_PO_LOFDM40DUP 203 +#define SROM9_EU_EDCRSTH 204 +#define SROM10_EU_EDCRSTH 204 +#define SROM8_RXGAINERR_2G 205 +#define SROM8_RXGAINERR_5GL 206 +#define SROM8_RXGAINERR_5GM 207 +#define SROM8_RXGAINERR_5GH 208 +#define SROM8_RXGAINERR_5GU 209 +#define SROM8_SUBBAND_PPR 210 +#define SROM8_PCIEINGRESS_WAR 211 +#define SROM8_EU_EDCRSTH 212 +#define SROM9_SAR 212 + +#define SROM8_NOISELVL_2G 213 +#define SROM8_NOISELVL_5GL 214 +#define SROM8_NOISELVL_5GM 215 +#define SROM8_NOISELVL_5GH 216 +#define SROM8_NOISELVL_5GU 217 +#define SROM8_NOISECALOFFSET 218 + +#define SROM9_REV_CRC 219 + +#define SROM10_CCKPWROFFSET 218 +#define SROM10_SIGN 219 +#define SROM10_SWCTRLMAP_2G 220 +#define SROM10_CRCREV 229 + +#define SROM10_WORDS 230 +#define SROM10_SIGNATURE SROM4_SIGNATURE + + +/* SROM REV 11 */ +#define SROM11_BREV 65 + +#define SROM11_BFL0 66 +#define SROM11_BFL1 67 +#define SROM11_BFL2 68 +#define SROM11_BFL3 69 +#define SROM11_BFL4 70 +#define SROM11_BFL5 71 + +#define SROM11_MACHI 72 +#define SROM11_MACMID 73 +#define SROM11_MACLO 74 + +#define SROM11_CCODE 75 +#define SROM11_REGREV 76 + +#define SROM11_LEDBH10 77 +#define SROM11_LEDBH32 78 + +#define SROM11_LEDDC 79 + +#define SROM11_AA 80 + +#define SROM11_AGBG10 81 +#define SROM11_AGBG2A0 82 +#define SROM11_AGA21 83 + +#define SROM11_TXRXC 84 + +#define SROM11_FEM_CFG1 85 +#define SROM11_FEM_CFG2 86 + +/* Masks and offsets for FEM_CFG */ +#define SROM11_FEMCTRL_MASK 0xf800 +#define SROM11_FEMCTRL_SHIFT 11 +#define SROM11_PAPDCAP_MASK 0x0400 +#define SROM11_PAPDCAP_SHIFT 10 +#define SROM11_TWORANGETSSI_MASK 0x0200 +#define SROM11_TWORANGETSSI_SHIFT 9 +#define SROM11_PDGAIN_MASK 0x01f0 +#define SROM11_PDGAIN_SHIFT 4 +#define SROM11_EPAGAIN_MASK 0x000e +#define SROM11_EPAGAIN_SHIFT 1 +#define SROM11_TSSIPOSSLOPE_MASK 0x0001 +#define SROM11_TSSIPOSSLOPE_SHIFT 0 +#define SROM11_GAINCTRLSPH_MASK 0xf800 +#define SROM11_GAINCTRLSPH_SHIFT 11 + +#define SROM11_THERMAL 87 +#define SROM11_MPWR_RAWTS 88 +#define SROM11_TS_SLP_OPT_CORRX 89 +#define SROM11_XTAL_FREQ 90 +#define SROM11_5GB0_4080_W0_A1 91 +#define SROM11_PHYCAL_TEMPDELTA 92 +#define SROM11_MPWR_1_AND_2 93 +#define SROM11_5GB0_4080_W1_A1 94 +#define SROM11_TSSIFLOOR_2G 95 +#define SROM11_TSSIFLOOR_5GL 96 +#define SROM11_TSSIFLOOR_5GM 97 +#define SROM11_TSSIFLOOR_5GH 98 +#define SROM11_TSSIFLOOR_5GU 99 + +/* Masks and offsets for Thermal parameters */ +#define SROM11_TEMPS_PERIOD_MASK 0xf0 +#define SROM11_TEMPS_PERIOD_SHIFT 4 +#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f +#define SROM11_TEMPS_HYSTERESIS_SHIFT 0 +#define SROM11_TEMPCORRX_MASK 0xfc +#define SROM11_TEMPCORRX_SHIFT 2 +#define SROM11_TEMPSENSE_OPTION_MASK 0x3 +#define SROM11_TEMPSENSE_OPTION_SHIFT 0 + +#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f +#define SROM11_PDOFF_2G_40M_A0_SHIFT 0 +#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0 +#define SROM11_PDOFF_2G_40M_A1_SHIFT 4 +#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00 +#define SROM11_PDOFF_2G_40M_A2_SHIFT 8 +#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000 +#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15 + +#define 
SROM11_PDOFF_2G_40M 100 +#define SROM11_PDOFF_40M_A0 101 +#define SROM11_PDOFF_40M_A1 102 +#define SROM11_PDOFF_40M_A2 103 +#define SROM11_5GB0_4080_W2_A1 103 +#define SROM11_PDOFF_80M_A0 104 +#define SROM11_PDOFF_80M_A1 105 +#define SROM11_PDOFF_80M_A2 106 +#define SROM11_5GB1_4080_W0_A1 106 + +#define SROM11_SUBBAND5GVER 107 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_11 3 +#define SROM11_PATH0 108 +#define SROM11_PATH1 128 +#define SROM11_PATH2 148 + +#define SROM11_2G_MAXP 0 +#define SROM11_5GB1_4080_PA 0 +#define SROM11_2G_PA 1 +#define SROM11_5GB2_4080_PA 2 +#define SROM11_RXGAINS1 4 +#define SROM11_RXGAINS 5 +#define SROM11_5GB3_4080_PA 5 +#define SROM11_5GB1B0_MAXP 6 +#define SROM11_5GB3B2_MAXP 7 +#define SROM11_5GB0_PA 8 +#define SROM11_5GB1_PA 11 +#define SROM11_5GB2_PA 14 +#define SROM11_5GB3_PA 17 + +/* Masks and offsets for rxgains */ +#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS2GTRISOA_MASK 0x0078 +#define SROM11_RXGAINS2GTRISOA_SHIFT 3 +#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0 +#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GHTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078 +#define SROM11_RXGAINS5GMTRISOA_SHIFT 3 +#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0 + +/* Power per rate */ +#define SROM11_CCKBW202GPO 168 +#define SROM11_CCKBW20UL2GPO 169 +#define SROM11_MCSBW202GPO 170 +#define SROM11_MCSBW202GPO_1 171 +#define SROM11_MCSBW402GPO 172 +#define SROM11_MCSBW402GPO_1 173 +#define SROM11_DOT11AGOFDMHRBW202GPO 174 +#define SROM11_OFDMLRBW202GPO 175 + +#define SROM11_MCSBW205GLPO 176 +#define SROM11_MCSBW205GLPO_1 177 +#define SROM11_MCSBW405GLPO 178 +#define SROM11_MCSBW405GLPO_1 179 +#define SROM11_MCSBW805GLPO 180 +#define SROM11_MCSBW805GLPO_1 181 +#define SROM11_RPCAL_2G 182 +#define SROM11_RPCAL_5GL 183 +#define SROM11_MCSBW205GMPO 184 +#define SROM11_MCSBW205GMPO_1 185 +#define SROM11_MCSBW405GMPO 186 +#define SROM11_MCSBW405GMPO_1 187 +#define SROM11_MCSBW805GMPO 188 +#define SROM11_MCSBW805GMPO_1 189 +#define SROM11_RPCAL_5GM 190 +#define SROM11_RPCAL_5GH 191 +#define SROM11_MCSBW205GHPO 192 +#define SROM11_MCSBW205GHPO_1 193 +#define SROM11_MCSBW405GHPO 194 +#define SROM11_MCSBW405GHPO_1 195 +#define SROM11_MCSBW805GHPO 196 +#define SROM11_MCSBW805GHPO_1 197 +#define SROM11_RPCAL_5GU 198 +#define SROM11_PDOFF_2G_CCK 199 +#define SROM11_MCSLR5GLPO 200 +#define SROM11_MCSLR5GMPO 201 +#define SROM11_MCSLR5GHPO 202 + +#define SROM11_SB20IN40HRPO 203 +#define SROM11_SB20IN80AND160HR5GLPO 204 +#define SROM11_SB40AND80HR5GLPO 205 +#define SROM11_SB20IN80AND160HR5GMPO 206 +#define SROM11_SB40AND80HR5GMPO 207 +#define SROM11_SB20IN80AND160HR5GHPO 208 +#define SROM11_SB40AND80HR5GHPO 209 +#define SROM11_SB20IN40LRPO 210 +#define SROM11_SB20IN80AND160LR5GLPO 211 +#define SROM11_SB40AND80LR5GLPO 212 +#define SROM11_TXIDXCAP2G 
212 +#define SROM11_SB20IN80AND160LR5GMPO 213 +#define SROM11_SB40AND80LR5GMPO 214 +#define SROM11_TXIDXCAP5G 214 +#define SROM11_SB20IN80AND160LR5GHPO 215 +#define SROM11_SB40AND80LR5GHPO 216 + +#define SROM11_DOT11AGDUPHRPO 217 +#define SROM11_DOT11AGDUPLRPO 218 + +/* MISC */ +#define SROM11_PCIEINGRESS_WAR 220 +#define SROM11_SAR 221 + +#define SROM11_NOISELVL_2G 222 +#define SROM11_NOISELVL_5GL 223 +#define SROM11_NOISELVL_5GM 224 +#define SROM11_NOISELVL_5GH 225 +#define SROM11_NOISELVL_5GU 226 + +#define SROM11_RXGAINERR_2G 227 +#define SROM11_RXGAINERR_5GL 228 +#define SROM11_RXGAINERR_5GM 229 +#define SROM11_RXGAINERR_5GH 230 +#define SROM11_RXGAINERR_5GU 231 + +#define SROM11_EU_EDCRSTH 232 +#define SROM12_EU_EDCRSTH 232 + +#define SROM11_SIGN 64 +#define SROM11_CRCREV 233 + +#define SROM11_WORDS 234 +#define SROM11_SIGNATURE 0x0634 + + +/* SROM REV 12 */ +#define SROM12_SIGN 64 +#define SROM12_WORDS 512 +#define SROM12_SIGNATURE 0x8888 +#define SROM12_CRCREV 511 + +#define SROM12_BFL6 486 +#define SROM12_BFL7 487 + +#define SROM12_MCSBW205GX1PO 234 +#define SROM12_MCSBW205GX1PO_1 235 +#define SROM12_MCSBW405GX1PO 236 +#define SROM12_MCSBW405GX1PO_1 237 +#define SROM12_MCSBW805GX1PO 238 +#define SROM12_MCSBW805GX1PO_1 239 +#define SROM12_MCSLR5GX1PO 240 +#define SROM12_SB40AND80LR5GX1PO 241 +#define SROM12_SB20IN80AND160LR5GX1PO 242 +#define SROM12_SB20IN80AND160HR5GX1PO 243 +#define SROM12_SB40AND80HR5GX1PO 244 + +#define SROM12_MCSBW205GX2PO 245 +#define SROM12_MCSBW205GX2PO_1 246 +#define SROM12_MCSBW405GX2PO 247 +#define SROM12_MCSBW405GX2PO_1 248 +#define SROM12_MCSBW805GX2PO 249 +#define SROM12_MCSBW805GX2PO_1 250 +#define SROM12_MCSLR5GX2PO 251 +#define SROM12_SB40AND80LR5GX2PO 252 +#define SROM12_SB20IN80AND160LR5GX2PO 253 +#define SROM12_SB20IN80AND160HR5GX2PO 254 +#define SROM12_SB40AND80HR5GX2PO 255 + +/* MISC */ +#define SROM12_RXGAINS10 483 +#define SROM12_RXGAINS11 484 +#define SROM12_RXGAINS12 485 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_12 3 +#define SROM12_PATH0 256 +#define SROM12_PATH1 328 +#define SROM12_PATH2 400 + +#define SROM12_5GB42G_MAXP 0 +#define SROM12_2GB0_PA 1 +#define SROM12_2GB0_PA_W0 1 +#define SROM12_2GB0_PA_W1 2 +#define SROM12_2GB0_PA_W2 3 +#define SROM12_2GB0_PA_W3 4 + +#define SROM12_RXGAINS 5 +#define SROM12_5GB1B0_MAXP 6 +#define SROM12_5GB3B2_MAXP 7 + +#define SROM12_5GB0_PA 8 +#define SROM12_5GB0_PA_W0 8 +#define SROM12_5GB0_PA_W1 9 +#define SROM12_5GB0_PA_W2 10 +#define SROM12_5GB0_PA_W3 11 + +#define SROM12_5GB1_PA 12 +#define SROM12_5GB1_PA_W0 12 +#define SROM12_5GB1_PA_W1 13 +#define SROM12_5GB1_PA_W2 14 +#define SROM12_5GB1_PA_W3 15 + +#define SROM12_5GB2_PA 16 +#define SROM12_5GB2_PA_W0 16 +#define SROM12_5GB2_PA_W1 17 +#define SROM12_5GB2_PA_W2 18 +#define SROM12_5GB2_PA_W3 19 + +#define SROM12_5GB3_PA 20 +#define SROM12_5GB3_PA_W0 20 +#define SROM12_5GB3_PA_W1 21 +#define SROM12_5GB3_PA_W2 22 +#define SROM12_5GB3_PA_W3 23 + +#define SROM12_5GB4_PA 24 +#define SROM12_5GB4_PA_W0 24 +#define SROM12_5GB4_PA_W1 25 +#define SROM12_5GB4_PA_W2 26 +#define SROM12_5GB4_PA_W3 27 + +#define SROM12_2G40B0_PA 28 +#define SROM12_2G40B0_PA_W0 28 +#define SROM12_2G40B0_PA_W1 29 +#define SROM12_2G40B0_PA_W2 30 +#define SROM12_2G40B0_PA_W3 31 + +#define SROM12_5G40B0_PA 32 +#define SROM12_5G40B0_PA_W0 32 +#define SROM12_5G40B0_PA_W1 33 +#define SROM12_5G40B0_PA_W2 34 +#define SROM12_5G40B0_PA_W3 35 + +#define SROM12_5G40B1_PA 36 +#define SROM12_5G40B1_PA_W0 36 +#define SROM12_5G40B1_PA_W1 37 +#define SROM12_5G40B1_PA_W2 38 +#define 
SROM12_5G40B1_PA_W3 39 + +#define SROM12_5G40B2_PA 40 +#define SROM12_5G40B2_PA_W0 40 +#define SROM12_5G40B2_PA_W1 41 +#define SROM12_5G40B2_PA_W2 42 +#define SROM12_5G40B2_PA_W3 43 + +#define SROM12_5G40B3_PA 44 +#define SROM12_5G40B3_PA_W0 44 +#define SROM12_5G40B3_PA_W1 45 +#define SROM12_5G40B3_PA_W2 46 +#define SROM12_5G40B3_PA_W3 47 + +#define SROM12_5G40B4_PA 48 +#define SROM12_5G40B4_PA_W0 48 +#define SROM12_5G40B4_PA_W1 49 +#define SROM12_5G40B4_PA_W2 50 +#define SROM12_5G40B4_PA_W3 51 + +#define SROM12_5G80B0_PA 52 +#define SROM12_5G80B0_PA_W0 52 +#define SROM12_5G80B0_PA_W1 53 +#define SROM12_5G80B0_PA_W2 54 +#define SROM12_5G80B0_PA_W3 55 + +#define SROM12_5G80B1_PA 56 +#define SROM12_5G80B1_PA_W0 56 +#define SROM12_5G80B1_PA_W1 57 +#define SROM12_5G80B1_PA_W2 58 +#define SROM12_5G80B1_PA_W3 59 + +#define SROM12_5G80B2_PA 60 +#define SROM12_5G80B2_PA_W0 60 +#define SROM12_5G80B2_PA_W1 61 +#define SROM12_5G80B2_PA_W2 62 +#define SROM12_5G80B2_PA_W3 63 + +#define SROM12_5G80B3_PA 64 +#define SROM12_5G80B3_PA_W0 64 +#define SROM12_5G80B3_PA_W1 65 +#define SROM12_5G80B3_PA_W2 66 +#define SROM12_5G80B3_PA_W3 67 + +#define SROM12_5G80B4_PA 68 +#define SROM12_5G80B4_PA_W0 68 +#define SROM12_5G80B4_PA_W1 69 +#define SROM12_5G80B4_PA_W2 70 +#define SROM12_5G80B4_PA_W3 71 + +/* PD offset */ +#define SROM12_PDOFF_2G_CCK 472 + +#define SROM12_PDOFF_20in40M_5G_B0 473 +#define SROM12_PDOFF_20in40M_5G_B1 474 +#define SROM12_PDOFF_20in40M_5G_B2 475 +#define SROM12_PDOFF_20in40M_5G_B3 476 +#define SROM12_PDOFF_20in40M_5G_B4 477 + +#define SROM12_PDOFF_40in80M_5G_B0 478 +#define SROM12_PDOFF_40in80M_5G_B1 479 +#define SROM12_PDOFF_40in80M_5G_B2 480 +#define SROM12_PDOFF_40in80M_5G_B3 481 +#define SROM12_PDOFF_40in80M_5G_B4 482 + +#define SROM12_PDOFF_20in80M_5G_B0 488 +#define SROM12_PDOFF_20in80M_5G_B1 489 +#define SROM12_PDOFF_20in80M_5G_B2 490 +#define SROM12_PDOFF_20in80M_5G_B3 491 +#define SROM12_PDOFF_20in80M_5G_B4 492 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 + +#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */ +#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */ + +#define SROM13_SIGN 64 +#define SROM13_WORDS 590 +#define SROM13_SIGNATURE 0x4d55 +#define SROM13_CRCREV 589 + + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_13 4 +#define SROM13_PATH0 256 +#define SROM13_PATH1 328 +#define SROM13_PATH2 400 +#define SROM13_PATH3 512 +#define SROM13_RXGAINS 5 + +#define SROM13_XTALFREQ 90 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 +#define SROM13_SB20IN40HRLRPOX 96 + +#define SROM13_RXGAINS1CORE3 97 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 + +#define SROM13_ANTGAIN_BANDBGA 100 + +#define SROM13_RXGAINS2CORE0 101 +#define SROM13_RXGAINS2CORE1 102 +#define SROM13_RXGAINS2CORE2 103 +#define SROM13_RXGAINS2CORE3 104 + +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +/* power per rate */ +#define SROM13_MCS1024QAM2GPO 108 +#define SROM13_MCS1024QAM5GLPO 109 +#define SROM13_MCS1024QAM5GLPO_1 110 +#define SROM13_MCS1024QAM5GMPO 111 +#define SROM13_MCS1024QAM5GMPO_1 112 +#define SROM13_MCS1024QAM5GHPO 113 +#define SROM13_MCS1024QAM5GHPO_1 114 +#define 
SROM13_MCS1024QAM5GX1PO 115 +#define SROM13_MCS1024QAM5GX1PO_1 116 +#define SROM13_MCS1024QAM5GX2PO 117 +#define SROM13_MCS1024QAM5GX2PO_1 118 + +#define SROM13_MCSBW1605GLPO 119 +#define SROM13_MCSBW1605GLPO_1 120 +#define SROM13_MCSBW1605GMPO 121 +#define SROM13_MCSBW1605GMPO_1 122 +#define SROM13_MCSBW1605GHPO 123 +#define SROM13_MCSBW1605GHPO_1 124 + +#define SROM13_MCSBW1605GX1PO 125 +#define SROM13_MCSBW1605GX1PO_1 126 +#define SROM13_MCSBW1605GX2PO 127 +#define SROM13_MCSBW1605GX2PO_1 128 + +#define SROM13_ULBPPROFFS5GB0 129 +#define SROM13_ULBPPROFFS5GB1 130 +#define SROM13_ULBPPROFFS5GB2 131 +#define SROM13_ULBPPROFFS5GB3 132 +#define SROM13_ULBPPROFFS5GB4 133 +#define SROM13_ULBPPROFFS2G 134 + +#define SROM13_MCS8POEXP 135 +#define SROM13_MCS8POEXP_1 136 +#define SROM13_MCS9POEXP 137 +#define SROM13_MCS9POEXP_1 138 +#define SROM13_MCS10POEXP 139 +#define SROM13_MCS10POEXP_1 140 +#define SROM13_MCS11POEXP 141 +#define SROM13_MCS11POEXP_1 142 +#define SROM13_ULBPDOFFS5GB0A0 143 +#define SROM13_ULBPDOFFS5GB0A1 144 +#define SROM13_ULBPDOFFS5GB0A2 145 +#define SROM13_ULBPDOFFS5GB0A3 146 +#define SROM13_ULBPDOFFS5GB1A0 147 +#define SROM13_ULBPDOFFS5GB1A1 148 +#define SROM13_ULBPDOFFS5GB1A2 149 +#define SROM13_ULBPDOFFS5GB1A3 150 +#define SROM13_ULBPDOFFS5GB2A0 151 +#define SROM13_ULBPDOFFS5GB2A1 152 +#define SROM13_ULBPDOFFS5GB2A2 153 +#define SROM13_ULBPDOFFS5GB2A3 154 +#define SROM13_ULBPDOFFS5GB3A0 155 +#define SROM13_ULBPDOFFS5GB3A1 156 +#define SROM13_ULBPDOFFS5GB3A2 157 +#define SROM13_ULBPDOFFS5GB3A3 158 +#define SROM13_ULBPDOFFS5GB4A0 159 +#define SROM13_ULBPDOFFS5GB4A1 160 +#define SROM13_ULBPDOFFS5GB4A2 161 +#define SROM13_ULBPDOFFS5GB4A3 162 +#define SROM13_ULBPDOFFS2GA0 163 +#define SROM13_ULBPDOFFS2GA1 164 +#define SROM13_ULBPDOFFS2GA2 165 +#define SROM13_ULBPDOFFS2GA3 166 + +#define SROM13_RPCAL5GB4 199 + +#define SROM13_EU_EDCRSTH 232 + +#define SROM13_SWCTRLMAP4_CFG 493 +#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494 +#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496 +#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497 +#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498 +#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500 +#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501 +#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502 +#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504 +#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505 +#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506 +#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508 +#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509 + +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 + +#define SROM13_NOISELVLCORE3 584 +#define SROM13_NOISELVLCORE3_1 585 +#define SROM13_RXGAINERRCORE3 586 +#define SROM13_RXGAINERRCORE3_1 587 + + +typedef struct { + uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */ + uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */ + uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */ + uint8 triso; /* TR switch isolation */ + uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */ +} srom_fem_t; + +#endif /* _bcmsrom_fmt_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h new file mode 100644 index 000000000000..f2775fbba1c5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h @@ -0,0 
+1,1400 @@ +/* + * Table that encodes the srom formats for PCI/PCIe NICs. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsrom_tbl.h 553564 2015-04-30 06:19:30Z $ + */ + +#ifndef _bcmsrom_tbl_h_ +#define _bcmsrom_tbl_h_ + +#include "sbpcmcia.h" +#include "wlioctl.h" +#include + +typedef struct { + const char *name; + uint32 revmask; + uint32 flags; + uint16 off; + uint16 mask; +} sromvar_t; + +#define SRFL_MORE 1 /* value continues as described by the next entry */ +#define SRFL_NOFFS 2 /* value bits can't be all one's */ +#define SRFL_PRHEX 4 /* value is in hexdecimal format */ +#define SRFL_PRSIGN 8 /* value is in signed decimal format */ +#define SRFL_CCODE 0x10 /* value is in country code format */ +#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */ +#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */ +#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */ +#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST + * ONE in the array should have this flag set. + */ + + +#define SROM_DEVID_PCIE 48 + +/** + * Assumptions: + * - Ethernet address spans across 3 consecutive words + * + * Table rules: + * - Add multiple entries next to each other if a value spans across multiple words + * (even multiple fields in the same word) with each entry except the last having + * it's SRFL_MORE bit set. + * - Ethernet address entry does not follow above rule and must not have SRFL_MORE + * bit set. Its SRFL_ETHADDR bit implies it takes multiple words. + * - The last entry's name field must be NULL to indicate the end of the table. Other + * entries must have non-NULL name. 
+ */ +static const sromvar_t pci_sromvars[] = { +/* name revmask flags off mask */ +#if defined(CABLECPE) + {"devid", 0xffffff00, SRFL_PRHEX, PCI_F0DEVID, 0xffff}, +#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED) + {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff}, +#else + {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff}, +#endif + {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK}, + {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff}, + {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff}, + {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff}, + {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM_BFL2, 0xffff}, + {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM3_BFL2, 0xffff}, + {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff}, + {"", 0, 0, SROM4_BFL1, 0xffff}, + {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff}, + {"", 0, 0, SROM5_BFL1, 0xffff}, + {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff}, + {"", 0, 0, SROM8_BFL1, 0xffff}, + {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff}, + {"", 0, 0, SROM4_BFL3, 0xffff}, + {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff}, + {"", 0, 0, SROM5_BFL3, 0xffff}, + {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff}, + {"", 0, 0, SROM8_BFL3, 0xffff}, + {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff}, + {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff}, + {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff}, + {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff}, + {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff}, + {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff}, + {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff}, + {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, + {"regrev", 0x00000008, 0, SROM_OPO, 0xff00}, + {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff}, + {"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff}, + {"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff}, + {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, + {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, + {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, + {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00}, + {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00}, + {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00}, + {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff}, + {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00}, + {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff}, + {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00}, + {"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00}, + {"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00}, + {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, + {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, + {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff}, + {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff}, + {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, + {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, + {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, + {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, + {"pa0itssit", 0x00000700, 
0, SROM8_W0_ITTMAXP, 0xff00}, + {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff}, + {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff}, + {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff}, + {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, + {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff}, + {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff}, + {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, + {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00}, + {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00}, + {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff}, + {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00}, + {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff}, + {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00}, + {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff}, + {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00}, + {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff}, + {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00}, + {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff}, + {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00}, + {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, + {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, + {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, + {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, + {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, + {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, + {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, + {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, + {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, + {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00}, + {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, + {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, + {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, + {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, + {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, + {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, + {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff}, + {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff}, + {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff}, + {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff}, + {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff}, + {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff}, + {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00}, + {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff}, + {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00}, + {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, + {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, + {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f}, + {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, + {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f}, + {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff}, + {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00}, + {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, + 
{"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00}, + {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff}, + {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00}, + {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00}, + {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, + {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, + {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK}, + {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK}, + {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK}, + {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK}, + {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, + {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, + {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, + {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, + {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, + {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, + {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, + {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, + {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, + {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, + {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, + {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, + {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, + {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, + {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, + {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, + + {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, + {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, + {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, + {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff}, + {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff}, + {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, + {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, + {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff}, + {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff}, + {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff}, + {"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff}, + {"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff}, + {"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff}, + {"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff}, + + {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00}, + {"tempoffset", 0x00000700, 0, 
SROM8_THERMAL, 0x00ff}, + {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, + {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300}, + {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f}, + {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010}, + {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020}, + {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff}, + {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00}, + {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80}, + + {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, + {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, + {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, + {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, + {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 
0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, + {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, + {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff}, + {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff}, + {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff}, + {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, + {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff}, + {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff}, + {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff}, + {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff}, + + /* power per rate from sromrev 9 */ + {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff}, + {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff}, + {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff}, + 
{"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff}, + {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff}, + {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff}, + {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff}, + {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf}, + {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00}, + {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800}, + {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f}, + {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0}, + {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800}, + {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f}, + {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0}, + {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800}, + {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f}, + {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0}, + {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800}, + {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800}, + {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff}, + {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00}, + {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 
0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00}, + {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f}, + {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0}, + {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00}, + {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f}, + {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0}, + {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00}, + {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f}, + {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0}, + {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00}, + {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f}, + {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0}, + {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00}, + {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff}, + {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00}, + {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7}, + + {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff}, + {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00}, + /* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */ + {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff}, + {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff}, + + /* sromrev 11 */ + {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff}, + {"", 0, 0, SROM11_BFL5, 0xffff}, + {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff}, + {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff}, + {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff}, + {"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff}, + {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff}, + {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00}, + {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff}, + {"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00}, + {"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff}, + {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff}, + {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00}, + {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00}, + {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff}, + {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00}, + {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff}, + {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00}, + {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff}, + {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK}, + + {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001}, + {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e}, + {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0}, + {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200}, + {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400}, + 
{"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800}, + + {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001}, + {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e}, + {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0}, + {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200}, + {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400}, + {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800}, + + {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00}, + {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff}, + {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff}, + {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300}, + {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */ + {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff}, + {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80}, + {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff}, + {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff}, + {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f}, + {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0}, + {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00}, + {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000}, + {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff}, + {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff}, + {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff}, + {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff}, + {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff}, + {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff}, + + {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff}, + {"paparambwver", 0xfffff800, 0, 
SROM11_MCSLR5GLPO, 0xf000}, + {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */ + {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 40 MHz BW */ + {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 80 MHz BW */ + {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 2G Band, CCK */ + {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | 
SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff}, + + /* power per rate */ + {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff}, + {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff}, + {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff}, + {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff}, + {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff}, + {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff}, + {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff}, + {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff}, + {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff}, + {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff}, + {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff}, + {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff}, + {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff}, + {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff}, + {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff}, + {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff}, + {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff}, + {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff}, + {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff}, + {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff}, + {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff}, + {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff}, + {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff}, + {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff}, + {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff}, + {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff}, + {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff}, + {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff}, + {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff}, + {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff}, + {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff}, + {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff}, + {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff}, + {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff}, + + /* Misc */ + {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff}, + {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00}, + + {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00}, + {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f}, + 
{"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f}, + {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0}, + {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00}, + {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00}, + + {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800}, + {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800}, + {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff}, + {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff}, + {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff}, + {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff}, + {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff}, + {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0}, + {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0}, + {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f}, + {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0}, + {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00}, + + /* sromrev 12 */ + {"boardflags4", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff}, + {"", 0, 0, SROM12_BFL7, 0xffff}, + {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff}, + {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff}, + {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff}, + {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff}, + {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff}, + {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff}, + {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff}, + {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff}, + {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff}, + {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff}, + {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff}, + {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff}, + {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff}, + {"pdoffset20in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 
0xffff}, + {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff}, + {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff}, + + {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + + /* power per rate */ + {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff}, + {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff}, + {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff}, + {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff}, + {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff}, + {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff}, + + {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + + {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007}, + {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007}, + {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007}, + {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078}, + {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078}, + {"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078}, + {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080}, + {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080}, + {"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080}, + {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700}, + {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700}, + {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700}, + {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800}, + {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800}, + {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800}, + {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000}, + {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000}, + {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000}, + {"eu_edthresh2g", 0x00001000, 0, 
SROM12_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00}, + + {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff}, + {"", 0, 0, SROM12_GPDN_H, 0xffff}, + + {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00}, + + {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00}, + {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff}, + {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f}, + {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0}, + {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f}, + {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0}, + {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007}, + {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078}, + {"rxgains5gmtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0080}, + {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700}, + {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800}, + {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000}, + + /* power per rate */ + {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff}, + {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff}, + {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff}, + {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff}, + {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff}, + {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff}, + + {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff}, + {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff}, + {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff}, + {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff}, + {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff}, + + {"ulbpproffs2g", 0xffffe000, 0, SROM13_ULBPPROFFS2G, 0xffff}, + + {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff}, + {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff}, + {"mcs10poexp", 0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff}, + {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff}, + + {"ulbpdoffs5gb0a0", 0xffffe000, 0, 
SROM13_ULBPDOFFS5GB0A0, 0xffff}, + {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff}, + {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff}, + {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff}, + {"ulbpdoffs5gb1a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A0, 0xffff}, + {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff}, + {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff}, + {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff}, + {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff}, + {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff}, + {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff}, + {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff}, + {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff}, + {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff}, + {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff}, + {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff}, + {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff}, + {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff}, + {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff}, + {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff}, + {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff}, + {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff}, + {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff}, + {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff}, + + {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff}, + + {"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + + {"pdoffset20in40m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + + {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff}, + {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff}, + {"swctrlmap4_TX5g_fem7to4", 
0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t perpath_pci_sromvars[] = { + {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, + {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, + {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, + {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, + {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, + {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, + {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, + {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff}, + {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff}, + {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff}, + {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff}, + {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff}, + {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff}, + {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff}, + {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff}, + {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff}, + {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff}, + {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff}, + + /* sromrev 11 */ + {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff}, + {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff}, + {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007}, + {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078}, + {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080}, + {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700}, + {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800}, + {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000}, + 
{"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000}, + {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff}, + {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00}, + {"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff}, + + /* sromrev 12 */ + {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00}, + {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff}, + + {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff}, + {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff}, + {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00}, + {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff}, + {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00}, + + {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | 
SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff}, + + {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff}, + + {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff}, + /* sromrev 13 */ + {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 
0xffffe000, 0, SROM13_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000}, + {NULL, 0, 0, 0, 0} +}; + +#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) +#define PHY_TYPE_HT 7 /* HT-Phy value */ +#define PHY_TYPE_N 4 /* N-Phy value */ +#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */ +#if !defined(PHY_TYPE_AC) +#define PHY_TYPE_AC 11 /* AC-Phy value */ +#endif /* !defined(PHY_TYPE_AC) */ +#if !defined(PHY_TYPE_LCN20) +#define PHY_TYPE_LCN20 12 /* LCN20-Phy value */ +#endif /* !defined(PHY_TYPE_LCN20) */ +#if !defined(PHY_TYPE_NULL) +#define PHY_TYPE_NULL 0xf /* Invalid Phy value */ +#endif /* !defined(PHY_TYPE_NULL) */ + +typedef struct { + uint16 phy_type; + uint16 bandrange; + uint16 chain; + const char *vars; +} pavars_t; + +static const pavars_t pavars[] = { + /* HTPHY */ + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"}, + /* NPHY */ + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"}, + /* LCN20PHY */ + {PHY_TYPE_LCN20, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + + +static const pavars_t pavars_SROM12[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, 
WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +static const pavars_t pavars_SROM13[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 3, "pa2g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 3, "pa5ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 3, "pa5g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 3, "pa5g80a3"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 1 */ +static const pavars_t pavars_bwver_1[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5gbw40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw80a0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 2 */ +static const pavars_t pavars_bwver_2[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 3 */ +static const pavars_t pavars_bwver_3[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2gccka1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + 
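Taken together, each pavars_t entry above maps a (PHY type, band range, Tx chain) triple to a space-separated list of PA parameter names; per the comments, the pavars_bwver_* variants correspond to paparambwver values 1-3, while pavars_SROM12/pavars_SROM13 apparently cover the later SROM revisions. As an illustrative sketch only (not part of the patch), a consumer of such a table could walk the entries up to the PHY_TYPE_NULL terminator and tokenize the vars string; the list_pavars() helper below is hypothetical and assumes the pavars_t/uint16 definitions above plus standard C string routines.

#include <stdio.h>
#include <string.h>

/* Hypothetical helper (illustration only): print the PA parameter names
 * registered for one (phy_type, bandrange, chain) combination.
 */
static void list_pavars(const pavars_t *tbl, uint16 phy, uint16 band, uint16 chain)
{
	const pavars_t *pv;

	for (pv = tbl; pv->phy_type != PHY_TYPE_NULL; pv++) {
		char buf[128], *name, *save;

		if (pv->phy_type != phy || pv->bandrange != band || pv->chain != chain)
			continue;

		/* pv->vars is a space-separated list of NVRAM variable names */
		strncpy(buf, pv->vars, sizeof(buf) - 1);
		buf[sizeof(buf) - 1] = '\0';
		for (name = strtok_r(buf, " ", &save); name != NULL;
		     name = strtok_r(NULL, " ", &save))
			printf("%s\n", name);
	}
}

For example, list_pavars(pavars, PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0) would print pa2gw0a0, pa2gw1a0 and pa2gw2a0.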
+typedef struct { + uint16 phy_type; + uint16 bandrange; + const char *vars; +} povars_t; + +static const povars_t povars[] = { + /* NPHY */ + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 " + "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 " + "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 " + "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 " + "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"}, + {PHY_TYPE_NULL, 0, ""} +}; + +typedef struct { + uint8 tag; /* Broadcom subtag name */ + uint32 revmask; /* Supported cis_sromrev bitmask. Some of the parameters in + * different tuples have the same name. Therefore, the MFGc tool + * needs to know which tuple to generate when seeing these + * parameters (given that we know sromrev from user input, like the + * nvram file). + */ + uint8 len; /* Length field of the tuple; note that it includes the + * subtag name (1 byte): 1 + tuple content length + */ + const char *params; +} cis_tuple_t; + +#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */ +#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */ +#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */ +#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */ + +/** this array is used by CIS creating/writing applications */ +static const cis_tuple_t cis_hnbuvars[] = { +/* tag revmask len params */ + {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */ + {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */ + {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */ + /* Unified OTP: tuple to embed USB manfid inside SDIO CIS */ + {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"}, + {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"}, + /* NOTE: subdevid is also written to boardtype. + * Need to write HNBU_BOARDTYPE to change it if it is different. 
+ */ + {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"}, + {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"}, + {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"}, + {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"}, + {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */ + {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"}, + {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 " + "4boardflags4 4boardflags5 "}, + {HNBU_LEDS, 0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 " + "1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"}, + {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"}, + {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"}, + {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"}, + {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 " + "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit " + "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"}, + {HNBU_RDLID, 0xffffffff, 3, "2rdlid"}, + {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g " + "0rssisav2g 0bxa2g"}, /* special case */ + {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g " + "0rssisav5g 0bxa5g"}, /* special case */ + {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"}, + {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"}, + {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"}, + {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"}, + {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"}, + {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"}, + {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */ + {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"}, + {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"}, + {HNBU_LEDDC, 0xffffffff, 3, "2leddc"}, + {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"}, + {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"}, + {HNBU_REGREV, 0xffffffff, 2, "1regrev"}, + {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g " + "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */ + {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 " + "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 " + "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"}, + {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 " + "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 " + "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"}, + {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo " + "4ofdm5ghpo"}, + {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 " + "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"}, + {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 " + "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"}, + {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 " + "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 " + "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 " + "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"}, + {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"}, + {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"}, + {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"}, + {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"}, + {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"}, + {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"}, + {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"}, + {HNBU_USBFS, 0xffffffff, 2, "1usbfs"}, + {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"}, + {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"}, + {HNBU_CUSTOM1, 0xffffffff, 5, 
"4customvar1"}, + {OTP_RAW, 0xffffffff, 0, ""}, /* special case */ + {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"}, + {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"}, + {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"}, + {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"}, + {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo " + "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"}, + {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"}, + {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"}, + {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"}, + {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"}, + {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"}, + {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"}, + {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis " + "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option " + "1phycal_tempdelta"}, /* special case */ + {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"}, + {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g " + "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g " + "0tssiposslope5g"}, /* special case */ + {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 " + "1*4maxp5ga0 2*12pa5ga0"}, + {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"}, + {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"}, + {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"}, + {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 " + "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"}, + {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo " + "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo " + "2sb20in80ofdmlrbw202gpo"}, + {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo " + "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo " + "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po " + "2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"}, + {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 4mcsbw405gx1po"}, + {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo " + "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo " + "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo " + "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo " + "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo " + "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po " + }, + {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo " + "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo " + "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo " + "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo " + "2sb20in80p80lr5gpo 2dot11agduppo"}, + {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 " + "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"}, + {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 " + "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"}, + 
{HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"}, + {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"}, + {HNBU_UUID, 0xffffffff, 17, "16uuid"}, + {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"}, + {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 " + "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 " + "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 " + "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */ + {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 " + "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 " + "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 " + "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */ + {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 " + "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 " + "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 " + "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */ + {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g " + "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"}, + {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 " + "0pdoffset2g40ma2 0pdoffset2g40mvalid"}, + {HNBU_ACPA_CCK, 0xfffff800, 7, "2*3pa2gccka0"}, + {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"}, + {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"}, + {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"}, + {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"}, + {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"}, + {HNBU_TXBFRPCALS, 0xfffff800, 11, + "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */ + {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"}, + {0xFF, 0xffffffff, 0, ""} +}; + +#endif /* _bcmsrom_tbl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h new file mode 100644 index 000000000000..6cc925c599d9 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h @@ -0,0 +1,1302 @@ +/* + * Misc useful os-independent macros and functions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmutils.h 563776 2015-06-15 15:51:15Z $ + */ + +#ifndef _bcmutils_h_ +#define _bcmutils_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + + +#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) +#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count)) +#define bcm_snprintf_s snprintf +#define bcm_sprintf_s snprintf + +/* + * #define bcm_strcpy_s(dst, count, src) strncpy((dst), (src), (count)) + * Use bcm_strcpy_s instead as it is a safer option + * bcm_strcat_s: Use bcm_strncat_s as a safer option + * + */ + +/* ctype replacement */ +#define _BCM_U 0x01 /* upper */ +#define _BCM_L 0x02 /* lower */ +#define _BCM_D 0x04 /* digit */ +#define _BCM_C 0x08 /* cntrl */ +#define _BCM_P 0x10 /* punct */ +#define _BCM_S 0x20 /* white space (space/lf/tab) */ +#define _BCM_X 0x40 /* hex digit */ +#define _BCM_SP 0x80 /* hard space (0x20) */ + +extern const unsigned char bcm_ctype[]; +#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)]) + +#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) +#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) +#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) +#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) +#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) +#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) +#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0) +#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) +#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) +#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c)) + +#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx) + +#define KB(bytes) (((bytes) + 1023) / 1024) + +/* Buffer structure for collecting string-formatted data +* using bcm_bprintf() API. +* Use bcm_binit() to initialize before use +*/ + +struct bcmstrbuf { + char *buf; /* pointer to current position in origbuf */ + unsigned int size; /* current (residual) size in bytes */ + char *origbuf; /* unmodified pointer to original buffer */ + unsigned int origsize; /* unmodified original buffer size in bytes */ +}; + +#define BCMSTRBUF_LEN(b) (b->size) +#define BCMSTRBUF_BUF(b) (b->buf) + +/* ** driver-only section ** */ +#ifdef BCMDRIVER +#include +#include +#include + +#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */ + +/* + * Spin at most 'us' microseconds while 'exp' is true. + * Caller should explicitly test 'exp' when this completes + * and take appropriate error action if 'exp' is still true.
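+ *
+ * Minimal usage sketch (hw_is_busy() and 'dev' are hypothetical, not part of
+ * this header; BCME_NOTREADY is defined further below):
+ *
+ *   SPINWAIT(hw_is_busy(dev), 1000);
+ *   if (hw_is_busy(dev))
+ *       err = BCME_NOTREADY;    hardware still busy after roughly 1000 us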
+ */ +#ifndef SPINWAIT_POLL_PERIOD +#define SPINWAIT_POLL_PERIOD 10 +#endif + +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \ + while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \ + OSL_DELAY(SPINWAIT_POLL_PERIOD); \ + countdown -= SPINWAIT_POLL_PERIOD; \ + } \ +} + +/* forward definition of ether_addr structure used by some function prototypes */ + +struct ether_addr; + +extern int ether_isbcast(const void *ea); +extern int ether_isnulladdr(const void *ea); + +#define BCM_MAC_RXCPL_IDX_BITS 12 +#define BCM_MAX_RXCPL_IDX_INVALID 0 +#define BCM_MAC_RXCPL_IFIDX_BITS 3 +#define BCM_MAC_RXCPL_DOT11_BITS 1 +#define BCM_MAX_RXCPL_IFIDX ((1 << BCM_MAC_RXCPL_IFIDX_BITS) - 1) +#define BCM_MAC_RXCPL_FLAG_BITS 4 +#define BCM_RXCPL_FLAGS_IN_TRANSIT 0x1 +#define BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST 0x2 +#define BCM_RXCPL_FLAGS_RXCPLVALID 0x4 +#define BCM_RXCPL_FLAGS_RSVD 0x8 + +#define BCM_RXCPL_SET_IN_TRANSIT(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_IN_TRANSIT) +#define BCM_RXCPL_CLR_IN_TRANSIT(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_IN_TRANSIT) +#define BCM_RXCPL_IN_TRANSIT(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_IN_TRANSIT) + +#define BCM_RXCPL_SET_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) +#define BCM_RXCPL_CLR_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) +#define BCM_RXCPL_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) + +#define BCM_RXCPL_SET_VALID_INFO(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_RXCPLVALID) +#define BCM_RXCPL_CLR_VALID_INFO(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID) +#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? TRUE : FALSE) + +#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */ + +struct reorder_rxcpl_id_list { + uint16 head; + uint16 tail; + uint32 cnt; +}; + +typedef struct rxcpl_id { + uint32 idx : BCM_MAC_RXCPL_IDX_BITS; + uint32 next_idx : BCM_MAC_RXCPL_IDX_BITS; + uint32 ifidx : BCM_MAC_RXCPL_IFIDX_BITS; + uint32 dot11 : BCM_MAC_RXCPL_DOT11_BITS; + uint32 flags : BCM_MAC_RXCPL_FLAG_BITS; +} rxcpl_idx_id_t; + +typedef struct rxcpl_data_len { + uint32 metadata_len_w : 6; + uint32 dataoffset: 10; + uint32 datalen : 16; +} rxcpl_data_len_t; + +typedef struct rxcpl_info { + rxcpl_idx_id_t rxcpl_id; + uint32 host_pktref; + union { + rxcpl_data_len_t rxcpl_len; + struct rxcpl_info *free_next; + }; +} rxcpl_info_t; + +/* rx completion list */ +typedef struct bcm_rxcplid_list { + uint32 max; + uint32 avail; + rxcpl_info_t *rxcpl_ptr; + rxcpl_info_t *free_list; +} bcm_rxcplid_list_t; + +extern bool bcm_alloc_rxcplid_list(osl_t *osh, uint32 max); +extern rxcpl_info_t * bcm_alloc_rxcplinfo(void); +extern void bcm_free_rxcplinfo(rxcpl_info_t *ptr); +extern void bcm_chain_rxcplid(uint16 first, uint16 next); +extern rxcpl_info_t *bcm_id2rxcplinfo(uint16 id); +extern uint16 bcm_rxcplinfo2id(rxcpl_info_t *ptr); +extern rxcpl_info_t *bcm_rxcpllist_end(rxcpl_info_t *ptr, uint32 *count); + +/* externs */ +/* packet */ +extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pkttotlen(osl_t *osh, void *p); +extern void *pktlast(osl_t *osh, void *p); +extern uint pktsegcnt(osl_t *osh, void *p); +extern uint pktsegcnt_war(osl_t *osh, void *p); +extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset); +extern void *pktoffset(osl_t *osh, void *p, uint 
offset); + +/* Get priority from a packet and pass it back in scb (or equiv) */ +#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */ +#define PKTPRIO_VLAN 0x200 /* VLAN prio found */ +#define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */ +#define PKTPRIO_DSCP 0x800 /* DSCP prio found */ + +/* DSCP type definitions (RFC4594) */ +/* AF1x: High-Throughput Data (RFC2597) */ +#define DSCP_AF11 0x0A +#define DSCP_AF12 0x0C +#define DSCP_AF13 0x0E +/* AF2x: Low-Latency Data (RFC2597) */ +#define DSCP_AF21 0x12 +#define DSCP_AF22 0x14 +#define DSCP_AF23 0x16 +/* AF3x: Multimedia Streaming (RFC2597) */ +#define DSCP_AF31 0x1A +#define DSCP_AF32 0x1C +#define DSCP_AF33 0x1E +/* EF: Telephony (RFC3246) */ +#define DSCP_EF 0x2E + +extern uint pktsetprio(void *pkt, bool update_vtag); +extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); +extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp); + +/* string */ +extern int bcm_atoi(const char *s); +extern ulong bcm_strtoul(const char *cp, char **endp, uint base); +extern char *bcmstrstr(const char *haystack, const char *needle); +extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len); +extern char *bcmstrcat(char *dest, const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); + + +/* ethernet address */ +extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); +extern int bcm_ether_atoe(const char *p, struct ether_addr *ea); + +/* ip address */ +struct ipv4_addr; +extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); +extern char *bcm_ipv6_ntoa(void *ipv6, char *buf); +extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip); + +/* delay */ +extern void bcm_mdelay(uint ms); +/* variable access */ +#define NVRAM_RECLAIM_CHECK(name) + +extern char *getvar(char *vars, const char *name); +extern int getintvar(char *vars, const char *name); +extern int getintvararray(char *vars, const char *name, int index); +extern int getintvararraysize(char *vars, const char *name); +extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); +#define bcm_perf_enable() +#define bcmstats(fmt) +#define bcmlog(fmt, a1, a2) +#define bcmdumplog(buf, size) *buf = '\0' +#define bcmdumplogent(buf, idx) -1 + +#define TSF_TICKS_PER_MS 1000 +#define TS_ENTER 0xdeadbeef /* Timestamp profiling enter */ +#define TS_EXIT 0xbeefcafe /* Timestamp profiling exit */ + +#define bcmtslog(tstamp, fmt, a1, a2) +#define bcmprinttslogs() +#define bcmprinttstamp(us) +#define bcmdumptslog(b) + +extern char *bcm_nvram_vars(uint *length); +extern int bcm_nvram_cache(void *sih); + +/* Support for sharing code across in-driver iovar implementations. + * The intent is that a driver use this structure to map iovar names + * to its (private) iovar identifiers, and the lookup function to + * find the entry. Macros are provided to map ids and get/set actions + * into a single number space for a switch statement. 
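+ *
+ * Minimal sketch of that pattern (IOV_FOO, IOV_BAR, my_iovars and the switch
+ * below are illustrative only, not declared in this header):
+ *
+ *   enum { IOV_FOO = 1, IOV_BAR = 2 };
+ *   static const bcm_iovar_t my_iovars[] = {
+ *       {"foo", IOV_FOO, 0, IOVT_UINT32, 0},
+ *       {"bar", IOV_BAR, 0, IOVT_BUFFER, 4},
+ *       {NULL, 0, 0, 0, 0}
+ *   };
+ *
+ *   switch (actionid) {
+ *   case IOV_GVAL(IOV_FOO): ... break;    get handler for "foo"
+ *   case IOV_SVAL(IOV_FOO): ... break;    set handler for "foo"
+ *   }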
+ */ + +/* iovar structure */ +typedef struct bcm_iovar { + const char *name; /* name for lookup and display */ + uint16 varid; /* id for switch */ + uint16 flags; /* driver-specific flag bits */ + uint16 type; /* base type of argument */ + uint16 minlen; /* min length for buffer vars */ +} bcm_iovar_t; + +/* varid definitions are per-driver, may use these get/set bits */ + +/* IOVar action bits for id mapping */ +#define IOV_GET 0 /* Get an iovar */ +#define IOV_SET 1 /* Set an iovar */ + +/* Varid to actionid mapping */ +#define IOV_GVAL(id) ((id) * 2) +#define IOV_SVAL(id) ((id) * 2 + IOV_SET) +#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) +#define IOV_ID(actionid) (actionid >> 1) + +/* flags are per-driver based on driver attributes */ + +extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); +extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set); +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); +#endif +#endif /* BCMDRIVER */ + +/* Base type definitions */ +#define IOVT_VOID 0 /* no value (implictly set only) */ +#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */ +#define IOVT_INT8 2 /* integer values are range-checked */ +#define IOVT_UINT8 3 /* unsigned int 8 bits */ +#define IOVT_INT16 4 /* int 16 bits */ +#define IOVT_UINT16 5 /* unsigned int 16 bits */ +#define IOVT_INT32 6 /* int 32 bits */ +#define IOVT_UINT32 7 /* unsigned int 32 bits */ +#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */ +#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) + +/* Initializer for IOV type strings */ +#define BCM_IOV_TYPE_INIT { \ + "void", \ + "bool", \ + "int8", \ + "uint8", \ + "int16", \ + "uint16", \ + "int32", \ + "uint32", \ + "buffer", \ + "" } + +#define BCM_IOVT_IS_INT(type) (\ + (type == IOVT_BOOL) || \ + (type == IOVT_INT8) || \ + (type == IOVT_UINT8) || \ + (type == IOVT_INT16) || \ + (type == IOVT_UINT16) || \ + (type == IOVT_INT32) || \ + (type == IOVT_UINT32)) + +/* ** driver/apps-shared section ** */ + +#define BCME_STRLEN 64 /* Max string length for BCM errors */ +#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST)) + + +/* + * error codes could be added but the defined ones shouldn't be changed/deleted + * these error codes are exposed to the user code + * when ever a new error code is added to this list + * please update errorstring table with the related error string and + * update osl files with os specific errorcode map +*/ + +#define BCME_OK 0 /* Success */ +#define BCME_ERROR -1 /* Error generic */ +#define BCME_BADARG -2 /* Bad Argument */ +#define BCME_BADOPTION -3 /* Bad option */ +#define BCME_NOTUP -4 /* Not up */ +#define BCME_NOTDOWN -5 /* Not down */ +#define BCME_NOTAP -6 /* Not AP */ +#define BCME_NOTSTA -7 /* Not STA */ +#define BCME_BADKEYIDX -8 /* BAD Key Index */ +#define BCME_RADIOOFF -9 /* Radio Off */ +#define BCME_NOTBANDLOCKED -10 /* Not band locked */ +#define BCME_NOCLK -11 /* No Clock */ +#define BCME_BADRATESET -12 /* BAD Rate valueset */ +#define BCME_BADBAND -13 /* BAD Band */ +#define BCME_BUFTOOSHORT -14 /* Buffer too short */ +#define BCME_BUFTOOLONG -15 /* Buffer too long */ +#define BCME_BUSY -16 /* Busy */ +#define BCME_NOTASSOCIATED -17 /* Not Associated */ +#define BCME_BADSSIDLEN -18 /* Bad SSID len */ +#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */ +#define 
BCME_BADCHAN -20 /* Bad Channel */ +#define BCME_BADADDR -21 /* Bad Address */ +#define BCME_NORESOURCE -22 /* Not Enough Resources */ +#define BCME_UNSUPPORTED -23 /* Unsupported */ +#define BCME_BADLEN -24 /* Bad length */ +#define BCME_NOTREADY -25 /* Not Ready */ +#define BCME_EPERM -26 /* Not Permitted */ +#define BCME_NOMEM -27 /* No Memory */ +#define BCME_ASSOCIATED -28 /* Associated */ +#define BCME_RANGE -29 /* Not In Range */ +#define BCME_NOTFOUND -30 /* Not Found */ +#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */ +#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */ +#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */ +#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */ +#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */ +#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */ +#define BCME_VERSION -37 /* Incorrect version */ +#define BCME_TXFAIL -38 /* TX failure */ +#define BCME_RXFAIL -39 /* RX failure */ +#define BCME_NODEVICE -40 /* Device not present */ +#define BCME_NMODE_DISABLED -41 /* NMODE disabled */ +#define BCME_NONRESIDENT -42 /* access to nonresident overlay */ +#define BCME_SCANREJECT -43 /* reject scan request */ +#define BCME_USAGE_ERROR -44 /* WLCMD usage error */ +#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */ +#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */ +#define BCME_DISABLED -47 /* Disabled in this build */ +#define BCME_DECERR -48 /* Decrypt error */ +#define BCME_ENCERR -49 /* Encrypt error */ +#define BCME_MICERR -50 /* Integrity/MIC error */ +#define BCME_REPLAY -51 /* Replay */ +#define BCME_IE_NOTFOUND -52 /* IE not found */ +#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ +#define BCME_LAST BCME_DATA_NOTFOUND + +#define BCME_NOTENABLED BCME_DISABLED + +/* These are collection of BCME Error strings */ +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "NMODE Disabled", \ + "Nonresident overlay access", \ + "Scan Rejected", \ + "WLCMD usage error", \ + "WLCMD ioctl error", \ + "RWL serial port error", \ + "Disabled", \ + "Decrypt error", \ + "Encrypt error", \ + "MIC error", \ + "Replay", \ + "IE not found", \ + "Data not found", \ +} + +#ifndef ABS +#define ABS(a) (((a) < 0) ? -(a) : (a)) +#endif /* ABS */ + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif /* MIN */ + +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MAX */ + +/* limit to [min, max] */ +#ifndef LIMIT_TO_RANGE +#define LIMIT_TO_RANGE(x, min, max) \ + ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) +#endif /* LIMIT_TO_RANGE */ + +/* limit to max */ +#ifndef LIMIT_TO_MAX +#define LIMIT_TO_MAX(x, max) \ + (((x) > (max) ? 
(max) : (x))) +#endif /* LIMIT_TO_MAX */ + +/* limit to min */ +#ifndef LIMIT_TO_MIN +#define LIMIT_TO_MIN(x, min) \ + (((x) < (min) ? (min) : (x))) +#endif /* LIMIT_TO_MIN */ + +#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \ + (0xffffffff - (prev) + (curr) + 1)) +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) +#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define ROUNDDN(p, align) ((p) & ~((align) - 1)) +#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0) +#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0) +#define VALID_MASK(mask) !((mask) & ((mask) + 1)) + +#ifndef OFFSETOF +#ifdef __ARMCC_VERSION +/* + * The ARM RVCT compiler complains when using OFFSETOF where a constant + * expression is expected, such as an initializer for a static object. + * offsetof from the runtime library doesn't have that problem. + */ +#include <stddef.h> +#define OFFSETOF(type, member) offsetof(type, member) +#else +# if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8)) +/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */ +# define OFFSETOF(type, member) __builtin_offsetof(type, member) +# else +# define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member) +# endif /* GCC 4.8 or newer */ +#endif /* __ARMCC_VERSION */ +#endif /* OFFSETOF */ + +#ifndef ARRAYSIZE +#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0])) +#endif + +#ifndef ARRAYLAST /* returns pointer to last array element */ +#define ARRAYLAST(a) (&a[ARRAYSIZE(a)-1]) +#endif + +/* Reference a function; used to prevent a static function from being optimized out */ +extern void *_bcmutils_dummy_fn; +#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f)) + +/* bit map related macros */ +#ifndef setbit +#ifndef NBBY /* the BSD family defines NBBY */ +#define NBBY 8 /* 8 bits per byte */ +#endif /* #ifndef NBBY */ +#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS +extern void setbit(void *array, uint bit); +extern void clrbit(void *array, uint bit); +extern bool isset(const void *array, uint bit); +extern bool isclr(const void *array, uint bit); +#else +#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY)) +#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY))) +#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) +#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0) +#endif +#endif /* setbit */ +extern void set_bitrange(void *array, uint start, uint end, uint maxbit); + +#define isbitset(a, i) (((a) & (1 << (i))) != 0) + +#define NBITS(type) (sizeof(type) * 8) +#define NBITVAL(nbits) (1 << (nbits)) +#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) +#define NBITMASK(nbits) MAXBITVAL(nbits) +#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) + +extern void bcm_bitprint32(const uint32 u32); + +/* + * ---------------------------------------------------------------------------- + * Multiword map of 2bits, nibbles + * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val) + * getbit2 getbit4 (void *ptr, uint32 ix) + * ---------------------------------------------------------------------------- + */ + +#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK) \ +static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val) \ +{ \ + uint32 *addr = (uint32 *)ptr; \ + uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */ \ + uint32 pos = (ix & OFF) << LSH; /* (ix %
2^RSH) * 2^LSH */ \ + uint32 mask = (MSK << pos); \ + uint32 tmp = *a & ~mask; \ + *a = tmp | (val << pos); \ +} \ +static INLINE uint32 getbit##NB(void *ptr, uint32 ix) \ +{ \ + uint32 *addr = (uint32 *)ptr; \ + uint32 *a = addr + (ix >> RSH); \ + uint32 pos = (ix & OFF) << LSH; \ + return ((*a >> pos) & MSK); \ +} + +DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */ +DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */ +DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */ + +/* basic mux operation - can be optimized on several architectures */ +#define MUX(pred, true, false) ((pred) ? (true) : (false)) + +/* modulo inc/dec - assumes x E [0, bound - 1] */ +#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1) +#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1) + +/* modulo inc/dec, bound = 2^k */ +#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1)) +#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1)) + +/* modulo add/sub - assumes x, y E [0, bound - 1] */ +#define MODADD(x, y, bound) \ + MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y)) +#define MODSUB(x, y, bound) \ + MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y)) + +/* module add/sub, bound = 2^k */ +#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) +#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) + +/* crc defines */ +#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */ +#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */ +#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */ +#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */ +#define CRC32_INIT_VALUE 0xffffffff /* Initial CRC32 checksum value */ +#define CRC32_GOOD_VALUE 0xdebb20e3 /* Good final CRC32 checksum value */ + +/* use for direct output of MAC address in printf etc */ +#define MACF "%02x:%02x:%02x:%02x:%02x:%02x" +#define ETHERP_TO_MACF(ea) ((struct ether_addr *) (ea))->octet[0], \ + ((struct ether_addr *) (ea))->octet[1], \ + ((struct ether_addr *) (ea))->octet[2], \ + ((struct ether_addr *) (ea))->octet[3], \ + ((struct ether_addr *) (ea))->octet[4], \ + ((struct ether_addr *) (ea))->octet[5] + +#define ETHER_TO_MACF(ea) (ea).octet[0], \ + (ea).octet[1], \ + (ea).octet[2], \ + (ea).octet[3], \ + (ea).octet[4], \ + (ea).octet[5] +#if !defined(SIMPLE_MAC_PRINT) +#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5] +#else +#define MACDBG "%02x:%02x:%02x" +#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5] +#endif /* SIMPLE_MAC_PRINT */ + +/* bcm_format_flags() bit description structure */ +typedef struct bcm_bit_desc { + uint32 bit; + const char* name; +} bcm_bit_desc_t; + +/* bcm_format_field */ +typedef struct bcm_bit_desc_ex { + uint32 mask; + const bcm_bit_desc_t *bitfield; +} bcm_bit_desc_ex_t; + +/* buffer length for ethernet address from bcm_ether_ntoa() */ +#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */ + +static INLINE uint32 /* 32bit word aligned xor-32 */ +bcm_compute_xor32(volatile uint32 *u32_val, int num_u32) +{ + int idx; + uint32 xor32 = 0; + for (idx = 0; idx < num_u32; idx++) + xor32 ^= *(u32_val + idx); + return xor32; +} + +/* crypto utility function */ +/* 128-bit xor: *dst = *src1 xor *src2. 
dst, src1 and src2 may have any alignment */ +static INLINE void +xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst) +{ + if ( +#ifdef __i386__ + 1 || +#endif + (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) { + /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */ + /* x86 supports unaligned. This version runs 6x-9x faster on x86. */ + ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0]; + ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1]; + ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2]; + ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3]; + } else { + /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */ + int k; + for (k = 0; k < 16; k++) + dst[k] = src1[k] ^ src2[k]; + } +} + +/* externs */ +/* crc */ +extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc); +extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc); +extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc); + +/* format/print */ +#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \ + defined(WLMSG_ASSOC) +/* print out the value a field has: fields may have 1-32 bits and may hold any value */ +extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len); +/* print out which bits in flags are set */ +extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len); +#endif + +extern int bcm_format_hex(char *str, const void *bytes, int len); + +extern const char *bcm_crypto_algo_name(uint algo); +extern char *bcm_chipname(uint chipid, char *buf, uint len); +extern char *bcm_brev_str(uint32 brev, char *buf); +extern void printbig(char *buf); +extern void prhex(const char *msg, uchar *buf, uint len); + +/* IE parsing */ + +/* packing is required if struct is passed across the bus */ +#include <packed_section_start.h> +/* tag_ID/length/value_buffer tuple */ +typedef struct bcm_tlv { + uint8 id; + uint8 len; + uint8 data[1]; +} bcm_tlv_t; + +/* bcm tlv w/ 16 bit id/len */ +typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv { + uint16 id; + uint16 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT bcm_xtlv_t; +#include <packed_section_end.h> + + +/* descriptor of xtlv data src or dst */ +typedef struct { + uint16 type; + uint16 len; + void *ptr; /* ptr to memory location */ +} xtlv_desc_t; + +/* xtlv options */ +#define BCM_XTLV_OPTION_NONE 0x0000 +#define BCM_XTLV_OPTION_ALIGN32 0x0001 + +typedef uint16 bcm_xtlv_opts_t; +struct bcm_xtlvbuf { + bcm_xtlv_opts_t opts; + uint16 size; + uint8 *head; /* point to head of buffer */ + uint8 *buf; /* current position of buffer */ + /* allocated buffer may follow, but not necessarily */ +}; +typedef struct bcm_xtlvbuf bcm_xtlvbuf_t; + +#define BCM_TLV_MAX_DATA_SIZE (255) +#define BCM_XTLV_MAX_DATA_SIZE (65535) +#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data)) + +#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data)) +/* LEN only stores the value's length without padding */ +#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len)) +#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id)) +/* entire size of the XTLV including header, data, and optional padding */ +#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts) +#define bcm_valid_xtlv(elt, buflen, opts) (elt && ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt, opts))) + +/* Check that bcm_tlv_t fits into the given buflen */ +#define bcm_valid_tlv(elt, buflen) (\ + ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \ + ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE +
(elt)->len))) + + +extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen); +extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key); +extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen); + +extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key); + +extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, + int type_len); + +extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst); +extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, + int dst_maxlen); + +extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst); +extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen); + +/* xtlv */ + +/* return the next xtlv element, and update buffer len (remaining). Buffer length + * updated includes padding as specified by options + */ +extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts); + +/* initialize an xtlv buffer. Use options specified for packing/unpacking using + * the buffer. Caller is responsible for allocating both buffers. + */ +extern int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, + bcm_xtlv_opts_t opts); + +extern uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf); +extern uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf); +extern uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf); +extern uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf); +extern int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen); +extern int bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data); +extern int bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data); +extern int bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data); +extern int bcm_unpack_xtlv_entry(uint8 **buf, uint16 xpct_type, uint16 xpct_len, + void *dst, bcm_xtlv_opts_t opts); +extern int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len, + void *src, bcm_xtlv_opts_t opts); +extern int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); + +/* callback for unpacking xtlv from a buffer into context. */ +typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, uint8 *buf, uint16 type, uint16 len); + +/* unpack a tlv buffer using buffer, options, and callback */ +extern int bcm_unpack_xtlv_buf(void *ctx, uint8 *buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn); + +/* unpack a set of tlvs from the buffer using provided xtlv desc */ +extern int bcm_unpack_xtlv_buf_to_mem(void *buf, int *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* pack a set of tlvs into buffer using provided xtlv desc */ +extern int bcm_pack_xtlv_buf_from_mem(void **buf, uint16 *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* return data pointer of a given ID from xtlv buffer + * xtlv data length is given to *datalen_out, if the pointer is valid + */ +extern void *bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen_out, bcm_xtlv_opts_t opts); + +/* callback to return next tlv id and len to pack, if there is more tlvs to come and + * options e.g. 
alignment + */ +typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len); + +/* callback to pack the tlv into length validated buffer */ +typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx, + uint16 tlv_id, uint16 tlv_len, uint8* buf); + +/* pack a set of tlvs into buffer using get_next to interate */ +int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next, + bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen); + +/* bcmerror */ +extern const char *bcmerrorstr(int bcmerror); + +/* multi-bool data type: set of bools, mbool is true if any is set */ +typedef uint32 mbool; +#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */ +#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */ +#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* TRUE if one bool is set */ +#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val))) + +/* generic datastruct to help dump routines */ +struct fielddesc { + const char *nameandfmt; + uint32 offset; + uint32 len; +}; + +extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size); +extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, + const uint8 *buf, int len); + +extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); +extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes); +extern void bcm_print_bytes(const char *name, const uchar *cdata, int len); + +typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset); +extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str, + char *buf, uint32 bufsize); +extern uint bcm_bitcount(uint8 *bitmap, uint bytelength); + +extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); + +/* power conversion */ +extern uint16 bcm_qdbm_to_mw(uint8 qdbm); +extern uint8 bcm_mw_to_qdbm(uint16 mw); +extern uint bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint len); + +unsigned int process_nvram_vars(char *varbuf, unsigned int len); + +/* trace any object allocation / free, with / without features (flags) set to the object */ + +#define BCM_OBJDBG_ADD 1 +#define BCM_OBJDBG_REMOVE 2 +#define BCM_OBJDBG_ADD_PKT 3 + +/* object feature: set or clear flags */ +#define BCM_OBJECT_FEATURE_FLAG 1 +#define BCM_OBJECT_FEATURE_PKT_STATE 2 +/* object feature: flag bits */ +#define BCM_OBJECT_FEATURE_0 (1 << 0) +#define BCM_OBJECT_FEATURE_1 (1 << 1) +#define BCM_OBJECT_FEATURE_2 (1 << 2) +/* object feature: clear flag bits field set with this flag */ +#define BCM_OBJECT_FEATURE_CLEAR (1 << 31) +#ifdef BCM_OBJECT_TRACE +#define bcm_pkt_validate_chk(obj) do { \ + void * pkttag; \ + bcm_object_trace_chk(obj, 0, 0, \ + __FUNCTION__, __LINE__); \ + if ((pkttag = PKTTAG(obj))) { \ + bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \ + __FUNCTION__, __LINE__); \ + } \ +} while (0) +extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line); +extern void bcm_object_trace_upd(void *obj, void *obj_new); +extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line); +extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value); +extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value); +extern void bcm_object_trace_init(void); +extern void bcm_object_trace_deinit(void); +#else +#define bcm_pkt_validate_chk(obj) +#define bcm_object_trace_opr(a, b, c, d) +#define 
bcm_object_trace_upd(a, b) +#define bcm_object_trace_chk(a, b, c, d, e) +#define bcm_object_feature_set(a, b, c) +#define bcm_object_feature_get(a, b, c) +#define bcm_object_trace_init() +#define bcm_object_trace_deinit() +#endif /* BCM_OBJECT_TRACE */ + +/* calculate a * b + c */ +extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c); +/* calculate a / b */ +extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b); + + +/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */ + +/* Table driven count set bits. */ +static const uint8 /* Table only for use by bcm_cntsetbits */ +_CSBTBL[256] = +{ +# define B2(n) n, n + 1, n + 1, n + 2 +# define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2) +# define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2) + B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2) +}; + +static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */ +bcm_cntsetbits(const uint32 u32arg) +{ + /* function local scope declaration of const _CSBTBL[] */ + const uint8 * p = (const uint8 *)&u32arg; + return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]); +} + + +static INLINE int /* C equivalent count of leading 0's in a u32 */ +C_bcm_count_leading_zeros(uint32 u32arg) +{ + int shifts = 0; + while (u32arg) { + shifts++; u32arg >>= 1; + } + return (32U - shifts); +} + +#ifdef BCMDRIVER +/* + * Assembly instructions: Count Leading Zeros + * "clz" : MIPS, ARM + * "cntlzw" : PowerPC + * "BSF" : x86 + * "lzcnt" : AMD, SPARC + */ + +#if defined(__arm__) +#if defined(__ARM_ARCH_7M__) /* Cortex M3 */ +#define __USE_ASM_CLZ__ +#endif /* __ARM_ARCH_7M__ */ +#if defined(__ARM_ARCH_7R__) /* Cortex R4 */ +#define __USE_ASM_CLZ__ +#endif /* __ARM_ARCH_7R__ */ +#endif /* __arm__ */ + +static INLINE int +bcm_count_leading_zeros(uint32 u32arg) +{ +#if defined(__USE_ASM_CLZ__) + int zeros; + __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg)); + return zeros; +#else /* C equivalent */ + return C_bcm_count_leading_zeros(u32arg); +#endif /* C equivalent */ +} + +/* + * Macro to count leading zeroes + * + */ +#if defined(__GNUC__) +#define CLZ(x) __builtin_clzl(x) +#elif defined(__arm__) +#define CLZ(x) __clz(x) +#else +#define CLZ(x) bcm_count_leading_zeros(x) +#endif /* __GNUC__ */ + +/* INTERFACE: Multiword bitmap based small id allocator. 
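+ *
+ * Typical flow, sketched from the declarations below ('osh' is the caller's
+ * OSL handle and 256 an arbitrary pool size; BCM_MWBMAP_INVALID_IDX signals
+ * allocation failure):
+ *
+ *   struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 256);
+ *   uint32 idx = bcm_mwbmap_alloc(map);
+ *   ...
+ *   bcm_mwbmap_free(map, idx);
+ *   bcm_mwbmap_fini(osh, map);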
*/ +struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */ + +#define BCM_MWBMAP_INVALID_HDL ((struct bcm_mwbmap *)NULL) +#define BCM_MWBMAP_INVALID_IDX ((uint32)(~0U)) + +/* Incarnate a multiword bitmap based small index allocator */ +extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max); + +/* Free up the multiword bitmap index allocator */ +extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl); + +/* Allocate a unique small index using a multiword bitmap index allocator */ +extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl); + +/* Force an index at a specified position to be in use */ +extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix); + +/* Free a previously allocated index back into the multiword bitmap allocator */ +extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix); + +/* Fetch the total number of free indices in the multiword bitmap allocator */ +extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl); + +/* Determine whether an index is inuse or free */ +extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix); + +/* Debug dump a multiword bitmap allocator */ +extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl); + +extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl); +/* End - Multiword bitmap based small Id allocator. */ + + +/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */ + +#define ID16_INVALID ((uint16)(~0)) +#define ID16_UNDEFINED (ID16_INVALID) + +/* + * Construct a 16bit id allocator, managing 16bit ids in the range: + * [start_val16 .. start_val16+total_ids) + * Note: start_val16 is inclusive. + * Returns an opaque handle to the 16bit id allocator. + */ +extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16); +extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl); +extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16); + +/* Allocate a unique 16bit id */ +extern uint16 id16_map_alloc(void * id16_map_hndl); + +/* Free a 16bit id value into the id16 allocator */ +extern void id16_map_free(void * id16_map_hndl, uint16 val16); + +/* Get the number of failures encountered during id allocation. */ +extern uint32 id16_map_failures(void * id16_map_hndl); + +/* Audit the 16bit id allocator state. */ +extern bool id16_map_audit(void * id16_map_hndl); +/* End - Simple 16bit Id Allocator. */ +#endif /* BCMDRIVER */ + +extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b); + +void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset); +void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset); + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum); + +#ifndef _dll_t_ +#define _dll_t_ +/* + * ----------------------------------------------------------------------------- + * Double Linked List Macros + * ----------------------------------------------------------------------------- + * + * All dll operations must be performed on a pre-initialized node. + * Inserting an uninitialized node into a list effectively initializes it. + * + * When a node is deleted from a list, you may initialize it to avoid corruption + * incurred by double deletion. You may skip initialization if the node is + * immediately inserted into another list.
+ * + * By placing a dll_t element at the start of a struct, you may cast a dll_t * + * to the struct or vice versa. + * + * Example of declaring and initializing someList and inserting nodeA, nodeB + * + * typedef struct item { + * dll_t node; + * int someData; + * } Item_t; + * Item_t nodeA, nodeB, nodeC; + * nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333; + * + * dll_t someList; + * dll_init(&someList); + * + * dll_append(&someList, (dll_t *) &nodeA); + * dll_prepend(&someList, &nodeB.node); + * dll_insert((dll_t *)&nodeC, &nodeA.node); + * + * dll_delete((dll_t *) &nodeB); + * + * Example of a for loop to walk someList of node_p + * + * extern void mydisplay(Item_t * item_p); + * + * dll_t * item_p, * next_p; + * for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p); + * item_p = next_p) + * { + * next_p = dll_next_p(item_p); + * ... use item_p at will, including removing it from list ... + * mydisplay((Item_t *)item_p); + * } + * + * ----------------------------------------------------------------------------- + */ +typedef struct dll { + struct dll * next_p; + struct dll * prev_p; +} dll_t; + +static INLINE void +dll_init(dll_t *node_p) +{ + node_p->next_p = node_p; + node_p->prev_p = node_p; +} +/* dll macros returning a pointer to dll_t */ + +static INLINE dll_t * +dll_head_p(dll_t *list_p) +{ + return list_p->next_p; +} + + +static INLINE dll_t * +dll_tail_p(dll_t *list_p) +{ + return (list_p)->prev_p; +} + + +static INLINE dll_t * +dll_next_p(dll_t *node_p) +{ + return (node_p)->next_p; +} + + +static INLINE dll_t * +dll_prev_p(dll_t *node_p) +{ + return (node_p)->prev_p; +} + + +static INLINE bool +dll_empty(dll_t *list_p) +{ + return ((list_p)->next_p == (list_p)); +} + + +static INLINE bool +dll_end(dll_t *list_p, dll_t * node_p) +{ + return (list_p == node_p); +} + + +/* inserts the node new_p "after" the node at_p */ +static INLINE void +dll_insert(dll_t *new_p, dll_t * at_p) +{ + new_p->next_p = at_p->next_p; + new_p->prev_p = at_p; + at_p->next_p = new_p; + (new_p->next_p)->prev_p = new_p; +} + +static INLINE void +dll_append(dll_t *list_p, dll_t *node_p) +{ + dll_insert(node_p, dll_tail_p(list_p)); +} + +static INLINE void +dll_prepend(dll_t *list_p, dll_t *node_p) +{ + dll_insert(node_p, list_p); +} + + +/* deletes a node from any list that it "may" be in, if at all. */ +static INLINE void +dll_delete(dll_t *node_p) +{ + node_p->prev_p->next_p = node_p->next_p; + node_p->next_p->prev_p = node_p->prev_p; +} +#endif /* ! defined(_dll_t_) */ + +/* Elements managed in a double linked list */ + +typedef struct dll_pool { + dll_t free_list; + uint16 free_count; + uint16 elems_max; + uint16 elem_size; + dll_t elements[1]; +} dll_pool_t; + +dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size); +void * dll_pool_alloc(dll_pool_t * dll_pool_p); +void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p); +void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p); +typedef void (* dll_elem_dump)(void * elem_p); +void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size); + +#ifdef __cplusplus + } +#endif + +/* #define DEBUG_COUNTER */ +#ifdef DEBUG_COUNTER +#define CNTR_TBL_MAX 10 +typedef struct _counter_tbl_t { + char name[16]; /* name of this counter table */ + uint32 prev_log_print; /* Internal use.
Timestamp of the previous log print */ + uint log_print_interval; /* Desired interval to print logs in ms */ + uint needed_cnt; /* How many counters need to be used */ + uint32 cnt[CNTR_TBL_MAX]; /* Counting entries to increase at desired places */ + bool enabled; /* Whether to enable printing log */ +} counter_tbl_t; + + +void counter_printlog(counter_tbl_t *ctr_tbl); +#endif /* DEBUG_COUNTER */ + +/* Given a number 'n' returns 'm' that is next larger power of 2 after n */ +static INLINE uint32 next_larger_power2(uint32 num) +{ + num--; + num |= (num >> 1); + num |= (num >> 2); + num |= (num >> 4); + num |= (num >> 8); + num |= (num >> 16); + return (num + 1); +} + +#endif /* _bcmutils_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h new file mode 100644 index 000000000000..888863117105 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h @@ -0,0 +1,68 @@ +/* + * Definitions for nl80211 vendor command/event access to host driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: brcm_nl80211.h 556083 2015-05-12 14:03:00Z $ + * + */ + +#ifndef _brcm_nl80211_h_ +#define _brcm_nl80211_h_ + +#define OUI_BRCM 0x001018 + +enum wl_vendor_subcmd { + BRCM_VENDOR_SCMD_UNSPEC, + BRCM_VENDOR_SCMD_PRIV_STR, + BRCM_VENDOR_SCMD_BCM_STR +}; + + +struct bcm_nlmsg_hdr { + uint cmd; /* common ioctl definition */ + int len; /* expected return buffer length */ + uint offset; /* user buffer offset */ + uint set; /* get or set request optional */ + uint magic; /* magic number for verification */ +}; + +enum bcmnl_attrs { + BCM_NLATTR_UNSPEC, + + BCM_NLATTR_LEN, + BCM_NLATTR_DATA, + + __BCM_NLATTR_AFTER_LAST, + BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1 +}; + +struct nl_prv_data { + int err; /* return result */ + void *data; /* ioctl return buffer pointer */ + uint len; /* ioctl return buffer length */ + struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */ +}; + +#endif /* _brcm_nl80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/dbus.h b/drivers/net/wireless/bcmdhd/include/dbus.h new file mode 100644 index 000000000000..b066c67a5dad --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/dbus.h @@ -0,0 +1,591 @@ +/* + * Dongle BUS interface Abstraction layer + * target serial buses like USB, SDIO, SPI, etc. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dbus.h 553311 2015-04-29 10:23:08Z $ + */ + +#ifndef __DBUS_H__ +#define __DBUS_H__ + +#include "typedefs.h" + +#define DBUSTRACE(args) +#define DBUSERR(args) +#define DBUSINFO(args) +#define DBUSDBGLOCK(args) + +enum { + DBUS_OK = 0, + DBUS_ERR = -200, + DBUS_ERR_TIMEOUT, + DBUS_ERR_DISCONNECT, + DBUS_ERR_NODEVICE, + DBUS_ERR_UNSUPPORTED, + DBUS_ERR_PENDING, + DBUS_ERR_NOMEM, + DBUS_ERR_TXFAIL, + DBUS_ERR_TXTIMEOUT, + DBUS_ERR_TXDROP, + DBUS_ERR_RXFAIL, + DBUS_ERR_RXDROP, + DBUS_ERR_TXCTLFAIL, + DBUS_ERR_RXCTLFAIL, + DBUS_ERR_REG_PARAM, + DBUS_STATUS_CANCELLED, + DBUS_ERR_NVRAM, + DBUS_JUMBO_NOMATCH, + DBUS_JUMBO_BAD_FORMAT, + DBUS_NVRAM_NONTXT, + DBUS_ERR_RXZLP +}; + +#define BCM_OTP_SIZE_43236 84 /* number of 16 bit values */ +#define BCM_OTP_SW_RGN_43236 24 /* start offset of SW config region */ +#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */ + +#define ERR_CBMASK_TXFAIL 0x00000001 +#define ERR_CBMASK_RXFAIL 0x00000002 +#define ERR_CBMASK_ALL 0xFFFFFFFF + +#define DBUS_CBCTL_WRITE 0 +#define DBUS_CBCTL_READ 1 +#if defined(INTR_EP_ENABLE) +#define DBUS_CBINTR_POLL 2 +#endif /* defined(INTR_EP_ENABLE) */ + +#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */ +#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */ + +#define DBUS_BUFFER_SIZE_TX 32000 +#define DBUS_BUFFER_SIZE_RX 24000 + +#define DBUS_BUFFER_SIZE_TX_NOAGG 2048 +#define DBUS_BUFFER_SIZE_RX_NOAGG 2048 + +/** DBUS types */ +enum { + DBUS_USB, + DBUS_SDIO, + DBUS_SPI, + DBUS_UNKNOWN +}; + +enum dbus_state { + DBUS_STATE_DL_PENDING, + DBUS_STATE_DL_DONE, + DBUS_STATE_UP, + DBUS_STATE_DOWN, + DBUS_STATE_PNP_FWDL, + DBUS_STATE_DISCONNECT, + DBUS_STATE_SLEEP, + DBUS_STATE_DL_NEEDED +}; + +enum dbus_pnp_state { + DBUS_PNP_DISCONNECT, + DBUS_PNP_SLEEP, + DBUS_PNP_RESUME +}; + +enum dbus_file { + DBUS_FIRMWARE, + DBUS_NVFILE +}; + +typedef enum _DEVICE_SPEED { + INVALID_SPEED = -1, + LOW_SPEED = 1, /**< USB 1.1: 1.5 Mbps */ + FULL_SPEED, /**< USB 1.1: 12 Mbps */ + HIGH_SPEED, /**< USB 2.0: 480 Mbps */ + SUPER_SPEED, /**< USB 3.0: 4.8 Gbps */ +} DEVICE_SPEED; + +typedef struct { + int bustype; + int vid; + int pid; + int devid; + int chiprev; /**< chip revsion number */ + int mtu; + int nchan; /**< Data Channels */ + int has_2nd_bulk_in_ep; +} dbus_attrib_t; + +/* FIX: Account for errors related to DBUS; + * Let upper layer account for 
packets/bytes + */ +typedef struct { + uint32 rx_errors; + uint32 tx_errors; + uint32 rx_dropped; + uint32 tx_dropped; +} dbus_stats_t; + +/** + * Configurable BUS parameters + */ +enum { + DBUS_CONFIG_ID_RXCTL_DEFERRES = 1, + DBUS_CONFIG_ID_AGGR_LIMIT +}; + +typedef struct { + uint32 config_id; + union { + bool rxctl_deferrespok; + struct { + int maxrxsf; + int maxrxsize; + int maxtxsf; + int maxtxsize; + } aggr_param; + }; +} dbus_config_t; + +/** + * External Download Info + */ +typedef struct dbus_extdl { + uint8 *fw; + int fwlen; + uint8 *vars; + int varslen; +} dbus_extdl_t; + +struct dbus_callbacks; +struct exec_parms; + +typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen); +typedef void (*disconnect_cb_t)(void *arg); +typedef void *(*exec_cb_t)(struct exec_parms *args); + +/** Client callbacks registered during dbus_attach() */ +typedef struct dbus_callbacks { + void (*send_complete)(void *cbarg, void *info, int status); + void (*recv_buf)(void *cbarg, uint8 *buf, int len); + void (*recv_pkt)(void *cbarg, void *pkt); + void (*txflowcontrol)(void *cbarg, bool onoff); + void (*errhandler)(void *cbarg, int err); + void (*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); +} dbus_callbacks_t; + +struct dbus_pub; +struct bcmstrbuf; +struct dbus_irb; +struct dbus_irb_rx; +struct dbus_irb_tx; +struct dbus_intf_callbacks; + +typedef struct { + void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs); + void (*detach)(struct dbus_pub *pub, void *bus); + + int (*up)(void *bus); + int (*down)(void *bus); + int (*send_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb); + int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*send_ctl)(void *bus, uint8 *buf, int len); + int (*recv_ctl)(void *bus, uint8 *buf, int len); + int (*get_stats)(void *bus, dbus_stats_t *stats); + int (*get_attrib)(void *bus, dbus_attrib_t *attrib); + + int (*pnp)(void *bus, int evnt); + int (*remove)(void *bus); + int (*resume)(void *bus); + int (*suspend)(void *bus); + int (*stop)(void *bus); + int (*reset)(void *bus); + + /* Access to bus buffers directly */ + void *(*pktget)(void *bus, int len); + void (*pktfree)(void *bus, void *pkt); + + int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len, + bool set); + void (*dump)(void *bus, struct bcmstrbuf *strbuf); + int (*set_config)(void *bus, dbus_config_t *config); + int (*get_config)(void *bus, dbus_config_t *config); + + bool (*device_exists)(void *bus); + bool (*dlneeded)(void *bus); + int (*dlstart)(void *bus, uint8 *fw, int len); + int (*dlrun)(void *bus); + bool (*recv_needed)(void *bus); + + void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args); + void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args); + + int (*tx_timer_init)(void *bus); + int (*tx_timer_start)(void *bus, uint timeout); + int (*tx_timer_stop)(void *bus); + + int (*sched_dpc)(void *bus); + int (*lock)(void *bus); + int (*unlock)(void *bus); + int (*sched_probe_cb)(void *bus); + + int (*shutdown)(void *bus); + + int (*recv_stop)(void *bus); + int (*recv_resume)(void *bus); + + int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx); + + int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value); + + /* Add from the bottom */ +} 
dbus_intf_t; + +typedef struct dbus_pub { + struct osl_info *osh; + dbus_stats_t stats; + dbus_attrib_t attrib; + enum dbus_state busstate; + DEVICE_SPEED device_speed; + int ntxq, nrxq, rxsize; + void *bus; + struct shared_info *sh; + void *dev_info; +} dbus_pub_t; + +#define BUS_INFO(bus, type) (((type *) bus)->pub->bus) + +#define ALIGNED_LOCAL_VARIABLE(var, align) \ + uint8 buffer[SDALIGN+64]; \ + uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align; + +/* + * Public Bus Function Interface + */ + +/* + * FIX: Is there better way to pass OS/Host handles to DBUS but still + * maintain common interface for all OS?? + * Under NDIS, param1 needs to be MiniportHandle + * For NDIS60, param2 is WdfDevice + * Under Linux, param1 and param2 are NULL; + */ +extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg, + void *param1, void *param2); +extern int dbus_deregister(void); + +extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq, + void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh); +extern void dbus_detach(dbus_pub_t *pub); + +extern int dbus_download_firmware(dbus_pub_t *pub); +extern int dbus_up(dbus_pub_t *pub); +extern int dbus_down(dbus_pub_t *pub); +extern int dbus_stop(dbus_pub_t *pub); +extern int dbus_shutdown(dbus_pub_t *pub); +extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on); + +extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf); +extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info); +extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info); +extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len); +extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len); +extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx); +extern int dbus_poll_intr(dbus_pub_t *pub); +extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats); +extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib); +extern int dbus_get_device_speed(dbus_pub_t *pub); +extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config); +extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config); +extern void * dbus_get_devinfo(dbus_pub_t *pub); + +extern void *dbus_pktget(dbus_pub_t *pub, int len); +extern void dbus_pktfree(dbus_pub_t *pub, void* pkt); + +extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask); +extern int dbus_pnp_sleep(dbus_pub_t *pub); +extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload); +extern int dbus_pnp_disconnect(dbus_pub_t *pub); + +extern int dbus_iovar_op(dbus_pub_t *pub, const char *name, + void *params, int plen, void *arg, int len, bool set); + +extern void *dhd_dbus_txq(const dbus_pub_t *pub); +extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub); + +/* + * Private Common Bus Interface + */ + +/** IO Request Block (IRB) */ +typedef struct dbus_irb { + struct dbus_irb *next; /**< it's casted from dbus_irb_tx or dbus_irb_rx struct */ +} dbus_irb_t; + +typedef struct dbus_irb_rx { + struct dbus_irb irb; /* Must be first */ + uint8 *buf; + int buf_len; + int actual_len; + void *pkt; + void *info; + void *arg; +} dbus_irb_rx_t; + +typedef struct dbus_irb_tx { + struct dbus_irb irb; /** Must be first */ + uint8 *buf; /** mutually exclusive with struct member 'pkt' */ + int len; /** length of field 'buf' */ + void *pkt; /** mutually exclusive with struct member 'buf' */ + int retry_count; + void *info; + void *arg; + void *send_buf; /**< linear bufffer for LINUX when aggreagtion is enabled */ +} 
dbus_irb_tx_t; + +/** + * DBUS interface callbacks are different from user callbacks + * so, internally, different info can be passed to upper layer + */ +typedef struct dbus_intf_callbacks { + void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb); + void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status); + void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status); + void (*errhandler)(void *cbarg, int err); + void (*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + bool (*isr)(void *cbarg, bool *wantdpc); + bool (*dpc)(void *cbarg, bool bounded); + void (*watchdog)(void *cbarg); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); + struct dbus_irb* (*getirb)(void *cbarg, bool send); + void (*rxerr_indicate)(void *cbarg, bool on); +} dbus_intf_callbacks_t; + +/* + * Porting: To support new bus, port these functions below + */ + +/* + * Bus specific Interface + * Implemented by dbus_usb.c/dbus_sdio.c + */ +extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg, + dbus_intf_t **intf, void *param1, void *param2); +extern int dbus_bus_deregister(void); +extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp); + +/* + * Bus-specific and OS-specific Interface + * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c + */ +extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, + void *prarg, dbus_intf_t **intf, void *param1, void *param2); +extern int dbus_bus_osl_deregister(void); + +/* + * Bus-specific, OS-specific, HW-specific Interface + * Mainly for SDIO Host HW controller + */ +extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, + void *prarg, dbus_intf_t **intf); +extern int dbus_bus_osl_hw_deregister(void); + +extern uint usbdev_bulkin_eps(void); +#if defined(BCM_REQUEST_FW) +extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, + uint16 boardtype, uint16 boardrev); +extern void dbus_release_fw_nvfile(void *firmware); +#endif /* #if defined(BCM_REQUEST_FW) */ + + +#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX) + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + /* Backward compatibility */ + typedef unsigned int gfp_t; + + #define dma_pool pci_pool + #define dma_pool_create(name, dev, size, align, alloc) \ + pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC) + #define dma_pool_destroy(pool) pci_pool_destroy(pool) + #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle) + #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr) + + #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir) + #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir) + #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE + #define DMA_TO_DEVICE PCI_DMA_TODEVICE +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +/* Availability of these functions varies (when present, they have two arguments) */ +#ifndef hc32_to_cpu + #define hc32_to_cpu(x) le32_to_cpu(x) + #define cpu_to_hc32(x) cpu_to_le32(x) + typedef unsigned int __hc32; +#else + #error Two-argument functions needed +#endif + +/* Private USB opcode base */ +#define EHCI_FASTPATH 0x31 +#define EHCI_SET_EP_BYPASS EHCI_FASTPATH +#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1) +#define EHCI_SET_BYPASS_DEV 
(EHCI_FASTPATH + 2) +#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3) +#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4) +#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5) + +/* + * EHCI QTD structure (hardware and extension) + * NOTE that is does not need to (and does not) match its kernel counterpart + */ +#define EHCI_QTD_NBUFFERS 5 +#define EHCI_QTD_ALIGN 32 +#define EHCI_BULK_PACKET_SIZE 512 +#define EHCI_QTD_XACTERR_MAX 32 + +struct ehci_qtd { + /* Hardware map */ + volatile uint32_t qtd_next; + volatile uint32_t qtd_altnext; + volatile uint32_t qtd_status; +#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff) +#define EHCI_QTD_IOC 0x00008000 +#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3) +#define EHCI_QTD_SET_CERR(x) ((x) << 10) +#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3) +#define EHCI_QTD_SET_PID(x) ((x) << 8) +#define EHCI_QTD_ACTIVE 0x80 +#define EHCI_QTD_HALTED 0x40 +#define EHCI_QTD_BUFERR 0x20 +#define EHCI_QTD_BABBLE 0x10 +#define EHCI_QTD_XACTERR 0x08 +#define EHCI_QTD_MISSEDMICRO 0x04 + volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS]; + volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS]; + + /* Implementation extension */ + dma_addr_t qtd_self; /**< own hardware address */ + struct ehci_qtd *obj_next; /**< software link to the next QTD */ + void *rpc; /**< pointer to the rpc buffer */ + size_t length; /**< length of the data in the buffer */ + void *buff; /**< pointer to the reassembly buffer */ + int xacterrs; /**< retry counter for qtd xact error */ +} __attribute__ ((aligned(EHCI_QTD_ALIGN))); + +#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */ + +#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1) + +/** + * Queue Head + * NOTE This structure is slightly different from the one in the kernel; but needs to stay + * compatible. 
+ */ +struct ehci_qh { + /* Hardware map */ + volatile uint32_t qh_link; + volatile uint32_t qh_endp; + volatile uint32_t qh_endphub; + volatile uint32_t qh_curqtd; + + /* QTD overlay */ + volatile uint32_t ow_next; + volatile uint32_t ow_altnext; + volatile uint32_t ow_status; + volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS]; + volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS]; + + /* Extension (should match the kernel layout) */ + dma_addr_t unused0; + void *unused1; + struct list_head unused2; + struct ehci_qtd *dummy; + struct ehci_qh *unused3; + + struct ehci_hcd *unused4; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct kref unused5; + unsigned unused6; + + uint8_t unused7; + + /* periodic schedule info */ + uint8_t unused8; + uint8_t unused9; + uint8_t unused10; + uint16_t unused11; + uint16_t unused12; + uint16_t unused13; + struct usb_device *unused14; +#else + unsigned unused5; + + u8 unused6; + + /* periodic schedule info */ + u8 unused7; + u8 unused8; + u8 unused9; + unsigned short unused10; + unsigned short unused11; +#define NO_FRAME ((unsigned short)~0) +#ifdef EHCI_QUIRK_FIX + struct usb_device *unused12; +#endif /* EHCI_QUIRK_FIX */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + struct ehci_qtd *first_qtd; + /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */ + /* NOTE that ehci_qh in ehci.h shall reserve this word */ +} __attribute__ ((aligned(EHCI_QTD_ALIGN))); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/** The corresponding structure in the kernel is used to get the QH */ +struct hcd_dev { /* usb_device.hcpriv points to this */ + struct list_head unused0; + struct list_head unused1; + + /* array of QH pointers */ + void *ep[32]; +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc, + int token, int len); +int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data, + int token, int len); +int optimize_submit_async(struct ehci_qtd *qtd, int epn); +void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma); +struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags); +void optimize_ehci_qtd_free(struct ehci_qtd *qtd); +void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf); +#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */ + +void dbus_flowctrl_tx(void *dbi, bool on); +#endif /* __DBUS_H__ */ diff --git a/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h b/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h new file mode 100644 index 000000000000..2fbe8e0a6b50 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h @@ -0,0 +1,2114 @@ +/* + * Custom OID/ioctl definitions for + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $ + */ + + +#ifndef wlioctl_defs_h +#define wlioctl_defs_h + + + + + +/* All builds use the new 11ac ratespec/chanspec */ +#undef D11AC_IOTYPES +#define D11AC_IOTYPES + +/* WL_RSPEC defines for rate information */ +#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */ +#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */ +#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */ +#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */ +#define WL_RSPEC_TXEXP_MASK 0x00000300 +#define WL_RSPEC_TXEXP_SHIFT 8 +#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */ +#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */ +#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */ +#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */ +#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */ +#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */ +#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */ +#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */ +#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */ + +/* WL_RSPEC_ENCODING field defs */ +#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */ + +/* WL_RSPEC_BW field defs */ +#define WL_RSPEC_BW_UNSPECIFIED 0 +#define WL_RSPEC_BW_20MHZ 0x00010000 +#define WL_RSPEC_BW_40MHZ 0x00020000 +#define WL_RSPEC_BW_80MHZ 0x00030000 +#define WL_RSPEC_BW_160MHZ 0x00040000 +#define WL_RSPEC_BW_10MHZ 0x00050000 +#define WL_RSPEC_BW_5MHZ 0x00060000 +#define WL_RSPEC_BW_2P5MHZ 0x00070000 + +/* Legacy defines for the nrate iovar */ +#define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */ +#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */ +#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */ +#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */ +#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */ +#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */ +#define OLD_NRATE_SGI 0x00800000 /* sgi mode */ +#define 
OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */
+#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */
+#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */
+#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */
+
+#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+
+#define WLC_11N_N_PROP_MCS 6
+#define WLC_11N_FIRST_PROP_MCS 87
+#define WLC_11N_LAST_PROP_MCS 102
+
+
+#define MAX_CCA_CHANNELS 38 /* Max number of 20 MHz wide channels */
+#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
+
+#define IBSS_MED 15 /* Medium in-bss congestion percentage */
+#define IBSS_HI 25 /* Hi in-bss congestion percentage */
+#define OBSS_MED 12
+#define OBSS_HI 25
+#define INTERFER_MED 5
+#define INTERFER_HI 10
+
+#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 GHz band */
+#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 5 GHz band */
+#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */
+#define CCA_FLAGS_PREFER_1_6_11 0x10
+#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exclude channel based on interference level */
+
+#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
+
+#define WL_STA_AID(a) ((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+#define WL_STA_WME 0x00000002 /* WMM association */
+#define WL_STA_NONERP 0x00000004 /* No ERP */
+#define WL_STA_AUTHE 0x00000008 /* Authenticated */
+#define WL_STA_ASSOC 0x00000010 /* Associated */
+#define WL_STA_AUTHO 0x00000020 /* Authorized */
+#define WL_STA_WDS 0x00000040 /* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */
+#define WL_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP 0x00002000 /* STA 802.11n capable */
+#define WL_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define WL_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */
+#define WL_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP 0x00080000 /* rifs enabled */
+#define WL_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */
+#define WL_STA_WPS 0x00200000 /* WPS state */
+
+#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */
+#define
WL_STA_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ +#define WL_STA_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ +#define WL_STA_CAP_GF 0x0010 /* Greenfield preamble support */ +#define WL_STA_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ +#define WL_STA_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ +#define WL_STA_CAP_TX_STBC 0x0080 /* Tx STBC support */ +#define WL_STA_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ +#define WL_STA_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ +#define WL_STA_CAP_DELAYED_BA 0x0400 /* delayed BA support */ +#define WL_STA_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ +#define WL_STA_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ +#define WL_STA_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ +#define WL_STA_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ +#define WL_STA_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ + +#define WL_STA_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ +#define WL_STA_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */ +#define WL_STA_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */ +#define WL_STA_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ + +/* scb vht flags */ +#define WL_STA_VHT_LDPCCAP 0x0001 +#define WL_STA_SGI80 0x0002 +#define WL_STA_SGI160 0x0004 +#define WL_STA_VHT_TX_STBCCAP 0x0008 +#define WL_STA_VHT_RX_STBCCAP 0x0010 +#define WL_STA_SU_BEAMFORMER 0x0020 +#define WL_STA_SU_BEAMFORMEE 0x0040 +#define WL_STA_MU_BEAMFORMER 0x0080 +#define WL_STA_MU_BEAMFORMEE 0x0100 +#define WL_STA_VHT_TXOP_PS 0x0200 +#define WL_STA_HTC_VHT_CAP 0x0400 + +/* Values for TX Filter override mode */ +#define WLC_TXFILTER_OVERRIDE_DISABLED 0 +#define WLC_TXFILTER_OVERRIDE_ENABLED 1 + +#define WL_IOCTL_ACTION_GET 0x0 +#define WL_IOCTL_ACTION_SET 0x1 +#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e +#define WL_IOCTL_ACTION_OVL_RSV 0x20 +#define WL_IOCTL_ACTION_OVL 0x40 +#define WL_IOCTL_ACTION_MASK 0x7e +#define WL_IOCTL_ACTION_OVL_SHIFT 1 + +/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */ +#define WL_BSSTYPE_INDEP 0 +#define WL_BSSTYPE_INFRA 1 +#define WL_BSSTYPE_ANY 2 /* deprecated */ +#define WL_BSSTYPE_MESH 3 + +/* Bitmask for scan_type */ +#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */ +#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */ +#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */ +#define WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */ +#define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */ +#define WL_SCANFLAGS_SWTCHAN 0x20 /* Force channel switch for differerent bandwidth */ +#define WL_SCANFLAGS_FORCE_PARALLEL 0x40 /* Force parallel scan even when actcb_fn_t is on. + * by default parallel scan will be disabled if actcb_fn_t + * is provided. 
+ */ + +/* wl_iscan_results status values */ +#define WL_SCAN_RESULTS_SUCCESS 0 +#define WL_SCAN_RESULTS_PARTIAL 1 +#define WL_SCAN_RESULTS_PENDING 2 +#define WL_SCAN_RESULTS_ABORTED 3 +#define WL_SCAN_RESULTS_NO_MEM 4 + +#define SCANOL_ENABLED (1 << 0) +#define SCANOL_BCAST_SSID (1 << 1) +#define SCANOL_NOTIFY_BCAST_SSID (1 << 2) +#define SCANOL_RESULTS_PER_CYCLE (1 << 3) + +/* scan times in milliseconds */ +#define SCANOL_HOME_TIME 45 /* for home channel processing */ +#define SCANOL_ASSOC_TIME 20 /* dwell on a channel while associated */ +#define SCANOL_UNASSOC_TIME 40 /* dwell on a channel while unassociated */ +#define SCANOL_PASSIVE_TIME 110 /* listen on a channelfor passive scan */ +#define SCANOL_AWAY_LIMIT 100 /* max time to be away from home channel */ +#define SCANOL_IDLE_REST_TIME 40 +#define SCANOL_IDLE_REST_MULTIPLIER 0 +#define SCANOL_ACTIVE_REST_TIME 20 +#define SCANOL_ACTIVE_REST_MULTIPLIER 0 +#define SCANOL_CYCLE_IDLE_REST_TIME 300000 /* Idle Rest Time between Scan Cycle (msec) */ +#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER 0 /* Idle Rest Time Multiplier */ +#define SCANOL_CYCLE_ACTIVE_REST_TIME 200 +#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER 0 +#define SCANOL_MAX_REST_TIME 3600000 /* max rest time between scan cycle (msec) */ +#define SCANOL_CYCLE_DEFAULT 0 /* default for Max Scan Cycle, 0 = forever */ +#define SCANOL_CYCLE_MAX 864000 /* Max Scan Cycle */ + /* 10 sec/scan cycle => 100 days */ +#define SCANOL_NPROBES 2 /* for Active scan; send n probes on each channel */ +#define SCANOL_NPROBES_MAX 5 /* for Active scan; send n probes on each channel */ +#define SCANOL_SCAN_START_DLY 10 /* delay start of offload scan (sec) */ +#define SCANOL_SCAN_START_DLY_MAX 240 /* delay start of offload scan (sec) */ +#define SCANOL_MULTIPLIER_MAX 10 /* Max Multiplier */ +#define SCANOL_UNASSOC_TIME_MAX 100 /* max dwell on a channel while unassociated */ +#define SCANOL_PASSIVE_TIME_MAX 500 /* max listen on a channel for passive scan */ +#define SCANOL_SSID_MAX 16 /* max supported preferred SSID */ + +/* masks for channel and ssid count */ +#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff +#define WL_SCAN_PARAMS_NSSID_SHIFT 16 + +#define WL_SCAN_ACTION_START 1 +#define WL_SCAN_ACTION_CONTINUE 2 +#define WL_SCAN_ACTION_ABORT 3 + + +#define ANTENNA_NUM_1 1 /* total number of antennas to be used */ +#define ANTENNA_NUM_2 2 +#define ANTENNA_NUM_3 3 +#define ANTENNA_NUM_4 4 + +#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */ +#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */ +#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */ +#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */ +#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */ +#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */ + +/* interference source detection and identification mode */ +#define ITFR_MODE_DISABLE 0 /* disable feature */ +#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */ +#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */ + +/* bit definitions for flags in interference source report */ +#define ITFR_INTERFERENCED 1 /* interference detected */ +#define ITFR_HOME_CHANNEL 2 /* home channel has interference */ +#define ITFR_NOISY_ENVIRONMENT 4 /* noisy environemnt so feature stopped */ + +#define WL_NUM_RPI_BINS 8 +#define WL_RM_TYPE_BASIC 1 +#define WL_RM_TYPE_CCA 2 +#define WL_RM_TYPE_RPI 3 +#define WL_RM_TYPE_ABORT -1 /* ABORT any in-progress RM request */ + +#define WL_RM_FLAG_PARALLEL (1<<0) + +#define 
WL_RM_FLAG_LATE (1<<1) +#define WL_RM_FLAG_INCAPABLE (1<<2) +#define WL_RM_FLAG_REFUSED (1<<3) + +/* flags */ +#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */ + +#define WLC_CIS_DEFAULT 0 /* built-in default */ +#define WLC_CIS_SROM 1 /* source is sprom */ +#define WLC_CIS_OTP 2 /* source is otp */ + +/* PCL - Power Control Loop */ +/* current gain setting is replaced by user input */ +#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */ +#define WL_ATTEN_PCL_ON 1 /* turn on PCL */ +/* current gain setting is maintained */ +#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */ + +/* defines used by poweridx iovar - it controls power in a-band */ +/* current gain setting is maintained */ +#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */ +#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */ +#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */ +#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */ +/* value >= 0 causes + * - input to be set to that value + * - PCL to be off + */ + +#define BCM_MAC_STATUS_INDICATION (0x40010200L) + +/* Values for TX Filter override mode */ +#define WLC_TXFILTER_OVERRIDE_DISABLED 0 +#define WLC_TXFILTER_OVERRIDE_ENABLED 1 + +/* magic pattern used for mismatch driver and wl */ +#define WL_TXFIFO_SZ_MAGIC 0xa5a5 + +/* check this magic number */ +#define WLC_IOCTL_MAGIC 0x14e46c77 + +/* bss_info_cap_t flags */ +#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */ +#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */ +#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info received on channel (vs offchannel) */ +#define WL_BSS_FLAGS_HS20 0x08 /* hotspot 2.0 capable */ +#define WL_BSS_FLAGS_RSSI_INVALID 0x10 /* BSS contains invalid RSSI */ +#define WL_BSS_FLAGS_RSSI_INACCURATE 0x20 /* BSS contains inaccurate RSSI */ +#define WL_BSS_FLAGS_SNR_INVALID 0x40 /* BSS contains invalid SNR */ +#define WL_BSS_FLAGS_NF_INVALID 0x80 /* BSS contains invalid noise floor */ + +/* bssinfo flag for nbss_cap */ +#define VHT_BI_SGI_80MHZ 0x00000100 +#define VHT_BI_80MHZ 0x00000200 +#define VHT_BI_160MHZ 0x00000400 +#define VHT_BI_8080MHZ 0x00000800 + +/* reference to wl_ioctl_t struct used by usermode driver */ +#define ioctl_subtype set /* subtype param */ +#define ioctl_pid used /* pid param */ +#define ioctl_status needed /* status param */ + + +/* Enumerate crypto algorithms */ +#define CRYPTO_ALGO_OFF 0 +#define CRYPTO_ALGO_WEP1 1 +#define CRYPTO_ALGO_TKIP 2 +#define CRYPTO_ALGO_WEP128 3 +#define CRYPTO_ALGO_AES_CCM 4 +#define CRYPTO_ALGO_AES_OCB_MSDU 5 +#define CRYPTO_ALGO_AES_OCB_MPDU 6 +#if !defined(BCMEXTCCX) +#define CRYPTO_ALGO_NALG 7 +#else +#define CRYPTO_ALGO_CKIP 7 +#define CRYPTO_ALGO_CKIP_MMH 8 +#define CRYPTO_ALGO_WEP_MMH 9 +#define CRYPTO_ALGO_NALG 10 +#endif + +#define CRYPTO_ALGO_SMS4 11 +#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */ +#define CRYPTO_ALGO_BIP 13 /* 802.11w BIP (aes cmac) */ + +#define CRYPTO_ALGO_AES_GCM 14 /* 128 bit GCM */ +#define CRYPTO_ALGO_AES_CCM256 15 /* 256 bit CCM */ +#define CRYPTO_ALGO_AES_GCM256 16 /* 256 bit GCM */ +#define CRYPTO_ALGO_BIP_CMAC256 17 /* 256 bit BIP CMAC */ +#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */ +#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */ + +#define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF + +#define WSEC_GEN_MIC_ERROR 0x0001 +#define WSEC_GEN_REPLAY 0x0002 +#define WSEC_GEN_ICV_ERROR 0x0004 +#define WSEC_GEN_MFP_ACT_ERROR 0x0008 +#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010 +#define 
WSEC_GEN_MFP_DEAUTH_ERROR 0x0020 + +#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */ +#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */ +#if defined(BCMEXTCCX) +#define WL_CKIP_KP (1 << 4) /* CMIC */ +#define WL_CKIP_MMH (1 << 5) /* CKIP */ +#else +#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */ +#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */ +#endif +#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */ + +/* wireless security bitvec */ +#define WEP_ENABLED 0x0001 +#define TKIP_ENABLED 0x0002 +#define AES_ENABLED 0x0004 +#define WSEC_SWFLAG 0x0008 +#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */ + +/* wsec macros for operating on the above definitions */ +#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED) +#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED) +#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED) + +#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) +#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED) + +/* Following macros are not used any more. Just kept here to + * avoid build issue in BISON/CARIBOU branch + */ +#define MFP_CAPABLE 0x0200 +#define MFP_REQUIRED 0x0400 +#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */ + +/* WPA authentication mode bitvec */ +#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */ +#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */ +#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */ +#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */ +#if defined(BCMEXTCCX) +#define WPA_AUTH_CCKM 0x0008 /* CCKM */ +#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */ +#endif +/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */ +#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */ +#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */ +#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */ +#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */ +#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */ +#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */ +#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ +/* WPA2_AUTH_SHA256 not used anymore. 
Just kept here to avoid build issue in DINGO */ +#define WPA2_AUTH_SHA256 0x8000 +#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ + +/* pmkid */ +#define MAXPMKID 16 + +/* SROM12 changes */ +#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ + + +#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ +#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */ +#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF) +#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */ +#else +#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */ +#endif +#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192 + +/* common ioctl definitions */ +#define WLC_GET_MAGIC 0 +#define WLC_GET_VERSION 1 +#define WLC_UP 2 +#define WLC_DOWN 3 +#define WLC_GET_LOOP 4 +#define WLC_SET_LOOP 5 +#define WLC_DUMP 6 +#define WLC_GET_MSGLEVEL 7 +#define WLC_SET_MSGLEVEL 8 +#define WLC_GET_PROMISC 9 +#define WLC_SET_PROMISC 10 +/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */ +#define WLC_GET_RATE 12 +#define WLC_GET_MAX_RATE 13 +#define WLC_GET_INSTANCE 14 +/* #define WLC_GET_FRAG 15 */ /* no longer supported */ +/* #define WLC_SET_FRAG 16 */ /* no longer supported */ +/* #define WLC_GET_RTS 17 */ /* no longer supported */ +/* #define WLC_SET_RTS 18 */ /* no longer supported */ +#define WLC_GET_INFRA 19 +#define WLC_SET_INFRA 20 +#define WLC_GET_AUTH 21 +#define WLC_SET_AUTH 22 +#define WLC_GET_BSSID 23 +#define WLC_SET_BSSID 24 +#define WLC_GET_SSID 25 +#define WLC_SET_SSID 26 +#define WLC_RESTART 27 +#define WLC_TERMINATED 28 +/* #define WLC_DUMP_SCB 28 */ /* no longer supported */ +#define WLC_GET_CHANNEL 29 +#define WLC_SET_CHANNEL 30 +#define WLC_GET_SRL 31 +#define WLC_SET_SRL 32 +#define WLC_GET_LRL 33 +#define WLC_SET_LRL 34 +#define WLC_GET_PLCPHDR 35 +#define WLC_SET_PLCPHDR 36 +#define WLC_GET_RADIO 37 +#define WLC_SET_RADIO 38 +#define WLC_GET_PHYTYPE 39 +#define WLC_DUMP_RATE 40 +#define WLC_SET_RATE_PARAMS 41 +#define WLC_GET_FIXRATE 42 +#define WLC_SET_FIXRATE 43 +/* #define WLC_GET_WEP 42 */ /* no longer supported */ +/* #define WLC_SET_WEP 43 */ /* no longer supported */ +#define WLC_GET_KEY 44 +#define WLC_SET_KEY 45 +#define WLC_GET_REGULATORY 46 +#define WLC_SET_REGULATORY 47 +#define WLC_GET_PASSIVE_SCAN 48 +#define WLC_SET_PASSIVE_SCAN 49 +#define WLC_SCAN 50 +#define WLC_SCAN_RESULTS 51 +#define WLC_DISASSOC 52 +#define WLC_REASSOC 53 +#define WLC_GET_ROAM_TRIGGER 54 +#define WLC_SET_ROAM_TRIGGER 55 +#define WLC_GET_ROAM_DELTA 56 +#define WLC_SET_ROAM_DELTA 57 +#define WLC_GET_ROAM_SCAN_PERIOD 58 +#define WLC_SET_ROAM_SCAN_PERIOD 59 +#define WLC_EVM 60 /* diag */ +#define WLC_GET_TXANT 61 +#define WLC_SET_TXANT 62 +#define WLC_GET_ANTDIV 63 +#define WLC_SET_ANTDIV 64 +/* #define WLC_GET_TXPWR 65 */ /* no longer supported */ +/* #define WLC_SET_TXPWR 66 */ /* no longer supported */ +#define WLC_GET_CLOSED 67 +#define WLC_SET_CLOSED 68 +#define WLC_GET_MACLIST 69 +#define WLC_SET_MACLIST 70 +#define WLC_GET_RATESET 71 +#define WLC_SET_RATESET 72 +/* #define WLC_GET_LOCALE 73 */ /* no longer supported */ +#define WLC_LONGTRAIN 74 +#define WLC_GET_BCNPRD 75 +#define WLC_SET_BCNPRD 76 +#define WLC_GET_DTIMPRD 77 +#define WLC_SET_DTIMPRD 78 +#define WLC_GET_SROM 79 +#define WLC_SET_SROM 80 +#define WLC_GET_WEP_RESTRICT 81 +#define WLC_SET_WEP_RESTRICT 82 +#define WLC_GET_COUNTRY 83 +#define WLC_SET_COUNTRY 84 +#define WLC_GET_PM 85 +#define WLC_SET_PM 86 +#define WLC_GET_WAKE 87 +#define 
WLC_SET_WAKE 88 +/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */ +#define WLC_GET_FORCELINK 90 /* ndis only */ +#define WLC_SET_FORCELINK 91 /* ndis only */ +#define WLC_FREQ_ACCURACY 92 /* diag */ +#define WLC_CARRIER_SUPPRESS 93 /* diag */ +#define WLC_GET_PHYREG 94 +#define WLC_SET_PHYREG 95 +#define WLC_GET_RADIOREG 96 +#define WLC_SET_RADIOREG 97 +#define WLC_GET_REVINFO 98 +#define WLC_GET_UCANTDIV 99 +#define WLC_SET_UCANTDIV 100 +#define WLC_R_REG 101 +#define WLC_W_REG 102 +/* #define WLC_DIAG_LOOPBACK 103 old tray diag */ +/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */ +#define WLC_GET_MACMODE 105 +#define WLC_SET_MACMODE 106 +#define WLC_GET_MONITOR 107 +#define WLC_SET_MONITOR 108 +#define WLC_GET_GMODE 109 +#define WLC_SET_GMODE 110 +#define WLC_GET_LEGACY_ERP 111 +#define WLC_SET_LEGACY_ERP 112 +#define WLC_GET_RX_ANT 113 +#define WLC_GET_CURR_RATESET 114 /* current rateset */ +#define WLC_GET_SCANSUPPRESS 115 +#define WLC_SET_SCANSUPPRESS 116 +#define WLC_GET_AP 117 +#define WLC_SET_AP 118 +#define WLC_GET_EAP_RESTRICT 119 +#define WLC_SET_EAP_RESTRICT 120 +#define WLC_SCB_AUTHORIZE 121 +#define WLC_SCB_DEAUTHORIZE 122 +#define WLC_GET_WDSLIST 123 +#define WLC_SET_WDSLIST 124 +#define WLC_GET_ATIM 125 +#define WLC_SET_ATIM 126 +#define WLC_GET_RSSI 127 +#define WLC_GET_PHYANTDIV 128 +#define WLC_SET_PHYANTDIV 129 +#define WLC_AP_RX_ONLY 130 +#define WLC_GET_TX_PATH_PWR 131 +#define WLC_SET_TX_PATH_PWR 132 +#define WLC_GET_WSEC 133 +#define WLC_SET_WSEC 134 +#define WLC_GET_PHY_NOISE 135 +#define WLC_GET_BSS_INFO 136 +#define WLC_GET_PKTCNTS 137 +#define WLC_GET_LAZYWDS 138 +#define WLC_SET_LAZYWDS 139 +#define WLC_GET_BANDLIST 140 + +#define WLC_GET_BAND 141 +#define WLC_SET_BAND 142 +#define WLC_SCB_DEAUTHENTICATE 143 +#define WLC_GET_SHORTSLOT 144 +#define WLC_GET_SHORTSLOT_OVERRIDE 145 +#define WLC_SET_SHORTSLOT_OVERRIDE 146 +#define WLC_GET_SHORTSLOT_RESTRICT 147 +#define WLC_SET_SHORTSLOT_RESTRICT 148 +#define WLC_GET_GMODE_PROTECTION 149 +#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150 +#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151 +#define WLC_UPGRADE 152 +/* #define WLC_GET_MRATE 153 */ /* no longer supported */ +/* #define WLC_SET_MRATE 154 */ /* no longer supported */ +#define WLC_GET_IGNORE_BCNS 155 +#define WLC_SET_IGNORE_BCNS 156 +#define WLC_GET_SCB_TIMEOUT 157 +#define WLC_SET_SCB_TIMEOUT 158 +#define WLC_GET_ASSOCLIST 159 +#define WLC_GET_CLK 160 +#define WLC_SET_CLK 161 +#define WLC_GET_UP 162 +#define WLC_OUT 163 +#define WLC_GET_WPA_AUTH 164 +#define WLC_SET_WPA_AUTH 165 +#define WLC_GET_UCFLAGS 166 +#define WLC_SET_UCFLAGS 167 +#define WLC_GET_PWRIDX 168 +#define WLC_SET_PWRIDX 169 +#define WLC_GET_TSSI 170 +#define WLC_GET_SUP_RATESET_OVERRIDE 171 +#define WLC_SET_SUP_RATESET_OVERRIDE 172 +/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */ +/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */ +/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */ +/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */ +/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */ +#define WLC_GET_PROTECTION_CONTROL 178 +#define WLC_SET_PROTECTION_CONTROL 179 +#define WLC_GET_PHYLIST 180 +#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */ +#define WLC_DECRYPT_STATUS 182 /* ndis only */ +#define WLC_GET_KEY_SEQ 183 +#define WLC_GET_SCAN_CHANNEL_TIME 184 +#define WLC_SET_SCAN_CHANNEL_TIME 185 +#define WLC_GET_SCAN_UNASSOC_TIME 186 +#define WLC_SET_SCAN_UNASSOC_TIME 187 +#define WLC_GET_SCAN_HOME_TIME 188 
+#define WLC_SET_SCAN_HOME_TIME 189 +#define WLC_GET_SCAN_NPROBES 190 +#define WLC_SET_SCAN_NPROBES 191 +#define WLC_GET_PRB_RESP_TIMEOUT 192 +#define WLC_SET_PRB_RESP_TIMEOUT 193 +#define WLC_GET_ATTEN 194 +#define WLC_SET_ATTEN 195 +#define WLC_GET_SHMEM 196 /* diag */ +#define WLC_SET_SHMEM 197 /* diag */ +/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */ +/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */ +#define WLC_SET_WSEC_TEST 200 +#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define WLC_TKIP_COUNTERMEASURES 202 +#define WLC_GET_PIOMODE 203 +#define WLC_SET_PIOMODE 204 +#define WLC_SET_ASSOC_PREFER 205 +#define WLC_GET_ASSOC_PREFER 206 +#define WLC_SET_ROAM_PREFER 207 +#define WLC_GET_ROAM_PREFER 208 +#define WLC_SET_LED 209 +#define WLC_GET_LED 210 +#define WLC_GET_INTERFERENCE_MODE 211 +#define WLC_SET_INTERFERENCE_MODE 212 +#define WLC_GET_CHANNEL_QA 213 +#define WLC_START_CHANNEL_QA 214 +#define WLC_GET_CHANNEL_SEL 215 +#define WLC_START_CHANNEL_SEL 216 +#define WLC_GET_VALID_CHANNELS 217 +#define WLC_GET_FAKEFRAG 218 +#define WLC_SET_FAKEFRAG 219 +#define WLC_GET_PWROUT_PERCENTAGE 220 +#define WLC_SET_PWROUT_PERCENTAGE 221 +#define WLC_SET_BAD_FRAME_PREEMPT 222 +#define WLC_GET_BAD_FRAME_PREEMPT 223 +#define WLC_SET_LEAP_LIST 224 +#define WLC_GET_LEAP_LIST 225 +#define WLC_GET_CWMIN 226 +#define WLC_SET_CWMIN 227 +#define WLC_GET_CWMAX 228 +#define WLC_SET_CWMAX 229 +#define WLC_GET_WET 230 +#define WLC_SET_WET 231 +#define WLC_GET_PUB 232 +/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */ +/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */ +#define WLC_GET_KEY_PRIMARY 235 +#define WLC_SET_KEY_PRIMARY 236 + + +/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */ +#define WLC_GET_ACI_ARGS 238 +#define WLC_SET_ACI_ARGS 239 +#define WLC_UNSET_CALLBACK 240 +#define WLC_SET_CALLBACK 241 +#define WLC_GET_RADAR 242 +#define WLC_SET_RADAR 243 +#define WLC_SET_SPECT_MANAGMENT 244 +#define WLC_GET_SPECT_MANAGMENT 245 +#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */ +#define WLC_WDS_GET_WPA_SUP 247 +#define WLC_SET_CS_SCAN_TIMER 248 +#define WLC_GET_CS_SCAN_TIMER 249 +#define WLC_MEASURE_REQUEST 250 +#define WLC_INIT 251 +#define WLC_SEND_QUIET 252 +#define WLC_KEEPALIVE 253 +#define WLC_SEND_PWR_CONSTRAINT 254 +#define WLC_UPGRADE_STATUS 255 +#define WLC_CURRENT_PWR 256 +#define WLC_GET_SCAN_PASSIVE_TIME 257 +#define WLC_SET_SCAN_PASSIVE_TIME 258 +#define WLC_LEGACY_LINK_BEHAVIOR 259 +#define WLC_GET_CHANNELS_IN_COUNTRY 260 +#define WLC_GET_COUNTRY_LIST 261 +#define WLC_GET_VAR 262 /* get value of named variable */ +#define WLC_SET_VAR 263 /* set named variable to value */ +#define WLC_NVRAM_GET 264 /* deprecated */ +#define WLC_NVRAM_SET 265 +#define WLC_NVRAM_DUMP 266 +#define WLC_REBOOT 267 +#define WLC_SET_WSEC_PMK 268 +#define WLC_GET_AUTH_MODE 269 +#define WLC_SET_AUTH_MODE 270 +#define WLC_GET_WAKEENTRY 271 +#define WLC_SET_WAKEENTRY 272 +#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */ +#define WLC_NVOTPW 274 +#define WLC_OTPW 275 +#define WLC_IOV_BLOCK_GET 276 +#define WLC_IOV_MODULES_GET 277 +#define WLC_SOFT_RESET 278 +#define WLC_GET_ALLOW_MODE 279 +#define WLC_SET_ALLOW_MODE 280 +#define WLC_GET_DESIRED_BSSID 281 +#define WLC_SET_DESIRED_BSSID 282 +#define WLC_DISASSOC_MYAP 283 +#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */ +#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */ +#define WLC_GET_WLC_BSS_INFO 286 /* for 
Dongle EXT_STA support */ +#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */ +#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */ +#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */ +#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */ +#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */ +#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */ +#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */ +#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */ +#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */ +#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */ +#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */ +#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */ +#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */ +#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */ +#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */ +#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */ +#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */ +#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */ +/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */ +#define WLC_GET_CMD 309 +/* #define WLC_LAST 310 */ /* Never used - can be reused */ +#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */ +#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */ +/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */ +/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */ +/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */ +#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */ +#define WLC_GET_NAT_STATE 317 +#define WLC_GET_TXBF_RATESET 318 +#define WLC_SET_TXBF_RATESET 319 +#define WLC_SCAN_CQ 320 +#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */ +#define WLC_DUMP_RATESET 322 +#define WLC_ECHO 323 +#define WLC_LAST 324 +#ifndef EPICTRL_COOKIE +#define EPICTRL_COOKIE 0xABADCEDE +#endif + +/* vx wlc ioctl's offset */ +#define CMN_IOCTL_OFF 0x180 + +/* + * custom OID support + * + * 0xFF - implementation specific OID + * 0xE4 - first byte of Broadcom PCI vendor ID + * 0x14 - second byte of Broadcom PCI vendor ID + * 0xXX - the custom OID number + */ + +/* begin 0x1f values beyond the start of the ET driver range. 
*/ +#define WL_OID_BASE 0xFFE41420 + +/* NDIS overrides */ +#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE) +#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK) +#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK) +#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH) +#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS) +#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR) +#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM) + +/* EXT_STA Dongle suuport */ +#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC) +#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS) +#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY) +#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY) +#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME) +#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID) +#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE) +#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING) +#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING) +#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON) +#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON) +#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE) +#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC) +#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS) +#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS) + +/* NAT filter driver support */ +#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG) +#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE) + +#define WL_DECRYPT_STATUS_SUCCESS 1 +#define WL_DECRYPT_STATUS_FAILURE 2 +#define WL_DECRYPT_STATUS_UNKNOWN 3 + +/* allows user-mode app to poll the status of USB image upgrade */ +#define WLC_UPGRADE_SUCCESS 0 +#define WLC_UPGRADE_PENDING 1 + +/* WLC_GET_AUTH, WLC_SET_AUTH values */ +#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */ +#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ +#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */ + +/* a large TX Power as an init value to factor out of MIN() calculations, + * keep low enough to fit in an int8, units are .25 dBm + */ +#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */ + +/* "diag" iovar argument and error code */ +#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */ +#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */ +#define WL_DIAG_MEMORY 3 /* d11 memory test */ +#define WL_DIAG_LED 4 /* LED test */ +#define WL_DIAG_REG 5 /* d11/phy register test */ +#define WL_DIAG_SROM 6 /* srom read/crc test */ +#define WL_DIAG_DMA 7 /* DMA test */ +#define WL_DIAG_LOOPBACK_EXT 8 /* enhenced d11 loopback data test */ + +#define WL_DIAGERR_SUCCESS 0 +#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */ +#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */ +#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */ +#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */ +#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */ +#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */ +#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */ +#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */ +#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */ +#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */ + +#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 
memory test didn't finish in time */ +#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */ + +/* band types */ +#define WLC_BAND_AUTO 0 /* auto-select */ +#define WLC_BAND_5G 1 /* 5 Ghz */ +#define WLC_BAND_2G 2 /* 2.4 Ghz */ +#define WLC_BAND_ALL 3 /* all bands */ + +/* band range returned by band_range iovar */ +#define WL_CHAN_FREQ_RANGE_2G 0 +#define WL_CHAN_FREQ_RANGE_5GL 1 +#define WL_CHAN_FREQ_RANGE_5GM 2 +#define WL_CHAN_FREQ_RANGE_5GH 3 + +#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4 +#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5 +#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6 +#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7 +#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8 + +#define WL_CHAN_FREQ_RANGE_5G_BAND0 1 +#define WL_CHAN_FREQ_RANGE_5G_BAND1 2 +#define WL_CHAN_FREQ_RANGE_5G_BAND2 3 +#define WL_CHAN_FREQ_RANGE_5G_BAND3 4 +#define WL_CHAN_FREQ_RANGE_5G_4BAND 5 + + +/* SROM12 */ +#define WL_CHAN_FREQ_RANGE_5G_BAND4 5 +#define WL_CHAN_FREQ_RANGE_2G_40 6 +#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7 +#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8 +#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9 +#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10 +#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11 +#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12 +#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13 +#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14 +#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15 +#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16 + +#define WL_CHAN_FREQ_RANGE_5G_5BAND 18 +#define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19 +#define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20 + +#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */ +#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */ +#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */ + +/* + * 54g modes (basic bits may still be overridden) + * + * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11 + * Preamble: Long + * Shortslot: Off + * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 + * Extended Rateset: 6, 9, 12, 48 + * Preamble: Long + * Shortslot: Auto + * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54 + * Extended Rateset: 6b, 9, 12b, 48 + * Preamble: Short required + * Shortslot: Auto + * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 + * Extended Rateset: 6, 9, 12, 48 + * Preamble: Long + * Shortslot: On + * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54 + * Preamble: Short required + * Shortslot: On and required + * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b + * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54 + * Preamble: Long + * Shortslot: Auto + */ +#define GMODE_LEGACY_B 0 +#define GMODE_AUTO 1 +#define GMODE_ONLY 2 +#define GMODE_B_DEFERRED 3 +#define GMODE_PERFORMANCE 4 +#define GMODE_LRS 5 +#define GMODE_MAX 6 + +/* values for PLCPHdr_override */ +#define WLC_PLCP_AUTO -1 +#define WLC_PLCP_SHORT 0 +#define WLC_PLCP_LONG 1 + +/* values for g_protection_override and n_protection_override */ +#define WLC_PROTECTION_AUTO -1 +#define WLC_PROTECTION_OFF 0 +#define WLC_PROTECTION_ON 1 +#define WLC_PROTECTION_MMHDR_ONLY 2 +#define WLC_PROTECTION_CTS_ONLY 3 + +/* values for g_protection_control and n_protection_control */ +#define WLC_PROTECTION_CTL_OFF 0 +#define WLC_PROTECTION_CTL_LOCAL 1 +#define WLC_PROTECTION_CTL_OVERLAP 2 + +/* values for n_protection */ +#define WLC_N_PROTECTION_OFF 0 +#define WLC_N_PROTECTION_OPTIONAL 1 +#define WLC_N_PROTECTION_20IN40 2 +#define WLC_N_PROTECTION_MIXEDMODE 3 + +/* values for n_preamble_type */ +#define WLC_N_PREAMBLE_MIXEDMODE 0 +#define WLC_N_PREAMBLE_GF 1 +#define 
WLC_N_PREAMBLE_GF_BRCM 2 + +/* values for band specific 40MHz capabilities (deprecated) */ +#define WLC_N_BW_20ALL 0 +#define WLC_N_BW_40ALL 1 +#define WLC_N_BW_20IN2G_40IN5G 2 + +#define WLC_BW_20MHZ_BIT (1<<0) +#define WLC_BW_40MHZ_BIT (1<<1) +#define WLC_BW_80MHZ_BIT (1<<2) +#define WLC_BW_160MHZ_BIT (1<<3) +#define WLC_BW_10MHZ_BIT (1<<4) +#define WLC_BW_5MHZ_BIT (1<<5) +#define WLC_BW_2P5MHZ_BIT (1<<6) + +/* Bandwidth capabilities */ +#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \ + WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_2P5MHZ (WLC_BW_2P5MHZ_BIT) +#define WLC_BW_CAP_5MHZ (WLC_BW_5MHZ_BIT) +#define WLC_BW_CAP_10MHZ (WLC_BW_10MHZ_BIT) +#define WLC_BW_CAP_UNRESTRICTED 0xFF + +#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_5MHZ(bw_cap) (((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_10MHZ(bw_cap) (((bw_cap) & WLC_BW_10MHZ_BIT) ? TRUE : FALSE) + +/* values to force tx/rx chain */ +#define WLC_N_TXRX_CHAIN0 0 +#define WLC_N_TXRX_CHAIN1 1 + +/* bitflags for SGI support (sgi_rx iovar) */ +#define WLC_N_SGI_20 0x01 +#define WLC_N_SGI_40 0x02 +#define WLC_VHT_SGI_80 0x04 + +/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */ +#define WLC_SGI_ALL 0x02 + +#define LISTEN_INTERVAL 10 +/* interference mitigation options */ +#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */ +#define INTERFERE_NONE 0 /* off */ +#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */ +#define WLAN_MANUAL 2 /* ACI: no auto detection */ +#define WLAN_AUTO 3 /* ACI: auto detect */ +#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */ +#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */ + +/* interfernece mode bit-masks (ACPHY) */ +#define ACPHY_ACI_GLITCHBASED_DESENSE 1 /* bit 0 */ +#define ACPHY_ACI_HWACI_PKTGAINLMT 2 /* bit 1 */ +#define ACPHY_ACI_W2NB_PKTGAINLMT 4 /* bit 2 */ +#define ACPHY_ACI_PREEMPTION 8 /* bit 3 */ +#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */ +#define ACPHY_ACI_MAX_MODE 31 + +/* AP environment */ +#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */ +#define AP_ENV_DENSE 1 /* "Corporate" or other AP dense environment */ +#define AP_ENV_SPARSE 2 /* "Home" or other sparse environment */ +#define AP_ENV_INDETERMINATE 3 /* AP environment hasn't been identified */ + +#define TRIGGER_NOW 0 +#define TRIGGER_CRS 0x01 +#define TRIGGER_CRSDEASSERT 0x02 +#define TRIGGER_GOODFCS 0x04 +#define TRIGGER_BADFCS 0x08 +#define TRIGGER_BADPLCP 0x10 +#define TRIGGER_CRSGLITCH 0x20 + +#define WL_SAMPLEDATA_HEADER_TYPE 1 +#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */ +#define WL_SAMPLEDATA_TYPE 2 +#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */ +#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */ + +/* WL_OTA START */ +#define WL_OTA_ARG_PARSE_BLK_SIZE 1200 +#define WL_OTA_TEST_MAX_NUM_RATE 30 +#define WL_OTA_TEST_MAX_NUM_SEQ 100 +#define 
WL_OTA_TEST_MAX_NUM_RSSI 85 + +#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */ + +/* radar iovar SET defines */ +#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */ +#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */ +#define WL_RADAR_SIMULATED 2 /* force radar detector to declare + * detection once + */ +#define WL_RADAR_SIMULATED_SC 3 /* force radar detector to declare + * detection once on scan core + * if available and active + */ +#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */ +#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */ +#define WL_ANT_HT_RX_MAX 4 /* max 4 receive antennas/cores */ +#define WL_ANT_IDX_1 0 /* antenna index 1 */ +#define WL_ANT_IDX_2 1 /* antenna index 2 */ + +#ifndef WL_RSSI_ANT_MAX +#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */ +#elif WL_RSSI_ANT_MAX != 4 +#error "WL_RSSI_ANT_MAX does not match" +#endif + +/* dfs_status iovar-related defines */ + +/* cac - channel availability check, + * ism - in-service monitoring + * csa - channel switching announcement + */ + +/* cac state values */ +#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */ +#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */ +#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */ +#define WL_DFS_CACSTATE_CSA 3 /* csa */ +#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */ +#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */ +#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */ +#define WL_DFS_CACSTATES 7 /* this many states exist */ + +/* Defines used with channel_bandwidth for curpower */ +#define WL_BW_20MHZ 0 +#define WL_BW_40MHZ 1 +#define WL_BW_80MHZ 2 +#define WL_BW_160MHZ 3 +#define WL_BW_8080MHZ 4 +#define WL_BW_2P5MHZ 5 +#define WL_BW_5MHZ 6 +#define WL_BW_10MHZ 7 + +/* tx_power_t.flags bits */ +#define WL_TX_POWER_F_ENABLED 1 +#define WL_TX_POWER_F_HW 2 +#define WL_TX_POWER_F_MIMO 4 +#define WL_TX_POWER_F_SISO 8 +#define WL_TX_POWER_F_HT 0x10 +#define WL_TX_POWER_F_VHT 0x20 +#define WL_TX_POWER_F_OPENLOOP 0x40 +#define WL_TX_POWER_F_PROP11NRATES 0x80 + +/* Message levels */ +#define WL_ERROR_VAL 0x00000001 +#define WL_TRACE_VAL 0x00000002 +#define WL_PRHDRS_VAL 0x00000004 +#define WL_PRPKT_VAL 0x00000008 +#define WL_INFORM_VAL 0x00000010 +#define WL_TMP_VAL 0x00000020 +#define WL_OID_VAL 0x00000040 +#define WL_RATE_VAL 0x00000080 +#define WL_ASSOC_VAL 0x00000100 +#define WL_PRUSR_VAL 0x00000200 +#define WL_PS_VAL 0x00000400 +#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */ +#define WL_PORT_VAL 0x00001000 +#define WL_DUAL_VAL 0x00002000 +#define WL_WSEC_VAL 0x00004000 +#define WL_WSEC_DUMP_VAL 0x00008000 +#define WL_LOG_VAL 0x00010000 +#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */ +#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ +#define WL_REGULATORY_VAL 0x00080000 +#define WL_TAF_VAL 0x00100000 +#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */ +#define WL_MPC_VAL 0x00400000 +#define WL_APSTA_VAL 0x00800000 +#define WL_DFS_VAL 0x01000000 +#define WL_BA_VAL 0x00000000 /* retired in TOT on 6/14/2010 */ +#define WL_MUMIMO_VAL 0x02000000 /* Using retired WL_BA_VAL */ +#define WL_ACI_VAL 0x04000000 +#define WL_PRMAC_VAL 0x04000000 +#define WL_MBSS_VAL 0x04000000 
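/* Editor's aside (illustrative sketch, not part of the original patch): the
 * WL_*_VAL constants in this block are bit flags OR-ed together into the
 * driver's wl_msg_level debug bitmap; several retired bits (defined as
 * 0x00000000) have been reused for newer message classes.  A hypothetical
 * debug setting that keeps error, association and power-save tracing enabled
 * could be composed and tested like this (the variable name is an assumption
 * used only for illustration):
 *
 *	uint32 wl_msg_level = WL_ERROR_VAL | WL_ASSOC_VAL | WL_PS_VAL;
 *
 *	if (wl_msg_level & WL_ASSOC_VAL)
 *		;	// association tracing is on
 */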
+#define WL_CAC_VAL 0x08000000 +#define WL_AMSDU_VAL 0x10000000 +#define WL_AMPDU_VAL 0x20000000 +#define WL_FFPLD_VAL 0x40000000 + +/* wl_msg_level is full. For new bits take the next one and AND with + * wl_msg_level2 in wl_dbg.h + */ +#define WL_DPT_VAL 0x00000001 +/* re-using WL_DPT_VAL */ +#define WL_MESH_VAL 0x00000001 +#define WL_SCAN_VAL 0x00000002 +#define WL_WOWL_VAL 0x00000004 +#define WL_COEX_VAL 0x00000008 +#define WL_RTDC_VAL 0x00000010 +#define WL_PROTO_VAL 0x00000020 +#define WL_BTA_VAL 0x00000040 +#define WL_CHANINT_VAL 0x00000080 +#define WL_WMF_VAL 0x00000100 +#define WL_P2P_VAL 0x00000200 +#define WL_ITFR_VAL 0x00000400 +#define WL_MCHAN_VAL 0x00000800 +#define WL_TDLS_VAL 0x00001000 +#define WL_MCNX_VAL 0x00002000 +#define WL_PROT_VAL 0x00004000 +#define WL_PSTA_VAL 0x00008000 +#define WL_TSO_VAL 0x00010000 +#define WL_TRF_MGMT_VAL 0x00020000 +#define WL_LPC_VAL 0x00040000 +#define WL_L2FILTER_VAL 0x00080000 +#define WL_TXBF_VAL 0x00100000 +#define WL_P2PO_VAL 0x00200000 +#define WL_TBTT_VAL 0x00400000 +#define WL_FBT_VAL 0x00800000 +#define WL_MQ_VAL 0x01000000 + +/* This level is currently used in Phoenix2 only */ +#define WL_SRSCAN_VAL 0x02000000 + +#define WL_WNM_VAL 0x04000000 +#define WL_PWRSEL_VAL 0x10000000 +#define WL_NET_DETECT_VAL 0x20000000 +#define WL_PCIE_VAL 0x40000000 +#define WL_PMDUR_VAL 0x80000000 + + +/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier + * rather than a message-type of its own + */ +#define WL_TIMESTAMP_VAL 0x80000000 + +/* max # of leds supported by GPIO (gpio pin# == led index#) */ +#define WL_LED_NUMGPIO 32 /* gpio 0-31 */ + +/* led per-pin behaviors */ +#define WL_LED_OFF 0 /* always off */ +#define WL_LED_ON 1 /* always on */ +#define WL_LED_ACTIVITY 2 /* activity */ +#define WL_LED_RADIO 3 /* radio enabled */ +#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */ +#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */ +#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */ +#define WL_LED_WI1 7 +#define WL_LED_WI2 8 +#define WL_LED_WI3 9 +#define WL_LED_ASSOC 10 /* associated state indicator */ +#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */ +#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */ +#define WL_LED_WI4 13 +#define WL_LED_WI5 14 +#define WL_LED_BLINKSLOW 15 /* blink slow */ +#define WL_LED_BLINKMED 16 /* blink med */ +#define WL_LED_BLINKFAST 17 /* blink fast */ +#define WL_LED_BLINKCUSTOM 18 /* blink custom */ +#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */ +#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */ + /* keep on for 300 sec */ +#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */ +#define WL_LED_WI6 22 +#define WL_LED_WI7 23 +#define WL_LED_WI8 24 +#define WL_LED_NUMBEHAVIOR 25 + +/* led behavior numeric value format */ +#define WL_LED_BEH_MASK 0x3f /* behavior mask */ +#define WL_LED_PMU_OVERRIDE 0x40 /* need to set PMU Override bit for the GPIO */ +#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */ + +/* number of bytes needed to define a proper bit mask for MAC event reporting */ +#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define BCMIO_NBBY 8 +#define WL_EVENTING_MASK_LEN 16 + + +/* join preference types */ +#define WL_JOIN_PREF_RSSI 1 /* by RSSI */ +#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */ +#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */ +#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */ +#define 
WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */ + +/* band preference */ +#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */ + +/* any multicast cipher suite */ +#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00" + +/* 802.11h measurement types */ +#define WLC_MEASURE_TPC 1 +#define WLC_MEASURE_CHANNEL_BASIC 2 +#define WLC_MEASURE_CHANNEL_CCA 3 +#define WLC_MEASURE_CHANNEL_RPI 4 + +/* regulatory enforcement levels */ +#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */ +#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */ +#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */ +#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */ +/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE + * adoption is done regardless of capability spectrum_management + */ +#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */ + +#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */ +#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */ +#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */ +#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */ +#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */ +#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */ +#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */ + +/* BTC mode used by "btc_mode" iovar */ +#define WL_BTC_DISABLE 0 /* disable BT coexistence */ +#define WL_BTC_FULLTDM 1 /* full TDM COEX */ +#define WL_BTC_ENABLE 1 /* full TDM COEX to maintain backward compatiblity */ +#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */ +#define WL_BTC_LITE 3 /* light weight coex for large isolation platform */ +#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */ +#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */ +#define WL_BTC_DEFAULT 8 /* set the default mode for the device */ +#define WL_INF_BTC_DISABLE 0 +#define WL_INF_BTC_ENABLE 1 +#define WL_INF_BTC_AUTO 3 + +/* BTC wire used by "btc_wire" iovar */ +#define WL_BTC_DEFWIRE 0 /* use default wire setting */ +#define WL_BTC_2WIRE 2 /* use 2-wire BTC */ +#define WL_BTC_3WIRE 3 /* use 3-wire BTC */ +#define WL_BTC_4WIRE 4 /* use 4-wire BTC */ + +/* BTC flags: BTC configuration that can be set by host */ +#define WL_BTC_FLAG_PREMPT (1 << 0) +#define WL_BTC_FLAG_BT_DEF (1 << 1) +#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2) +#define WL_BTC_FLAG_SIM_RSP (1 << 3) +#define WL_BTC_FLAG_PS_PROTECT (1 << 4) +#define WL_BTC_FLAG_SIM_TX_LP (1 << 5) +#define WL_BTC_FLAG_ECI (1 << 6) +#define WL_BTC_FLAG_LIGHT (1 << 7) +#define WL_BTC_FLAG_PARALLEL (1 << 8) + +/* maximum channels returned by the get valid channels iovar */ +#define WL_NUMCHANNELS 64 + +/* max number of chanspecs (used by the iovar to calc. 
buf space) */ +#ifdef WL11AC_80P80 +#define WL_NUMCHANSPECS 206 +#else +#define WL_NUMCHANSPECS 110 +#endif + +/* WDS link local endpoint WPA role */ +#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */ +#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */ +#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */ + +/* Base offset values */ +#define WL_PKT_FILTER_BASE_PKT 0 +#define WL_PKT_FILTER_BASE_END 1 +#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */ +#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */ +#define WL_PKT_FILTER_BASE_ETH_H 4 +#define WL_PKT_FILTER_BASE_ETH_D 5 +#define WL_PKT_FILTER_BASE_ARP_H 6 +#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */ +#define WL_PKT_FILTER_BASE_IP4_H 8 +#define WL_PKT_FILTER_BASE_IP4_D 9 +#define WL_PKT_FILTER_BASE_IP6_H 10 +#define WL_PKT_FILTER_BASE_IP6_D 11 +#define WL_PKT_FILTER_BASE_TCP_H 12 +#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */ +#define WL_PKT_FILTER_BASE_UDP_H 14 +#define WL_PKT_FILTER_BASE_UDP_D 15 +#define WL_PKT_FILTER_BASE_IP6_P 16 +#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */ + +/* String mapping for bases that may be used by applications or debug */ +#define WL_PKT_FILTER_BASE_NAMES \ + { "START", WL_PKT_FILTER_BASE_PKT }, \ + { "END", WL_PKT_FILTER_BASE_END }, \ + { "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \ + { "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \ + { "D11_H", WL_PKT_FILTER_BASE_D11_H }, \ + { "D11_D", WL_PKT_FILTER_BASE_D11_D }, \ + { "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \ + { "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \ + { "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \ + { "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \ + { "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \ + { "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \ + { "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \ + { "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \ + { "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \ + { "UDP_D", WL_PKT_FILTER_BASE_UDP_D } + +/* Flags for a pattern list element */ +#define WL_PKT_FILTER_MFLAG_NEG 0x0001 + +/* + * Packet engine interface + */ + +#define WL_PKTENG_PER_TX_START 0x01 +#define WL_PKTENG_PER_TX_STOP 0x02 +#define WL_PKTENG_PER_RX_START 0x04 +#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 +#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 +#define WL_PKTENG_PER_RX_STOP 0x08 +#define WL_PKTENG_PER_MASK 0xff + +#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */ +#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */ + +#define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */ + +#define NUM_80211b_RATES 4 +#define NUM_80211ag_RATES 8 +#define NUM_80211n_RATES 32 +#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES) + +/* + * WOWL capability/override settings + */ +#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */ +#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */ +#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */ +#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */ +#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */ +#define WL_WOWL_TST (1 << 5) /* Wakeup after test */ +#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */ +#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */ +#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */ +#define WL_WOWL_ULP_BAILOUT (1 << 8) /* wakeind via unknown pkt by basic ULP-offloads - + * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS and + * not WLC_HIGH_ONLY case + */ +#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip 
phase 1 key to be updated by the driver */ +#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */ +#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */ +#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */ +#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */ +#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotaton */ +#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */ +#define WL_WOWL_SCANOL (1 << 16) /* If the bit is set, scan offload is enabled */ +#define WL_WOWL_TCPKEEP_TIME (1 << 17) /* Wakeup on tcpkeep alive timeout */ +#define WL_WOWL_MDNS_CONFLICT (1 << 18) /* Wakeup on mDNS Conflict Resolution */ +#define WL_WOWL_MDNS_SERVICE (1 << 19) /* Wakeup on mDNS Service Connect */ +#define WL_WOWL_TCPKEEP_DATA (1 << 20) /* tcp keepalive got data */ +#define WL_WOWL_FW_HALT (1 << 21) /* Firmware died in wowl mode */ +#define WL_WOWL_ENAB_HWRADIO (1 << 22) /* Enable detection of radio button changes */ +#define WL_WOWL_MIC_FAIL (1 << 23) /* Offloads detected MIC failure(s) */ +#define WL_WOWL_UNASSOC (1 << 24) /* Wakeup in Unassociated state (Net/Magic Pattern) */ +#define WL_WOWL_SECURE (1 << 25) /* Wakeup if received matched secured pattern */ +#define WL_WOWL_LINKDOWN (1 << 31) /* Link Down indication in WoWL mode */ + +#define WL_WOWL_TCPKEEP (1 << 20) /* temp copy to satisfy automerger */ +#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ + +#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */ +#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */ + +#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ +#define MAGIC_PKT_NUM_MAC_ADDRS 16 + + +/* Overlap BSS Scan parameters default, minimum, maximum */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */ +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */ + +#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */ + +#define WL_COEX_INFO_MASK 0x07 +#define WL_COEX_INFO_REQ 0x01 +#define WL_COEX_40MHZ_INTOLERANT 0x02 +#define WL_COEX_WIDTH20 0x04 + +#define WLC_RSSI_INVALID 0 /* invalid RSSI value */ + +#define MAX_RSSI_LEVELS 8 + +/* **** 
EXTLOG **** */ +#define EXTLOG_CUR_VER 0x0100 + +#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */ + +/* log modules (bitmap) */ +#define LOG_MODULE_COMMON 0x0001 +#define LOG_MODULE_ASSOC 0x0002 +#define LOG_MODULE_EVENT 0x0004 +#define LOG_MODULE_MAX 3 /* Update when adding module */ + +/* log levels */ +#define WL_LOG_LEVEL_DISABLE 0 +#define WL_LOG_LEVEL_ERR 1 +#define WL_LOG_LEVEL_WARN 2 +#define WL_LOG_LEVEL_INFO 3 +#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */ + +/* flag */ +#define LOG_FLAG_EVENT 1 + +/* log arg_type */ +#define LOG_ARGTYPE_NULL 0 +#define LOG_ARGTYPE_STR 1 /* %s */ +#define LOG_ARGTYPE_INT 2 /* %d */ +#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */ +#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */ + +/* 802.11 Mgmt Packet flags */ +#define VNDR_IE_BEACON_FLAG 0x1 +#define VNDR_IE_PRBRSP_FLAG 0x2 +#define VNDR_IE_ASSOCRSP_FLAG 0x4 +#define VNDR_IE_AUTHRSP_FLAG 0x8 +#define VNDR_IE_PRBREQ_FLAG 0x10 +#define VNDR_IE_ASSOCREQ_FLAG 0x20 +#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */ +#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */ + +#if defined(WLP2P) +/* P2P Action Frames flags (spec ordered) */ +#define VNDR_IE_GONREQ_FLAG 0x001000 +#define VNDR_IE_GONRSP_FLAG 0x002000 +#define VNDR_IE_GONCFM_FLAG 0x004000 +#define VNDR_IE_INVREQ_FLAG 0x008000 +#define VNDR_IE_INVRSP_FLAG 0x010000 +#define VNDR_IE_DISREQ_FLAG 0x020000 +#define VNDR_IE_DISRSP_FLAG 0x040000 +#define VNDR_IE_PRDREQ_FLAG 0x080000 +#define VNDR_IE_PRDRSP_FLAG 0x100000 + +#define VNDR_IE_P2PAF_SHIFT 12 +#endif /* WLP2P */ + +/* channel interference measurement (chanim) related defines */ + +/* chanim mode */ +#define CHANIM_DISABLE 0 /* disabled */ +#define CHANIM_DETECT 1 /* detection only */ +#define CHANIM_EXT 2 /* external state machine */ +#define CHANIM_ACT 3 /* full internal state machine, detect + act */ +#define CHANIM_MODE_MAX 4 + +/* define for apcs reason code */ +#define APCS_INIT 0 +#define APCS_IOCTL 1 +#define APCS_CHANIM 2 +#define APCS_CSTIMER 3 +#define APCS_BTA 4 +#define APCS_TXDLY 5 +#define APCS_NONACSD 6 +#define APCS_DFS_REENTRY 7 +#define APCS_TXFAIL 8 +#define APCS_MAX 9 + +/* number of ACS record entries */ +#define CHANIM_ACS_RECORD 10 + +/* CHANIM */ +#define CCASTATS_TXDUR 0 +#define CCASTATS_INBSS 1 +#define CCASTATS_OBSS 2 +#define CCASTATS_NOCTG 3 +#define CCASTATS_NOPKT 4 +#define CCASTATS_DOZE 5 +#define CCASTATS_TXOP 6 +#define CCASTATS_GDTXDUR 7 +#define CCASTATS_BDTXDUR 8 +#define CCASTATS_MAX 9 + +#define WL_CHANIM_COUNT_ALL 0xff +#define WL_CHANIM_COUNT_ONE 0x1 + +/* ap tpc modes */ +#define AP_TPC_OFF 0 +#define AP_TPC_BSS_PWR 1 /* BSS power control */ +#define AP_TPC_AP_PWR 2 /* AP power control */ +#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */ +#define AP_TPC_MAX_LINK_MARGIN 127 + +/* ap tpc modes */ +#define AP_TPC_OFF 0 +#define AP_TPC_BSS_PWR 1 /* BSS power control */ +#define AP_TPC_AP_PWR 2 /* AP power control */ +#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */ +#define AP_TPC_MAX_LINK_MARGIN 127 + +/* state */ +#define WL_P2P_DISC_ST_SCAN 0 +#define WL_P2P_DISC_ST_LISTEN 1 +#define WL_P2P_DISC_ST_SEARCH 2 + +/* i/f type */ +#define WL_P2P_IF_CLIENT 0 +#define WL_P2P_IF_GO 1 +#define WL_P2P_IF_DYNBCN_GO 2 +#define WL_P2P_IF_DEV 3 + +/* count */ +#define WL_P2P_SCHED_RSVD 0 +#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */ + +#define WL_P2P_SCHED_FIXED_LEN 3 + +/* schedule type */ +#define 
WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */ +#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */ + +/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */ +#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */ +#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */ +/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ +#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */ +/* schedule option - WL_P2P_SCHED_TYPE_XXX */ +#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */ + +/* schedule option - WL_P2P_SCHED_TYPE_ABS */ +#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */ +#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */ +/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ +#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/internal/duration/count with + * start being an offset of the 'current' TSF + */ + +/* feature flags */ +#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */ +#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe + * requests + */ +#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */ + +/* n-mode support capability */ +/* 2x2 includes both 1x1 & 2x2 devices + * reserved #define 2 for future when we want to separate 1x1 & 2x2 and + * control it independently + */ +#define WL_11N_2x2 1 +#define WL_11N_3x3 3 +#define WL_11N_4x4 4 + +/* define 11n feature disable flags */ +#define WLFEATURE_DISABLE_11N 0x00000001 +#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 +#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 +#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 +#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 +#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 +#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 +#define WLFEATURE_DISABLE_11N_GF 0x00000080 + +/* Proxy STA modes */ +#define PSTA_MODE_DISABLED 0 +#define PSTA_MODE_PROXY 1 +#define PSTA_MODE_REPEATER 2 + +/* op code in nat_cfg */ +#define NAT_OP_ENABLE 1 /* enable NAT on given interface */ +#define NAT_OP_DISABLE 2 /* disable NAT on given interface */ +#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */ + +/* NAT state */ +#define NAT_STATE_ENABLED 1 /* NAT is enabled */ +#define NAT_STATE_DISABLED 2 /* NAT is disabled */ + +#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */ +#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */ +#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */ +#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */ + +/* D0 Coalescing */ +#define IPV4_ARP_FILTER 0x0001 +#define IPV4_NETBT_FILTER 0x0002 +#define IPV4_LLMNR_FILTER 0x0004 +#define IPV4_SSDP_FILTER 0x0008 +#define IPV4_WSD_FILTER 0x0010 +#define IPV6_NETBT_FILTER 0x0200 +#define IPV6_LLMNR_FILTER 0x0400 +#define IPV6_SSDP_FILTER 0x0800 +#define IPV6_WSD_FILTER 0x1000 + +/* Network Offload Engine */ +#define NWOE_OL_ENABLE 0x00000001 + +/* + * Traffic management structures/defines. 
+ */ + +/* Traffic management bandwidth parameters */ +#define TRF_MGMT_MAX_PRIORITIES 3 + +#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */ +#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Don't shape traffic */ +#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC 0x0008 /* Manage traffic over our local subnet */ +#define TRF_MGMT_FLAG_FILTER_ON_MACADDR 0x0010 /* filter on MAC address */ +#define TRF_MGMT_FLAG_NO_RX 0x0020 /* do not apply fiters to rx packets */ + +#define TRF_FILTER_MAC_ADDR 0x0001 /* L2 filter use dst mac address for filtering */ +#define TRF_FILTER_IP_ADDR 0x0002 /* L3 filter use ip ddress for filtering */ +#define TRF_FILTER_L4 0x0004 /* L4 filter use tcp/udp for filtering */ +#define TRF_FILTER_DWM 0x0008 /* L3 filter use DSCP for filtering */ +#define TRF_FILTER_FAVORED 0x0010 /* Tag the packet FAVORED */ + +/* WNM/NPS subfeatures mask */ +#define WL_WNM_BSSTRANS 0x00000001 +#define WL_WNM_PROXYARP 0x00000002 +#define WL_WNM_MAXIDLE 0x00000004 +#define WL_WNM_TIMBC 0x00000008 +#define WL_WNM_TFS 0x00000010 +#define WL_WNM_SLEEP 0x00000020 +#define WL_WNM_DMS 0x00000040 +#define WL_WNM_FMS 0x00000080 +#define WL_WNM_NOTIF 0x00000100 +#define WL_WNM_MAX 0x00000200 + +#ifdef WLWNM_BRCM +#define BRCM_WNM_FEATURE_SET\ + (WL_WNM_PROXYARP | \ + WL_WNM_SLEEP | \ + WL_WNM_FMS | \ + WL_WNM_TFS | \ + WL_WNM_TIMBC | \ + WL_WNM_BSSTRANS | \ + WL_WNM_DMS | \ + WL_WNM_NOTIF | \ + 0) +#endif /* WLWNM_BRCM */ + +#ifndef ETHER_MAX_DATA +#define ETHER_MAX_DATA 1500 +#endif /* ETHER_MAX_DATA */ + +/* Different discovery modes for dpt */ +#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */ +#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */ +#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */ + +/* different path selection values */ +#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */ +#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */ +#define DPT_PATHSEL_APPATH 2 /* always use AP path */ + +/* different ops for deny list */ +#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */ +#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */ + +/* different ops for manual end point */ +#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ +#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ +#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ + +/* flags to indicate DPT status */ +#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */ +#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */ +#define DPT_STATUS_FAILED 0x04 /* DPT link failed */ + +#ifdef WLTDLS +/* different ops for manual end point */ +#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ +#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ +#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ +#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */ +#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */ +#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */ +#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */ +#define TDLS_MANUAL_EP_WFD_TPQ 8 /* WiFi-Display Tunneled Probe reQuest */ + +/* modes */ +#define TDLS_WFD_IE_TX 0 +#define TDLS_WFD_IE_RX 1 +#define TDLS_WFD_PROBE_IE_TX 2 +#define TDLS_WFD_PROBE_IE_RX 3 +#endif /* WLTDLS */ + +/* define for flag */ +#define TSPEC_PENDING 0 /* TSPEC pending */ +#define TSPEC_ACCEPTED 1 /* TSPEC accepted */ +#define TSPEC_REJECTED 2 /* TSPEC rejected */ +#define TSPEC_UNKNOWN 
3 /* TSPEC unknown */ +#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */ + + +/* Software feature flag defines used by wlfeatureflag */ +#ifdef WLAFTERBURNER +#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */ +#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */ +#endif /* WLAFTERBURNER */ +#define WL_SWFL_NOHWRADIO 0x0004 +#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */ +#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */ + +#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */ + +#define CSA_BROADCAST_ACTION_FRAME 0 /* csa broadcast action frame */ +#define CSA_UNICAST_ACTION_FRAME 1 /* csa unicast action frame */ + +/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER. + * + * (-100 < value < 0) value is used directly as a roaming trigger in dBm + * (0 <= value) value specifies a logical roaming trigger level from + * the list below + * + * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never + * the logical roam trigger value. + */ +#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */ +#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */ +#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */ +#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */ +#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */ + +#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */ + +/* Preferred Network Offload (PNO, formerly PFN) defines */ +#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 +#define SUPPRESS_SSID_BIT 9 +#define ENABLE_NET_OFFLOAD_BIT 10 +/* report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_BIT 11 +#define BESTN_BSSID_ONLY_BIT 12 + +#define SORT_CRITERIA_MASK 0x0001 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 + +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 +#define SUPPRESS_SSID_MASK 0x0200 +#define ENABLE_NET_OFFLOAD_MASK 0x0400 +/* report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_MASK 0x0800 +#define BESTN_BSSID_ONLY_MASK 0x1000 + +#define PFN_VERSION 2 +#define PFN_SCANRESULT_VERSION 1 +#define MAX_PFN_LIST_COUNT 16 + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + +#define PFN_PARTIAL_SCAN_BIT 0 +#define PFN_PARTIAL_SCAN_MASK 1 + +#define WL_PFN_SUPPRESSFOUND_MASK 0x08 +#define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_RSSI_MASK 0xff00 +#define WL_PFN_RSSI_SHIFT 8 + +#define WL_PFN_REPORT_ALLNET 0 +#define WL_PFN_REPORT_SSIDNET 1 +#define WL_PFN_REPORT_BSSIDNET 2 + +#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ +#define WL_PFN_CFG_FLAGS_HISTORY_OFF 0x00000002 /* Scan history suppressed */ + +#define WL_PFN_HIDDEN_BIT 2 +#define PNO_SCAN_MAX_FW 508*1000 /* max time scan time in msec */ +#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */ +#define PNO_SCAN_MIN_FW_SEC 10 /* min time 
scan time in SEC */ +#define WL_PFN_HIDDEN_MASK 0x4 + +#ifndef BESTN_MAX +#define BESTN_MAX 8 +#endif + +#ifndef MSCAN_MAX +#define MSCAN_MAX 32 +#endif + +/* TCP Checksum Offload error injection for testing */ +#define TOE_ERRTEST_TX_CSUM 0x00000001 +#define TOE_ERRTEST_RX_CSUM 0x00000002 +#define TOE_ERRTEST_RX_CSUM2 0x00000004 + +/* ARP Offload feature flags for arp_ol iovar */ +#define ARP_OL_AGENT 0x00000001 +#define ARP_OL_SNOOP 0x00000002 +#define ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define ARP_OL_PEER_AUTO_REPLY 0x00000008 + +/* ARP Offload error injection */ +#define ARP_ERRTEST_REPLY_PEER 0x1 +#define ARP_ERRTEST_REPLY_HOST 0x2 + +#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */ +#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */ +#define ND_REQUEST_MAX 5 /* Max set of offload params */ + + +/* AOAC wake event flag */ +#define WAKE_EVENT_NLO_DISCOVERY_BIT 1 +#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2 +#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4 +#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8 +#define WAKE_EVENT_NET_PACKET_BIT 0x10 + + +#define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */ + + +/* Packet filter operation mode */ +/* True: 1; False: 0 */ +#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1 +/* Enable and disable pkt_filter as a whole */ +#define PKT_FILTER_MODE_DISABLE 2 +/* Cache first matched rx pkt(be queried by host later) */ +#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4 +/* If pkt_filter is enabled and no filter is set, don't forward anything */ +#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8 + +#ifdef DONGLEOVERLAYS +#define OVERLAY_IDX_MASK 0x000000ff +#define OVERLAY_IDX_SHIFT 0 +#define OVERLAY_FLAGS_MASK 0xffffff00 +#define OVERLAY_FLAGS_SHIFT 8 +/* overlay written to device memory immediately after loading the base image */ +#define OVERLAY_FLAG_POSTLOAD 0x100 +/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */ +#define OVERLAY_FLAG_DEFER_DL 0x200 +/* overlay downloaded prior to the host going to sleep */ +#define OVERLAY_FLAG_PRESLEEP 0x400 +#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024 +#endif /* DONGLEOVERLAYS */ + +/* reuse two number in the sc/rc space */ +#define SMFS_CODE_MALFORMED 0xFFFE +#define SMFS_CODE_IGNORED 0xFFFD + +/* RFAWARE def */ +#define BCM_ACTION_RFAWARE 0x77 +#define BCM_ACTION_RFAWARE_DCS 0x01 + +/* DCS reason code define */ +#define BCM_DCS_IOVAR 0x1 +#define BCM_DCS_UNKNOWN 0xFF + + +#ifdef PROP_TXSTATUS +/* Bit definitions for tlv iovar */ +/* + * enable RSSI signals: + * WLFC_CTL_TYPE_RSSI + */ +#define WLFC_FLAGS_RSSI_SIGNALS 0x0001 + +/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals: + * + * WLFC_CTL_TYPE_MAC_OPEN + * WLFC_CTL_TYPE_MAC_CLOSE + * + * WLFC_CTL_TYPE_INTERFACE_OPEN + * WLFC_CTL_TYPE_INTERFACE_CLOSE + * + * WLFC_CTL_TYPE_MACDESC_ADD + * WLFC_CTL_TYPE_MACDESC_DEL + * + */ +#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002 + +/* enable (status, fifo_credit, mac_credit) signals + * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT + * WLFC_CTL_TYPE_TXSTATUS + * WLFC_CTL_TYPE_FIFO_CREDITBACK + */ +#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004 + +#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008 +#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010 +#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020 +#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040 +#define WLFC_FLAGS_PKT_STAMP_SIGNALS 0x0080 + +#endif /* PROP_TXSTATUS */ + +#define WL_TIMBC_STATUS_AP_UNKNOWN 255 /* AP status for internal use only */ + +#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */ 
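/* Editor's aside (illustrative sketch, not part of the original patch): the
 * WLFC_FLAGS_* values above are, per their own comments, bit definitions for
 * the proptxstatus "tlv" iovar, so a host that wants RSSI signals plus
 * credit/status signalling would OR the corresponding bits before issuing the
 * iovar.  The variable name below is an assumption used only for illustration:
 *
 *	uint32 tlv_flags = WLFC_FLAGS_RSSI_SIGNALS |
 *	                   WLFC_FLAGS_CREDIT_STATUS_SIGNALS;
 */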
+#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */ + +/* Definitions for Reliable Multicast */ +#define WL_RELMCAST_MAX_CLIENT 32 +#define WL_RELMCAST_FLAG_INBLACKLIST 1 +#define WL_RELMCAST_FLAG_ACTIVEACKER 2 +#define WL_RELMCAST_FLAG_RELMCAST 4 + +/* structures for proximity detection device role */ +#define WL_PROXD_MODE_DISABLE 0 +#define WL_PROXD_MODE_NEUTRAL 1 +#define WL_PROXD_MODE_INITIATOR 2 +#define WL_PROXD_MODE_TARGET 3 +#define WL_PROXD_RANDOM_WAKEUP 0x8000 + + +#ifdef NET_DETECT +#define NET_DETECT_MAX_WAKE_DATA_SIZE 2048 +#define NET_DETECT_MAX_PROFILES 16 +#define NET_DETECT_MAX_CHANNELS 50 +#endif /* NET_DETECT */ + +/* Bit masks for radio disabled status - returned by WL_GET_RADIO */ +#define WL_RADIO_SW_DISABLE (1<<0) +#define WL_RADIO_HW_DISABLE (1<<1) +#define WL_RADIO_MPC_DISABLE (1<<2) +#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */ +#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio diable per core for DVT */ + +#define WL_SPURAVOID_OFF 0 +#define WL_SPURAVOID_ON1 1 +#define WL_SPURAVOID_ON2 2 + + +#define WL_4335_SPURAVOID_ON1 1 +#define WL_4335_SPURAVOID_ON2 2 +#define WL_4335_SPURAVOID_ON3 3 +#define WL_4335_SPURAVOID_ON4 4 +#define WL_4335_SPURAVOID_ON5 5 +#define WL_4335_SPURAVOID_ON6 6 +#define WL_4335_SPURAVOID_ON7 7 +#define WL_4335_SPURAVOID_ON8 8 +#define WL_4335_SPURAVOID_ON9 9 + +/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */ +#define WL_TXPWR_OVERRIDE (1U<<31) +#define WL_TXPWR_NEG (1U<<30) + + +/* phy types (returned by WLC_GET_PHYTPE) */ +#define WLC_PHY_TYPE_A 0 +#define WLC_PHY_TYPE_B 1 +#define WLC_PHY_TYPE_G 2 +#define WLC_PHY_TYPE_N 4 +#define WLC_PHY_TYPE_LP 5 +#define WLC_PHY_TYPE_SSN 6 +#define WLC_PHY_TYPE_HT 7 +#define WLC_PHY_TYPE_LCN 8 +#define WLC_PHY_TYPE_LCN40 10 +#define WLC_PHY_TYPE_AC 11 +#define WLC_PHY_TYPE_LCN20 12 +#define WLC_PHY_TYPE_NULL 0xf + +/* Values for PM */ +#define PM_OFF 0 +#define PM_MAX 1 +#define PM_FAST 2 +#define PM_FORCE_OFF 3 /* use this bit to force PM off even bt is active */ + +#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */ + +/* fbt_cap: FBT assoc / reassoc modes. */ +#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC 1 /* Driver 4-way handshake & reassoc (WLFBT). 
*/ + +/* monitor_promisc_level bits */ +#define WL_MONPROMISC_PROMISC 0x0001 +#define WL_MONPROMISC_CTRL 0x0002 +#define WL_MONPROMISC_FCS 0x0004 + +/* TCP Checksum Offload defines */ +#define TOE_TX_CSUM_OL 0x00000001 +#define TOE_RX_CSUM_OL 0x00000002 + +/* Wi-Fi Display Services (WFDS) */ +#define WL_P2P_SOCIAL_CHANNELS_MAX WL_NUMCHANNELS +#define MAX_WFDS_SEEK_SVC 4 /* Max # of wfds services to seek */ +#define MAX_WFDS_ADVERT_SVC 4 /* Max # of wfds services to advertise */ +#define MAX_WFDS_SVC_NAME_LEN 200 /* maximum service_name length */ +#define MAX_WFDS_ADV_SVC_INFO_LEN 65000 /* maximum adv service_info length */ +#define P2P_WFDS_HASH_LEN 6 /* Length of a WFDS service hash */ +#define MAX_WFDS_SEEK_SVC_INFO_LEN 255 /* maximum seek service_info req length */ +#define MAX_WFDS_SEEK_SVC_NAME_LEN 200 /* maximum service_name length */ + +/* ap_isolate bitmaps */ +#define AP_ISOLATE_DISABLED 0x0 +#define AP_ISOLATE_SENDUP_ALL 0x01 +#define AP_ISOLATE_SENDUP_MCAST 0x02 + +#endif /* wlioctl_defs_h */ diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h new file mode 100644 index 000000000000..9e79ddb5525e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h @@ -0,0 +1,142 @@ +/* + * Definitions for ioctls to access DHD iovars. + * Based on wlioctl.h (for Broadcom 802.11abg driver). + * (Moves towards generic ioctls for BCM drivers/iovars.) + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhdioctl.h 585723 2015-09-11 06:26:37Z $ + */ + +#ifndef _dhdioctl_h_ +#define _dhdioctl_h_ + +#include + + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + + +/* Linux network driver ioctl encoding */ +typedef struct dhd_ioctl { + uint cmd; /* common ioctl definition */ + void *buf; /* pointer to user buffer */ + uint len; /* length of user buffer */ + bool set; /* get or set request (optional) */ + uint used; /* bytes read or written (optional) */ + uint needed; /* bytes needed (optional) */ + uint driver; /* to identify target driver */ +} dhd_ioctl_t; + +/* Underlying BUS definition */ +enum { + BUS_TYPE_USB = 0, /* for USB dongles */ + BUS_TYPE_SDIO, /* for SDIO dongles */ + BUS_TYPE_PCIE /* for PCIE dongles */ +}; + +/* per-driver magic numbers */ +#define DHD_IOCTL_MAGIC 0x00444944 + +/* bump this number if you change the ioctl interface */ +#define DHD_IOCTL_VERSION 1 + +#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ +#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ + +/* common ioctl definitions */ +#define DHD_GET_MAGIC 0 +#define DHD_GET_VERSION 1 +#define DHD_GET_VAR 2 +#define DHD_SET_VAR 3 + +/* message levels */ +#define DHD_ERROR_VAL 0x0001 +#define DHD_TRACE_VAL 0x0002 +#define DHD_INFO_VAL 0x0004 +#define DHD_DATA_VAL 0x0008 +#define DHD_CTL_VAL 0x0010 +#define DHD_TIMER_VAL 0x0020 +#define DHD_HDRS_VAL 0x0040 +#define DHD_BYTES_VAL 0x0080 +#define DHD_INTR_VAL 0x0100 +#define DHD_LOG_VAL 0x0200 +#define DHD_GLOM_VAL 0x0400 +#define DHD_EVENT_VAL 0x0800 +#define DHD_BTA_VAL 0x1000 +#define DHD_ISCAN_VAL 0x2000 +#define DHD_ARPOE_VAL 0x4000 +#define DHD_REORDER_VAL 0x8000 +#define DHD_WL_VAL 0x10000 +#define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */ +#define DHD_WL_VAL2 0x40000 +#define DHD_PNO_VAL 0x80000 +#define DHD_MSGTRACE_VAL 0x100000 +#define DHD_FWLOG_VAL 0x400000 +#define DHD_RTT_VAL 0x200000 +#define DHD_IOV_INFO_VAL 0x800000 + +#ifdef SDTEST +/* For pktgen iovar */ +typedef struct dhd_pktgen { + uint version; /* To allow structure change tracking */ + uint freq; /* Max ticks between tx/rx attempts */ + uint count; /* Test packets to send/rcv each attempt */ + uint print; /* Print counts every attempts */ + uint total; /* Total packets (or bursts) */ + uint minlen; /* Minimum length of packets to send */ + uint maxlen; /* Maximum length of packets to send */ + uint numsent; /* Count of test packets sent */ + uint numrcvd; /* Count of test packets received */ + uint numfail; /* Count of test send failures */ + uint mode; /* Test mode (type of test packets) */ + uint stop; /* Stop after this many tx failures */ +} dhd_pktgen_t; + +/* Version in case structure changes */ +#define DHD_PKTGEN_VERSION 2 + +/* Type of test packets to use */ +#define DHD_PKTGEN_ECHO 1 /* Send echo requests */ +#define DHD_PKTGEN_SEND 2 /* Send discard packets */ +#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */ +#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */ +#endif /* SDTEST */ + +/* Enter idle immediately (no timeout) */ +#define DHD_IDLE_IMMEDIATE (-1) + +/* Values for idleclock iovar: other values are the sd_divisor to use when idle */ +#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */ +#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */ + + +/* require default structure packing */ +#include + +#endif /* _dhdioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h 
b/drivers/net/wireless/bcmdhd/include/epivers.h new file mode 100644 index 000000000000..76012564435d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/epivers.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ + * +*/ + +#ifndef _epivers_h_ +#define _epivers_h_ + +#define EPI_MAJOR_VERSION 1 + +#define EPI_MINOR_VERSION 363 + +#define EPI_RC_NUMBER 59 + +#define EPI_INCREMENTAL_NUMBER 144 + +#define EPI_BUILD_NUMBER 0 + +#define EPI_VERSION 1, 363, 59, 144 + +#define EPI_VERSION_NUM 0x0116b3b9 + +#define EPI_VERSION_DEV 1.363.59 + +/* Driver Version String, ASCII, 32 chars max */ +#define EPI_VERSION_STR "1.363.59.144 (r)" + +#endif /* _epivers_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/event_log.h b/drivers/net/wireless/bcmdhd/include/event_log.h new file mode 100644 index 000000000000..d06d811cb925 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/event_log.h @@ -0,0 +1,349 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_log.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _EVENT_LOG_H_ +#define _EVENT_LOG_H_ + +#include +#include +#include + +/* logstrs header */ +#define LOGSTRS_MAGIC 0x4C4F4753 +#define LOGSTRS_VERSION 0x1 + +/* We make sure that the block size will fit in a single packet + * (allowing for a bit of overhead on each packet + */ +#define EVENT_LOG_MAX_BLOCK_SIZE 1400 +#define EVENT_LOG_WL_BLOCK_SIZE 0x200 +#define EVENT_LOG_PSM_BLOCK_SIZE 0x200 +#define EVENT_LOG_BUS_BLOCK_SIZE 0x200 +#define EVENT_LOG_ERROR_BLOCK_SIZE 0x200 + +/* + * There are multiple levels of objects define here: + * event_log_set - a set of buffers + * event log groups - every event log call is part of just one. All + * event log calls in a group are handled the + * same way. Each event log group is associated + * with an event log set or is off. + */ + +#ifndef __ASSEMBLER__ + +/* On the external system where the dumper is we need to make sure + * that these types are the same size as they are on the ARM the + * produced them + */ +#ifdef EVENT_LOG_DUMPER +#define _EL_BLOCK_PTR uint32 +#define _EL_TYPE_PTR uint32 +#define _EL_SET_PTR uint32 +#define _EL_TOP_PTR uint32 +#else +#define _EL_BLOCK_PTR struct event_log_block * +#define _EL_TYPE_PTR uint32 * +#define _EL_SET_PTR struct event_log_set ** +#define _EL_TOP_PTR struct event_log_top * +#endif /* EVENT_LOG_DUMPER */ + +/* Event log sets (a logical circurlar buffer) consist of one or more + * event_log_blocks. The blocks themselves form a logical circular + * list. The log entries are placed in each event_log_block until it + * is full. Logging continues with the next event_log_block in the + * event_set until the last event_log_block is reached and then + * logging starts over with the first event_log_block in the + * event_set. + */ +typedef struct event_log_block { + _EL_BLOCK_PTR next_block; + _EL_BLOCK_PTR prev_block; + _EL_TYPE_PTR end_ptr; + + /* Start of packet sent for log tracing */ + uint16 pktlen; /* Size of rest of block */ + uint16 count; /* Logtrace counter */ + uint32 timestamp; /* Timestamp at start of use */ + uint32 event_logs; +} event_log_block_t; + +/* There can be multiple event_sets with each logging a set of + * associated events (i.e, "fast" and "slow" events). 
+ */ +typedef struct event_log_set { + _EL_BLOCK_PTR first_block; /* Pointer to first event_log block */ + _EL_BLOCK_PTR last_block; /* Pointer to last event_log block */ + _EL_BLOCK_PTR logtrace_block; /* next block traced */ + _EL_BLOCK_PTR cur_block; /* Pointer to current event_log block */ + _EL_TYPE_PTR cur_ptr; /* Current event_log pointer */ + uint32 blockcount; /* Number of blocks */ + uint16 logtrace_count; /* Last count for logtrace */ + uint16 blockfill_count; /* Fill count for logtrace */ + uint32 timestamp; /* Last timestamp event */ + uint32 cyclecount; /* Cycles at last timestamp event */ +} event_log_set_t; + +/* Top data structure for access to everything else */ +typedef struct event_log_top { + uint32 magic; +#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */ + uint32 version; +#define EVENT_LOG_VERSION 1 + uint32 num_sets; + uint32 logstrs_size; /* Size of lognums + logstrs area */ + uint32 timestamp; /* Last timestamp event */ + uint32 cyclecount; /* Cycles at last timestamp event */ + _EL_SET_PTR sets; /* Ptr to array of set ptrs */ +} event_log_top_t; + +/* Data structure of Keeping the Header from logstrs.bin */ +typedef struct { + uint32 logstrs_size; /* Size of the file */ + uint32 rom_lognums_offset; /* Offset to the ROM lognum */ + uint32 ram_lognums_offset; /* Offset to the RAM lognum */ + uint32 rom_logstrs_offset; /* Offset to the ROM logstr */ + uint32 ram_logstrs_offset; /* Offset to the RAM logstr */ + /* Keep version and magic last since "header" is appended to the end of logstrs file. */ + uint32 version; /* Header version */ + uint32 log_magic; /* MAGIC number for verification 'LOGS' */ +} logstr_header_t; + +/* + * Use the following macros for generating log events. + * + * The FAST versions check the enable of the tag before evaluating the arguments and calling the + * event_log function. This adds 5 instructions. The COMPACT versions evaluate the arguments + * and call the event_log function unconditionally. The event_log function will then skip logging + * if this tag is disabled. + * + * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are + * two variants of these macros to help. + * + * First there are the CAST versions. The event_log function normally logs uint32 values or else + * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit + * any existing code. + * + * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments + * to be enclosed in parentheses. This allows us to make the following mapping of an existing + * msglevel macro: + * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args) + * + * The versions of the macros without FAST or COMPACT in their name are just synonyms for the + * COMPACT versions. + * + * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic + * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed. + * Use the FAST macro in performance sensitive paths. The key concept here is that you should be + * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely. + * + */ + +#ifndef EVENT_LOG_DUMPER + +#ifndef EVENT_LOG_COMPILE + +/* Null define if no tracing */ +#define EVENT_LOG(format, ...) +#define EVENT_LOG_FAST(tag, fmt, ...) +#define EVENT_LOG_COMPACT(tag, fmt, ...) + +#define EVENT_LOG_CAST(tag, fmt, ...) +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) 
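/* Editor's aside (illustrative sketch, not part of the original patch): sample
 * call sites for the macro variants described in the usage comment block
 * above.  The tag EVENT_LOG_TAG_WL_ERROR and the format arguments are
 * assumptions used purely for illustration; with EVENT_LOG_COMPILE undefined,
 * as in this branch, every one of them compiles away to nothing.
 *
 *	EVENT_LOG_FAST_CAST(EVENT_LOG_TAG_WL_ERROR, "txstatus %d pkt %p", status, p);
 *		- performance-sensitive path: the tag enable is checked before
 *		  the arguments are evaluated.
 *	EVENT_LOG_COMPACT(EVENT_LOG_TAG_WL_ERROR, "bus reset, err %d", err);
 *		- rarely-executed path: logged unconditionally, filtering by
 *		  tag happens inside the event_log function itself.
 *	#define WL_ERROR(args)  EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
 *		- remapping an existing msglevel-style macro, as the usage
 *		  comment above suggests.
 */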
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) + +#define EVENT_LOG_IS_LOG_ON(tag) 0 + +#else /* EVENT_LOG_COMPILE */ + +/* The first few are special because they can be done more efficiently + * this way and they are the common case. Once there are too many + * parameters the code size starts to be an issue and a loop is better + */ +#define _EVENT_LOG0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, t1) +#define _EVENT_LOG2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, t1, t2) +#define _EVENT_LOG3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, t1, t2, t3) +#define _EVENT_LOG4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, t1, t2, t3, t4) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG5(tag, fmt_num, ...) event_logn(5, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG6(tag, fmt_num, ...) event_logn(6, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG7(tag, fmt_num, ...) event_logn(7, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG8(tag, fmt_num, ...) event_logn(8, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG9(tag, fmt_num, ...) event_logn(9, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGA(tag, fmt_num, ...) event_logn(10, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGB(tag, fmt_num, ...) event_logn(11, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGC(tag, fmt_num, ...) event_logn(12, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGD(tag, fmt_num, ...) event_logn(13, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__) + + +/* Casting low level macros */ +#define _EVENT_LOG_CAST0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG_CAST1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, (uint32)(t1)) +#define _EVENT_LOG_CAST2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2)) +#define _EVENT_LOG_CAST3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3)) +#define _EVENT_LOG_CAST4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4)) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__) + +/* Hack to make the proper routine call when variadic macros get + * passed. Note the max of 15 arguments. 
More than that can't be + * handled by the event_log entries anyways so best to catch it at compile + * time + */ + +#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9, \ + _A, _B, _C, _D, _E, _F, N, ...) F ## N + +/* cast = _EVENT_LOG for no casting + * cast = _EVENT_LOG_CAST for casting of fmt arguments to uint32. + * Only first 4 arguments are casted to uint32. event_logn() is called + * if more than 4 arguments are present. This function internally assumes + * all arguments are uint32 + */ +#define _EVENT_LOG(cast, tag, fmt, ...) \ + static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \ + static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \ + _EVENT_LOG_VA_NUM_ARGS(cast, ##__VA_ARGS__, \ + F, E, D, C, B, A, 9, 8, \ + 7, 6, 5, 4, 3, 2, 1, 0) \ + (tag, (int) &fmtnum , ## __VA_ARGS__) + + +#define EVENT_LOG_FAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if (tag_flag != 0) { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } while (0) + +/* Event log macro with casting to uint32 of arguments */ +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if (tag_flag != 0) { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } while (0) + + +#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__) + +#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__) + +#define _EVENT_LOG_REMOVE_PAREN(...) 
__VA_ARGS__ +#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + + +#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG) + +#define EVENT_DUMP event_log_buffer + +extern uint8 *event_log_tag_sets; + +#include + +extern int event_log_init(si_t *sih); +extern int event_log_set_init(si_t *sih, int set_num, int size); +extern int event_log_set_expand(si_t *sih, int set_num, int size); +extern int event_log_set_shrink(si_t *sih, int set_num, int size); +extern int event_log_tag_start(int tag, int set_num, int flags); +extern int event_log_tag_stop(int tag); +extern int event_log_get(int set_num, int buflen, void *buf); +extern uint8 * event_log_next_logtrace(int set_num); + +extern void event_log0(int tag, int fmtNum); +extern void event_log1(int tag, int fmtNum, uint32 t1); +extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2); +extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3); +extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4); +extern void event_logn(int num_args, int tag, int fmtNum, ...); + +extern void event_log_time_sync(uint32 ms); +extern void event_log_buffer(int tag, uint8 *buf, int size); + +#endif /* EVENT_LOG_DUMPER */ + +#endif /* EVENT_LOG_COMPILE */ + +#endif /* __ASSEMBLER__ */ + +#endif /* _EVENT_LOG_H */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h new file mode 100644 index 000000000000..baf55724c595 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h @@ -0,0 +1,92 @@ +/* + * HND arm trap handling. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_armtrap.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _hnd_armtrap_h_ +#define _hnd_armtrap_h_ + + +/* ARM trap handling */ + +/* Trap types defined by ARM (see arminc.h) */ + +/* Trap locations in lo memory */ +#define TRAP_STRIDE 4 +#define FIRST_TRAP TR_RST +#define LAST_TRAP (TR_FIQ * TRAP_STRIDE) + +#if defined(__ARM_ARCH_4T__) +#define MAX_TRAP_TYPE (TR_FIQ + 1) +#elif defined(__ARM_ARCH_7M__) +#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) +#endif /* __ARM_ARCH_7M__ */ + +/* The trap structure is defined here as offsets for assembly */ +#define TR_TYPE 0x00 +#define TR_EPC 0x04 +#define TR_CPSR 0x08 +#define TR_SPSR 0x0c +#define TR_REGS 0x10 +#define TR_REG(n) (TR_REGS + (n) * 4) +#define TR_SP TR_REG(13) +#define TR_LR TR_REG(14) +#define TR_PC TR_REG(15) + +#define TRAP_T_SIZE 80 +#define ASSERT_TRAP_SVC_NUMBER 255 + +#ifndef _LANGUAGE_ASSEMBLY + +#include + +typedef struct _trap_struct { + uint32 type; + uint32 epc; + uint32 cpsr; + uint32 spsr; + uint32 r0; /* a1 */ + uint32 r1; /* a2 */ + uint32 r2; /* a3 */ + uint32 r3; /* a4 */ + uint32 r4; /* v1 */ + uint32 r5; /* v2 */ + uint32 r6; /* v3 */ + uint32 r7; /* v4 */ + uint32 r8; /* v5 */ + uint32 r9; /* sb/v6 */ + uint32 r10; /* sl/v7 */ + uint32 r11; /* fp/v8 */ + uint32 r12; /* ip */ + uint32 r13; /* sp */ + uint32 r14; /* lr */ + uint32 pc; /* r15 */ +} trap_t; + +#endif /* !_LANGUAGE_ASSEMBLY */ + +#endif /* _hnd_armtrap_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/drivers/net/wireless/bcmdhd/include/hnd_cons.h new file mode 100644 index 000000000000..2dee71abefeb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_cons.h @@ -0,0 +1,80 @@ +/* + * Console support for RTE - for host use only. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_cons.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _hnd_cons_h_ +#define _hnd_cons_h_ + +#include +#include + +#define CBUF_LEN (128) + +#define LOG_BUF_LEN 1024 + +#ifdef BOOTLOADER_CONSOLE_OUTPUT +#undef RWL_MAX_DATA_LEN +#undef CBUF_LEN +#undef LOG_BUF_LEN +#define RWL_MAX_DATA_LEN (4 * 1024 + 8) +#define CBUF_LEN (RWL_MAX_DATA_LEN + 64) +#define LOG_BUF_LEN (16 * 1024) +#endif + +typedef struct { + uint32 buf; /* Can't be pointer on (64-bit) hosts */ + uint buf_size; + uint idx; + uint out_idx; /* output index */ +} hnd_log_t; + +typedef struct { + /* Virtual UART + * When there is no UART (e.g. 
Quickturn), the host should write a complete + * input line directly into cbuf and then write the length into vcons_in. + * This may also be used when there is a real UART (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + volatile uint vcons_in; + volatile uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host polls. + */ + hnd_log_t log; + + /* Console input line buffer + * Characters are read one at a time into cbuf until is received, then + * the buffer is processed as a command line. Also used for virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +} hnd_cons_t; + +#endif /* _hnd_cons_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h new file mode 100644 index 000000000000..3cf46727b044 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h @@ -0,0 +1,225 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktpool.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _hnd_pktpool_h_ +#define _hnd_pktpool_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTPOOL_MUTEX_DECL(mutex) +#endif + +#ifdef BCMPKTPOOL +#define POOL_ENAB(pool) ((pool) && (pool)->inited) +#else /* BCMPKTPOOL */ +#define POOL_ENAB(bus) 0 +#endif /* BCMPKTPOOL */ + +#ifndef PKTPOOL_LEN_MAX +#define PKTPOOL_LEN_MAX 40 +#endif /* PKTPOOL_LEN_MAX */ +#define PKTPOOL_CB_MAX 3 +#define PKTPOOL_CB_MAX_AVL 4 + + +/* forward declaration */ +struct pktpool; + +typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg); +typedef struct { + pktpool_cb_t cb; + void *arg; +} pktpool_cbinfo_t; + +/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */ +typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2); +typedef struct { + pktpool_cb_extn_t cb; + void *arg; +} pktpool_cbextn_info_t; + + +#ifdef BCMDBG_POOL +/* pkt pool debug states */ +#define POOL_IDLE 0 +#define POOL_RXFILL 1 +#define POOL_RXDH 2 +#define POOL_RXD11 3 +#define POOL_TXDH 4 +#define POOL_TXD11 5 +#define POOL_AMPDU 6 +#define POOL_TXENQ 7 + +typedef struct { + void *p; + uint32 cycles; + uint32 dur; +} pktpool_dbg_t; + +typedef struct { + uint8 txdh; /* tx to host */ + uint8 txd11; /* tx to d11 */ + uint8 enq; /* waiting in q */ + uint8 rxdh; /* rx from host */ + uint8 rxd11; /* rx from d11 */ + uint8 rxfill; /* dma_rxfill */ + uint8 idle; /* avail in pool */ +} pktpool_stats_t; +#endif /* BCMDBG_POOL */ + +typedef struct pktpool { + bool inited; /**< pktpool_init was successful */ + uint8 type; /**< type of lbuf: basic, frag, etc */ + uint8 id; /**< pktpool ID: index in registry */ + bool istx; /**< direction: transmit or receive data path */ + HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */ + + void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */ + uint16 avail; /**< number of packets in pool's free list */ + uint16 len; /**< number of packets managed by pool */ + uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */ + uint16 plen; /**< size of pkt buffer, excluding lbuf|lbuf_frag */ + + bool empty; + uint8 cbtoggle; + uint8 cbcnt; + uint8 ecbcnt; + uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */ + pktpool_cbinfo_t *availcb_excl; + pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL]; + pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; + pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */ + pktpool_cbextn_info_t rxcplidfn; +#ifdef BCMDBG_POOL + uint8 dbg_cbcnt; + pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX]; + uint16 dbg_qlen; + pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1]; +#endif + pktpool_cbinfo_t dmarxfill; +} pktpool_t; + + +pktpool_t *get_pktpools_registry(int id); + +/* Incarnate a pktpool registry. On success returns total_pools. 
*/ +extern int pktpool_attach(osl_t *osh, uint32 total_pools); +extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */ + +extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type); +extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp); +extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal); +extern void* pktpool_get(pktpool_t *pktp); +extern void pktpool_free(pktpool_t *pktp, void *p); +extern int pktpool_add(pktpool_t *pktp, void *p); +extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp); +extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb); +extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen); +extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen); +extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable); +extern bool pktpool_emptycb_disabled(pktpool_t *pktp); +extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1); +extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg); +extern void pktpool_invoke_dmarxfill(pktpool_t *pktp); +extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg); + +#define POOLPTR(pp) ((pktpool_t *)(pp)) +#define POOLID(pp) (POOLPTR(pp)->id) + +#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid)) + +#define pktpool_len(pp) (POOLPTR(pp)->len) +#define pktpool_avail(pp) (POOLPTR(pp)->avail) +#define pktpool_plen(pp) (POOLPTR(pp)->plen) +#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen) + + +/* + * ---------------------------------------------------------------------------- + * A pool ID is assigned with a pkt pool during pool initialization. This is + * done by maintaining a registry of all initialized pools, and the registry + * index at which the pool is registered is used as the pool's unique ID. + * ID 0 is reserved and is used to signify an invalid pool ID. + * All packets henceforth allocated from a pool will be tagged with the pool's + * unique ID. Packets allocated from the heap will use the reserved ID = 0. + * Packets with non-zero pool id signify that they were allocated from a pool. + * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used + * in place of a 32bit pool pointer in each packet. 
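 *
 * Editor's illustration (a minimal sketch with assumed variable names, not
 * part of the original header): converting between a pool pointer and its
 * 4-bit ID using the macros defined below.
 *
 *   pktpool_t *pool = ...;                      pool set up by pktpool_init()
 *   uint8 id = PKTPOOL_PTR2ID(pool);            registry index, 1..PKTPOOL_MAXIMUM_ID
 *   ASSERT(id != PKTPOOL_INVALID_ID);           0 marks a heap-allocated packet
 *   pktpool_t *same_pool = PKTPOOL_ID2PTR(id);  look the pool back up from a packet's ID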
+ * ---------------------------------------------------------------------------- + */ +#define PKTPOOL_INVALID_ID (0) +#define PKTPOOL_MAXIMUM_ID (15) + +/* Registry of pktpool(s) */ +/* Pool ID to/from Pool Pointer converters */ +#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id)) +#define PKTPOOL_PTR2ID(pp) (POOLID(pp)) + +#ifdef BCMDBG_POOL +extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_start_trigger(pktpool_t *pktp, void *p); +extern int pktpool_dbg_dump(pktpool_t *pktp); +extern int pktpool_dbg_notify(pktpool_t *pktp); +extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); +#endif /* BCMDBG_POOL */ + +#ifdef BCMPKTPOOL +#define SHARED_POOL (pktpool_shared) +extern pktpool_t *pktpool_shared; +#ifdef BCMFRAGPOOL +#define SHARED_FRAG_POOL (pktpool_shared_lfrag) +extern pktpool_t *pktpool_shared_lfrag; +#endif + +/** PCIe SPLITRX related */ +#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) +extern pktpool_t *pktpool_shared_rxlfrag; + +void hnd_pktpool_init(osl_t *osh); +void hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); +void hnd_pktpool_refill(bool minimal); +#else /* BCMPKTPOOL */ +#define SHARED_POOL ((struct pktpool *)NULL) +#endif /* BCMPKTPOOL */ + +#ifdef __cplusplus + } +#endif + +#endif /* _hnd_pktpool_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h new file mode 100644 index 000000000000..1586de3ca5b4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h @@ -0,0 +1,214 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktq.h 591283 2015-10-07 11:52:00Z $ + */ + +#ifndef _hnd_pktq_h_ +#define _hnd_pktq_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTQ_MUTEX_DECL(mutex) +#endif + +/* osl multi-precedence packet queue */ +#define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */ +#ifndef PKTQ_LEN_DEFAULT +#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */ +#endif +#ifndef PKTQ_MAX_PREC +#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */ +#endif + +typedef struct pktq_prec { + void *head; /**< first packet to dequeue */ + void *tail; /**< last packet to dequeue */ + uint16 len; /**< number of queued packets */ + uint16 max; /**< maximum number of queued packets */ +} pktq_prec_t; + +#ifdef PKTQ_LOG +typedef struct { + uint32 requested; /**< packets requested to be stored */ + uint32 stored; /**< packets stored */ + uint32 saved; /**< packets saved, + because a lowest priority queue has given away one packet + */ + uint32 selfsaved; /**< packets saved, + because an older packet from the same queue has been dropped + */ + uint32 full_dropped; /**< packets dropped, + because pktq is full with higher precedence packets + */ + uint32 dropped; /**< packets dropped because pktq per that precedence is full */ + uint32 sacrificed; /**< packets dropped, + in order to save one from a queue of a highest priority + */ + uint32 busy; /**< packets droped because of hardware/transmission error */ + uint32 retry; /**< packets re-sent because they were not received */ + uint32 ps_retry; /**< packets retried again prior to moving power save mode */ + uint32 suppress; /**< packets which were suppressed and not transmitted */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 max_avail; /**< the high-water mark of the queue capacity for packets - + goes to zero as queue fills + */ + uint32 max_used; /**< the high-water mark of the queue utilisation for packets - + increases with use ('inverse' of max_avail) + */ + uint32 queue_capacity; /**< the maximum capacity of the queue */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running totoal of primary phy rate of all packets */ + uint32 throughput; /**< actual data transferred successfully */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ + uint32 _logtime; /**< timestamp of last counter clear */ +} pktq_counters_t; + +typedef struct { + uint32 _prec_log; + pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */ +} pktq_log_t; +#endif /* PKTQ_LOG */ + + +#define PKTQ_COMMON \ + uint16 num_prec; /**< number of precedences in use */ \ + uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \ + uint16 max; /**< total max packets */ \ + uint16 len; /**< total number of packets */ + +/* multi-priority pkt queue */ +struct pktq { + PKTQ_COMMON + /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ + struct pktq_prec q[PKTQ_MAX_PREC]; + HND_PKTQ_MUTEX_DECL(mutex) +#ifdef PKTQ_LOG + pktq_log_t* pktqlog; +#endif +}; + +/* simple, non-priority pkt queue */ +struct spktq { + PKTQ_COMMON + /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ + struct 
pktq_prec q[1]; + HND_PKTQ_MUTEX_DECL(mutex) +}; + +#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) + +/* fn(pkt, arg). return true if pkt belongs to if */ +typedef bool (*ifpkt_cb_t)(void*, int); + +/* operations on a specific precedence in packet queue */ + +#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) +#define pktq_pmax(pq, prec) ((pq)->q[prec].max) +#define pktq_plen(pq, prec) ((pq)->q[prec].len) +#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0) +#define pktq_ppeek(pq, prec) ((pq)->q[prec].head) +#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_pavail(struct pktq *pq, int prec); +extern bool pktq_pfull(struct pktq *pq, int prec); +#else +#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) +#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) +#endif /* HND_PKTQ_THREAD_SAFE */ + +extern void pktq_append(struct pktq *pq, int prec, struct spktq *list); +extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list); + +extern void *pktq_penq(struct pktq *pq, int prec, void *p); +extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); +extern void *pktq_pdeq(struct pktq *pq, int prec); +extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p); +extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg); +extern void *pktq_pdeq_tail(struct pktq *pq, int prec); +/* Empty the queue at particular precedence level */ +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, + ifpkt_cb_t fn, int arg); +/* Remove a specified packet from its queue */ +extern bool pktq_pdel(struct pktq *pq, void *p, int prec); + +/* operations on a set of precedences in packet queue */ + +extern int pktq_mlen(struct pktq *pq, uint prec_bmp); +extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); +extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); + +/* operations on packet queue as a whole */ + +#define pktq_len(pq) ((int)(pq)->len) +#define pktq_max(pq) ((int)(pq)->max) +#define pktq_empty(pq) ((pq)->len == 0) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_avail(struct pktq *pq); +extern bool pktq_full(struct pktq *pq); +#else +#define pktq_avail(pq) ((int)((pq)->max - (pq)->len)) +#define pktq_full(pq) ((pq)->len >= (pq)->max) +#endif /* HND_PKTQ_THREAD_SAFE */ + +/* operations for single precedence queues */ +#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p)) +#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p)) +#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0) +#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0) +#define pktqflush(osh, pq) pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0) +#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len) +#define pktqdeinit(pq) pktq_deinit((struct pktq *)(void *)pq) +#define pktqavail(pq) pktq_avail((struct pktq *)(void *)pq) +#define pktqfull(pq) pktq_full((struct pktq *)(void *)pq) + +extern bool pktq_init(struct pktq *pq, int num_prec, int max_len); +extern bool pktq_deinit(struct pktq *pq); + +extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len); + +/* prec_out may be NULL if caller is not interested in return value */ +extern void *pktq_deq(struct pktq *pq, int *prec_out); +extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); +extern void *pktq_peek(struct pktq *pq, int *prec_out); +extern 
void *pktq_peek_tail(struct pktq *pq, int *prec_out); +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg); + +#ifdef __cplusplus + } +#endif + +#endif /* _hnd_pktq_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h new file mode 100644 index 000000000000..dfc83d3d7fd1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h @@ -0,0 +1,45 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndpmu.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + +#include +#include +#include + + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear); +extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); +extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag); + +#endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h new file mode 100644 index 000000000000..36884a088b6f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h @@ -0,0 +1,315 @@ +/* + * Broadcom HND chip & on-chip-interconnect-related definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndsoc.h 517544 2014-11-26 00:40:42Z $ + */ + +#ifndef _HNDSOC_H +#define _HNDSOC_H + +/* Include the soci specific files */ +#include +#include + +/* + * SOC Interconnect Address Map. + * All regions may not exist on all chips. + */ +#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ +#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI_MEM_SZ (64 * 1024 * 1024) +#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ +#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ + +#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ + +#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */ +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ + +#ifndef SI_MAXCORES +#define SI_MAXCORES 32 /* NorthStar has more cores */ +#endif /* SI_MAXCORES */ + +#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ + +#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ +#define SI_FASTRAM_SWAPPED 0x19800000 + +#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ +#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ +#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define SI_FLASH_WINDOW 0x01000000 /* Flash XIP Window */ + +#define SI_NS_NANDFLASH 0x1c000000 /* NorthStar NAND flash base */ +#define SI_NS_NORFLASH 0x1e000000 /* NorthStar NOR flash base */ +#define SI_NS_ROM 0xfffd0000 /* NorthStar ROM */ +#define SI_NS_FLASH_WINDOW 0x02000000 /* Flash XIP Window */ + +#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ +#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */ +#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ +#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */ +#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */ +#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ +#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ + +#define SI_SFLASH 0x14000000 +#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ +#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define SI_BCM53573_NANDFLASH 0x30000000 /* 53573 NAND flash base */ +#define SI_BCM53573_NORFLASH 0x1c000000 /* 53573 NOR flash base */ + +#define SI_BCM53573_NORFLASH_WINDOW 0x01000000 /* only support 16M direct access for + * 3-byte address modes in spi flash + */ +#define SI_BCM53573_BOOTDEV_MASK 0x3 +#define SI_BCM53573_BOOTDEV_NOR 0x0 + +#define SI_BCM53573_DDRTYPE_MASK 0x10 +#define SI_BCM53573_DDRTYPE_DDR3 0x10 + +/* APB bridge code */ +#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 
1, etc. */ + +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 0x834 /* I2S core */ +#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ +#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ + +#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */ +#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */ +#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */ +#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */ +#define GCI_CORE_ID 0x840 /* GCI Core */ +#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */ +#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */ +#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */ +#define SYSMEM_CORE_ID 0x849 /* System memory core */ +#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ +#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ +#define 
EROM_CORE_ID 0x366 /* EROM core ID */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all + * unused address ranges + */ + +#define CC_4706_CORE_ID 0x500 /* chipcommon core */ +#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ +#define NS_DMA_CORE_ID 0x502 /* DMA core */ +#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ +#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ +#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ +#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ +#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ +#define NS_ROM_CORE_ID 0x508 /* ROM core */ +#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ +#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ +#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ +#define SOCRAM_4706_CORE_ID 0x50e /* internal memory core */ +#define NS_SOCRAM_CORE_ID SOCRAM_4706_CORE_ID +#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ +#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */ +#define GMAC_COMMON_4706_CORE_ID 0x5dc /* Gigabit MAC core */ +#define GMAC_4706_CORE_ID 0x52d /* Gigabit MAC core */ +#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */ +#define ALTA_CORE_ID 0x534 /* I2S core */ +#define DDR23_PHY_CORE_ID 0x5dd + +#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ +#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */ +#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */ +#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */ +#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */ + +/* There are TWO constants on all HND chips: SI_ENUM_BASE above, + * and chipcommon being the first core: + */ +#define SI_CC_IDX 0 +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0 +#define SOCI_AI 1 +#define SOCI_UBUS 2 +#define SOCI_NAI 3 + +/* Common core control flags */ +#define SICF_BIST_EN 0x8000 +#define SICF_PME_EN 0x4000 +#define SICF_CORE_BITS 0x3ffc +#define SICF_FGC 0x0002 +#define SICF_CLOCK_EN 0x0001 + +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + +/* Norstar core status flags */ +#define SISF_NS_BOOTDEV_MASK 0x0003 /* ROM core */ +#define SISF_NS_BOOTDEV_NOR 0x0000 /* ROM core */ +#define SISF_NS_BOOTDEV_NAND 0x0001 /* ROM core */ +#define SISF_NS_BOOTDEV_ROM 0x0002 /* ROM core */ +#define SISF_NS_BOOTDEV_OFFLOAD 0x0003 /* ROM core */ +#define SISF_NS_SKUVEC_MASK 0x000c /* ROM core */ + +/* A register that is common to all cores to + * communicate w/PMU regarding clock control. 
+ */ +#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */ +#define SI_PWR_CTL_ST 0x1e8 /* For memory clock gating */ + +/* clk_ctl_st register */ +#define CCS_FORCEALP 0x00000001 /* force ALP request */ +#define CCS_FORCEHT 0x00000002 /* force HT request */ +#define CCS_FORCEILP 0x00000004 /* force ILP request */ +#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */ +#define CCS_HTAREQ 0x00000010 /* HT Avail Request */ +#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */ +#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */ +#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */ +#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */ +#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */ +#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */ +#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */ +#define CCS_ERSRC_REQ_SHIFT 8 +#define CCS_ALPAVAIL 0x00010000 /* ALP is available */ +#define CCS_HTAVAIL 0x00020000 /* HT is available */ +#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */ +#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */ +#define CCS_ARMFASTCLOCKSTATUS 0x01000000 /* Fast CPU clock is running */ +#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */ +#define CCS_ERSRC_STS_SHIFT 24 + +#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */ +#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */ + +/* Not really related to SOC Interconnect, but a couple of software + * conventions for the use the flash space: + */ + +/* Minumum amount of flash we support */ +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +/* A boot/binary may have an embedded block that describes its size */ +#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */ +#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */ +#define BISZ_MAGIC_IDX 0 /* Word 0: magic */ +#define BISZ_TXTST_IDX 1 /* 1: text start */ +#define BISZ_TXTEND_IDX 2 /* 2: text end */ +#define BISZ_DATAST_IDX 3 /* 3: data start */ +#define BISZ_DATAEND_IDX 4 /* 4: data end */ +#define BISZ_BSSST_IDX 5 /* 5: bss start */ +#define BISZ_BSSEND_IDX 6 /* 6: bss end */ +#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */ + +/* Boot/Kernel related defintion and functions */ +#define SOC_BOOTDEV_ROM 0x00000001 +#define SOC_BOOTDEV_PFLASH 0x00000002 +#define SOC_BOOTDEV_SFLASH 0x00000004 +#define SOC_BOOTDEV_NANDFLASH 0x00000008 + +#define SOC_KNLDEV_NORFLASH 0x00000002 +#define SOC_KNLDEV_NANDFLASH 0x00000004 + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) +int soc_boot_dev(void *sih); +int soc_knl_dev(void *sih); +#endif /* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */ + +#endif /* _HNDSOC_H */ diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h new file mode 100644 index 000000000000..539a2fa771d7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h @@ -0,0 +1,1088 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the 
copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linux_osl.h 601764 2015-11-24 03:47:41Z $ + */ + +#ifndef _linux_osl_h_ +#define _linux_osl_h_ + +#include +#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x))) + +/* Linux Kernel: File Operations: start */ +extern void * osl_os_open_image(char * filename); +extern int osl_os_get_image_block(char * buf, int len, void * image); +extern void osl_os_close_image(void * image); +extern int osl_os_image_size(void *image); +/* Linux Kernel: File Operations: end */ + +#ifdef BCMDRIVER + +/* OSL initialization */ +#ifdef SHARED_OSL_CMN +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn); +#else +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag); +#endif /* SHARED_OSL_CMN */ + +extern void osl_detach(osl_t *osh); +extern int osl_static_mem_init(osl_t *osh, void *adapter); +extern int osl_static_mem_deinit(osl_t *osh, void *adapter); +extern void osl_set_bus_handle(osl_t *osh, void *bus_handle); +extern void* osl_get_bus_handle(osl_t *osh); + +/* Global ASSERT type */ +extern uint32 g_assert_type; + +/* ASSERT */ +#if defined(BCMASSERT_LOG) + #define ASSERT(exp) \ + do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0) +extern void osl_assert(const char *exp, const char *file, int line); +#else + #ifdef __GNUC__ + #define GCC_VERSION \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + #if GCC_VERSION > 30100 + #define ASSERT(exp) do {} while (0) + #else + /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */ + #define ASSERT(exp) + #endif /* GCC_VERSION > 30100 */ + #endif /* __GNUC__ */ +#endif + +/* bcm_prefetch_32B */ +static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B) +{ +} + +/* microsecond delay */ +#define OSL_DELAY(usec) osl_delay(usec) +extern void osl_delay(uint usec); + +#define OSL_SLEEP(ms) osl_sleep(ms) +extern void osl_sleep(uint ms); + +#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \ + osl_pcmcia_read_attr((osh), (offset), (buf), (size)) +#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \ + osl_pcmcia_write_attr((osh), (offset), (buf), (size)) +extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size); +extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size); + +/* PCI configuration space access macros */ +#define OSL_PCI_READ_CONFIG(osh, offset, size) \ + osl_pci_read_config((osh), (offset), (size)) +#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \ + osl_pci_write_config((osh), (offset), (size), (val)) +extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size); +extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val); + +/* PCI device bus # and slot # */ +#define OSL_PCI_BUS(osh) osl_pci_bus(osh) +#define OSL_PCI_SLOT(osh) 
osl_pci_slot(osh) +#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh) +#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh) +extern uint osl_pci_bus(osl_t *osh); +extern uint osl_pci_slot(osl_t *osh); +extern uint osl_pcie_domain(osl_t *osh); +extern uint osl_pcie_bus(osl_t *osh); +extern struct pci_dev *osl_pci_device(osl_t *osh); + +#define OSL_ACP_COHERENCE (1<<1L) + +/* Pkttag flag should be part of public information */ +typedef struct { + bool pkttag; + bool mmbus; /**< Bus supports memory-mapped register accesses */ + pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */ + void *tx_ctx; /**< Context to the callback function */ + void *unused[3]; +} osl_pubinfo_t; + +extern void osl_flag_set(osl_t *osh, uint32 mask); +extern bool osl_is_flag_set(osl_t *osh, uint32 mask); + +#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ + do { \ + ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \ + ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \ + } while (0) + + +/* host/bus architecture-specific byte swap */ +#define BUS_SWAP32(v) (v) + #define MALLOC(osh, size) osl_malloc((osh), (size)) + #define MALLOCZ(osh, size) osl_mallocz((osh), (size)) + #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) + #define MALLOCED(osh) osl_malloced((osh)) + #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh) + extern void *osl_malloc(osl_t *osh, uint size); + extern void *osl_mallocz(osl_t *osh, uint size); + extern void osl_mfree(osl_t *osh, void *addr, uint size); + extern uint osl_malloced(osl_t *osh); + extern uint osl_check_memleak(osl_t *osh); + + +#define MALLOC_FAILED(osh) osl_malloc_failed((osh)) +extern uint osl_malloc_failed(osl_t *osh); + +/* allocate/free shared (dma-able) consistent memory */ +#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align() +#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +extern uint osl_dma_consistent_align(void); +extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, + uint *tot, dmaaddr_t *pap); +extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); + +/* map/unmap direction */ +#define DMA_TX 1 /* TX direction for DMA */ +#define DMA_RX 2 /* RX direction for DMA */ + +/* map/unmap shared (dma-able) memory */ +#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ + osl_dma_unmap((osh), (pa), (size), (direction)) +extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *txp_dmah); +extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction); + +/* API for DMA addressing capability */ +#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);}) + +#define OSL_SMP_WMB() smp_wmb() + +/* API for CPU relax */ +extern void osl_cpu_relax(void); +#define OSL_CPU_RELAX() osl_cpu_relax() + +#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \ + (defined(STBLINUX) && defined(__ARM_ARCH_7A__)) || (defined(CONFIG_ARCH_MSM8996) || \ + defined(CONFIG_SOC_EXYNOS8890)) + extern void osl_cache_flush(void *va, uint size); + extern void osl_cache_inv(void *va, uint size); 
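/*
 * Editor's note (a minimal sketch, not part of the original patch): on the
 * non-coherent ARM configurations selected above, these cache operations are
 * typically paired with the DMA direction macros defined earlier. The buffer,
 * length and handle names below are assumed for illustration only.
 *
 *   Transmit path (CPU filled txbuf, device will read it):
 *     OSL_CACHE_FLUSH(txbuf, len);
 *     pa = osl_dma_map(osh, txbuf, len, DMA_TX, NULL, NULL);
 *
 *   Receive path (device wrote rxbuf, CPU will read it):
 *     osl_dma_unmap(osh, pa, len, DMA_RX);
 *     OSL_CACHE_INV(rxbuf, len);
 */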
+ extern void osl_prefetch(const void *ptr); + #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *) va, len) + #define OSL_CACHE_INV(va, len) osl_cache_inv((void *) va, len) + #define OSL_PREFETCH(ptr) osl_prefetch(ptr) +#ifdef __ARM_ARCH_7A__ + extern int osl_arch_is_coherent(void); + #define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent() + extern int osl_acp_war_enab(void); + #define OSL_ACP_WAR_ENAB() osl_acp_war_enab() +#else + #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL +#endif /* __ARM_ARCH_7A__ */ +#else + #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va) + #define OSL_CACHE_INV(va, len) BCM_REFERENCE(va) + #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr) + + #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL +#endif + +/* register access macros */ +#if defined(BCMSDIO) + #include + #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \ + (uintptr)(r), sizeof(*(r)), (v))) + #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \ + (uintptr)(r), sizeof(*(r)))) +#endif + +#if defined(BCMSDIO) + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ + mmap_op else bus_op + #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \ + mmap_op : bus_op +#else + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) + #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) +#endif + +#define OSL_ERROR(bcmerror) osl_error(bcmerror) +extern int osl_error(int bcmerror); + +/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */ +#define PKTBUFSZ 2048 /* largest reasonable packet buffer, driver uses for ethernet MTU */ + +#define OSH_NULL NULL + +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. + * Macros expand to calls to functions defined in linux_osl.c . + */ +#include /* use current 2.4.x calling conventions */ +#include /* for vsn/printf's */ +#include /* for mem*, str* */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) +#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies)) +#else +#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */ +#define printf(fmt, args...) 
printk(fmt , ## args) +#include /* for vsn/printf's */ +#include /* for mem*, str* */ +/* bcopy's: Linux kernel doesn't provide these (anymore) */ +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + +/* register access macros */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ + defined(CONFIG_X86) +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + case sizeof(uint64): __osl_v = \ + readq((volatile uint64*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#else +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ + defined(CONFIG_X86) +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#else +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ + +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) + +/* bcopy, bcmp, and bzero functions */ +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + +/* uncached/cached virtual address */ +#define OSL_UNCACHED(va) ((void *)va) +#define OSL_CACHED(va) ((void *)va) + +#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va) +#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va) + +/* get processor cycle count */ +#if defined(__i386__) +#define OSL_GETCYCLES(x) rdtscl((x)) +#else +#define OSL_GETCYCLES(x) ((x) = 0) +#endif + +/* dereference an address that may cause a bus exception */ +#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) + +/* map/unmap physical to virtual I/O */ +#if !defined(CONFIG_MMC_MSM7X00A) +#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size)) +#else +#define REG_MAP(pa, size) (void *)(0) +#endif /* !defined(CONFIG_MMC_MSM7X00A */ +#define REG_UNMAP(va) iounmap((va)) + +/* 
shared (dma-able) memory access macros */ +#define R_SM(r) *(r) +#define W_SM(r, v) (*(r) = (v)) +#define BZERO_SM(r, len) memset((r), '\0', (len)) + +/* Because the non BINOSL implemenation of the PKT OSL routines are macros (for + * performance reasons), we need the Linux headers. + */ +#include /* use current 2.4.x calling conventions */ + +/* packet primitives */ +#ifdef BCMDBG_CTRACE +#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__) +#else +#ifdef BCM_OBJECT_TRACE +#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FUNCTION__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__) +#else +#define PKTGET(osh, len, send) osl_pktget((osh), (len)) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#endif /* BCM_OBJECT_TRACE */ +#endif /* BCMDBG_CTRACE */ +#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh) +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#if defined(BCM_OBJECT_TRACE) +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__) +#else +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) +#endif /* BCM_OBJECT_TRACE */ +#ifdef CONFIG_DHD_USE_STATIC_BUF +#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) +#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) +#else +#define PKTGET_STATIC PKTGET +#define PKTFREE_STATIC PKTFREE +#endif /* CONFIG_DHD_USE_STATIC_BUF */ +#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);}) +#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);}) +#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head)) +#define PKTEXPHEADROOM(osh, skb, b) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_realloc_headroom((struct sk_buff*)(skb), (b)); \ + }) +#define PKTTAILROOM(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_tailroom((struct sk_buff*)(skb)); \ + }) +#define PKTPADTAILROOM(osh, skb, padlen) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_pad((struct sk_buff*)(skb), (padlen)); \ + }) +#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);}) +#define PKTSETNEXT(osh, skb, x) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \ + }) +#define PKTSETLEN(osh, skb, len) \ + ({ \ + BCM_REFERENCE(osh); \ + __skb_trim((struct sk_buff*)(skb), (len)); \ + }) +#define PKTPUSH(osh, skb, bytes) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_push((struct sk_buff*)(skb), (bytes)); \ + }) +#define PKTPULL(osh, skb, bytes) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_pull((struct sk_buff*)(skb), (bytes)); \ + }) +#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb)) +#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh) +#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) +#define PKTFREELIST(skb) PKTLINK(skb) +#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x)) +#define PKTPTR(skb) (skb) +#define PKTID(skb) ({BCM_REFERENCE(skb); 0;}) +#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);}) +#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;}) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER) +#define PKTORPHAN(skb) osl_pkt_orphan_partial(skb) +extern void osl_pkt_orphan_partial(struct sk_buff *skb); +#else +#define PKTORPHAN(skb) ({BCM_REFERENCE(skb); 0;}) +#endif /* LINUX VERSION >= 3.6 */ + + +#ifdef BCMDBG_CTRACE +#define DEL_CTRACE(zosh, zskb) { 
\ + unsigned long zflags; \ + spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \ + list_del(&(zskb)->ctrace_list); \ + (zosh)->ctrace_num--; \ + (zskb)->ctrace_start = 0; \ + (zskb)->ctrace_count = 0; \ + spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \ +} + +#define UPDATE_CTRACE(zskb, zfile, zline) { \ + struct sk_buff *_zskb = (struct sk_buff *)(zskb); \ + if (_zskb->ctrace_count < CTRACE_NUM) { \ + _zskb->func[_zskb->ctrace_count] = zfile; \ + _zskb->line[_zskb->ctrace_count] = zline; \ + _zskb->ctrace_count++; \ + } \ + else { \ + _zskb->func[_zskb->ctrace_start] = zfile; \ + _zskb->line[_zskb->ctrace_start] = zline; \ + _zskb->ctrace_start++; \ + if (_zskb->ctrace_start >= CTRACE_NUM) \ + _zskb->ctrace_start = 0; \ + } \ +} + +#define ADD_CTRACE(zosh, zskb, zfile, zline) { \ + unsigned long zflags; \ + spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \ + list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \ + (zosh)->ctrace_num++; \ + UPDATE_CTRACE(zskb, zfile, zline); \ + spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \ +} + +#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__) +#endif /* BCMDBG_CTRACE */ + +#ifdef CTFPOOL +#define CTFPOOL_REFILL_THRESH 3 +typedef struct ctfpool { + void *head; + spinlock_t lock; + uint max_obj; + uint curr_obj; + uint obj_size; + uint refills; + uint fast_allocs; + uint fast_frees; + uint slow_allocs; +} ctfpool_t; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#define FASTBUF (1 << 0) +#define PKTSETFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \ + }) +#define PKTCLRFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \ + }) +#define PKTISFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \ + }) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->pktc_flags) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define FASTBUF (1 << 16) +#define PKTSETFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \ + }) +#define PKTCLRFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \ + }) +#define PKTISFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \ + }) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len) +#else +#define FASTBUF (1 << 0) +#define PKTSETFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \ + }) +#define PKTCLRFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \ + }) +#define PKTISFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \ + }) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused) +#endif /* 2.6.22 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool) +#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head) +#else +#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk) +#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head) +#endif + +extern void *osl_ctfpool_add(osl_t *osh); +extern void osl_ctfpool_replenish(osl_t *osh, uint thresh); +extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size); +extern void osl_ctfpool_cleanup(osl_t *osh); +extern 
void osl_ctfpool_stats(osl_t *osh, void *b); +#else /* CTFPOOL */ +#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) +#endif /* CTFPOOL */ + +#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#ifdef HNDCTF + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#define SKIPCT (1 << 2) +#define CHAINED (1 << 3) +#define PKTSETSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \ + }) +#define PKTCLRSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \ + }) +#define PKTSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \ + }) +#define PKTSETCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \ + }) +#define PKTCLRCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \ + }) +#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->pktc_flags & CHAINED) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define SKIPCT (1 << 18) +#define CHAINED (1 << 19) +#define PKTSETSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \ + }) +#define PKTCLRSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \ + }) +#define PKTSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len & SKIPCT); \ + }) +#define PKTSETCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len |= CHAINED); \ + }) +#define PKTCLRCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \ + }) +#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->mac_len & CHAINED) +#else /* 2.6.22 */ +#define SKIPCT (1 << 2) +#define CHAINED (1 << 3) +#define PKTSETSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused |= SKIPCT); \ + }) +#define PKTCLRSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \ + }) +#define PKTSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused & SKIPCT); \ + }) +#define PKTSETCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused |= CHAINED); \ + }) +#define PKTCLRCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \ + }) +#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->__unused & CHAINED) +#endif /* 2.6.22 */ +typedef struct ctf_mark { + uint32 value; +} ctf_mark_t; +#define CTF_MARK(m) (m.value) +#else /* HNDCTF */ +#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;}) +#endif /* HNDCTF */ + +#if defined(BCM_GMAC3) + +/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */ + +/* Account for packets delivered to downstream forwarder by GMAC interface. 
*/ +extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt); +#define PKTTOFWDER(osh, skbs, skb_cnt) \ + osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt)) + +/* Account for packets received from downstream forwarder. */ +#if defined(BCMDBG_CTRACE) /* pkt logging */ +extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, + int line, char *file); +#define PKTFRMFWDER(osh, skbs, skb_cnt) \ + osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \ + __LINE__, __FILE__) +#else /* ! (BCMDBG_PKT || BCMDBG_CTRACE) */ +extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt); +#define PKTFRMFWDER(osh, skbs, skb_cnt) \ + osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt)) +#endif + + +/** GMAC Forwarded packet tagging for reduced cache flush/invalidate. + * In FWDERBUF tagged packet, only FWDER_PKTMAPSZ amount of data would have + * been accessed in the GMAC forwarder. This may be used to limit the number of + * cachelines that need to be flushed or invalidated. + * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF. + * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed. + * Likewise, a debug print of a packet payload in say the ethernet driver needs + * to be accompanied with a clear of the FWDERBUF tag. + */ + +/** Forwarded packets, have a HWRXOFF sized rx header (etc.h) */ +#define FWDER_HWRXOFF (30) + +/** Maximum amount of a pktadat that a downstream forwarder (GMAC) may have + * read into the L1 cache (not dirty). This may be used in reduced cache ops. + * + * Max 56: ET HWRXOFF[30] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4] + */ +#define FWDER_PKTMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + +#define FWDERBUF (1 << 4) +#define PKTSETFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \ + }) +#define PKTCLRFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \ + }) +#define PKTISFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \ + }) + +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + +#define FWDERBUF (1 << 20) +#define PKTSETFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \ + }) +#define PKTCLRFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \ + }) +#define PKTISFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \ + }) + +#else /* 2.6.22 */ + +#define FWDERBUF (1 << 4) +#define PKTSETFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \ + }) +#define PKTCLRFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \ + }) +#define PKTISFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused & FWDERBUF); \ + }) + +#endif /* 2.6.22 */ + +#else /* ! BCM_GMAC3 */ + +#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); }) +#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); }) +#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#endif /* ! 
BCM_GMAC3 */ + + +#ifdef HNDCTF +/* For broadstream iqos */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#define TOBR (1 << 5) +#define PKTSETTOBR(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \ + }) +#define PKTCLRTOBR(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \ + }) +#define PKTISTOBR(skb) (((struct sk_buff*)(skb))->pktc_flags & TOBR) +#define PKTSETCTFIPCTXIF(skb, ifp) (((struct sk_buff*)(skb))->ctf_ipc_txif = ifp) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);}) +#else /* 2.6.22 */ +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);}) +#endif /* 2.6.22 */ +#else /* HNDCTF */ +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) +#endif /* HNDCTF */ + + +#ifdef BCMFA +#ifdef BCMFA_HW_HASH +#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx) +#else +#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);}) +#endif /* BCMFA_SW_HASH */ +#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx) +#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp) +#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev) + +#define AUX_TCP_FIN_RST (1 << 0) +#define AUX_FREED (1 << 1) +#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST) +#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST)) +#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST) +#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED) +#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED)) +#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED) +#define PKTISFABRIDGED(skb) PKTISFAAUX(skb) +#else +#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;}) + +#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb) +#define PKTSETFAFREED(skb) BCM_REFERENCE(skb) +#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb) +#endif /* BCMFA */ + +#if defined(BCM_OBJECT_TRACE) +extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller); +#else +extern void osl_pktfree(osl_t *osh, void *skb, bool send); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pktget_static(osl_t *osh, uint len); +extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); +extern void osl_pktclone(osl_t *osh, void **pkt); + +#ifdef BCMDBG_CTRACE +#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b)) +extern void *osl_pktget(osl_t *osh, uint len, int line, char *file); +extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file); +extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt); +extern void 
*osl_pktdup(osl_t *osh, void *skb, int line, char *file); +struct bcmstrbuf; +extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b); +#else +#ifdef BCM_OBJECT_TRACE +extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller); +#else +extern void *osl_pktget(osl_t *osh, uint len); +extern void *osl_pktdup(osl_t *osh, void *skb); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pkt_frmnative(osl_t *osh, void *skb); +#endif /* BCMDBG_CTRACE */ +extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); +#ifdef BCMDBG_CTRACE +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \ + (struct sk_buff*)(skb), __LINE__, __FILE__) +#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb)) +#else +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb)) +#endif /* BCMDBG_CTRACE */ +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt)) + +#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) +#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) +#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) +#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) +#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) +#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ + ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) +/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */ +#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) + +#ifdef CONFIG_NF_CONNTRACK_MARK +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define PKTMARK(p) (((struct sk_buff *)(p))->mark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m) +#else /* !2.6.0 */ +#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m) +#endif /* 2.6.0 */ +#else /* CONFIG_NF_CONNTRACK_MARK */ +#define PKTMARK(p) 0 +#define PKTSETMARK(p, m) +#endif /* CONFIG_NF_CONNTRACK_MARK */ + +#define PKTALLOCED(osh) osl_pktalloced(osh) +extern uint osl_pktalloced(osl_t *osh); + +#define OSL_RAND() osl_rand() +extern uint32 osl_rand(void); + +#define DMA_MAP(osh, va, size, direction, p, dmah) \ + osl_dma_map((osh), (va), (size), (direction), (p), (dmah)) + +#ifdef PKTC +/* Use 8 bytes of skb tstamp field to store below info */ +struct chain_node { + struct sk_buff *link; + unsigned int flags:3, pkts:9, bytes:20; +}; + +#define CHAIN_NODE(skb) ((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb)) + +#define PKTCSETATTR(s, f, p, b) ({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \ + CHAIN_NODE(s)->bytes = (b);}) +#define PKTCCLRATTR(s) ({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \ + CHAIN_NODE(s)->bytes = 0;}) +#define PKTCGETATTR(s) (CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \ + CHAIN_NODE(s)->bytes) +#define PKTCCNT(skb) (CHAIN_NODE(skb)->pkts) +#define PKTCLEN(skb) (CHAIN_NODE(skb)->bytes) +#define PKTCGETFLAGS(skb) (CHAIN_NODE(skb)->flags) +#define PKTCSETFLAGS(skb, f) (CHAIN_NODE(skb)->flags = (f)) +#define PKTCCLRFLAGS(skb) (CHAIN_NODE(skb)->flags = 0) +#define PKTCFLAGS(skb) (CHAIN_NODE(skb)->flags) +#define PKTCSETCNT(skb, c) (CHAIN_NODE(skb)->pkts = (c)) +#define PKTCINCRCNT(skb) (CHAIN_NODE(skb)->pkts++) +#define PKTCADDCNT(skb, c) (CHAIN_NODE(skb)->pkts += (c)) +#define PKTCSETLEN(skb, l) (CHAIN_NODE(skb)->bytes = (l)) +#define 
PKTCADDLEN(skb, l) (CHAIN_NODE(skb)->bytes += (l)) +#define PKTCSETFLAG(skb, fb) (CHAIN_NODE(skb)->flags |= (fb)) +#define PKTCCLRFLAG(skb, fb) (CHAIN_NODE(skb)->flags &= ~(fb)) +#define PKTCLINK(skb) (CHAIN_NODE(skb)->link) +#define PKTSETCLINK(skb, x) (CHAIN_NODE(skb)->link = (struct sk_buff*)(x)) +#define FOREACH_CHAINED_PKT(skb, nskb) \ + for (; (skb) != NULL; (skb) = (nskb)) \ + if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \ + PKTSETCLINK((skb), NULL), 1) +#define PKTCFREE(osh, skb, send) \ +do { \ + void *nskb; \ + ASSERT((skb) != NULL); \ + FOREACH_CHAINED_PKT((skb), nskb) { \ + PKTCLRCHAINED((osh), (skb)); \ + PKTCCLRFLAGS((skb)); \ + PKTFREE((osh), (skb), (send)); \ + } \ +} while (0) +#define PKTCENQTAIL(h, t, p) \ +do { \ + if ((t) == NULL) { \ + (h) = (t) = (p); \ + } else { \ + PKTSETCLINK((t), (p)); \ + (t) = (p); \ + } \ +} while (0) +#endif /* PKTC */ + +#else /* ! BCMDRIVER */ + + +/* ASSERT */ + #define ASSERT(exp) do {} while (0) + +/* MALLOC and MFREE */ +#define MALLOC(o, l) malloc(l) +#define MFREE(o, p, l) free(p) +#include + +/* str* and mem* functions */ +#include + +/* *printf functions */ +#include + +/* bcopy, bcmp, and bzero */ +extern void bcopy(const void *src, void *dst, size_t len); +extern int bcmp(const void *b1, const void *b2, size_t len); +extern void bzero(void *b, size_t len); +#endif /* ! BCMDRIVER */ + +typedef struct sec_cma_info { + struct sec_mem_elem *sec_alloc_list; + struct sec_mem_elem *sec_alloc_list_tail; +} sec_cma_info_t; + +/* Current STB 7445D1 doesn't use ACP and it is non-coherrent. + * Adding these dummy values for build apss only + * When we revisit need to change these. + */ +#if defined(STBLINUX) + +#if defined(__ARM_ARCH_7A__) +#define ACP_WAR_ENAB() 0 +#define ACP_WIN_LIMIT 1 +#define arch_is_coherent() 0 +#endif /* __ARM_ARCH_7A__ */ + +#endif /* STBLINUX */ + +#ifdef BCM_SECURE_DMA + +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \ + osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset)) +#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \ + osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah)) +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \ + osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma)) +#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \ + osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset)) +#define SECURE_DMA_UNMAP_ALL(osh, pcma) \ + osl_sec_dma_unmap_all((osh), (pcma)) +#if defined(__ARM_ARCH_7A__) +#define CMA_BUFSIZE_4K 4096 +#define CMA_BUFSIZE_2K 2048 +#define CMA_BUFSIZE_512 512 + +#define CMA_BUFNUM 2048 +#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */ +#define SEC_CMA_COHERENT_MAX 32 +#define CMA_DMA_DESC_MEMBLOCK (SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX) +#define CMA_DMA_DATA_MEMBLOCK (CMA_BUFSIZE_4K*CMA_BUFNUM) +#define CMA_MEMBLOCK (CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK) +#define CONT_ARMREGION 0x02 /* Region CMA */ +#else +#define CONT_MIPREGION 0x00 /* To access the MIPs mem, Not yet... 
*/ +#endif /* !defined __ARM_ARCH_7A__ */ + +#define SEC_DMA_ALIGN (1<<16) +typedef struct sec_mem_elem { + size_t size; + int direction; + phys_addr_t pa_cma; /**< physical address */ + void *va; /**< virtual address of driver pkt */ + dma_addr_t dma_handle; /**< bus address assign by linux */ + void *vac; /**< virtual address of cma buffer */ + struct sec_mem_elem *next; +} sec_mem_elem_t; + +extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset); +extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah); +extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, + int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info); +extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, + void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset); +extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info); + +#endif /* BCM_SECURE_DMA */ + +typedef struct sk_buff_head PKT_LIST; +#define PKTLIST_INIT(x) skb_queue_head_init((x)) +#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y)) +#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x)) +#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x)) +#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x)) + +#endif /* _linux_osl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h new file mode 100644 index 000000000000..4fe3030b7625 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/linuxver.h @@ -0,0 +1,779 @@ +/* + * Linux-specific abstractions to gain some independence from linux kernel versions. + * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: linuxver.h 604758 2015-12-08 12:01:08Z $ + */ + +#ifndef _linuxver_h_ +#define _linuxver_h_ + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-parameter" +#endif + +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#include +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) +#include +#else +#include +#endif +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) +#include +#endif +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) +/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */ +#ifdef __UNDEF_NO_VERSION__ +#undef __NO_VERSION__ +#else +#define __NO_VERSION__ +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i") +#define module_param_string(_name_, _string_, _size_, _perm_) \ + MODULE_PARM(_string_, "c" __MODULE_STRING(_size_)) +#endif + +/* linux/malloc.h is deprecated, use linux/slab.h instead. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include +#else +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) +#undef IP_TOS +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */ +#include + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) +#include +#else +#include +#ifndef work_struct +#define work_struct tq_struct +#endif +#ifndef INIT_WORK +#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) +#endif +#ifndef schedule_work +#define schedule_work(_work) schedule_task((_work)) +#endif +#ifndef flush_scheduled_work +#define flush_scheduled_work() flush_scheduled_tasks() +#endif +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define DAEMONIZE(a) do { \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); \ + } while (0) +#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func) +#else +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work) +#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \ + (RHEL_MAJOR == 5)) +/* Exclude RHEL 5 */ +typedef void (*work_func_t)(void *work); +#endif +#endif /* >= 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* Some distributions have their own 2.6.x compatibility layers */ +#ifndef IRQ_NONE +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif +#else +typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, 
struct pt_regs *ptregs); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define IRQF_SHARED SA_SHIRQ +#endif /* < 2.6.18 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) +#ifdef CONFIG_NET_RADIO +#define CONFIG_WIRELESS_EXT +#endif +#endif /* < 2.6.17 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) +#define MOD_INC_USE_COUNT +#define MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) +#include +#endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */ + + +#ifndef __exit +#define __exit +#endif +#ifndef __devexit +#define __devexit +#endif +#ifndef __devinit +# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) +# define __devinit __init +# else +/* All devices are hotpluggable since linux 3.8.0 */ +# define __devinit +# endif +#endif /* !__devinit */ +#ifndef __devinitdata +#define __devinitdata +#endif +#ifndef __devexit_p +#define __devexit_p(x) x +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) + +#define pci_get_drvdata(dev) (dev)->sysdata +#define pci_set_drvdata(dev, value) (dev)->sysdata = (value) + +/* + * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration + */ + +struct pci_device_id { + unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */ + unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ + unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */ + unsigned long driver_data; /* Data private to the driver */ +}; + +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct pci_dev *dev, + const struct pci_device_id *id); /* New device inserted */ + void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug + * capable driver) + */ + void (*suspend)(struct pci_dev *dev); /* Device suspended */ + void (*resume)(struct pci_dev *dev); /* Device woken up */ +}; + +#define MODULE_DEVICE_TABLE(type, name) +#define PCI_ANY_ID (~0) + +/* compatpci.c */ +#define pci_module_init pci_register_driver +extern int pci_register_driver(struct pci_driver *drv); +extern void pci_unregister_driver(struct pci_driver *drv); + +#endif /* PCI registration */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) +#define pci_module_init pci_register_driver +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) +#ifdef MODULE +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#else +#define module_init(x) __initcall(x); +#define module_exit(x) __exitcall(x); +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) +#define WL_USE_NETDEV_OPS +#else +#undef WL_USE_NETDEV_OPS +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL) +#define WL_CONFIG_RFKILL +#else +#undef 
WL_CONFIG_RFKILL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) +#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) +#define pci_enable_device(dev) do { } while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) +#define net_device device +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) + +/* + * DMA mapping + * + * See linux/Documentation/DMA-mapping.txt + */ + +#ifndef PCI_DMA_TODEVICE +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#endif + +typedef u32 dma_addr_t; + +/* Pure 2^n version of get_order */ +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC | GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} +#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) +#define pci_unmap_single(cookie, address, size, dir) + +#endif /* DMA mapping */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) + +#define dev_kfree_skb_any(a) dev_kfree_skb(a) +#define netif_down(dev) do { (dev)->start = 0; } while (0) + +/* pcmcia-cs provides its own netdevice compatibility layer */ +#ifndef _COMPAT_NETDEVICE_H + +/* + * SoftNet + * + * For pre-softnet kernels we need to tell the upper layer not to + * re-enter start_xmit() while we are in there. However softnet + * guarantees not to enter while we are in there so there is no need + * to do the netif_stop_queue() dance unless the transmit queue really + * gets stuck. This should also improve performance according to tests + * done by Aman Singla. 
+ */ + +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#define netif_wake_queue(dev) \ + do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) +#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) + +static inline void netif_start_queue(struct net_device *dev) +{ + dev->tbusy = 0; + dev->interrupt = 0; + dev->start = 1; +} + +#define netif_queue_stopped(dev) (dev)->tbusy +#define netif_running(dev) (dev)->start + +#endif /* _COMPAT_NETDEVICE_H */ + +#define netif_device_attach(dev) netif_start_queue(dev) +#define netif_device_detach(dev) netif_stop_queue(dev) + +/* 2.4.x renamed bottom halves to tasklets */ +#define tasklet_struct tq_struct +static inline void tasklet_schedule(struct tasklet_struct *tasklet) +{ + queue_task(tasklet, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static inline void tasklet_init(struct tasklet_struct *tasklet, + void (*func)(unsigned long), + unsigned long data) +{ + tasklet->next = NULL; + tasklet->sync = 0; + tasklet->routine = (void (*)(void *))func; + tasklet->data = (void *)data; +} +#define tasklet_kill(tasklet) { do {} while (0); } + +/* 2.4.x introduced del_timer_sync() */ +#define del_timer_sync(timer) del_timer(timer) + +#else + +#define netif_down(dev) + +#endif /* SoftNet */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/* + * Emit code to initialise a tq_struct's routine and data pointers + */ +#define PREPARE_TQUEUE(_tq, _routine, _data) \ + do { \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) + +/* + * Emit code to initialise all of a tq_struct + */ +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + PREPARE_TQUEUE((_tq), (_routine), (_data)); \ + } while (0) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */ + +/* Power management related macro & routines */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9) +#define PCI_SAVE_STATE(a, b) pci_save_state(a) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a) +#else +#define PCI_SAVE_STATE(a, b) pci_save_state(a, b) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) +static inline int +pci_save_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + if (buffer) { + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &buffer[i]); + } + return 0; +} + +static inline int +pci_restore_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + + if (buffer) { + for (i = 0; i < 16; i++) + pci_write_config_dword(dev, i * 4, buffer[i]); + } + /* + * otherwise, write the context information we know from bootup. + * This works around a problem where warm-booting from Windows + * combined with a D3(hot)->D0 transition causes PCI config + * header data to be forgotten. 
+ */ + else { + for (i = 0; i < 6; i ++) + pci_write_config_dword(dev, + PCI_BASE_ADDRESS_0 + (i * 4), + pci_resource_start(dev, i)); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + } + return 0; +} +#endif /* PCI power management */ + +/* Old cp0 access macros deprecated in 2.4.19 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)) +#define read_c0_count() read_32bit_cp0_register(CP0_COUNT) +#endif + +/* Module refcount handled internally in 2.6.x */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#else +#define OLD_MOD_INC_USE_COUNT do {} while (0) +#define OLD_MOD_DEC_USE_COUNT do {} while (0) +#endif +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#endif +#ifndef MOD_INC_USE_COUNT +#define MOD_INC_USE_COUNT do {} while (0) +#endif +#ifndef MOD_DEC_USE_COUNT +#define MOD_DEC_USE_COUNT do {} while (0) +#endif +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) do {} while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#ifndef HAVE_FREE_NETDEV +#define free_netdev(dev) kfree(dev) +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* struct packet_type redefined in 2.6.x */ +#define af_packet_priv data +#endif + +/* suspend args */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) +#define DRV_SUSPEND_STATE_TYPE pm_message_t +#else +#define DRV_SUSPEND_STATE_TYPE uint32 +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define CHECKSUM_HW CHECKSUM_PARTIAL +#endif + +typedef struct { + void *parent; /* some external entity that the thread supposed to work for */ + char *proc_name; + struct task_struct *p_task; + long thr_pid; + int prio; /* priority */ + struct semaphore sema; + int terminated; + struct completion completed; + spinlock_t spinlock; + int up_cnt; +} tsk_ctl_t; + + +/* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */ +/* note this macro assumes there may be only one context waiting on thread's completion */ +#ifdef DHD_DEBUG +#define DBG_THR(x) printk x +#else +#define DBG_THR(x) +#endif + +static inline bool binary_sema_down(tsk_ctl_t *tsk) +{ + if (down_interruptible(&tsk->sema) == 0) { + unsigned long flags = 0; + spin_lock_irqsave(&tsk->spinlock, flags); + if (tsk->up_cnt == 1) + tsk->up_cnt--; + else { + DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt)); + } + spin_unlock_irqrestore(&tsk->spinlock, flags); + return false; + } else + return true; +} + +static inline bool binary_sema_up(tsk_ctl_t *tsk) +{ + bool sem_up = false; + unsigned long flags = 0; + + spin_lock_irqsave(&tsk->spinlock, flags); + if (tsk->up_cnt == 0) { + tsk->up_cnt++; + sem_up = true; + } else if (tsk->up_cnt == 1) { + /* dhd_sched_dpc: dpc is alread up! 
*/ + } else + DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt)); + + spin_unlock_irqrestore(&tsk->spinlock, flags); + + if (sem_up) + up(&tsk->sema); + + return sem_up; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x) +#else +#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x) +#endif + +#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \ +{ \ + sema_init(&((tsk_ctl)->sema), 0); \ + init_completion(&((tsk_ctl)->completed)); \ + (tsk_ctl)->parent = owner; \ + (tsk_ctl)->proc_name = name; \ + (tsk_ctl)->terminated = FALSE; \ + (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \ + if (IS_ERR((tsk_ctl)->p_task)) { \ + (tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \ + DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + } else { \ + (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \ + spin_lock_init(&((tsk_ctl)->spinlock)); \ + DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + } \ +} + +#define PROC_STOP(tsk_ctl) \ +{ \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + up(&((tsk_ctl)->sema)); \ + wait_for_completion(&((tsk_ctl)->completed)); \ + DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->thr_pid = -1; \ +} + +/* ----------------------- */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +#define KILL_PROC(nr, sig) \ +{ \ +struct task_struct *tsk; \ +struct pid *pid; \ +pid = find_get_pid((pid_t)nr); \ +tsk = pid_task(pid, PIDTYPE_PID); \ +if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 30)) +#define KILL_PROC(pid, sig) \ +{ \ + struct task_struct *tsk; \ + tsk = find_task_by_vpid(pid); \ + if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#define KILL_PROC(pid, sig) \ +{ \ + kill_proc(pid, sig, 1); \ +} +#endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#include +#else +#include + +#define __wait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +/* +For < 2.6.24, wl creates its own netdev but doesn't +align the priv area like the genuine alloc_netdev(). 
+Since netdev_priv() always gives us the aligned address, it will +not match our unaligned address for < 2.6.24 +*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#define DEV_PRIV(dev) (dev->priv) +#else +#define DEV_PRIV(dev) netdev_priv(dev) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) +#define WL_ISR(i, d, p) wl_isr((i), (d)) +#else +#define WL_ISR(i, d, p) wl_isr((i), (d), (p)) +#endif /* < 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_priv(dev) dev->priv +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) +#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled())) +#else +#define CAN_SLEEP() (FALSE) +#endif + +#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define RANDOM32 prandom_u32 +#else +#define RANDOM32 random32 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define SRANDOM32(entropy) prandom_seed(entropy) +#else +#define SRANDOM32(entropy) srandom32(entropy) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */ + +/* + * Overide latest kfifo functions with + * older version to work on older kernels + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) +#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c) +#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c) +#define kfifo_esize(a) 1 +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS) +#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d) +#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d) +#define kfifo_esize(a) 1 +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_dentry->d_inode; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ + +#endif /* _linuxver_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h new file mode 100644 index 000000000000..2eb6d18ea7ca --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/miniopt.h @@ -0,0 +1,83 @@ +/* + * Command line options parser. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: miniopt.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY 128 /* Max options */
+typedef struct miniopt {
+
+ /* These are persistent after miniopt_init() */
+ const char* name; /* name for prompt in error strings */
+ const char* flags; /* option chars that take no args */
+ bool longflags; /* long options may be flags */
+ bool opt_end; /* at end of options (passed a "--") */
+
+ /* These are per-call to miniopt() */
+
+ int consumed; /* number of argv entries consumed in
+ * the most recent call to miniopt()
+ */
+ bool positional;
+ bool good_int; /* 'val' member is the result of a successful
+ * strtol conversion of the option value
+ */
+ char opt;
+ char key[MINIOPT_MAXKEY];
+ char* valstr; /* positional param, or value for the option,
+ * or null if the option had
+ * no accompanying value
+ */
+ uint uval; /* strtol translation of valstr */
+ int val; /* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* MINI_OPT_H */
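The miniopt interface added above is driven one argv step at a time: the caller loops, lets miniopt() fill in the per-call fields (opt, key, valstr, good_int/val) and then advances argv by the consumed count. A minimal sketch of such a loop follows; it is not part of the patch, and the function name, the 'i' option and the assumed return convention (0 when an argument was parsed, non-zero to stop) are illustrative assumptions that should be checked against miniopt.c rather than read out of this header.

/* Illustrative sketch only (not part of the patch). Assumes miniopt() returns 0 when it
 * consumed an option or positional argument, and non-zero once parsing should stop.
 */
static int parse_example_args(char **argv)
{
	miniopt_t opts;
	int ifidx = 0;

	/* No flag-only option chars, no long flags. */
	miniopt_init(&opts, "example", NULL, FALSE);

	while (*argv != NULL) {
		if (miniopt(&opts, argv) != 0)
			break;			/* parse error or end of options */
		if (opts.consumed == 0)
			break;			/* defensive: avoid re-parsing the same token */
		argv += opts.consumed;		/* step past what miniopt() consumed */

		if (opts.positional)
			continue;		/* opts.valstr holds a positional parameter */

		if (opts.opt == 'i' && opts.good_int)
			ifidx = opts.val;	/* e.g. "-i 2": value already converted by strtol */
	}
	return ifidx;
}

The invariant visible from the structure comments is that argv is advanced by consumed after every successful call, so no token is ever parsed twice.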
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 000000000000..0d67000c9df3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,81 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: msgtrace.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include
+/* for osl_t */
+#include
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+ uint8 version;
+ uint8 trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+ uint16 len; /* Length of the trace */
+ uint32 seqnum; /* Sequence number of the message. Useful if a message has been lost
+ * because of a DMA error or a bus reset (e.g. SDIO Func2)
+ */
+ /* Msgtrace type only */
+ uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */
+ uint32 discarded_printf; /* Number of discarded printfs because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message, which would itself cause
+ * endless traces. This flag must be set to TRUE in any hbus trace path; it is reset in the
+ * function msgtrace_put. This prevents endless traces, at the cost of possibly losing traces,
+ * but only in bus device code. It is recommended to set this flag in the SD_TRACE macro but
+ * not in SD_ERROR, so that hbus error traces are not missed; hbus error traces should not
+ * generate endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+ uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern int msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include
+
+#endif /* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 000000000000..8a00f9d55a83
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,178 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * + * + * <> + * + * $Id: osl.h 526460 2015-01-14 08:25:24Z $ + */ + +#ifndef _osl_h_ +#define _osl_h_ + +#include + +#define OSL_PKTTAG_SZ 32 /* Size of PktTag */ + +/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */ +typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status); + +/* Drivers use REGOPSSET() to register register read/write funcitons */ +typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size); +typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size); + + + +#if defined(WL_UNITTEST) +#include +#else +#include +#endif + +#ifndef PKTDBG_TRACE +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#endif + +#define PKTCTFMAP(osh, p) BCM_REFERENCE(osh) + +/* -------------------------------------------------------------------------- +** Register manipulation macros. +*/ + +#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val))) + +#ifndef AND_REG +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#endif /* !AND_REG */ + +#ifndef OR_REG +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) +#endif /* !OR_REG */ + +#if !defined(OSL_SYSUPTIME) +#define OSL_SYSUPTIME() (0) +#define OSL_SYSUPTIME_SUPPORT FALSE +#else +#define OSL_SYSUPTIME_SUPPORT TRUE +#endif /* OSL_SYSUPTIME */ + +#ifndef OSL_SYS_HALT +#define OSL_SYS_HALT() do {} while (0) +#endif + +#ifndef OSL_MEM_AVAIL +#define OSL_MEM_AVAIL() (0xffffffff) +#endif + +#if !defined(PKTC) && !defined(PKTC_DONGLE) +#define PKTCGETATTR(skb) (0) +#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb) +#define PKTCCLRATTR(skb) BCM_REFERENCE(skb) +#define PKTCCNT(skb) (1) +#define PKTCLEN(skb) PKTLEN(NULL, skb) +#define PKTCGETFLAGS(skb) (0) +#define PKTCSETFLAGS(skb, f) BCM_REFERENCE(skb) +#define PKTCCLRFLAGS(skb) BCM_REFERENCE(skb) +#define PKTCFLAGS(skb) (0) +#define PKTCSETCNT(skb, c) BCM_REFERENCE(skb) +#define PKTCINCRCNT(skb) BCM_REFERENCE(skb) +#define PKTCADDCNT(skb, c) BCM_REFERENCE(skb) +#define PKTCSETLEN(skb, l) BCM_REFERENCE(skb) +#define PKTCADDLEN(skb, l) BCM_REFERENCE(skb) +#define PKTCSETFLAG(skb, fb) BCM_REFERENCE(skb) +#define PKTCCLRFLAG(skb, fb) BCM_REFERENCE(skb) +#define PKTCLINK(skb) NULL +#define PKTSETCLINK(skb, x) BCM_REFERENCE(skb) +#define FOREACH_CHAINED_PKT(skb, nskb) \ + for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb)) +#define PKTCFREE PKTFREE +#define PKTCENQTAIL(h, t, p) \ +do { \ + if ((t) == NULL) { \ + (h) = (t) = (p); \ + } \ +} while (0) +#endif /* !linux || !PKTC */ + +#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE) +#define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh) +#define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh) +#define PKTISCHAINED(skb) FALSE +#endif + +/* Lbuf with fraglist */ +#define PKTFRAGPKTID(osh, lb) (0) +#define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh) +#define PKTFRAGTOTNUM(osh, lb) (0) +#define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh) +#define PKTFRAGTOTLEN(osh, lb) (0) +#define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh) +#define PKTIFINDEX(osh, lb) (0) +#define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh) +#define PKTGETLF(osh, len, send, lbuf_type) (0) + +/* in rx path, reuse totlen as used len */ +#define PKTFRAGUSEDLEN(osh, lb) (0) +#define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh) + +#define PKTFRAGLEN(osh, lb, ix) (0) +#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh) +#define PKTFRAGDATA_LO(osh, lb, ix) (0) +#define 
PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh) +#define PKTFRAGDATA_HI(osh, lb, ix) (0) +#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh) + +/* RX FRAG */ +#define PKTISRXFRAG(osh, lb) (0) +#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh) + +/* TX FRAG */ +#define PKTISTXFRAG(osh, lb) (0) +#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh) + +/* Need Rx completion used for AMPDU reordering */ +#define PKTNEEDRXCPL(osh, lb) (TRUE) +#define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh) + +#define PKTISFRAG(osh, lb) (0) +#define PKTFRAGISCHAINED(osh, i) (0) +/* TRIM Tail bytes from lfrag */ +#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) + +#ifdef BCM_SECURE_DMA +#define SECURE_DMA_ENAB(osh) (1) +#else + +#define SECURE_DMA_ENAB(osh) (0) +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) {(0)}) +#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0 +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) {(0)}) +#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) +#define SECURE_DMA_UNMAP_ALL(osh, pcma) + +#endif + + +#endif /* _osl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/osl_decl.h b/drivers/net/wireless/bcmdhd/include/osl_decl.h new file mode 100644 index 000000000000..6c8d86eeabf1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/osl_decl.h @@ -0,0 +1,37 @@ +/* + * osl forward declarations + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_decl.h 591283 2015-10-07 11:52:00Z $ + */ + +#ifndef _osl_decl_h_ +#define _osl_decl_h_ + +/* osl handle type forward declaration */ +typedef struct osl_info osl_t; +typedef struct osl_dmainfo osldma_t; +extern unsigned int lmtest; /* low memory test */ +#endif diff --git a/drivers/net/wireless/bcmdhd/include/osl_ext.h b/drivers/net/wireless/bcmdhd/include/osl_ext.h new file mode 100644 index 000000000000..61984e68c4d0 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/osl_ext.h @@ -0,0 +1,697 @@ +/* + * OS Abstraction Layer Extension - the APIs defined by the "extension" API + * are only supported by a subset of all operating systems. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_ext.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _osl_ext_h_ +#define _osl_ext_h_ + + +/* ---- Include Files ---------------------------------------------------- */ + +#if defined(TARGETOS_symbian) + #include + #include +#elif defined(THREADX) + #include +#else + #define OSL_EXT_DISABLED +#endif + +/* Include base operating system abstraction. */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* ---- Constants and Types ---------------------------------------------- */ + +/* ----------------------------------------------------------------------- + * Generic OS types. + */ +typedef enum osl_ext_status_t +{ + OSL_EXT_SUCCESS, + OSL_EXT_ERROR, + OSL_EXT_TIMEOUT + +} osl_ext_status_t; +#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status; + +#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1)) +typedef unsigned int osl_ext_time_ms_t; + +typedef unsigned int osl_ext_event_bits_t; + +typedef unsigned int osl_ext_interrupt_state_t; + +/* ----------------------------------------------------------------------- + * Timers. + */ +typedef enum +{ + /* One-shot timer. */ + OSL_EXT_TIMER_MODE_ONCE, + + /* Periodic timer. */ + OSL_EXT_TIMER_MODE_REPEAT + +} osl_ext_timer_mode_t; + +/* User registered callback and parameter to invoke when timer expires. */ +typedef void* osl_ext_timer_arg_t; +typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg); + + +/* ----------------------------------------------------------------------- + * Tasks. + */ + +/* Task entry argument. */ +typedef void* osl_ext_task_arg_t; + +/* Task entry function. */ +typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg); + +/* Abstract task priority levels. */ +typedef enum +{ + OSL_EXT_TASK_IDLE_PRIORITY, + OSL_EXT_TASK_LOW_PRIORITY, + OSL_EXT_TASK_LOW_NORMAL_PRIORITY, + OSL_EXT_TASK_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGH_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGHEST_PRIORITY, + OSL_EXT_TASK_TIME_CRITICAL_PRIORITY, + + /* This must be last. 
*/ + OSL_EXT_TASK_NUM_PRIORITES +} osl_ext_task_priority_t; + + +#ifndef OSL_EXT_DISABLED + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + + +/* -------------------------------------------------------------------------- +** Semaphore +*/ + +/**************************************************************************** +* Function: osl_ext_sem_create +* +* Purpose: Creates a counting semaphore object, which can subsequently be +* used for thread notification. +* +* Parameters: name (in) Name to assign to the semaphore (must be unique). +* init_cnt (in) Initial count that the semaphore should have. +* sem (out) Newly created semaphore. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was created successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_delete +* +* Purpose: Destroys a previously created semaphore object. +* +* Parameters: sem (mod) Semaphore object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_give +* +* Purpose: Increments the count associated with the semaphore. This will +* cause one thread blocked on a take to wake up. +* +* Parameters: sem (mod) Semaphore object to give. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was given successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_take +* +* Purpose: Decrements the count associated with the semaphore. If the count +* is less than zero, then the calling task will become blocked until +* another thread does a give on the semaphore. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. +* +* Parameters: sem (mod) Semaphore object to take. +* timeout_msec (in) Number of milliseconds to wait for the +* semaphore to enter a state where it can be +* taken. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was taken successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec); + + +/* -------------------------------------------------------------------------- +** Mutex +*/ + +/**************************************************************************** +* Function: osl_ext_mutex_create +* +* Purpose: Creates a mutex object, which can subsequently be used to control +* mutually exclusion of resources. +* +* Parameters: name (in) Name to assign to the mutex (must be unique). +* mutex (out) Mutex object to initialize. 
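The semaphore calls above follow the usual give/take model: take blocks for at most timeout_msec and reports OSL_EXT_TIMEOUT on expiry. A hedged sketch of a producer/consumer handshake built on them; the names work_sem and the 100 ms timeout are illustrative:

static osl_ext_sem_t work_sem;   /* concrete type comes from the per-OS include */

static osl_ext_status_t init_work_sem(void)
{
	/* Initial count of 0 so the first take blocks until a give. */
	return osl_ext_sem_create("work", 0, &work_sem);
}

static void producer(void)
{
	/* ... queue a work item somewhere, then wake one waiter ... */
	(void)osl_ext_sem_give(&work_sem);
}

static void consumer(void)
{
	if (osl_ext_sem_take(&work_sem, 100 /* ms */) == OSL_EXT_TIMEOUT)
		return;   /* nothing arrived within 100 ms */
	/* ... dequeue and process one work item ... */
}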
+* +* Returns: OSL_EXT_SUCCESS if the mutex was created successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_delete +* +* Purpose: Destroys a previously created mutex object. +* +* Parameters: mutex (mod) Mutex object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the mutex was deleted successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_acquire +* +* Purpose: Acquires the indicated mutual exclusion object. If the object is +* currently acquired by another task, then this function will wait +* for timeout_msec milli-seconds before returning with OSL_EXT_TIMEOUT. +* +* Parameters: mutex (mod) Mutex object to acquire. +* timeout_msec (in) Number of milliseconds to wait for the mutex. +* +* Returns: OSL_EXT_SUCCESS if the mutex was acquired successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec); + +/**************************************************************************** +* Function: osl_ext_mutex_release +* +* Purpose: Releases the indicated mutual exclusion object. This makes it +* available for another task to acquire. +* +* Parameters: mutex (mod) Mutex object to release. +* +* Returns: OSL_EXT_SUCCESS if the mutex was released successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex); + + +/* -------------------------------------------------------------------------- +** Timers +*/ + +/**************************************************************************** +* Function: osl_ext_timer_create +* +* Purpose: Creates a timer object. +* +* Parameters: name (in) Name of timer. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* func (in) Callback function to invoke on timer expiry. +* arg (in) Argument to callback function. +* timer (out) Timer object to create. +* +* Note: The function callback occurs in interrupt context. The application is +* required to provide context switch for the callback if required. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode, + osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_delete +* +* Purpose: Destroys a previously created timer object. +* +* Parameters: timer (mod) Timer object to destroy. 
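Per the note above, timer callbacks run in interrupt context, so they should do minimal work (for example, signal a task). A sketch of creating a 50 ms periodic timer under that constraint; the callback name and period are illustrative:

static void tick_cb(osl_ext_timer_arg_t arg)
{
	/* Interrupt context: keep this short, e.g. set an event for a task. */
	(void)arg;
}

static osl_ext_status_t start_tick_timer(osl_ext_timer_t *timer)
{
	/* Periodic 50 ms timer; osl_ext_timer_stop()/osl_ext_timer_start()
	 * can later re-arm it with a different period or mode. */
	return osl_ext_timer_create("tick", 50, OSL_EXT_TIMER_MODE_REPEAT,
	                            tick_cb, NULL, timer);
}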
+* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_start +* +* Purpose: Start a previously created timer object. +* +* Parameters: timer (in) Timer object. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_start(osl_ext_timer_t *timer, + osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode); + +/**************************************************************************** +* Function: osl_ext_timer_stop +* +* Purpose: Stop a previously created timer object. +* +* Parameters: timer (in) Timer object. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_stop(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_time_get +* +* Purpose: Returns incrementing time counter. +* +* Parameters: None. +* +* Returns: Returns incrementing time counter in msec. +***************************************************************************** +*/ +osl_ext_time_ms_t osl_ext_time_get(void); + +/* -------------------------------------------------------------------------- +** Tasks +*/ + +/**************************************************************************** +* Function: osl_ext_task_create +* +* Purpose: Create a task. +* +* Parameters: name (in) Pointer to task string descriptor. +* stack (in) Pointer to stack. NULL to allocate. +* stack_size (in) Stack size - in bytes. +* priority (in) Abstract task priority. +* func (in) A pointer to the task entry point function. +* arg (in) Value passed into task entry point function. +* task (out) Task to create. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \ + (arg), (task)) + +osl_ext_status_t osl_ext_task_create_ex(char* name, + void *stack, unsigned int stack_size, osl_ext_task_priority_t priority, + osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg, + osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_delete +* +* Purpose: Destroy a task. +* +* Parameters: task (mod) Task to destroy. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. 
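osl_ext_task_create() is simply the zero-timeslice form of osl_ext_task_create_ex(). A sketch of spawning a worker task at normal priority with an OS-allocated stack; the entry function, 4 KB stack size and task name are illustrative:

static void worker_entry(osl_ext_task_arg_t arg)
{
	(void)arg;
	for (;;) {
		/* wait for work (semaphore/queue/event), process it, repeat */
	}
}

static osl_ext_status_t spawn_worker(osl_ext_task_t *task)
{
	/* NULL stack => the OS allocates one; the wrapper passes a 0 timeslice. */
	return osl_ext_task_create("worker", NULL, 4096,
	                           OSL_EXT_TASK_NORMAL_PRIORITY,
	                           worker_entry, NULL, task);
}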
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task); + + +/**************************************************************************** +* Function: osl_ext_task_is_running +* +* Purpose: Returns current running task. +* +* Parameters: None. +* +* Returns: osl_ext_task_t of current running task. +***************************************************************************** +*/ +osl_ext_task_t *osl_ext_task_current(void); + + +/**************************************************************************** +* Function: osl_ext_task_yield +* +* Purpose: Yield the CPU to other tasks of the same priority that are +* ready-to-run. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_yield(void); + + +/**************************************************************************** +* Function: osl_ext_task_enable_stack_check +* +* Purpose: Enable task stack checking. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_enable_stack_check(void); + + +/* -------------------------------------------------------------------------- +** Queue +*/ + +/**************************************************************************** +* Function: osl_ext_queue_create +* +* Purpose: Create a queue. +* +* Parameters: name (in) Name to assign to the queue (must be unique). +* buffer (in) Queue buffer. NULL to allocate. +* size (in) Size of the queue. +* queue (out) Newly created queue. +* +* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an +* error code if the queue could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_create(char *name, + void *queue_buffer, unsigned int queue_size, + osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_delete +* +* Purpose: Destroys a previously created queue object. +* +* Parameters: queue (mod) Queue object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an +* error code if the queue could not be deleteed. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_send +* +* Purpose: Send/add data to the queue. This function will not block the +* calling thread if the queue is full. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_send_synchronous +* +* Purpose: Send/add data to the queue. This function will block the +* calling thread until the data is dequeued. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. 
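osl_ext_queue_send() never blocks the sender, while osl_ext_queue_receive() (documented just below) waits up to a caller-supplied timeout. A sketch pairing the two; the pointer payloads and 16-entry depth are illustrative, and the units of the size parameter are port-defined:

static osl_ext_queue_t work_q;

static osl_ext_status_t init_work_queue(void)
{
	/* NULL buffer => the OS allocates storage for the queued pointers. */
	return osl_ext_queue_create("workq", NULL, 16, &work_q);
}

static void post_work(void *item)
{
	/* Non-blocking: returns an error code if the queue is full. */
	(void)osl_ext_queue_send(&work_q, item);
}

static void *wait_for_work(void)
{
	void *item = NULL;

	if (osl_ext_queue_receive(&work_q, OSL_EXT_TIME_FOREVER, &item) != OSL_EXT_SUCCESS)
		return NULL;
	return item;
}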
+* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_receive +* +* Purpose: Receive/remove data from the queue. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. +* +* Parameters: queue (mod) Queue object. +* timeout_msec (in) Number of milliseconds to wait for the +* data from the queue. +* data (out) Data pointer received/removed from the queue. +* +* Returns: OSL_EXT_SUCCESS if the data was dequeued successfully, or an +* error code if the data could not be dequeued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue, + osl_ext_time_ms_t timeout_msec, void **data); + +/**************************************************************************** +* Function: osl_ext_queue_count +* +* Purpose: Returns the number of items in the queue. +* +* Parameters: queue (mod) Queue object. +* count (out) Data pointer received/removed from the queue. +* +* Returns: OSL_EXT_SUCCESS if the count was returned successfully, or an +* error code if the count is invalid. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count); + + +/* -------------------------------------------------------------------------- +** Event +*/ + +/**************************************************************************** +* Function: osl_ext_event_create +* +* Purpose: Creates a event object, which can subsequently be used to +* notify and trigger tasks. +* +* Parameters: name (in) Name to assign to the event (must be unique). +* event (out) Event object to initialize. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_delete +* +* Purpose: Destroys a previously created event object. +* +* Parameters: event (mod) Event object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_get +* +* Purpose: Get event from specified event object. +* +* Parameters: event (mod) Event object to get. +* requested (in) Requested event to get. +* timeout_msec (in) Number of milliseconds to wait for the event. +* event_bits (out) Event bits retrieved. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. 
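Events are bitmask based: a waiter asks for a set of requested bits with a timeout and receives back the bits that actually fired. A sketch, assuming the event object was first created with osl_ext_event_create(); the bit values and names are illustrative:

#define EVT_RX_READY  0x1u   /* illustrative event bits */
#define EVT_SHUTDOWN  0x2u

static osl_ext_event_t drv_event;   /* created elsewhere with osl_ext_event_create() */

static osl_ext_status_t wait_for_rx_or_shutdown(osl_ext_event_bits_t *bits)
{
	/* Blocks up to 500 ms for either bit; OSL_EXT_TIMEOUT if neither fires. */
	return osl_ext_event_get(&drv_event, EVT_RX_READY | EVT_SHUTDOWN,
	                         500, bits);
}

static void signal_rx_ready(void)
{
	(void)osl_ext_event_set(&drv_event, EVT_RX_READY);
}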
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event, + osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec, + osl_ext_event_bits_t *event_bits); + +/**************************************************************************** +* Function: osl_ext_event_set +* +* Purpose: Set event of specified event object. +* +* Parameters: event (mod) Event object to set. +* event_bits (in) Event bits to set. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event, + osl_ext_event_bits_t event_bits); + + +/* -------------------------------------------------------------------------- +** Interrupt +*/ + +/**************************************************************************** +* Function: osl_ext_interrupt_disable +* +* Purpose: Disable CPU interrupt. +* +* Parameters: None. +* +* Returns: The interrupt state before disable for restoring interrupt. +***************************************************************************** +*/ +osl_ext_interrupt_state_t osl_ext_interrupt_disable(void); + + +/**************************************************************************** +* Function: osl_ext_interrupt_restore +* +* Purpose: Restore CPU interrupt state. +* +* Parameters: state (in) Interrupt state to restore returned from +* osl_ext_interrupt_disable(). +* +* Returns: None. +***************************************************************************** +*/ +void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state); + +#else + +/* ---- Constants and Types ---------------------------------------------- */ + +/* Semaphore. */ +#define osl_ext_sem_t +#define OSL_EXT_SEM_DECL(sem) + +/* Mutex. */ +#define osl_ext_mutex_t +#define OSL_EXT_MUTEX_DECL(mutex) + +/* Timer. */ +#define osl_ext_timer_t +#define OSL_EXT_TIMER_DECL(timer) + +/* Task. */ +#define osl_ext_task_t void +#define OSL_EXT_TASK_DECL(task) + +/* Queue. */ +#define osl_ext_queue_t +#define OSL_EXT_QUEUE_DECL(queue) + +/* Event. 
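osl_ext_interrupt_disable() above returns the previous interrupt state, so nested critical sections restore correctly. A sketch of the usual save/restore bracket around data shared with interrupt context; the shared counter is illustrative:

static unsigned int shared_count;

static void bump_shared_count(void)
{
	osl_ext_interrupt_state_t state;

	state = osl_ext_interrupt_disable();   /* save prior state, mask interrupts */
	shared_count++;                        /* touch data also used by an ISR */
	osl_ext_interrupt_restore(state);      /* re-enable only if previously enabled */
}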
*/ +#define osl_ext_event_t +#define OSL_EXT_EVENT_DECL(event) + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS) + +#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS) + +#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \ + (OSL_EXT_SUCCESS) +#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS) +#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS) +#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS) +#define osl_ext_time_get() (0) + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + (OSL_EXT_SUCCESS) +#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS) +#define osl_ext_task_current() (NULL) +#define osl_ext_task_yield() (OSL_EXT_SUCCESS) +#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS) + +#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_receive(queue, timeout_msec, data) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS) + +#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS) +#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS) +#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \ + (OSL_EXT_SUCCESS) +#define osl_ext_event_set(event, event_bits) (OSL_EXT_SUCCESS) + +#define osl_ext_interrupt_disable(void) +#define osl_ext_interrupt_restore(state) + +#endif /* OSL_EXT_DISABLED */ + +#ifdef __cplusplus +} +#endif + +#endif /* _osl_ext_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h new file mode 100644 index 000000000000..e3a35c7e9270 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h @@ -0,0 +1,63 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. 
In order to handle all + * cases, packed structures should be delared as: + * + * #include + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: packed_section_end.h 514727 2014-11-12 03:02:48Z $ + */ + + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is NOT defined at this + * point, then there is a missing include of packed_section_start.h. + */ +#ifdef BWL_PACKED_SECTION + #undef BWL_PACKED_SECTION +#else + #error "BWL_PACKED_SECTION is NOT defined!" +#endif + + + + +/* Compiler-specific directives for structure packing are declared in + * packed_section_start.h. This marks the end of the structure packing section, + * so, undef them here. + */ +#undef BWL_PRE_PACKED_STRUCT +#undef BWL_POST_PACKED_STRUCT diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h new file mode 100644 index 000000000000..617176461f75 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h @@ -0,0 +1,67 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. 
In order to handle all + * cases, packed structures should be delared as: + * + * #include + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: packed_section_start.h 514727 2014-11-12 03:02:48Z $ + */ + + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is already defined at this + * point, then there is a missing include of packed_section_end.h. + */ +#ifdef BWL_PACKED_SECTION + #error "BWL_PACKED_SECTION is already defined!" +#else + #define BWL_PACKED_SECTION +#endif + + + + +/* Declare compiler-specific directives for structure packing. */ +#if defined(__GNUC__) || defined(__lint) + #define BWL_PRE_PACKED_STRUCT + #define BWL_POST_PACKED_STRUCT __attribute__ ((packed)) +#elif defined(__CC_ARM) + #define BWL_PRE_PACKED_STRUCT __packed + #define BWL_POST_PACKED_STRUCT +#else + #error "Unknown compiler!" +#endif diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h new file mode 100644 index 000000000000..be0a92a17847 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h @@ -0,0 +1,260 @@ +/* + * pcicfg.h: PCI configuration constants and structures. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
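BWL_PRE_PACKED_STRUCT and BWL_POST_PACKED_STRUCT only take effect between the start/end includes, and the BWL_PACKED_SECTION guard catches a missing half of the bracket at compile time. A sketch of the intended usage, assuming the elided include targets are the packed_section_start.h/packed_section_end.h headers added by this patch; the wire-format struct itself is illustrative:

#include "packed_section_start.h"   /* defines BWL_PRE/POST_PACKED_STRUCT */

/* On GCC this expands to __attribute__ ((packed)) after the closing brace;
 * on ARM CC it expands to __packed before "struct". */
typedef BWL_PRE_PACKED_STRUCT struct wl_hdr {
	uint8  category;   /* without packing, padding would follow this member */
	uint16 len;
	uint32 cookie;
} BWL_POST_PACKED_STRUCT wl_hdr_t;

#include "packed_section_end.h"     /* undefs the macros, closes the section */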
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: pcicfg.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _h_pcicfg_ +#define _h_pcicfg_ + + +/* pci config status reg has a bit to indicate that capability ptr is present */ + +#define PCI_CAPPTR_PRESENT 0x0010 + +/* A structure for the config registers is nice, but in most + * systems the config space is not memory mapped, so we need + * field offsetts. :-( + */ +#define PCI_CFG_VID 0 +#define PCI_CFG_DID 2 +#define PCI_CFG_CMD 4 +#define PCI_CFG_STAT 6 +#define PCI_CFG_REV 8 +#define PCI_CFG_PROGIF 9 +#define PCI_CFG_SUBCL 0xa +#define PCI_CFG_BASECL 0xb +#define PCI_CFG_CLSZ 0xc +#define PCI_CFG_LATTIM 0xd +#define PCI_CFG_HDR 0xe +#define PCI_CFG_BIST 0xf +#define PCI_CFG_BAR0 0x10 +#define PCI_CFG_BAR1 0x14 +#define PCI_CFG_BAR2 0x18 +#define PCI_CFG_BAR3 0x1c +#define PCI_CFG_BAR4 0x20 +#define PCI_CFG_BAR5 0x24 +#define PCI_CFG_CIS 0x28 +#define PCI_CFG_SVID 0x2c +#define PCI_CFG_SSID 0x2e +#define PCI_CFG_ROMBAR 0x30 +#define PCI_CFG_CAPPTR 0x34 +#define PCI_CFG_INT 0x3c +#define PCI_CFG_PIN 0x3d +#define PCI_CFG_MINGNT 0x3e +#define PCI_CFG_MAXLAT 0x3f +#define PCI_CFG_DEVCTRL 0xd8 + + +/* PCI CAPABILITY DEFINES */ +#define PCI_CAP_POWERMGMTCAP_ID 0x01 +#define PCI_CAP_MSICAP_ID 0x05 +#define PCI_CAP_VENDSPEC_ID 0x09 +#define PCI_CAP_PCIECAP_ID 0x10 + +/* Data structure to define the Message Signalled Interrupt facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_msi { + uint8 capID; + uint8 nextptr; + uint16 msgctrl; + uint32 msgaddr; +} pciconfig_cap_msi; +#define MSI_ENABLE 0x1 /* bit 0 of msgctrl */ + +/* Data structure to define the Power managment facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_pwrmgmt { + uint8 capID; + uint8 nextptr; + uint16 pme_cap; + uint16 pme_sts_ctrl; + uint8 pme_bridge_ext; + uint8 data; +} pciconfig_cap_pwrmgmt; + +#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */ +#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */ +#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */ +#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */ + +/* Data structure to define the PCIE capability */ +typedef struct _pciconfig_cap_pcie { + uint8 capID; + uint8 nextptr; + uint16 pcie_cap; + uint32 dev_cap; + uint16 dev_ctrl; + uint16 dev_status; + uint32 link_cap; + uint16 link_ctrl; + uint16 link_status; + uint32 slot_cap; + uint16 slot_ctrl; + uint16 slot_status; + uint16 root_ctrl; + uint16 root_cap; + uint32 root_status; +} pciconfig_cap_pcie; + +/* PCIE Enhanced CAPABILITY DEFINES */ +#define PCIE_EXTCFG_OFFSET 0x100 +#define PCIE_ADVERRREP_CAPID 0x0001 +#define PCIE_VC_CAPID 0x0002 +#define PCIE_DEVSNUM_CAPID 0x0003 +#define PCIE_PWRBUDGET_CAPID 0x0004 + +/* PCIE Extended configuration */ +#define PCIE_ADV_CORR_ERR_MASK 0x114 +#define CORR_ERR_RE (1 << 0) /* Receiver */ +#define CORR_ERR_BT (1 << 6) /* Bad TLP */ +#define CORR_ERR_BD (1 << 7) /* Bad DLLP */ +#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ +#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */ +#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ + CORR_ERR_RR | CORR_ERR_RT) + +/* PCIE Root Control Register bits (Host mode only) */ +#define PCIE_RC_CORR_SERR_EN 0x0001 +#define 
PCIE_RC_NONFATAL_SERR_EN 0x0002 +#define PCIE_RC_FATAL_SERR_EN 0x0004 +#define PCIE_RC_PME_INT_EN 0x0008 +#define PCIE_RC_CRS_EN 0x0010 + +/* PCIE Root Capability Register bits (Host mode only) */ +#define PCIE_RC_CRS_VISIBILITY 0x0001 + +/* Header to define the PCIE specific capabilities in the extended config space */ +typedef struct _pcie_enhanced_caphdr { + uint16 capID; + uint16 cap_ver : 4; + uint16 next_ptr : 12; +} pcie_enhanced_caphdr; + + +#define PCI_BAR0_WIN 0x80 /* backplane addres space accessed by BAR0 */ +#define PCI_BAR1_WIN 0x84 /* backplane addres space accessed by BAR1 */ +#define PCI_SPROM_CONTROL 0x88 /* sprom property control */ +#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */ +#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */ +#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */ +#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */ +#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */ +#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */ +#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */ +#define PCI_BAR0_WIN2 0xac /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ +#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ +#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ +#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */ + +/* Private Registers */ +#define PCI_STAT_CTRL 0xa80 +#define PCI_L0_EVENTCNT 0xa84 +#define PCI_L0_STATETMR 0xa88 +#define PCI_L1_EVENTCNT 0xa8c +#define PCI_L1_STATETMR 0xa90 +#define PCI_L1_1_EVENTCNT 0xa94 +#define PCI_L1_1_STATETMR 0xa98 +#define PCI_L1_2_EVENTCNT 0xa9c +#define PCI_L1_2_STATETMR 0xaa0 +#define PCI_L2_EVENTCNT 0xaa4 +#define PCI_L2_STATETMR 0xaa8 + +#define PCI_PMCR_REFUP 0x1814 /* Trefup time */ +#define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */ +#define PCI_TPOWER_SCALE_MASK 0x3 +#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */ + + +#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */ +#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */ +#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */ +#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the + * 8KB window, so their address is the "regular" + * address plus 4K + */ +/* + * PCIE GEN2 changed some of the above locations for + * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase + * BAR0 maps 32K of register space +*/ +#define PCIE2_BAR0_WIN2 0x70 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane addres space accessed by second 4KB of BAR0 */ + +#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */ +/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ +#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ +#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */ +#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ +#define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 window */ + + +/* Header types */ +#define 
PCI_HEADER_MULTI 0x80 +#define PCI_HEADER_MASK 0x7f +typedef enum { + PCI_HEADER_NORMAL, + PCI_HEADER_BRIDGE, + PCI_HEADER_CARDBUS +} pci_header_types; + +#define PCI_CONFIG_SPACE_SIZE 256 + +#define DWORD_ALIGN(x) (x & ~(0x03)) +#define BYTE_POS(x) (x & 0x3) +#define WORD_POS(x) (x & 0x1) + +#define BYTE_SHIFT(x) (8 * BYTE_POS(x)) +#define WORD_SHIFT(x) (16 * WORD_POS(x)) + +#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF) +#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF) + +#define read_pci_cfg_byte(a) \ + (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff) + +#define read_pci_cfg_word(a) \ + (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff) + +#define write_pci_cfg_byte(a, val) do { \ + uint32 tmpval; \ + tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \ + val << BYTE_POS(a); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#define write_pci_cfg_word(a, val) do { \ + uint32 tmpval; \ + tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \ + val << WORD_POS(a); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#endif /* _h_pcicfg_ */ diff --git a/drivers/net/wireless/bcmdhd/include/pcie_core.h b/drivers/net/wireless/bcmdhd/include/pcie_core.h new file mode 100644 index 000000000000..25a156adcb4f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/pcie_core.h @@ -0,0 +1,652 @@ +/* + * BCM43XX PCIE core hardware definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: pcie_core.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _PCIE_CORE_H +#define _PCIE_CORE_H + +#include +#include + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +/* PCIE Enumeration space offsets */ +#define PCIE_CORE_CONFIG_OFFSET 0x0 +#define PCIE_FUNC0_CONFIG_OFFSET 0x400 +#define PCIE_FUNC1_CONFIG_OFFSET 0x500 +#define PCIE_FUNC2_CONFIG_OFFSET 0x600 +#define PCIE_FUNC3_CONFIG_OFFSET 0x700 +#define PCIE_SPROM_SHADOW_OFFSET 0x800 +#define PCIE_SBCONFIG_OFFSET 0xE00 + + +#define PCIEDEV_MAX_DMAS 4 + +/* PCIE Bar0 Address Mapping. 
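read_pci_cfg_byte() above pulls a whole dword with OSL_PCI_READ_CONFIG() and extracts the addressed byte, which makes it convenient for walking the capability list starting at PCI_CFG_CAPPTR (a full implementation would first confirm PCI_CAPPTR_PRESENT in the status register). A sketch under the assumption that an osh handle is in scope, since the macro references it implicitly; find_pci_capability() is a hypothetical helper name:

/* Hypothetical helper: return the config-space offset of capability 'cap_id',
 * or 0 if it is not found. */
static uint32 find_pci_capability(osl_t *osh, uint8 cap_id)
{
	uint32 ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);   /* 0 if no list */
	int guard = 48;                                   /* bound a corrupt list */

	while (ptr != 0 && guard-- > 0) {
		if (read_pci_cfg_byte(ptr) == cap_id)     /* capID field */
			return ptr;
		ptr = read_pci_cfg_byte(ptr + 1);         /* nextptr field */
	}
	return 0;
}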
Each function maps 16KB config space */ +#define PCIE_DEV_BAR0_SIZE 0x4000 +#define PCIE_BAR0_WINMAPCORE_OFFSET 0x0 +#define PCIE_BAR0_EXTSPROM_OFFSET 0x1000 +#define PCIE_BAR0_PCIECORE_OFFSET 0x2000 +#define PCIE_BAR0_CCCOREREG_OFFSET 0x3000 + +/* different register spaces to access thr'u pcie indirect access */ +#define PCIE_CONFIGREGS 1 /* Access to config space */ +#define PCIE_PCIEREGS 2 /* Access to pcie registers */ + +/* dma regs to control the flow between host2dev and dev2host */ +typedef struct pcie_devdmaregs { + dma64regs_t tx; + uint32 PAD[2]; + dma64regs_t rx; + uint32 PAD[2]; +} pcie_devdmaregs_t; + +#define PCIE_DB_HOST2DEV_0 0x1 +#define PCIE_DB_HOST2DEV_1 0x2 +#define PCIE_DB_DEV2HOST_0 0x3 +#define PCIE_DB_DEV2HOST_1 0x4 + +/* door bell register sets */ +typedef struct pcie_doorbell { + uint32 host2dev_0; + uint32 host2dev_1; + uint32 dev2host_0; + uint32 dev2host_1; +} pcie_doorbell_t; + +/* SB side: PCIE core and host control registers */ +typedef struct sbpcieregs { + uint32 control; /* host mode only */ + uint32 iocstatus; /* PCIE2: iostatus */ + uint32 PAD[1]; + uint32 biststatus; /* bist Status: 0x00C */ + uint32 gpiosel; /* PCIE gpio sel: 0x010 */ + uint32 gpioouten; /* PCIE gpio outen: 0x14 */ + uint32 PAD[2]; + uint32 intstatus; /* Interrupt status: 0x20 */ + uint32 intmask; /* Interrupt mask: 0x24 */ + uint32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */ + uint32 obffcontrol; /* PCIE2: 0x2C */ + uint32 obffintstatus; /* PCIE2: 0x30 */ + uint32 obffdatastatus; /* PCIE2: 0x34 */ + uint32 PAD[2]; + uint32 errlog; /* PCIE2: 0x40 */ + uint32 errlogaddr; /* PCIE2: 0x44 */ + uint32 mailboxint; /* PCIE2: 0x48 */ + uint32 mailboxintmsk; /* PCIE2: 0x4c */ + uint32 ltrspacing; /* PCIE2: 0x50 */ + uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */ + uint32 PAD[42]; + + uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */ + uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */ + uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */ + uint32 PAD[5]; + + /* pcie core supports in direct access to config space */ + uint32 configaddr; /* pcie config space access: Address field: 0x120 */ + uint32 configdata; /* pcie config space access: Data field: 0x124 */ + union { + struct { + /* mdio access to serdes */ + uint32 mdiocontrol; /* controls the mdio access: 0x128 */ + uint32 mdiodata; /* Data to the mdio access: 0x12c */ + /* pcie protocol phy/dllp/tlp register indirect access mechanism */ + uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */ + uint32 pcieinddata; /* Data to/from the internal regsiter: 0x134 */ + uint32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */ + uint32 PAD[177]; + } pcie1; + struct { + /* mdio access to serdes */ + uint32 mdiocontrol; /* controls the mdio access: 0x128 */ + uint32 mdiowrdata; /* write data to mdio 0x12C */ + uint32 mdiorddata; /* read data to mdio 0x130 */ + uint32 PAD[3]; /* 0x134-0x138-0x13c */ + /* door bell registers available from gen2 rev5 onwards */ + pcie_doorbell_t dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */ + uint32 dataintf; /* 0x180 */ + uint32 PAD[1]; /* 0x184 */ + uint32 d2h_intrlazy_0; /* 0x188 */ + uint32 h2d_intrlazy_0; /* 0x18c */ + uint32 h2d_intstat_0; /* 0x190 */ + uint32 h2d_intmask_0; /* 0x194 */ + uint32 d2h_intstat_0; /* 0x198 */ + uint32 d2h_intmask_0; /* 0x19c */ + uint32 ltr_state; /* 0x1A0 */ + uint32 pwr_int_status; /* 0x1A4 */ + uint32 pwr_int_mask; /* 0x1A8 */ + uint32 PAD[13]; /* 0x1AC - 0x1DF */ + uint32 clk_ctl_st; /* 0x1E0 */ + uint32 PAD[7]; /* 0x1E4 - 0x1FF */ + 
pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */ + pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */ + pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */ + pcie_devdmaregs_t d2h1_dmaregs; /* 0x2c0 - 0x2fc */ + pcie_devdmaregs_t h2d2_dmaregs; /* 0x300 - 0x33c */ + pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */ + pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */ + pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */ + } pcie2; + } u; + uint32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */ + uint16 sprom[64]; /* SPROM shadow Area */ +} sbpcieregs_t; + +/* PCI control */ +#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */ +#define PCIE_RST 0x02 /* Value driven out to pin */ +#define PCIE_SPERST 0x04 /* SurvivePeRst */ +#define PCIE_DISABLE_L1CLK_GATING 0x10 +#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */ +#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */ +#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */ +#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */ +#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */ + +#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ +#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ + +/* Interrupt status/mask */ +#define PCIE_INTA 0x01 /* PCIE INTA message is received */ +#define PCIE_INTB 0x02 /* PCIE INTB message is received */ +#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */ +#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */ +#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */ +#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */ +#define PCIE_PERST 0x40 /* PCIE Reset Interrupt */ + +#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */ +#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */ +#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */ +#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */ +#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */ +#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */ +#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */ +#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */ + +/* PCIE MailboxInt/MailboxIntMask register */ +#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */ +#define PCIE_MB_TOSB_FN0_1 0x0002 +#define PCIE_MB_TOSB_FN1_0 0x0004 +#define PCIE_MB_TOSB_FN1_1 0x0008 +#define PCIE_MB_TOSB_FN2_0 0x0010 +#define PCIE_MB_TOSB_FN2_1 0x0020 +#define PCIE_MB_TOSB_FN3_0 0x0040 +#define PCIE_MB_TOSB_FN3_1 0x0080 +#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */ +#define PCIE_MB_TOPCIE_FN0_1 0x0200 +#define PCIE_MB_TOPCIE_FN1_0 0x0400 +#define PCIE_MB_TOPCIE_FN1_1 0x0800 +#define PCIE_MB_TOPCIE_FN2_0 0x1000 +#define PCIE_MB_TOPCIE_FN2_1 0x2000 +#define PCIE_MB_TOPCIE_FN3_0 0x4000 +#define PCIE_MB_TOPCIE_FN3_1 0x8000 +#define PCIE_MB_TOPCIE_D2H0_DB0 0x10000 +#define PCIE_MB_TOPCIE_D2H0_DB1 0x20000 +#define PCIE_MB_TOPCIE_D2H1_DB0 0x40000 +#define PCIE_MB_TOPCIE_D2H1_DB1 0x80000 +#define PCIE_MB_TOPCIE_D2H2_DB0 0x100000 +#define PCIE_MB_TOPCIE_D2H2_DB1 0x200000 +#define PCIE_MB_TOPCIE_D2H3_DB0 0x400000 +#define PCIE_MB_TOPCIE_D2H3_DB1 0x800000 + +#define PCIE_MB_D2H_MB_MASK \ + (PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 | \ + 
PCIE_MB_TOPCIE_D2H1_DB0 | PCIE_MB_TOPCIE_D2H1_DB1 | \ + PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \ + PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1) + +/* SB to PCIE translation masks */ +#define SBTOPCIE0_MASK 0xfc000000 +#define SBTOPCIE1_MASK 0xfc000000 +#define SBTOPCIE2_MASK 0xc0000000 + +/* Access type bits (0:1) */ +#define SBTOPCIE_MEM 0 +#define SBTOPCIE_IO 1 +#define SBTOPCIE_CFG0 2 +#define SBTOPCIE_CFG1 3 + +/* Prefetch enable bit 2 */ +#define SBTOPCIE_PF 4 + +/* Write Burst enable for memory write bit 3 */ +#define SBTOPCIE_WR_BURST 8 + +/* config access */ +#define CONFIGADDR_FUNC_MASK 0x7000 +#define CONFIGADDR_FUNC_SHF 12 +#define CONFIGADDR_REG_MASK 0x0FFF +#define CONFIGADDR_REG_SHF 0 + +#define PCIE_CONFIG_INDADDR(f, r) ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \ + (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF)) + +/* PCIE protocol regs Indirect Address */ +#define PCIEADDR_PROT_MASK 0x300 +#define PCIEADDR_PROT_SHF 8 +#define PCIEADDR_PL_TLP 0 +#define PCIEADDR_PL_DLLP 1 +#define PCIEADDR_PL_PLP 2 + +/* PCIE protocol PHY diagnostic registers */ +#define PCIE_PLP_MODEREG 0x200 /* Mode */ +#define PCIE_PLP_STATUSREG 0x204 /* Status */ +#define PCIE_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */ +#define PCIE_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */ +#define PCIE_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */ +#define PCIE_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */ +#define PCIE_PLP_ATTNREG 0x218 /* Attention */ +#define PCIE_PLP_ATTNMASKREG 0x21C /* Attention Mask */ +#define PCIE_PLP_RXERRCTR 0x220 /* Rx Error */ +#define PCIE_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */ +#define PCIE_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */ +#define PCIE_PLP_TESTCTRLREG 0x22C /* Test Control reg */ +#define PCIE_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */ +#define PCIE_PLP_TIMINGOVRDREG 0x234 /* Timing param override */ +#define PCIE_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */ +#define PCIE_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */ + +/* PCIE protocol DLLP diagnostic registers */ +#define PCIE_DLLP_LCREG 0x100 /* Link Control */ +#define PCIE_DLLP_LSREG 0x104 /* Link Status */ +#define PCIE_DLLP_LAREG 0x108 /* Link Attention */ +#define PCIE_DLLP_LAMASKREG 0x10C /* Link Attention Mask */ +#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */ +#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */ +#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */ +#define PCIE_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */ +#define PCIE_DLLP_LRREG 0x120 /* Link Replay */ +#define PCIE_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */ +#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */ +#define PCIE_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */ +#define PCIE_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */ +#define PCIE_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */ +#define PCIE_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */ +#define PCIE_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */ +#define PCIE_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */ +#define PCIE_DLLP_ERRCTRREG 0x144 /* Error Counter */ +#define PCIE_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */ +#define PCIE_DLLP_TESTREG 0x14C /* Test */ +#define PCIE_DLLP_PKTBIST 0x150 /* Packet BIST */ +#define PCIE_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */ + +#define PCIE_DLLP_LSREG_LINKUP (1 << 16) + +/* PCIE protocol TLP diagnostic registers */ +#define PCIE_TLP_CONFIGREG 0x000 /* 
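The sbtopcie translation registers map a backplane window onto PCIe address space: the masked high bits give the window base and the low bits select the access type (memory, I/O, config) plus the prefetch and write-burst modifiers. A hedged sketch of pointing window 0 at a host memory region, assuming a mapped sbpcieregs_t pointer and the platform OSL's W_REG accessor; the function and variable names are illustrative:

/* Illustrative only: aim SB-to-PCIe window 0 at 'host_addr' for memory
 * accesses with prefetch enabled.  'regs' is the mapped PCIe core. */
static void map_sbtopcie0(osl_t *osh, sbpcieregs_t *regs, uint32 host_addr)
{
	uint32 win = (host_addr & SBTOPCIE0_MASK)   /* 64 MB-aligned window base */
	           | SBTOPCIE_MEM                   /* access type: memory (0) */
	           | SBTOPCIE_PF;                   /* prefetch enable, bit 2 */

	W_REG(osh, &regs->sbtopcie0, win);
}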
Configuration */ +#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */ +#define PCIE_TLP_WRDMAUPPER 0x010 /* Write DMA Upper Address */ +#define PCIE_TLP_WRDMALOWER 0x014 /* Write DMA Lower Address */ +#define PCIE_TLP_WRDMAREQ_LBEREG 0x018 /* Write DMA Len/ByteEn Req */ +#define PCIE_TLP_RDDMAUPPER 0x01C /* Read DMA Upper Address */ +#define PCIE_TLP_RDDMALOWER 0x020 /* Read DMA Lower Address */ +#define PCIE_TLP_RDDMALENREG 0x024 /* Read DMA Len Req */ +#define PCIE_TLP_MSIDMAUPPER 0x028 /* MSI DMA Upper Address */ +#define PCIE_TLP_MSIDMALOWER 0x02C /* MSI DMA Lower Address */ +#define PCIE_TLP_MSIDMALENREG 0x030 /* MSI DMA Len Req */ +#define PCIE_TLP_SLVREQLENREG 0x034 /* Slave Request Len */ +#define PCIE_TLP_FCINPUTSREQ 0x038 /* Flow Control Inputs */ +#define PCIE_TLP_TXSMGRSREQ 0x03C /* Tx StateMachine and Gated Req */ +#define PCIE_TLP_ADRACKCNTARBLEN 0x040 /* Address Ack XferCnt and ARB Len */ +#define PCIE_TLP_DMACPLHDR0 0x044 /* DMA Completion Hdr 0 */ +#define PCIE_TLP_DMACPLHDR1 0x048 /* DMA Completion Hdr 1 */ +#define PCIE_TLP_DMACPLHDR2 0x04C /* DMA Completion Hdr 2 */ +#define PCIE_TLP_DMACPLMISC0 0x050 /* DMA Completion Misc0 */ +#define PCIE_TLP_DMACPLMISC1 0x054 /* DMA Completion Misc1 */ +#define PCIE_TLP_DMACPLMISC2 0x058 /* DMA Completion Misc2 */ +#define PCIE_TLP_SPTCTRLLEN 0x05C /* Split Controller Req len */ +#define PCIE_TLP_SPTCTRLMSIC0 0x060 /* Split Controller Misc 0 */ +#define PCIE_TLP_SPTCTRLMSIC1 0x064 /* Split Controller Misc 1 */ +#define PCIE_TLP_BUSDEVFUNC 0x068 /* Bus/Device/Func */ +#define PCIE_TLP_RESETCTR 0x06C /* Reset Counter */ +#define PCIE_TLP_RTRYBUF 0x070 /* Retry Buffer value */ +#define PCIE_TLP_TGTDEBUG1 0x074 /* Target Debug Reg1 */ +#define PCIE_TLP_TGTDEBUG2 0x078 /* Target Debug Reg2 */ +#define PCIE_TLP_TGTDEBUG3 0x07C /* Target Debug Reg3 */ +#define PCIE_TLP_TGTDEBUG4 0x080 /* Target Debug Reg4 */ + +/* PCIE2 MDIO register offsets */ +#define PCIE2_MDIO_CONTROL 0x128 +#define PCIE2_MDIO_WR_DATA 0x12C +#define PCIE2_MDIO_RD_DATA 0x130 + + +/* MDIO control */ +#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define MDIOCTL_DIVISOR_VAL 0x2 +#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */ +#define MDIOCTL_ACCESS_DONE 0x100 /* Tranaction complete */ + +/* MDIO Data */ +#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */ +#define MDIODATA_TA 0x00020000 /* Turnaround */ +#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */ +#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */ +#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */ +#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */ +#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */ +#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */ +#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */ +#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */ +#define MDIODATA_WRITE 0x10000000 /* write Transaction */ +#define MDIODATA_READ 0x20000000 /* Read Transaction */ +#define MDIODATA_START 0x40000000 /* start of Transaction */ + +#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */ +#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */ + +/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */ +#define MDIOCTL2_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define MDIOCTL2_DIVISOR_VAL 0x2 +#define MDIOCTL2_REGADDR_SHF 8 /* Regaddr shift */ +#define MDIOCTL2_REGADDR_MASK 0x00FFFF00 /* Regaddr Mask */ +#define 
MDIOCTL2_DEVADDR_SHF 24 /* Physmedia devaddr shift */ +#define MDIOCTL2_DEVADDR_MASK 0x0f000000 /* Physmedia devaddr Mask */ +#define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */ +#define MDIOCTL2_READ 0x20000000 /* IP slave bypass */ + +#define MDIODATA2_DONE 0x80000000 /* rd/wr transaction done */ +#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */ +#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */ + + +/* MDIO devices (SERDES modules) + * unlike old pcie cores (rev < 10), rev10 pcie serde organizes registers into a few blocks. + * two layers mapping (blockidx, register offset) is required + */ +#define MDIO_DEV_IEEE0 0x000 +#define MDIO_DEV_IEEE1 0x001 +#define MDIO_DEV_BLK0 0x800 +#define MDIO_DEV_BLK1 0x801 +#define MDIO_DEV_BLK2 0x802 +#define MDIO_DEV_BLK3 0x803 +#define MDIO_DEV_BLK4 0x804 +#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */ +#define MDIO_DEV_TXCTRL0 0x820 +#define MDIO_DEV_SERDESID 0x831 +#define MDIO_DEV_RXCTRL0 0x840 + + +/* XgxsBlk1_A Register Offsets */ +#define BLK1_PWR_MGMT0 0x16 +#define BLK1_PWR_MGMT1 0x17 +#define BLK1_PWR_MGMT2 0x18 +#define BLK1_PWR_MGMT3 0x19 +#define BLK1_PWR_MGMT4 0x1A + +/* serdes regs (rev < 10) */ +#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */ +#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ +#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ + /* SERDES RX registers */ +#define SERDES_RX_CTRL 1 /* Rx cntrl */ +#define SERDES_RX_TIMER1 2 /* Rx Timer1 */ +#define SERDES_RX_CDR 6 /* CDR */ +#define SERDES_RX_CDRBW 7 /* CDR BW */ + + /* SERDES RX control register */ +#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */ +#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */ + + /* SERDES PLL registers */ +#define SERDES_PLL_CTRL 1 /* PLL control reg */ +#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ + +/* Power management threshold */ +#define PCIE_L0THRESHOLDTIME_MASK 0xFF00 /* bits 0 - 7 */ +#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */ +#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */ +#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */ +#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */ + +/* SPROM offsets */ +#define SRSH_ASPM_OFFSET 4 /* word 4 */ +#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */ +#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */ +#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */ +#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */ +#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */ +#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */ +#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */ +#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */ +#define SRSH_BD_OFFSET 6 /* word 6 */ +#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */ + +/* Linkcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */ +#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */ +#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */ +#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */ +#define PCIE_LINKSPEED_MASK 0xF0000 /* bits 0 - 3 of high word */ +#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */ + +/* Devcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */ +#define PCIE_CAP_DEVCTRL_MRRS_MASK 0x7000 /* Max read request size mask */ +#define PCIE_CAP_DEVCTRL_MRRS_SHIFT 12 /* Max read request size shift */ +#define PCIE_CAP_DEVCTRL_MRRS_128B 0 /* 128 Byte */ +#define 
PCIE_CAP_DEVCTRL_MRRS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_1024B 3 /* 1024 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_MASK 0x00e0 /* Max payload size mask */ +#define PCIE_CAP_DEVCTRL_MPS_SHIFT 5 /* Max payload size shift */ +#define PCIE_CAP_DEVCTRL_MPS_128B 0 /* 128 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */ + +#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */ + +#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */ +#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */ + +/* Devcontrol2 reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */ +#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */ +#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13 /* Enable OBFF mechanism, select signaling method */ +#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */ + +/* LTR registers in PCIE Cap */ +#define PCIE_LTR0_REG_OFFSET 0x844 /* ltr0_reg offset in pcie cap */ +#define PCIE_LTR1_REG_OFFSET 0x848 /* ltr1_reg offset in pcie cap */ +#define PCIE_LTR2_REG_OFFSET 0x84c /* ltr2_reg offset in pcie cap */ +#define PCIE_LTR0_REG_DEFAULT_60 0x883c883c /* active latency default to 60usec */ +#define PCIE_LTR0_REG_DEFAULT_150 0x88968896 /* active latency default to 150usec */ +#define PCIE_LTR1_REG_DEFAULT 0x88648864 /* idle latency default to 100usec */ +#define PCIE_LTR2_REG_DEFAULT 0x90039003 /* sleep latency default to 3msec */ + +/* Status reg PCIE_PLP_STATUSREG */ +#define PCIE_PLP_POLARITYINV_STAT 0x10 + + +/* PCIE BRCM Vendor CAP REVID reg bits */ +#define BRCMCAP_PCIEREV_CT_MASK 0xF00 +#define BRCMCAP_PCIEREV_CT_SHIFT 8 +#define BRCMCAP_PCIEREV_REVID_MASK 0xFF +#define BRCMCAP_PCIEREV_REVID_SHIFT 0 + +#define PCIE_REVREG_CT_PCIE1 0 +#define PCIE_REVREG_CT_PCIE2 1 + +/* PCIE GEN2 specific defines */ +/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */ +#define PCIE2R0_BRCMCAP_REVID_OFFSET 4 +#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET 8 +#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET 12 +#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET 16 +#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET 20 +#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET 24 +#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET 28 +#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET 32 +#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET 36 +#define PCIE2R0_BRCMCAP_INTMASK_OFFSET 40 +#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET 44 +#define PCIE2R0_BRCMCAP_BPADDR_OFFSET 48 +#define PCIE2R0_BRCMCAP_BPDATA_OFFSET 52 +#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET 56 + +/* definition of configuration space registers of PCIe gen2 + * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm + */ +#define PCIECFGREG_STATUS_CMD 0x4 +#define PCIECFGREG_PM_CSR 0x4C +#define PCIECFGREG_MSI_CAP 0x58 +#define PCIECFGREG_MSI_ADDR_L 0x5C +#define PCIECFGREG_MSI_ADDR_H 0x60 +#define PCIECFGREG_MSI_DATA 0x64 +#define PCIECFGREG_LINK_STATUS_CTRL 0xBC +#define PCIECFGREG_LINK_STATUS_CTRL2 0xDC +#define PCIECFGREG_RBAR_CTRL 0x228 +#define PCIECFGREG_PML1_SUB_CTRL1 0x248 +#define PCIECFGREG_REG_BAR2_CONFIG 0x4E0 +#define PCIECFGREG_REG_BAR3_CONFIG 0x4F4 +#define 
PCIECFGREG_PDL_CTRL1 0x1004 +#define PCIECFGREG_PDL_IDDQ 0x1814 +#define PCIECFGREG_REG_PHY_CTL7 0x181c + +/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */ +#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */ +#define PCI_PM_L1_1_ENA_MASK 0x00000002 /* PCI-PM L1.1 Enabled */ +#define ASPM_L1_2_ENA_MASK 0x00000004 /* ASPM L1.2 Enabled */ +#define ASPM_L1_1_ENA_MASK 0x00000008 /* ASPM L1.1 Enabled */ + +/* PCIe gen2 mailbox interrupt masks */ +#define I_MB 0x3 +#define I_BIT0 0x1 +#define I_BIT1 0x2 + +/* PCIE gen2 config regs */ +#define PCIIntstatus 0x090 +#define PCIIntmask 0x094 +#define PCISBMbx 0x98 + +/* enumeration Core regs */ +#define PCIH2D_MailBox 0x140 +#define PCIH2D_DB1 0x144 +#define PCID2H_MailBox 0x148 +#define PCIMailBoxInt 0x48 +#define PCIMailBoxMask 0x4C + +#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */ +#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */ + +#define PCIECFGREG_DEVCONTROL 0xB4 +#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12 +#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT) + +/* SROM hardware region */ +#define SROM_OFFSET_BAR1_CTRL 52 + +#define BAR1_ENC_SIZE_MASK 0x000e +#define BAR1_ENC_SIZE_SHIFT 1 + +#define BAR1_ENC_SIZE_1M 0 +#define BAR1_ENC_SIZE_2M 1 +#define BAR1_ENC_SIZE_4M 2 + +#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB 0x400 + +/* + * Latency Tolerance Reporting (LTR) states + * Active has the least tolerant latency requirement + * Sleep is most tolerant + */ +#define LTR_ACTIVE 2 +#define LTR_ACTIVE_IDLE 1 +#define LTR_SLEEP 0 +#define LTR_FINAL_MASK 0x300 +#define LTR_FINAL_SHIFT 8 + +/* pwrinstatus, pwrintmask regs */ +#define PCIEGEN2_PWRINT_D0_STATE_SHIFT 0 +#define PCIEGEN2_PWRINT_D1_STATE_SHIFT 1 +#define PCIEGEN2_PWRINT_D2_STATE_SHIFT 2 +#define PCIEGEN2_PWRINT_D3_STATE_SHIFT 3 +#define PCIEGEN2_PWRINT_L0_LINK_SHIFT 4 +#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT 5 +#define PCIEGEN2_PWRINT_L1_LINK_SHIFT 6 +#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT 7 +#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT 8 + +#define PCIEGEN2_PWRINT_D0_STATE_MASK (1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D1_STATE_MASK (1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D2_STATE_MASK (1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D3_STATE_MASK (1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT) +#define PCIEGEN2_PWRINT_L0_LINK_MASK (1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L0s_LINK_MASK (1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L1_LINK_MASK (1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK (1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT) +#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK (1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT) + +/* sbtopcie mail box */ +#define SBTOPCIE_MB_FUNC0_SHIFT 8 +#define SBTOPCIE_MB_FUNC1_SHIFT 10 +#define SBTOPCIE_MB_FUNC2_SHIFT 12 +#define SBTOPCIE_MB_FUNC3_SHIFT 14 + +/* pcieiocstatus */ +#define PCIEGEN2_IOC_D0_STATE_SHIFT 8 +#define PCIEGEN2_IOC_D1_STATE_SHIFT 9 +#define PCIEGEN2_IOC_D2_STATE_SHIFT 10 +#define PCIEGEN2_IOC_D3_STATE_SHIFT 11 +#define PCIEGEN2_IOC_L0_LINK_SHIFT 12 +#define PCIEGEN2_IOC_L1_LINK_SHIFT 13 +#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14 +#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15 + +#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT) +#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT) +#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT) 
+#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT) +#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT) +#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT) +#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT) +#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT) + +/* stat_ctrl */ +#define PCIE_STAT_CTRL_RESET 0x1 +#define PCIE_STAT_CTRL_ENABLE 0x2 +#define PCIE_STAT_CTRL_INTENABLE 0x4 +#define PCIE_STAT_CTRL_INTSTATUS 0x8 + +#ifdef BCMDRIVER +void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +#endif /* BCMDRIVER */ + +#endif /* _PCIE_CORE_H */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h new file mode 100644 index 000000000000..7aaea5d0596d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h @@ -0,0 +1,4445 @@ +/* + * Fundamental types and constants relating to 802.11 + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11.h 556559 2015-05-14 01:48:17Z $ + */ + +#ifndef _802_11_H_ +#define _802_11_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +#ifndef _NET_ETHERNET_H_ +#include +#endif + +#include + +/* This marks the start of a packed structure section. 
*/ +#include + + +#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */ + +/* Generic 802.11 frame constants */ +#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */ +#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */ +#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */ +#define DOT11_FCS_LEN 4 /* d11 FCS length */ +#define DOT11_ICV_LEN 4 /* d11 ICV length */ +#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */ +#define DOT11_QOS_LEN 2 /* d11 QoS length */ +#define DOT11_HTC_LEN 4 /* d11 HT Control field length */ + +#define DOT11_KEY_INDEX_SHIFT 6 /* d11 key index shift */ +#define DOT11_IV_LEN 4 /* d11 IV length */ +#define DOT11_IV_TKIP_LEN 8 /* d11 IV TKIP length */ +#define DOT11_IV_AES_OCB_LEN 4 /* d11 IV/AES/OCB length */ +#define DOT11_IV_AES_CCM_LEN 8 /* d11 IV/AES/CCM length */ +#define DOT11_IV_MAX_LEN 8 /* maximum iv len for any encryption */ + +/* Includes MIC */ +#define DOT11_MAX_MPDU_BODY_LEN 2304 /* max MPDU body length */ +/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */ +#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \ + DOT11_QOS_LEN + \ + DOT11_IV_AES_CCM_LEN + \ + DOT11_MAX_MPDU_BODY_LEN + \ + DOT11_ICV_LEN + \ + DOT11_FCS_LEN) /* d11 max MPDU length */ + +#define DOT11_MAX_SSID_LEN 32 /* d11 max ssid length */ + +/* dot11RTSThreshold */ +#define DOT11_DEFAULT_RTS_LEN 2347 /* d11 default RTS length */ +#define DOT11_MAX_RTS_LEN 2347 /* d11 max RTS length */ + +/* dot11FragmentationThreshold */ +#define DOT11_MIN_FRAG_LEN 256 /* d11 min fragmentation length */ +#define DOT11_MAX_FRAG_LEN 2346 /* Max frag is also limited by aMPDUMaxLength + * of the attached PHY + */ +#define DOT11_DEFAULT_FRAG_LEN 2346 /* d11 default fragmentation length */ + +/* dot11BeaconPeriod */ +#define DOT11_MIN_BEACON_PERIOD 1 /* d11 min beacon period */ +#define DOT11_MAX_BEACON_PERIOD 0xFFFF /* d11 max beacon period */ + +/* dot11DTIMPeriod */ +#define DOT11_MIN_DTIM_PERIOD 1 /* d11 min DTIM period */ +#define DOT11_MAX_DTIM_PERIOD 0xFF /* d11 max DTIM period */ + +/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */ +#define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */ +#define DOT11_OUI_LEN 3 /* d11 OUI length */ +BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[DOT11_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 type; /* ethertype */ +} BWL_POST_PACKED_STRUCT; + +/* RFC1042 header used by 802.11 per 802.1H */ +#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) /* RFC1042 header length */ + +/* Generic 802.11 MAC header */ +/** + * N.B.: This struct reflects the full 4 address 802.11 MAC header. + * The fields are defined such that the shorter 1, 2, and 3 + * address headers just use the first k fields. 
+ */ +BWL_PRE_PACKED_STRUCT struct dot11_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr a1; /* address 1 */ + struct ether_addr a2; /* address 2 */ + struct ether_addr a3; /* address 3 */ + uint16 seq; /* sequence control */ + struct ether_addr a4; /* address 4 */ +} BWL_POST_PACKED_STRUCT; + +/* Control frames */ + +BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_RTS_LEN 16 /* d11 RTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTS_LEN 10 /* d11 CTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACK_LEN 10 /* d11 ACK frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { + uint16 fc; /* frame control */ + uint16 durid; /* AID */ + struct ether_addr bssid; /* receiver address, STA in AP */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr bssid; /* transmitter address, STA in AP */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */ + +/** + * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling + * category+OUI+vendor specific content ( this can be variable) + */ +BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1040]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; + +/** generic vendor specific action frame with variable length */ +BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t; + +#define DOT11_ACTION_VS_HDR_LEN 6 + +#define BCM_ACTION_OUI_BYTE0 0x00 +#define BCM_ACTION_OUI_BYTE1 0x90 +#define BCM_ACTION_OUI_BYTE2 0x4c + +/* BA/BAR Control parameters */ +#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */ +#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */ +#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */ + +#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */ +#define DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */ + +#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */ +#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */ + +#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */ +#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */ + +/** control frame header (BA/BAR) */ +BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTL_HDR_LEN 16 /* 
control frame hdr len */ + +/** BAR frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_bar { + uint16 bar_control; /* BAR Control */ + uint16 seqnum; /* Starting Sequence control */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BAR_LEN 4 /* BAR frame payload length */ + +#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */ +#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */ +/** BA frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_ba { + uint16 ba_control; /* BA Control */ + uint16 seqnum; /* Starting Sequence control */ + uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */ + +/** Management frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_management_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr da; /* receiver address */ + struct ether_addr sa; /* transmitter address */ + struct ether_addr bssid; /* BSS ID */ + uint16 seq; /* sequence control */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_management_header dot11_management_header_t; +#define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */ + +/* Management frame payloads */ + +BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { + uint32 timestamp[2]; + uint16 beacon_interval; + uint16 capability; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */ +#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_auth { + uint16 alg; /* algorithm */ + uint16 seq; /* sequence control */ + uint16 status; /* status code */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ + struct ether_addr ap; /* Current AP address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_REASSOC_REQ_FIXED_LEN 10 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { + uint16 capability; /* capability information */ + uint16 status; /* status code */ + uint16 aid; /* association ID */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_measure { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { + uint8 category; + uint8 action; + uint8 ch_width; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { + uint8 category; + uint8 action; + uint8 control; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query { + uint8 category; + uint8 action; + uint16 id; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode { + uint8 category; + uint8 action; + uint8 mode; +} BWL_POST_PACKED_STRUCT; + +/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */ +#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */ +#define DOT11_ACTION_GID_USER_POS_LEN 16 /* 
bytes */ +BWL_PRE_PACKED_STRUCT struct dot11_action_group_id { + uint8 category; + uint8 action; + uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN]; + uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN]; +} BWL_POST_PACKED_STRUCT; + +#define SM_PWRSAVE_ENABLE 1 +#define SM_PWRSAVE_MODE 2 + +/* ************* 802.11h related definitions. ************* */ +BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { + uint8 id; + uint8 len; + uint8 power; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cnst dot11_power_cnst_t; + +BWL_PRE_PACKED_STRUCT struct dot11_power_cap { + int8 min; + int8 max; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cap dot11_power_cap_t; + +BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { + uint8 id; + uint8 len; + uint8 tx_pwr; + uint8 margin; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tpc_rep dot11_tpc_rep_t; +#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */ + +BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { + uint8 id; + uint8 len; + uint8 first_channel; + uint8 num_channels; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_supp_channels dot11_supp_channels_t; + +/** + * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband + * offset for 40MHz operation. The possible 3 values are: + * 1 = above control channel + * 3 = below control channel + * 0 = no extension channel + */ +BWL_PRE_PACKED_STRUCT struct dot11_extch { + uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */ + uint8 len; /* IE length */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extch dot11_extch_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; + +#define BRCM_EXTCH_IE_LEN 5 +#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */ +#define DOT11_EXTCH_IE_LEN 1 +#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */ +#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */ +#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. on lower sb */ +#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { + uint8 category; + uint8 action; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_frmhdr dot11_action_frmhdr_t; +#define DOT11_ACTION_FRMHDR_LEN 2 + +/** CSA IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch { + uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 mode; /* mode 0 or 1 */ + uint8 channel; /* channel switch to */ + uint8 count; /* number of beacons before switching */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch dot11_chan_switch_ie_t; + +#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ +/* CSA mode - 802.11h-2003 $7.3.2.20 */ +#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */ +#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. 
*/ + +BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel { + uint8 category; + uint8 action; + dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */ + dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_csa_body { + uint8 mode; /* mode 0 or 1 */ + uint8 reg; /* regulatory class */ + uint8 channel; /* channel switch to */ + uint8 count; /* number of beacons before switching */ +} BWL_POST_PACKED_STRUCT; + +/** 11n Extended Channel Switch IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { + uint8 id; /* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + struct dot11_csa_body b; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ext_csa dot11_ext_csa_ie_t; +#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa { + uint8 category; + uint8 action; + dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa { + uint8 category; + uint8 action; + struct dot11_csa_body b; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; + +/** Wide Bandwidth Channel Switch IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 channel_width; /* new channel width */ + uint8 center_frequency_segment_0; /* center frequency segment 0 */ + uint8 center_frequency_segment_1; /* center frequency segment 1 */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t; + +#define DOT11_WIDE_BW_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ + +/** Channel Switch Wrapper IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t; + +/** VHT Transmit Power Envelope IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 transmit_power_info; + uint8 local_max_transmit_power_20; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t; + +/* vht transmit power envelope IE length depends on channel width */ +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ 1 +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ 2 +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ 3 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_coex { + uint8 id; + uint8 len; + uint8 info; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_coex dot11_obss_coex_t; +#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */ + +#define DOT11_OBSS_COEX_INFO_REQ 0x01 +#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02 +#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist { + uint8 id; + uint8 len; + uint8 regclass; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_chanlist dot11_obss_chanlist_t; +#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */ + +BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { + uint8 id; + uint8 len; + uint8 cap[1]; +} 
BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap_ie dot11_extcap_ie_t; + +#define DOT11_EXTCAP_LEN_COEX 1 +#define DOT11_EXTCAP_LEN_BT 3 +#define DOT11_EXTCAP_LEN_IW 4 +#define DOT11_EXTCAP_LEN_SI 6 + +#define DOT11_EXTCAP_LEN_TDLS 5 +#define DOT11_11AC_EXTCAP_LEN_TDLS 8 + +#define DOT11_EXTCAP_LEN_FMS 2 +#define DOT11_EXTCAP_LEN_PROXY_ARP 2 +#define DOT11_EXTCAP_LEN_TFS 3 +#define DOT11_EXTCAP_LEN_WNM_SLEEP 3 +#define DOT11_EXTCAP_LEN_TIMBC 3 +#define DOT11_EXTCAP_LEN_BSSTRANS 3 +#define DOT11_EXTCAP_LEN_DMS 4 +#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6 +#define DOT11_EXTCAP_LEN_TDLS_WBW 8 +#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8 + +/* TDLS Capabilities */ +#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */ +#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */ +#define DOT11_TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */ +#define DOT11_TDLS_CAP_CH_SW 30 /* TDLS Channel switch */ +#define DOT11_TDLS_CAP_PROH 38 /* TDLS prohibited */ +#define DOT11_TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */ +#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61 /* TDLS Wider Band-Width */ + +#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */ + +/* 802.11h/802.11k Measurement Request/Report IEs */ +/* Measurement Type field */ +#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */ +#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */ +#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */ +#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */ +#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */ +#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */ +#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */ +#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */ +#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */ +#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */ +#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */ +#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */ +#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */ +#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */ +#define DOT11_MEASURE_TYPE_DIRMEAS 14 /* d11 measurement directional */ +#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */ +#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */ +#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */ + +/* Measurement Request Modes */ +#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */ +#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */ +#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */ +#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */ +#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */ +/* Measurement Report Modes */ +#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */ +#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */ +#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refuse */ +/* Basic Measurement Map bits */ +#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */ +#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */ +#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement 
map unknown */ +#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */ +#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) /* d11 measurement map unmeasured */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 channel; + uint8 start_time[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req dot11_meas_req_t; +#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */ +/* length of Measure Request IE data not including variable len */ +#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 type; /* type of civic location */ + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + } BWL_POST_PACKED_STRUCT req; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req_loc dot11_meas_req_loc_t; +#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement request IE length */ +#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 measurement request IE length */ +#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 measurement request IE length */ +#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 measurement request IE length */ + +BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement { + uint8 subelement; + uint8 length; + uint8 lci_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lci_subelement dot11_lci_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 civic_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_civic_subelement dot11_civic_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; + } BWL_POST_PACKED_STRUCT basic; + BWL_PRE_PACKED_STRUCT struct { + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 entry_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT rep; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep dot11_meas_rep_t; +#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4 + +/* length of Measure Report IE data not including variable len */ +#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */ + 
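As a usage sketch only (not part of the patch itself), the measurement-report constants and the dot11_meas_rep_t layout above are easiest to read with a small consumer in mind. The fragment below shows how a received Measurement Report IE could be validated and dispatched on its type using the definitions in this header; the function name and its buffer/length parameters are illustrative assumptions, everything else comes from the header.

static int example_parse_meas_rep(const uint8 *ie, uint ie_len)
{
	/* hypothetical helper, shown only to illustrate the IE layout above */
	const dot11_meas_rep_t *rep = (const dot11_meas_rep_t *)ie;

	/* id/len form the usual TLV header; the body must at least hold token, mode and type */
	if (ie_len < TLV_HDR_LEN + DOT11_MNG_IE_MREP_FIXED_LEN ||
	    rep->id != DOT11_MNG_MEASURE_REPORT_ID)
		return -1;

	/* report mode bits flag late/incapable/refused reports before any data is meaningful */
	if (rep->mode & (DOT11_MEASURE_MODE_LATE |
	                 DOT11_MEASURE_MODE_INCAPABLE |
	                 DOT11_MEASURE_MODE_REFUSED))
		return 0;

	if (rep->type == DOT11_MEASURE_TYPE_BASIC &&
	    rep->len >= DOT11_MNG_IE_MREP_BASIC_FIXED_LEN) {
		/* basic report: channel, start time, duration and the map bits defined above */
		if (rep->rep.basic.map & DOT11_MEASURE_BASIC_MAP_RADAR)
			return 1;	/* radar reported on rep->rep.basic.channel */
	}
	return 0;
}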
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t; +#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */ + +BWL_PRE_PACKED_STRUCT struct dot11_quiet { + uint8 id; + uint8 len; + uint8 count; /* TBTTs until beacon interval in quiet starts */ + uint8 period; /* Beacon intervals between periodic quiet periods ? */ + uint16 duration; /* Length of quiet period, in TU's */ + uint16 offset; /* TU's offset from TBTT in Count field */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_quiet dot11_quiet_t; + +BWL_PRE_PACKED_STRUCT struct chan_map_tuple { + uint8 channel; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct chan_map_tuple chan_map_tuple_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs { + uint8 id; + uint8 len; + uint8 eaddr[ETHER_ADDR_LEN]; + uint8 interval; + chan_map_tuple_t map[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ibss_dfs dot11_ibss_dfs_t; + +/* WME Elements */ +#define WME_OUI "\x00\x50\xf2" /* WME OUI */ +#define WME_OUI_LEN 3 +#define WME_OUI_TYPE 2 /* WME type */ +#define WME_TYPE 2 /* WME type, deprecated */ +#define WME_SUBTYPE_IE 0 /* Information Element */ +#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */ +#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */ +#define WME_VER 1 /* WME version */ + +/* WME Access Category Indices (ACIs) */ +#define AC_BE 0 /* Best Effort */ +#define AC_BK 1 /* Background */ +#define AC_VI 2 /* Video */ +#define AC_VO 3 /* Voice */ +#define AC_COUNT 4 /* number of ACs */ + +typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */ + +#define AC_BITMAP_NONE 0x0 /* No ACs */ +#define AC_BITMAP_ALL 0xf /* All ACs */ +#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0) +#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) +#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) + + +/** WME Information Element (IE) */ +BWL_PRE_PACKED_STRUCT struct wme_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_ie wme_ie_t; +#define WME_IE_LEN 7 /* WME IE length */ + +BWL_PRE_PACKED_STRUCT struct edcf_acparam { + uint8 ACI; + uint8 ECW; + uint16 TXOP; /* stored in network order (ls octet first) */ +} BWL_POST_PACKED_STRUCT; +typedef struct edcf_acparam edcf_acparam_t; + +/** WME Parameter Element (PE) */ +BWL_PRE_PACKED_STRUCT struct wme_param_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_param_ie wme_param_ie_t; +#define WME_PARAM_IE_LEN 24 /* WME Parameter IE length */ + +/* QoS Info field for IE as sent from AP */ +#define WME_QI_AP_APSD_MASK 0x80 /* U-APSD Supported mask */ +#define WME_QI_AP_APSD_SHIFT 7 /* U-APSD Supported shift */ +#define WME_QI_AP_COUNT_MASK 0x0f /* Parameter set count mask */ +#define WME_QI_AP_COUNT_SHIFT 0 /* Parameter set count shift */ + +/* QoS Info field for IE as sent from STA */ +#define WME_QI_STA_MAXSPLEN_MASK 0x60 /* Max Service Period Length mask */ +#define WME_QI_STA_MAXSPLEN_SHIFT 5 /* Max Service Period Length shift */ +#define WME_QI_STA_APSD_ALL_MASK 0xf /* APSD all AC bits mask */ +#define WME_QI_STA_APSD_ALL_SHIFT 0 /* APSD all AC bits shift */ +#define WME_QI_STA_APSD_BE_MASK 0x8 /* APSD AC_BE mask */ +#define WME_QI_STA_APSD_BE_SHIFT 3 /* APSD AC_BE shift */ +#define 
WME_QI_STA_APSD_BK_MASK 0x4 /* APSD AC_BK mask */ +#define WME_QI_STA_APSD_BK_SHIFT 2 /* APSD AC_BK shift */ +#define WME_QI_STA_APSD_VI_MASK 0x2 /* APSD AC_VI mask */ +#define WME_QI_STA_APSD_VI_SHIFT 1 /* APSD AC_VI shift */ +#define WME_QI_STA_APSD_VO_MASK 0x1 /* APSD AC_VO mask */ +#define WME_QI_STA_APSD_VO_SHIFT 0 /* APSD AC_VO shift */ + +/* ACI */ +#define EDCF_AIFSN_MIN 1 /* AIFSN minimum value */ +#define EDCF_AIFSN_MAX 15 /* AIFSN maximum value */ +#define EDCF_AIFSN_MASK 0x0f /* AIFSN mask */ +#define EDCF_ACM_MASK 0x10 /* ACM mask */ +#define EDCF_ACI_MASK 0x60 /* ACI mask */ +#define EDCF_ACI_SHIFT 5 /* ACI shift */ +#define EDCF_AIFSN_SHIFT 12 /* 4 MSB(0xFFF) in ifs_ctl for AC idx */ + +/* ECW */ +#define EDCF_ECW_MIN 0 /* cwmin/cwmax exponent minimum value */ +#define EDCF_ECW_MAX 15 /* cwmin/cwmax exponent maximum value */ +#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) +#define EDCF_ECWMIN_MASK 0x0f /* cwmin exponent form mask */ +#define EDCF_ECWMAX_MASK 0xf0 /* cwmax exponent form mask */ +#define EDCF_ECWMAX_SHIFT 4 /* cwmax exponent form shift */ + +/* TXOP */ +#define EDCF_TXOP_MIN 0 /* TXOP minimum value */ +#define EDCF_TXOP_MAX 65535 /* TXOP maximum value */ +#define EDCF_TXOP2USEC(txop) ((txop) << 5) + +/* Default BE ACI value for non-WME connection STA */ +#define NON_EDCF_AC_BE_ACI_STA 0x02 + +/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */ +#define EDCF_AC_BE_ACI_STA 0x03 /* STA ACI value for best effort AC */ +#define EDCF_AC_BE_ECW_STA 0xA4 /* STA ECW value for best effort AC */ +#define EDCF_AC_BE_TXOP_STA 0x0000 /* STA TXOP value for best effort AC */ +#define EDCF_AC_BK_ACI_STA 0x27 /* STA ACI value for background AC */ +#define EDCF_AC_BK_ECW_STA 0xA4 /* STA ECW value for background AC */ +#define EDCF_AC_BK_TXOP_STA 0x0000 /* STA TXOP value for background AC */ +#define EDCF_AC_VI_ACI_STA 0x42 /* STA ACI value for video AC */ +#define EDCF_AC_VI_ECW_STA 0x43 /* STA ECW value for video AC */ +#define EDCF_AC_VI_TXOP_STA 0x005e /* STA TXOP value for video AC */ +#define EDCF_AC_VO_ACI_STA 0x62 /* STA ACI value for audio AC */ +#define EDCF_AC_VO_ECW_STA 0x32 /* STA ECW value for audio AC */ +#define EDCF_AC_VO_TXOP_STA 0x002f /* STA TXOP value for audio AC */ + +/* Default EDCF parameters that AP uses; WMM draft Table 14 */ +#define EDCF_AC_BE_ACI_AP 0x03 /* AP ACI value for best effort AC */ +#define EDCF_AC_BE_ECW_AP 0x64 /* AP ECW value for best effort AC */ +#define EDCF_AC_BE_TXOP_AP 0x0000 /* AP TXOP value for best effort AC */ +#define EDCF_AC_BK_ACI_AP 0x27 /* AP ACI value for background AC */ +#define EDCF_AC_BK_ECW_AP 0xA4 /* AP ECW value for background AC */ +#define EDCF_AC_BK_TXOP_AP 0x0000 /* AP TXOP value for background AC */ +#define EDCF_AC_VI_ACI_AP 0x41 /* AP ACI value for video AC */ +#define EDCF_AC_VI_ECW_AP 0x43 /* AP ECW value for video AC */ +#define EDCF_AC_VI_TXOP_AP 0x005e /* AP TXOP value for video AC */ +#define EDCF_AC_VO_ACI_AP 0x61 /* AP ACI value for audio AC */ +#define EDCF_AC_VO_ECW_AP 0x32 /* AP ECW value for audio AC */ +#define EDCF_AC_VO_TXOP_AP 0x002f /* AP TXOP value for audio AC */ + +/** EDCA Parameter IE */ +BWL_PRE_PACKED_STRUCT struct edca_param_ie { + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct edca_param_ie edca_param_ie_t; +#define EDCA_PARAM_IE_LEN 18 /* EDCA Parameter IE length */ + +/** QoS Capability IE */ +BWL_PRE_PACKED_STRUCT struct qos_cap_ie { + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef 
struct qos_cap_ie qos_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie { + uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */ + uint8 length; + uint16 station_count; /* total number of STAs associated */ + uint8 channel_utilization; /* % of time, normalized to 255, QAP sensed medium busy */ + uint16 aac; /* available admission capacity */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t; +#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */ + +#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */ + +/* nom_msdu_size */ +#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */ +#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */ + +/* surplus_bandwidth */ +/* Represented as 3 bits of integer, binary point, 13 bits fraction */ +#define INTEGER_SHIFT 13 /* integer shift */ +#define FRACTION_MASK 0x1FFF /* fraction mask */ + +/** Management Notification Frame */ +BWL_PRE_PACKED_STRUCT struct dot11_management_notification { + uint8 category; /* DOT11_ACTION_NOTIFICATION */ + uint8 action; + uint8 token; + uint8 status; + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */ + +/** Timeout Interval IE */ +BWL_PRE_PACKED_STRUCT struct ti_ie { + uint8 ti_type; + uint32 ti_val; +} BWL_POST_PACKED_STRUCT; +typedef struct ti_ie ti_ie_t; +#define TI_TYPE_REASSOC_DEADLINE 1 +#define TI_TYPE_KEY_LIFETIME 2 + +/* WME Action Codes */ +#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */ +#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */ +#define WME_DELTS_REQUEST 2 /* WME DELTS request */ + +/* WME Setup Response Status Codes */ +#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */ +#define WME_INVALID_PARAMETERS 1 /* WME invalid parameters */ +#define WME_ADMISSION_REFUSED 3 /* WME admission refused */ + +/* Macro to take a pointer to a beacon or probe response + * body and return the char* pointer to the SSID info element + */ +#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN) + +/* Authentication frame payload constants */ +#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */ +#define DOT11_SHARED_KEY 1 /* d11 shared authentication */ +#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */ +#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */ + +/* Frame control macros */ +#define FC_PVER_MASK 0x3 /* PVER mask */ +#define FC_PVER_SHIFT 0 /* PVER shift */ +#define FC_TYPE_MASK 0xC /* type mask */ +#define FC_TYPE_SHIFT 2 /* type shift */ +#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */ +#define FC_SUBTYPE_SHIFT 4 /* subtype shift */ +#define FC_TODS 0x100 /* to DS */ +#define FC_TODS_SHIFT 8 /* to DS shift */ +#define FC_FROMDS 0x200 /* from DS */ +#define FC_FROMDS_SHIFT 9 /* from DS shift */ +#define FC_MOREFRAG 0x400 /* more frag. */ +#define FC_MOREFRAG_SHIFT 10 /* more frag. shift */ +#define FC_RETRY 0x800 /* retry */ +#define FC_RETRY_SHIFT 11 /* retry shift */ +#define FC_PM 0x1000 /* PM */ +#define FC_PM_SHIFT 12 /* PM shift */ +#define FC_MOREDATA 0x2000 /* more data */ +#define FC_MOREDATA_SHIFT 13 /* more data shift */ +#define FC_WEP 0x4000 /* WEP */ +#define FC_WEP_SHIFT 14 /* WEP shift */ +#define FC_ORDER 0x8000 /* order */ +#define FC_ORDER_SHIFT 15 /* order shift */ + +/* sequence control macros */ +#define SEQNUM_SHIFT 4 /* seq. number shift */ +#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */ +#define FRAGNUM_MASK 0xF /* frag. 
number mask */ + +/* Frame Control type/subtype defs */ + +/* FC Types */ +#define FC_TYPE_MNG 0 /* management type */ +#define FC_TYPE_CTL 1 /* control type */ +#define FC_TYPE_DATA 2 /* data type */ + +/* Management Subtypes */ +#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */ +#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */ +#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */ +#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */ +#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */ +#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */ +#define FC_SUBTYPE_BEACON 8 /* beacon */ +#define FC_SUBTYPE_ATIM 9 /* ATIM */ +#define FC_SUBTYPE_DISASSOC 10 /* disassoc. */ +#define FC_SUBTYPE_AUTH 11 /* authentication */ +#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */ +#define FC_SUBTYPE_ACTION 13 /* action */ +#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */ + +/* Control Subtypes */ +#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */ +#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */ +#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */ +#define FC_SUBTYPE_PS_POLL 10 /* PS poll */ +#define FC_SUBTYPE_RTS 11 /* RTS */ +#define FC_SUBTYPE_CTS 12 /* CTS */ +#define FC_SUBTYPE_ACK 13 /* ACK */ +#define FC_SUBTYPE_CF_END 14 /* CF-END */ +#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */ + +/* Data Subtypes */ +#define FC_SUBTYPE_DATA 0 /* Data */ +#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */ +#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */ +#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */ +#define FC_SUBTYPE_NULL 4 /* Null */ +#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */ +#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */ +#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */ +#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */ +#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */ +#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */ +#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */ +#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */ +#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */ +#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */ + +/* Data Subtype Groups */ +#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0) +#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0) +#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0) +#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0) +#define FC_SUBTYPE_ANY_PSPOLL(s) (((s) & 10) != 0) + +/* Type/Subtype Combos */ +#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */ + +#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind */ + +#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) /* Subtype from FC */ +#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */ + +#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */ +#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */ +#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */ +#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. 
response */ +#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */ +#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */ +#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */ +#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */ +#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */ +#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */ +#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */ +#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */ +#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */ + +#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */ +#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */ +#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */ +#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */ +#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */ +#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */ +#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */ +#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */ +#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */ + +#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */ +#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */ +#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */ +#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */ +#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */ + +/* QoS Control Field */ + +/* 802.1D Priority */ +#define QOS_PRIO_SHIFT 0 /* QoS priority shift */ +#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */ +#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */ + +/* Traffic Identifier */ +#define QOS_TID_SHIFT 0 /* QoS TID shift */ +#define QOS_TID_MASK 0x000f /* QoS TID mask */ +#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */ + +/* End of Service Period (U-APSD) */ +#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */ +#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */ +#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* Qos EOSP */ + +/* Ack Policy */ +#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */ +#define QOS_ACK_NO_ACK 1 /* No Ack (eg mcast) */ +#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */ +#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */ +#define QOS_ACK_SHIFT 5 /* QoS ACK shift */ +#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */ +#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */ + +/* A-MSDU flag */ +#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */ +#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */ + +/* Management Frames */ + +/* Management Frame Constants */ + +/* Fixed fields */ +#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */ +#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */ +#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */ +#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. 
length */ +#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */ +#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */ +#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */ +#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */ +#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */ +#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */ + +/* DUR/ID field in assoc resp is 0xc000 | AID */ +#define DOT11_AID_MASK 0x3fff /* d11 AID mask */ + +/* Reason Codes */ +#define DOT11_RC_RESERVED 0 /* d11 RC reserved */ +#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */ +#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */ +#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station + * is leaving (or has left) IBSS or ESS + */ +#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */ +#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle + * all currently associated stations + */ +#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from + * nonauthenticated station + */ +#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from + * nonassociated station + */ +#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is + * leaving (or has left) BSS + */ +#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not + * authenticated with responding station + */ +#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */ +#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */ + +/* 12 is unused by STA but could be used by AP/GO */ +#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Mgmt */ + + +/* 32-39 are QSTA specific reasons added in 11e */ +#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */ +#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */ +#define DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames need ack */ +#define DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */ +#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or resetting) */ +#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */ +#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */ +#define DOT11_RC_TIMEOUT 39 /* timeout */ + +#define DOT11_RC_MESH_PEERING_CANCELLED 52 +#define DOT11_RC_MESH_MAX_PEERS 53 +#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54 +#define DOT11_RC_MESH_CLOSE_RECVD 55 +#define DOT11_RC_MESH_MAX_RETRIES 56 +#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57 +#define DOT11_RC_MESH_INVALID_GTK 58 +#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59 + +#define DOT11_RC_MESH_INVALID_SEC_CAP 60 +#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61 +#define DOT11_RC_MESH_PATHERR_NOFWINFO 62 +#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63 +#define DOT11_RC_MESH_MBSSMAC_EXISTS 64 +#define DOT11_RC_MESH_CHANSWITCH_REGREQ 65 +#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66 + +#define DOT11_RC_MAX 66 /* Reason codes > 66 are reserved */ + +#define DOT11_RC_TDLS_PEER_UNREACH 25 +#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26 + +/* Status Codes */ +#define DOT11_SC_SUCCESS 0 /* Successful */ +#define DOT11_SC_FAILURE 1 /* Unspecified failure */ +#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */ + /* schedule provided */ +#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */ +#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security 
disabled */ +#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */ +#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */ +#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested + * capabilities in the Capability + * Information field + */ +#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability + * to confirm that association exists + */ +#define DOT11_SC_ASSOC_FAIL 12 /* Association denied due to reason + * outside the scope of this standard + */ +#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support + * the specified authentication + * algorithm + */ +#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame + * with authentication transaction + * sequence number out of expected + * sequence + */ +#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of + * challenge failure + */ +#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout + * waiting for next frame in sequence + */ +#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is + * unable to handle additional + * associated stations + */ +#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting + * station not supporting all of the + * data rates in the BSSBasicRateSet + * parameter + */ +#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting + * station not supporting the Short + * Preamble option + */ +#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting + * station not supporting the PBCC + * Modulation option + */ +#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting + * station not supporting the Channel + * Agility option + */ +#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum + * Management capability is required. + */ +#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info + * in the Power Cap element is + * unacceptable. 
+ */ +#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info + * in the Supported Channel element is + * unacceptable + */ +#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting + * station not supporting the Short Slot + * Time option + */ +#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26 /* Association denied because requesting station + * does not support the DSSS-OFDM option + */ +#define DOT11_SC_ASSOC_HT_REQUIRED 27 /* Association denied because the requesting + * station does not support HT features + */ +#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP + * being unable to reach the R0 Key Holder + */ +#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later + */ +#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management + * frame policy violation + */ + +#define DOT11_SC_DECLINED 37 /* request declined */ +#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */ +#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */ +#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */ +#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */ +#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */ +#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */ +#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */ +#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */ + +#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59 /* adv proto not supported */ +#define DOT11_SC_NO_OUTSTAND_REQ 60 /* no outstanding req */ +#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61 /* no response from server */ +#define DOT11_SC_TIMEOUT 62 /* timeout */ +#define DOT11_SC_QUERY_RSP_TOO_LARGE 63 /* query rsp too large */ +#define DOT11_SC_SERVER_UNREACHABLE 65 /* server unreachable */ + +#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */ +#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */ +#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */ + +#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging token required */ +#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid finite cyclic group */ + +#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting + * station does not support VHT features. 
+ */ + +#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */ + +/* Info Elts, length of INFORMATION portion of Info Elts */ +#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */ +#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */ + +/* TIM Info element has 3 bytes fixed info in INFORMATION field, + * followed by 1 to 251 bytes of Partial Virtual Bitmap + */ +#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */ +#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */ +#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */ +#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */ +#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */ + +/* TLV defines */ +#define TLV_TAG_OFF 0 /* tag offset */ +#define TLV_LEN_OFF 1 /* length offset */ +#define TLV_HDR_LEN 2 /* header length */ +#define TLV_BODY_OFF 2 /* body offset */ +#define TLV_BODY_LEN_MAX 255 /* max body length */ + +/* Management Frame Information Element IDs */ +#define DOT11_MNG_SSID_ID 0 /* d11 management SSID id */ +#define DOT11_MNG_RATES_ID 1 /* d11 management rates id */ +#define DOT11_MNG_FH_PARMS_ID 2 /* d11 management FH parameter id */ +#define DOT11_MNG_DS_PARMS_ID 3 /* d11 management DS parameter id */ +#define DOT11_MNG_CF_PARMS_ID 4 /* d11 management CF parameter id */ +#define DOT11_MNG_TIM_ID 5 /* d11 management TIM id */ +#define DOT11_MNG_IBSS_PARMS_ID 6 /* d11 management IBSS parameter id */ +#define DOT11_MNG_COUNTRY_ID 7 /* d11 management country id */ +#define DOT11_MNG_HOPPING_PARMS_ID 8 /* d11 management hopping parameter id */ +#define DOT11_MNG_HOPPING_TABLE_ID 9 /* d11 management hopping table id */ +#define DOT11_MNG_REQUEST_ID 10 /* d11 management request id */ +#define DOT11_MNG_QBSS_LOAD_ID 11 /* d11 management QBSS Load id */ +#define DOT11_MNG_EDCA_PARAM_ID 12 /* 11E EDCA Parameter id */ +#define DOT11_MNG_TSPEC_ID 13 /* d11 management TSPEC id */ +#define DOT11_MNG_TCLAS_ID 14 /* d11 management TCLAS id */ +#define DOT11_MNG_CHALLENGE_ID 16 /* d11 management chanllenge id */ +#define DOT11_MNG_PWR_CONSTRAINT_ID 32 /* 11H PowerConstraint */ +#define DOT11_MNG_PWR_CAP_ID 33 /* 11H PowerCapability */ +#define DOT11_MNG_TPC_REQUEST_ID 34 /* 11H TPC Request */ +#define DOT11_MNG_TPC_REPORT_ID 35 /* 11H TPC Report */ +#define DOT11_MNG_SUPP_CHANNELS_ID 36 /* 11H Supported Channels */ +#define DOT11_MNG_CHANNEL_SWITCH_ID 37 /* 11H ChannelSwitch Announcement */ +#define DOT11_MNG_MEASURE_REQUEST_ID 38 /* 11H MeasurementRequest */ +#define DOT11_MNG_MEASURE_REPORT_ID 39 /* 11H MeasurementReport */ +#define DOT11_MNG_QUIET_ID 40 /* 11H Quiet */ +#define DOT11_MNG_IBSS_DFS_ID 41 /* 11H IBSS_DFS */ +#define DOT11_MNG_ERP_ID 42 /* d11 management ERP id */ +#define DOT11_MNG_TS_DELAY_ID 43 /* d11 management TS Delay id */ +#define DOT11_MNG_TCLAS_PROC_ID 44 /* d11 management TCLAS processing id */ +#define DOT11_MNG_HT_CAP 45 /* d11 mgmt HT cap id */ +#define DOT11_MNG_QOS_CAP_ID 46 /* 11E QoS Capability id */ +#define DOT11_MNG_NONERP_ID 47 /* d11 management NON-ERP id */ +#define DOT11_MNG_RSN_ID 48 /* d11 management RSN id */ +#define DOT11_MNG_EXT_RATES_ID 50 /* d11 management ext. 
rates id */ +#define DOT11_MNG_AP_CHREP_ID 51 /* 11k AP Channel report id */ +#define DOT11_MNG_NEIGHBOR_REP_ID 52 /* 11k & 11v Neighbor report id */ +#define DOT11_MNG_RCPI_ID 53 /* 11k RCPI */ +#define DOT11_MNG_MDIE_ID 54 /* 11r Mobility domain id */ +#define DOT11_MNG_FTIE_ID 55 /* 11r Fast Bss Transition id */ +#define DOT11_MNG_FT_TI_ID 56 /* 11r Timeout Interval id */ +#define DOT11_MNG_RDE_ID 57 /* 11r RIC Data Element id */ +#define DOT11_MNG_REGCLASS_ID 59 /* d11 management regulatory class id */ +#define DOT11_MNG_EXT_CSA_ID 60 /* d11 Extended CSA */ +#define DOT11_MNG_HT_ADD 61 /* d11 mgmt additional HT info */ +#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 /* d11 mgmt ext channel offset */ +#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID 63 /* 11k bss average access delay */ +#define DOT11_MNG_ANTENNA_ID 64 /* 11k antenna id */ +#define DOT11_MNG_RSNI_ID 65 /* 11k RSNI id */ +#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID 66 /* 11k measurement pilot tx info id */ +#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID 67 /* 11k bss aval admission cap id */ +#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID 68 /* 11k bss AC access delay id */ +#define DOT11_MNG_WAPI_ID 68 /* d11 management WAPI id */ +#define DOT11_MNG_TIME_ADVERTISE_ID 69 /* 11p time advertisement */ +#define DOT11_MNG_RRM_CAP_ID 70 /* 11k radio measurement capability */ +#define DOT11_MNG_MULTIPLE_BSSID_ID 71 /* 11k multiple BSSID id */ +#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 /* d11 mgmt OBSS Coexistence INFO */ +#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */ +#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */ +#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */ +#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */ +#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */ +#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */ +#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID 90 /* 11v bss max idle id */ +#define DOT11_MNG_TFS_REQUEST_ID 91 /* 11v tfs request id */ +#define DOT11_MNG_TFS_RESPONSE_ID 92 /* 11v tfs response id */ +#define DOT11_MNG_WNM_SLEEP_MODE_ID 93 /* 11v wnm-sleep mode id */ +#define DOT11_MNG_TIMBC_REQ_ID 94 /* 11v TIM broadcast request id */ +#define DOT11_MNG_TIMBC_RESP_ID 95 /* 11v TIM broadcast response id */ +#define DOT11_MNG_CHANNEL_USAGE 97 /* 11v channel usage */ +#define DOT11_MNG_TIME_ZONE_ID 98 /* 11v time zone */ +#define DOT11_MNG_DMS_REQUEST_ID 99 /* 11v dms request id */ +#define DOT11_MNG_DMS_RESPONSE_ID 100 /* 11v dms response id */ +#define DOT11_MNG_LINK_IDENTIFIER_ID 101 /* 11z TDLS Link Identifier IE */ +#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 /* 11z TDLS Wakeup Schedule IE */ +#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 /* 11z TDLS Channel Switch Timing IE */ +#define DOT11_MNG_PTI_CONTROL_ID 105 /* 11z TDLS PTI Control IE */ +#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */ +#define DOT11_MNG_INTERWORKING_ID 107 /* 11u interworking */ +#define DOT11_MNG_ADVERTISEMENT_ID 108 /* 11u advertisement protocol */ +#define DOT11_MNG_EXP_BW_REQ_ID 109 /* 11u expedited bandwith request */ +#define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */ +#define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */ +#define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */ +#define DOT11_MNG_MESH_CONFIG 113 /* Mesh Configuration */ +#define DOT11_MNG_MESH_ID 114 /* Mesh ID */ +#define DOT11_MNG_MESH_PEER_MGMT_ID 117 /* Mesh PEER MGMT IE */ + +#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */ +#define 
DOT11_MNG_EXT_PREQ_ID 130 /* Mesh PREQ IE */ +#define DOT11_MNG_EXT_PREP_ID 131 /* Mesh PREP IE */ +#define DOT11_MNG_EXT_PERR_ID 132 /* Mesh PERR IE */ +#define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */ +#define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */ +#define DOT11_MNG_EXT_BSSLOAD_ID 193 /* d11 mgmt VHT extended bss load id */ +#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */ +#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */ +#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */ +#define DOT11_MNG_AID_ID 197 /* Association ID IE */ +#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */ +#define DOT11_MNG_FTM_PARAMS_ID 206 + +#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */ +#define DOT11_MNG_PROPR_ID 221 +/* should start using this one instead of above two */ +#define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */ + +/* Rate Defines */ + +/* Valid rates for the Supported Rates and Extended Supported Rates IEs. + * Encoding is the rate in 500kbps units, rounding up for fractional values. + * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values. + * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates. + * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27}, + * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices. + */ + +#define DOT11_RATE_1M 2 /* 1 Mbps in 500kbps units */ +#define DOT11_RATE_2M 4 /* 2 Mbps in 500kbps units */ +#define DOT11_RATE_5M5 11 /* 5.5 Mbps in 500kbps units */ +#define DOT11_RATE_11M 22 /* 11 Mbps in 500kbps units */ +#define DOT11_RATE_6M 12 /* 6 Mbps in 500kbps units */ +#define DOT11_RATE_9M 18 /* 9 Mbps in 500kbps units */ +#define DOT11_RATE_12M 24 /* 12 Mbps in 500kbps units */ +#define DOT11_RATE_18M 36 /* 18 Mbps in 500kbps units */ +#define DOT11_RATE_24M 48 /* 24 Mbps in 500kbps units */ +#define DOT11_RATE_36M 72 /* 36 Mbps in 500kbps units */ +#define DOT11_RATE_48M 96 /* 48 Mbps in 500kbps units */ +#define DOT11_RATE_54M 108 /* 54 Mbps in 500kbps units */ +#define DOT11_RATE_MAX 108 /* highest rate (54 Mbps) in 500kbps units */ + +/* Supported Rates and Extended Supported Rates IEs + * The supported rates octets are defined with the MSB indicating a Basic Rate + * and bits 0-6 as the rate value + */ +#define DOT11_RATE_BASIC 0x80 /* flag for a Basic Rate */ +#define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */ + +/* BSS Membership Selector parameters + * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3 + * These selector values are advertised in Supported Rates and Extended Supported Rates IEs + * in the supported rates list with the Basic rate bit set. + * Constants below include the basic bit. 
+ */ +#define DOT11_BSS_MEMBERSHIP_HT 0xFF /* Basic 0x80 + 127, HT Required to join */ +#define DOT11_BSS_MEMBERSHIP_VHT 0xFE /* Basic 0x80 + 126, VHT Required to join */ + +/* ERP info element bit values */ +#define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */ +#define DOT11_MNG_NONERP_PRESENT 0x01 /* NonERP (802.11b) STAs are present + *in the BSS + */ +#define DOT11_MNG_USE_PROTECTION 0x02 /* Use protection mechanisms for + *ERP-OFDM frames + */ +#define DOT11_MNG_BARKER_PREAMBLE 0x04 /* Short Preambles: 0 == allowed, + * 1 == not allowed + */ +/* TS Delay element offset & size */ +#define DOT11_MGN_TS_DELAY_LEN 4 /* length of TS DELAY IE */ +#define TS_DELAY_FIELD_SIZE 4 /* TS DELAY field size */ + +/* Capability Information Field */ +#define DOT11_CAP_ESS 0x0001 /* d11 cap. ESS */ +#define DOT11_CAP_IBSS 0x0002 /* d11 cap. IBSS */ +#define DOT11_CAP_POLLABLE 0x0004 /* d11 cap. pollable */ +#define DOT11_CAP_POLL_RQ 0x0008 /* d11 cap. poll request */ +#define DOT11_CAP_PRIVACY 0x0010 /* d11 cap. privacy */ +#define DOT11_CAP_SHORT 0x0020 /* d11 cap. short */ +#define DOT11_CAP_PBCC 0x0040 /* d11 cap. PBCC */ +#define DOT11_CAP_AGILITY 0x0080 /* d11 cap. agility */ +#define DOT11_CAP_SPECTRUM 0x0100 /* d11 cap. spectrum */ +#define DOT11_CAP_QOS 0x0200 /* d11 cap. qos */ +#define DOT11_CAP_SHORTSLOT 0x0400 /* d11 cap. shortslot */ +#define DOT11_CAP_APSD 0x0800 /* d11 cap. apsd */ +#define DOT11_CAP_RRM 0x1000 /* d11 cap. 11k radio measurement */ +#define DOT11_CAP_CCK_OFDM 0x2000 /* d11 cap. CCK/OFDM */ +#define DOT11_CAP_DELAY_BA 0x4000 /* d11 cap. delayed block ack */ +#define DOT11_CAP_IMMEDIATE_BA 0x8000 /* d11 cap. immediate block ack */ + +/* Extended capabilities IE bitfields */ +/* 20/40 BSS Coexistence Management support bit position */ +#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0 +/* Extended Channel Switching support bit position */ +#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING 2 +/* scheduled PSMP support bit position */ +#define DOT11_EXT_CAP_SPSMP 6 +/* Flexible Multicast Service */ +#define DOT11_EXT_CAP_FMS 11 +/* proxy ARP service support bit position */ +#define DOT11_EXT_CAP_PROXY_ARP 12 +/* Civic Location */ +#define DOT11_EXT_CAP_CIVIC_LOC 14 +/* Geospatial Location */ +#define DOT11_EXT_CAP_LCI 15 +/* Traffic Filter Service */ +#define DOT11_EXT_CAP_TFS 16 +/* WNM-Sleep Mode */ +#define DOT11_EXT_CAP_WNM_SLEEP 17 +/* TIM Broadcast service */ +#define DOT11_EXT_CAP_TIMBC 18 +/* BSS Transition Management support bit position */ +#define DOT11_EXT_CAP_BSSTRANS_MGMT 19 +/* Direct Multicast Service */ +#define DOT11_EXT_CAP_DMS 26 +/* Interworking support bit position */ +#define DOT11_EXT_CAP_IW 31 +/* QoS map support bit position */ +#define DOT11_EXT_CAP_QOS_MAP 32 +/* service Interval granularity bit position and mask */ +#define DOT11_EXT_CAP_SI 41 +#define DOT11_EXT_CAP_SI_MASK 0x0E +/* WNM notification */ +#define DOT11_EXT_CAP_WNM_NOTIF 46 +/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */ +#define DOT11_EXT_CAP_OPER_MODE_NOTIF 62 +/* Fine timing measurement - D3.0 */ +#define DOT11_EXT_CAP_FTM_RESPONDER 70 +#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */ +#ifdef WL_FTM +#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */ +#else +#define DOT11_EXT_CAP_MAX_BIT_IDX 62 /* !!!update this please!!! 
*/ +#endif + +/* extended capability */ +#ifndef DOT11_EXTCAP_LEN_MAX +#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3) +#endif + +BWL_PRE_PACKED_STRUCT struct dot11_extcap { + uint8 extcap[DOT11_EXTCAP_LEN_MAX]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap dot11_extcap_t; + +/* VHT Operating mode bit fields - (11ac D3.0 - 8.4.1.50) */ +#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0 +#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3 +#define DOT11_OPER_MODE_RXNSS_SHIFT 4 +#define DOT11_OPER_MODE_RXNSS_MASK 0x70 +#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7 +#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80 + +#define DOT11_OPER_MODE(type, nss, chanw) (\ + ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\ + DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\ + (((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\ + ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\ + DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)) + +#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \ + (((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\ + >> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT) +#define DOT11_OPER_MODE_RXNSS(mode) \ + ((((mode) & DOT11_OPER_MODE_RXNSS_MASK) \ + >> DOT11_OPER_MODE_RXNSS_SHIFT) + 1) +#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \ + (((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\ + >> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT) + +#define DOT11_OPER_MODE_20MHZ 0 +#define DOT11_OPER_MODE_40MHZ 1 +#define DOT11_OPER_MODE_80MHZ 2 +#define DOT11_OPER_MODE_160MHZ 3 +#define DOT11_OPER_MODE_8080MHZ 3 + +#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_160MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ) + +/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */ +BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie { + uint8 mode; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; + +#define DOT11_OPER_MODE_NOTIF_IE_LEN 1 + +/* Extended Capability Information Field */ +#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 /* 20/40 BSS Coexistence Management support */ + +/* + * Action Frame Constants + */ +#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action field */ +#define DOT11_ACTION_CAT_OFF 0 /* category offset */ +#define DOT11_ACTION_ACT_OFF 1 /* action offset */ + +/* Action Category field (sec 8.4.1.11) */ +#define DOT11_ACTION_CAT_ERR_MASK 0x80 /* category error mask */ +#define DOT11_ACTION_CAT_MASK 0x7F /* category mask */ +#define DOT11_ACTION_CAT_SPECT_MNG 0 /* category spectrum management */ +#define DOT11_ACTION_CAT_QOS 1 /* category QoS */ +#define DOT11_ACTION_CAT_DLS 2 /* category DLS */ +#define DOT11_ACTION_CAT_BLOCKACK 3 /* category block ack */ +#define DOT11_ACTION_CAT_PUBLIC 4 /* category public */ +#define DOT11_ACTION_CAT_RRM 5 /* category radio measurements */ +#define DOT11_ACTION_CAT_FBT 6 /* category fast bss transition */ +#define DOT11_ACTION_CAT_HT 7 /* category for HT */ +#define DOT11_ACTION_CAT_SA_QUERY 8 /* security association query */ +#define DOT11_ACTION_CAT_PDPA 9 /* protected dual 
of public action */ +#define DOT11_ACTION_CAT_WNM 10 /* category for WNM */ +#define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */ +#define DOT11_ACTION_CAT_MESH 13 /* category for Mesh */ +#define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */ +#define DOT11_ACTION_NOTIFICATION 17 +#define DOT11_ACTION_CAT_VHT 21 /* VHT action */ +#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */ +#define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */ + +/* Spectrum Management Action IDs (sec 7.4.1) */ +#define DOT11_SM_ACTION_M_REQ 0 /* d11 action measurement request */ +#define DOT11_SM_ACTION_M_REP 1 /* d11 action measurement response */ +#define DOT11_SM_ACTION_TPC_REQ 2 /* d11 action TPC request */ +#define DOT11_SM_ACTION_TPC_REP 3 /* d11 action TPC response */ +#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ +#define DOT11_SM_ACTION_EXT_CSA 5 /* d11 extened CSA for 11n */ + +/* QoS action ids */ +#define DOT11_QOS_ACTION_ADDTS_REQ 0 /* d11 action ADDTS request */ +#define DOT11_QOS_ACTION_ADDTS_RESP 1 /* d11 action ADDTS response */ +#define DOT11_QOS_ACTION_DELTS 2 /* d11 action DELTS */ +#define DOT11_QOS_ACTION_SCHEDULE 3 /* d11 action schedule */ +#define DOT11_QOS_ACTION_QOS_MAP 4 /* d11 action QOS map */ + +/* HT action ids */ +#define DOT11_ACTION_ID_HT_CH_WIDTH 0 /* notify channel width action id */ +#define DOT11_ACTION_ID_HT_MIMO_PS 1 /* mimo ps action id */ + +/* Public action ids */ +#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */ +#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ +#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */ +#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */ +#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */ + +/* Block Ack action types */ +#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */ +#define DOT11_BA_ACTION_ADDBA_RESP 1 /* ADDBA Resp action frame type */ +#define DOT11_BA_ACTION_DELBA 2 /* DELBA action frame type */ + +/* ADDBA action parameters */ +#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 /* AMSDU supported under BA */ +#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 /* policy mask(ack vs delayed) */ +#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 /* policy shift */ +#define DOT11_ADDBA_PARAM_TID_MASK 0x003c /* tid mask */ +#define DOT11_ADDBA_PARAM_TID_SHIFT 2 /* tid shift */ +#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 /* buffer size mask */ +#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 /* buffer size shift */ + +#define DOT11_ADDBA_POLICY_DELAYED 0 /* delayed BA policy */ +#define DOT11_ADDBA_POLICY_IMMEDIATE 1 /* immediate BA policy */ + +/* Fast Transition action types */ +#define DOT11_FT_ACTION_FT_RESERVED 0 +#define DOT11_FT_ACTION_FT_REQ 1 /* FBT request - for over-the-DS FBT */ +#define DOT11_FT_ACTION_FT_RES 2 /* FBT response - for over-the-DS FBT */ +#define DOT11_FT_ACTION_FT_CON 3 /* FBT confirm - for OTDS with RRP */ +#define DOT11_FT_ACTION_FT_ACK 4 /* FBT ack */ + +/* DLS action types */ +#define DOT11_DLS_ACTION_REQ 0 /* DLS Request */ +#define DOT11_DLS_ACTION_RESP 1 /* DLS Response */ +#define DOT11_DLS_ACTION_TD 2 /* DLS Teardown */ + +/* Wireless Network Management (WNM) action types */ +#define DOT11_WNM_ACTION_EVENT_REQ 0 +#define DOT11_WNM_ACTION_EVENT_REP 1 +#define DOT11_WNM_ACTION_DIAG_REQ 2 +#define DOT11_WNM_ACTION_DIAG_REP 3 +#define DOT11_WNM_ACTION_LOC_CFG_REQ 4 +#define DOT11_WNM_ACTION_LOC_RFG_RESP 5 +#define DOT11_WNM_ACTION_BSSTRANS_QUERY 6 
+#define DOT11_WNM_ACTION_BSSTRANS_REQ 7 +#define DOT11_WNM_ACTION_BSSTRANS_RESP 8 +#define DOT11_WNM_ACTION_FMS_REQ 9 +#define DOT11_WNM_ACTION_FMS_RESP 10 +#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11 +#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12 +#define DOT11_WNM_ACTION_TFS_REQ 13 +#define DOT11_WNM_ACTION_TFS_RESP 14 +#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ 15 +#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16 +#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17 +#define DOT11_WNM_ACTION_TIMBC_REQ 18 +#define DOT11_WNM_ACTION_TIMBC_RESP 19 +#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20 +#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21 +#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22 +#define DOT11_WNM_ACTION_DMS_REQ 23 +#define DOT11_WNM_ACTION_DMS_RESP 24 +#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25 +#define DOT11_WNM_ACTION_NOTFCTN_REQ 26 +#define DOT11_WNM_ACTION_NOTFCTN_RESP 27 +#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP 28 + +/* Unprotected Wireless Network Management (WNM) action types */ +#define DOT11_UWNM_ACTION_TIM 0 +#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT 1 + +#define DOT11_MNG_COUNTRY_ID_LEN 3 + +/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */ +#define DOT11_VHT_ACTION_CBF 0 /* Compressed Beamforming */ +#define DOT11_VHT_ACTION_GID_MGMT 1 /* Group ID Management */ +#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2 /* Operating mode notif'n */ + +/** DLS Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_dls_req { + uint8 category; /* category of action frame (2) */ + uint8 action; /* DLS action: req (0) */ + struct ether_addr da; /* destination address */ + struct ether_addr sa; /* source address */ + uint16 cap; /* capability */ + uint16 timeout; /* timeout value */ + uint8 data[1]; /* IEs: supported rates, extended supported rates, HT cap */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dls_req dot11_dls_req_t; +#define DOT11_DLS_REQ_LEN 18 /* Fixed length */ + +/** DLS response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_dls_resp { + uint8 category; /* category of action frame (2) */ + uint8 action; /* DLS action: resp (1) */ + uint16 status; /* status code field */ + struct ether_addr da; /* destination address */ + struct ether_addr sa; /* source address */ + uint8 data[1]; /* optional: capability, rate ... */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dls_resp dot11_dls_resp_t; +#define DOT11_DLS_RESP_LEN 16 /* Fixed length */ + + +/* ************* 802.11v related definitions. 
************* */ + +/** BSS Management Transition Query frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: trans_query (6) */ + uint8 token; /* dialog token */ + uint8 reason; /* transition query reason */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bsstrans_query dot11_bsstrans_query_t; +#define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */ + +/* BTM transition reason */ +#define DOT11_BSSTRANS_REASON_UNSPECIFIED 0 +#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS 1 +#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY 2 +#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY 3 +#define DOT11_BSSTRANS_REASON_FIRST_ASSOC 4 +#define DOT11_BSSTRANS_REASON_LOAD_BALANCING 5 +#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND 6 +#define DOT11_BSSTRANS_REASON_DEAUTH_RX 7 +#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL 8 +#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL 9 +#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL 10 +#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL 11 +#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS 12 +#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX 13 +#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX 14 +#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL 15 +#define DOT11_BSSTRANS_REASON_LOW_RSSI 16 +#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211 17 +#define DOT11_BSSTRANS_REASON_RX_BTM_REQ 18 +#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED 19 +#define DOT11_BSSTRANS_REASON_LEAVING_ESS 20 + +/** BSS Management Transition Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: trans_req (7) */ + uint8 token; /* dialog token */ + uint8 reqmode; /* transition request mode */ + uint16 disassoc_tmr; /* disassociation timer */ + uint8 validity_intrvl; /* validity interval */ + uint8 data[1]; /* optional: BSS term duration, ... 
*/ + /* ...session info URL, candidate list */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bsstrans_req dot11_bsstrans_req_t; +#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */ + +/* BSS Mgmt Transition Request Mode Field - 802.11v */ +#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01 +#define DOT11_BSSTRANS_REQMODE_ABRIDGED 0x02 +#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT 0x04 +#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL 0x08 +#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT 0x10 + +/** BSS Management transition response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: trans_resp (8) */ + uint8 token; /* dialog token */ + uint8 status; /* transition status */ + uint8 term_delay; /* BSS termination delay */ + uint8 data[1]; /* optional: BSSID target, candidate list */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t; +#define DOT11_BSSTRANS_RESP_LEN 5 /* Fixed length */ + +/* BSS Mgmt Transition Response Status Field */ +#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT 0 +#define DOT11_BSSTRANS_RESP_STATUS_REJECT 1 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN 2 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP 3 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED 4 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ 5 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED 6 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS 7 +#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS 8 + + +/** BSS Max Idle Period element */ +BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie { + uint8 id; /* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */ + uint8 len; + uint16 max_idle_period; /* in unit of 1000 TUs */ + uint8 idle_opt; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t; +#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN 3 /* bss max idle period IE size */ +#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED 1 /* BSS max idle option */ + +/** TIM Broadcast request element */ +BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie { + uint8 id; /* 94, DOT11_MNG_TIMBC_REQ_ID */ + uint8 len; + uint8 interval; /* in unit of beacon interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t; +#define DOT11_TIMBC_REQ_IE_LEN 1 /* Fixed length */ + +/** TIM Broadcast request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_timbc_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* TIM broadcast request element */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timbc_req dot11_timbc_req_t; +#define DOT11_TIMBC_REQ_LEN 3 /* Fixed length */ + +/** TIM Broadcast response element */ +BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie { + uint8 id; /* 95, DOT11_MNG_TIMBC_RESP_ID */ + uint8 len; + uint8 status; /* status of add request */ + uint8 interval; /* in unit of beacon interval */ + int32 offset; /* in unit of ms */ + uint16 high_rate; /* in unit of 0.5 Mb/s */ + uint16 low_rate; /* in unit of 0.5 Mb/s */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t; +#define DOT11_TIMBC_DENY_RESP_IE_LEN 1 /* Deny. Fixed length */ +#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN 10 /* Accept. 
Fixed length */ + +#define DOT11_TIMBC_STATUS_ACCEPT 0 +#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP 1 +#define DOT11_TIMBC_STATUS_DENY 2 +#define DOT11_TIMBC_STATUS_OVERRIDDEN 3 +#define DOT11_TIMBC_STATUS_RESERVED 4 + +/** TIM Broadcast response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* TIM broadcast response element */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timbc_resp dot11_timbc_resp_t; +#define DOT11_TIMBC_RESP_LEN 3 /* Fixed length */ + +/** TIM element */ +BWL_PRE_PACKED_STRUCT struct dot11_tim_ie { + uint8 id; /* 5, DOT11_MNG_TIM_ID */ + uint8 len; /* 4 - 255 */ + uint8 dtim_count; /* DTIM decrementing counter */ + uint8 dtim_period; /* DTIM period */ + uint8 bitmap_control; /* AID 0 + bitmap offset */ + uint8 pvb[1]; /* Partial Virtual Bitmap, variable length */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tim_ie dot11_tim_ie_t; +#define DOT11_TIM_IE_FIXED_LEN 3 /* Fixed length, without id and len */ +#define DOT11_TIM_IE_FIXED_TOTAL_LEN 5 /* Fixed length, with id and len */ + +/** TIM Broadcast frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_timbc { + uint8 category; /* category of action frame (11) */ + uint8 action; /* action: TIM (0) */ + uint8 check_beacon; /* need to check-beacon */ + uint8 tsf[8]; /* Time Synchronization Function */ + dot11_tim_ie_t tim_ie; /* TIM element */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timbc dot11_timbc_t; +#define DOT11_TIMBC_HDR_LEN (sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t)) +#define DOT11_TIMBC_FIXED_LEN (sizeof(dot11_timbc_t) - 1) /* Fixed length */ +#define DOT11_TIMBC_LEN 11 /* Fixed length */ + +/** TCLAS frame classifier type */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr { + uint8 type; + uint8 mask; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t; +#define DOT11_TCLAS_FC_HDR_LEN 2 /* Fixed length */ + +#define DOT11_TCLAS_MASK_0 0x1 +#define DOT11_TCLAS_MASK_1 0x2 +#define DOT11_TCLAS_MASK_2 0x4 +#define DOT11_TCLAS_MASK_3 0x8 +#define DOT11_TCLAS_MASK_4 0x10 +#define DOT11_TCLAS_MASK_5 0x20 +#define DOT11_TCLAS_MASK_6 0x40 +#define DOT11_TCLAS_MASK_7 0x80 + +#define DOT11_TCLAS_FC_0_ETH 0 +#define DOT11_TCLAS_FC_1_IP 1 +#define DOT11_TCLAS_FC_2_8021Q 2 +#define DOT11_TCLAS_FC_3_OFFSET 3 +#define DOT11_TCLAS_FC_4_IP_HIGHER 4 +#define DOT11_TCLAS_FC_5_8021D 5 + +/** TCLAS frame classifier type 0 parameters for Ethernet */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth { + uint8 type; + uint8 mask; + uint8 sa[ETHER_ADDR_LEN]; + uint8 da[ETHER_ADDR_LEN]; + uint16 eth_type; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t; +#define DOT11_TCLAS_FC_0_ETH_LEN 16 + +/** TCLAS frame classifier type 1 parameters for IPV4 */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 { + uint8 type; + uint8 mask; + uint8 version; + uint32 src_ip; + uint32 dst_ip; + uint16 src_port; + uint16 dst_port; + uint8 dscp; + uint8 protocol; + uint8 reserved; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t; +#define DOT11_TCLAS_FC_1_IPV4_LEN 18 + +/** TCLAS frame classifier type 2 parameters for 802.1Q */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q { + uint8 type; + uint8 mask; + uint16 tci; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t; +#define 
DOT11_TCLAS_FC_2_8021Q_LEN 4 + +/** TCLAS frame classifier type 3 parameters for filter offset */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter { + uint8 type; + uint8 mask; + uint16 offset; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t; +#define DOT11_TCLAS_FC_3_FILTER_LEN 4 + +/** TCLAS frame classifier type 4 parameters for IPV4 is the same as TCLAS type 1 */ +typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t; +#define DOT11_TCLAS_FC_4_IPV4_LEN DOT11_TCLAS_FC_1_IPV4_LEN + +/** TCLAS frame classifier type 4 parameters for IPV6 */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 { + uint8 type; + uint8 mask; + uint8 version; + uint8 saddr[16]; + uint8 daddr[16]; + uint16 src_port; + uint16 dst_port; + uint8 dscp; + uint8 nexthdr; + uint8 flow_lbl[3]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t; +#define DOT11_TCLAS_FC_4_IPV6_LEN 44 + +/** TCLAS frame classifier type 5 parameters for 802.1D */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d { + uint8 type; + uint8 mask; + uint8 pcp; + uint8 cfi; + uint16 vid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t; +#define DOT11_TCLAS_FC_5_8021D_LEN 6 + +/** TCLAS frame classifier type parameters */ +BWL_PRE_PACKED_STRUCT union dot11_tclas_fc { + uint8 data[1]; + dot11_tclas_fc_hdr_t hdr; + dot11_tclas_fc_0_eth_t t0_eth; + dot11_tclas_fc_1_ipv4_t t1_ipv4; + dot11_tclas_fc_2_8021q_t t2_8021q; + dot11_tclas_fc_3_filter_t t3_filter; + dot11_tclas_fc_4_ipv4_t t4_ipv4; + dot11_tclas_fc_4_ipv6_t t4_ipv6; + dot11_tclas_fc_5_8021d_t t5_8021d; +} BWL_POST_PACKED_STRUCT; +typedef union dot11_tclas_fc dot11_tclas_fc_t; + +#define DOT11_TCLAS_FC_MIN_LEN 4 /* Classifier Type 2 has the min size */ +#define DOT11_TCLAS_FC_MAX_LEN 254 + +/** TCLAS element */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie { + uint8 id; /* 14, DOT11_MNG_TCLAS_ID */ + uint8 len; + uint8 user_priority; + dot11_tclas_fc_t fc; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_ie dot11_tclas_ie_t; +#define DOT11_TCLAS_IE_LEN 3 /* Fixed length, include id and len */ + +/** TCLAS processing element */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie { + uint8 id; /* 44, DOT11_MNG_TCLAS_PROC_ID */ + uint8 len; + uint8 process; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t; +#define DOT11_TCLAS_PROC_IE_LEN 3 /* Fixed length, include id and len */ + +#define DOT11_TCLAS_PROC_MATCHALL 0 /* All high level element need to match */ +#define DOT11_TCLAS_PROC_MATCHONE 1 /* One high level element need to match */ +#define DOT11_TCLAS_PROC_NONMATCH 2 /* Non match to any high level element */ + + +/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */ +#define DOT11_TSPEC_IE_LEN 57 /* Fixed length */ + +/** TFS request element */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie { + uint8 id; /* 91, DOT11_MNG_TFS_REQUEST_ID */ + uint8 len; + uint8 tfs_id; + uint8 actcode; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t; +#define DOT11_TFS_REQ_IE_LEN 2 /* Fixed length, without id and len */ + +/** TFS request action codes (bitfield) */ +#define DOT11_TFS_ACTCODE_DELETE 1 +#define DOT11_TFS_ACTCODE_NOTIFY 2 + +/** TFS request subelement IDs */ +#define DOT11_TFS_REQ_TFS_SE_ID 1 +#define DOT11_TFS_REQ_VENDOR_SE_ID 221 + +/** TFS subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_se { + uint8 sub_id; + uint8 len; + 
uint8 data[1]; /* TCLAS element(s) + optional TCLAS proc */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_se dot11_tfs_se_t; + + +/** TFS response element */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie { + uint8 id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */ + uint8 len; + uint8 tfs_id; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t; +#define DOT11_TFS_RESP_IE_LEN 1 /* Fixed length, without id and len */ + +/** TFS response subelement IDs (same subelements, but different IDs than in TFS request) */ +#define DOT11_TFS_RESP_TFS_STATUS_SE_ID 1 +#define DOT11_TFS_RESP_TFS_SE_ID 2 +#define DOT11_TFS_RESP_VENDOR_SE_ID 221 + +/** TFS status subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se { + uint8 sub_id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */ + uint8 len; + uint8 resp_st; + uint8 data[1]; /* Potential dot11_tfs_se_t included */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_status_se dot11_tfs_status_se_t; +#define DOT11_TFS_STATUS_SE_LEN 1 /* Fixed length, without id and len */ + +/* Following Definition should be merged to FMS_TFS macro below */ +/* TFS Response status code. Identical to FMS Element status, without N/A */ +#define DOT11_TFS_STATUS_ACCEPT 0 +#define DOT11_TFS_STATUS_DENY_FORMAT 1 +#define DOT11_TFS_STATUS_DENY_RESOURCE 2 +#define DOT11_TFS_STATUS_DENY_POLICY 4 +#define DOT11_TFS_STATUS_DENY_UNSPECIFIED 5 +#define DOT11_TFS_STATUS_ALTPREF_POLICY 7 +#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP 14 + +/* FMS Element Status and TFS Response Status Definition */ +#define DOT11_FMS_TFS_STATUS_ACCEPT 0 +#define DOT11_FMS_TFS_STATUS_DENY_FORMAT 1 +#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE 2 +#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI 3 +#define DOT11_FMS_TFS_STATUS_DENY_POLICY 4 +#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED 5 +#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI 6 +#define DOT11_FMS_TFS_STATUS_ALT_POLICY 7 +#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI 8 +#define DOT11_FMS_TFS_STATUS_ALT_MCRATE 9 +#define DOT11_FMS_TFS_STATUS_TERM_POLICY 10 +#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE 11 +#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO 12 +#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI 13 +#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP 14 + +/** TFS Management Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: TFS request (13) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_req dot11_tfs_req_t; +#define DOT11_TFS_REQ_LEN 3 /* Fixed length */ + +/** TFS Management Response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: TFS response (14) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_resp dot11_tfs_resp_t; +#define DOT11_TFS_RESP_LEN 3 /* Fixed length */ + +/** TFS Management Notify frame request header */ +BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: TFS notify request (15) */ + uint8 tfs_id_cnt; /* TFS IDs count */ + uint8 tfs_id[1]; /* Array of TFS IDs */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t; +#define DOT11_TFS_NOTIFY_REQ_LEN 3 /* Fixed length */ + +/** TFS Management Notify frame response header */ +BWL_PRE_PACKED_STRUCT struct 
dot11_tfs_notify_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: TFS notify response (28) */ + uint8 tfs_id_cnt; /* TFS IDs count */ + uint8 tfs_id[1]; /* Array of TFS IDs */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t; +#define DOT11_TFS_NOTIFY_RESP_LEN 3 /* Fixed length */ + + +/** WNM-Sleep Management Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: wnm-sleep request (16) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t; +#define DOT11_WNM_SLEEP_REQ_LEN 3 /* Fixed length */ + +/** WNM-Sleep Management Response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: wnm-sleep response (17) */ + uint8 token; /* dialog token */ + uint16 key_len; /* key data length */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t; +#define DOT11_WNM_SLEEP_RESP_LEN 5 /* Fixed length */ + +#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK 0 +#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK 1 + +BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk { + uint8 sub_id; + uint8 len; + uint16 key_info; + uint8 key_length; + uint8 rsc[8]; + uint8 key[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t; +#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN 11 /* without sub_id, len, and key */ +#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN 43 /* without sub_id and len */ + +BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk { + uint8 sub_id; + uint8 len; + uint16 key_id; + uint8 pn[6]; + uint8 key[16]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t; +#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24 /* Fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie { + uint8 id; /* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */ + uint8 len; + uint8 act_type; + uint8 resp_status; + uint16 interval; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t; +#define DOT11_WNM_SLEEP_IE_LEN 4 /* Fixed length */ + +#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER 0 +#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT 1 + +#define DOT11_WNM_SLEEP_RESP_ACCEPT 0 +#define DOT11_WNM_SLEEP_RESP_UPDATE 1 +#define DOT11_WNM_SLEEP_RESP_DENY 2 +#define DOT11_WNM_SLEEP_RESP_DENY_TEMP 3 +#define DOT11_WNM_SLEEP_RESP_DENY_KEY 4 +#define DOT11_WNM_SLEEP_RESP_DENY_INUSE 5 +#define DOT11_WNM_SLEEP_RESP_LAST 6 + +/** DMS Management Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_dms_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: dms request (23) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dms_req dot11_dms_req_t; +#define DOT11_DMS_REQ_LEN 3 /* Fixed length */ + +/** DMS Management Response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_dms_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: dms response (24) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dms_resp dot11_dms_resp_t; +#define DOT11_DMS_RESP_LEN 3 /* Fixed length */ + +/** DMS request element 
*/ +BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie { + uint8 id; /* 99, DOT11_MNG_DMS_REQUEST_ID */ + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dms_req_ie dot11_dms_req_ie_t; +#define DOT11_DMS_REQ_IE_LEN 2 /* Fixed length */ + +/** DMS response element */ +BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie { + uint8 id; /* 100, DOT11_MNG_DMS_RESPONSE_ID */ + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t; +#define DOT11_DMS_RESP_IE_LEN 2 /* Fixed length */ + +/** DMS request descriptor */ +BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc { + uint8 dms_id; + uint8 len; + uint8 type; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dms_req_desc dot11_dms_req_desc_t; +#define DOT11_DMS_REQ_DESC_LEN 3 /* Fixed length */ + +#define DOT11_DMS_REQ_TYPE_ADD 0 +#define DOT11_DMS_REQ_TYPE_REMOVE 1 +#define DOT11_DMS_REQ_TYPE_CHANGE 2 + +/** DMS response status */ +BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st { + uint8 dms_id; + uint8 len; + uint8 type; + uint16 lsc; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dms_resp_st dot11_dms_resp_st_t; +#define DOT11_DMS_RESP_STATUS_LEN 5 /* Fixed length */ + +#define DOT11_DMS_RESP_TYPE_ACCEPT 0 +#define DOT11_DMS_RESP_TYPE_DENY 1 +#define DOT11_DMS_RESP_TYPE_TERM 2 + +#define DOT11_DMS_RESP_LSC_UNSUPPORTED 0xFFFF + +/** FMS Management Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: fms request (9) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_req dot11_fms_req_t; +#define DOT11_FMS_REQ_LEN 3 /* Fixed length */ + +/** FMS Management Response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_resp { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: fms response (10) */ + uint8 token; /* dialog token */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_resp dot11_fms_resp_t; +#define DOT11_FMS_RESP_LEN 3 /* Fixed length */ + +/** FMS Descriptor element */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_desc { + uint8 id; + uint8 len; + uint8 num_fms_cnt; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_desc dot11_fms_desc_t; +#define DOT11_FMS_DESC_LEN 1 /* Fixed length */ + +#define DOT11_FMS_CNTR_MAX 0x8 +#define DOT11_FMS_CNTR_ID_MASK 0x7 +#define DOT11_FMS_CNTR_ID_SHIFT 0x0 +#define DOT11_FMS_CNTR_COUNT_MASK 0xf1 +#define DOT11_FMS_CNTR_SHIFT 0x3 + +/** FMS request element */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie { + uint8 id; + uint8 len; + uint8 fms_token; /* token used to identify fms stream set */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_req_ie dot11_fms_req_ie_t; +#define DOT11_FMS_REQ_IE_FIX_LEN 1 /* Fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field { + uint8 mask; + uint8 mcs_idx; + uint16 rate; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rate_id_field dot11_rate_id_field_t; +#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK 0x7 +#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET 0 +#define DOT11_RATE_ID_FIELD_RATETYPE_MASK 0x18 +#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET 3 +#define DOT11_RATE_ID_FIELD_LEN sizeof(dot11_rate_id_field_t) + +/** FMS request subelements */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_se { + uint8 sub_id; + uint8 len; + uint8 interval; + uint8 max_interval; + dot11_rate_id_field_t rate; 
+ uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_se dot11_fms_se_t; +#define DOT11_FMS_REQ_SE_LEN 6 /* Fixed length */ + +#define DOT11_FMS_REQ_SE_ID_FMS 1 /* FMS subelement */ +#define DOT11_FMS_REQ_SE_ID_VS 221 /* Vendor Specific subelement */ + +/** FMS response element */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie { + uint8 id; + uint8 len; + uint8 fms_token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t; +#define DOT11_FMS_RESP_IE_FIX_LEN 1 /* Fixed length */ + +/* FMS status subelements */ +#define DOT11_FMS_STATUS_SE_ID_FMS 1 /* FMS Status */ +#define DOT11_FMS_STATUS_SE_ID_TCLAS 2 /* TCLAS Status */ +#define DOT11_FMS_STATUS_SE_ID_VS 221 /* Vendor Specific subelement */ + +/** FMS status subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se { + uint8 sub_id; + uint8 len; + uint8 status; + uint8 interval; + uint8 max_interval; + uint8 fmsid; + uint8 counter; + dot11_rate_id_field_t rate; + uint8 mcast_addr[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_fms_status_se dot11_fms_status_se_t; +#define DOT11_FMS_STATUS_SE_LEN 15 /* Fixed length */ + +/** TCLAS status subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se { + uint8 sub_id; + uint8 len; + uint8 fmsid; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_status_se dot11_tclas_status_se_t; +#define DOT11_TCLAS_STATUS_SE_LEN 1 /* Fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_addba_req { + uint8 category; /* category of action frame (3) */ + uint8 action; /* action: addba req */ + uint8 token; /* identifier */ + uint16 addba_param_set; /* parameter set */ + uint16 timeout; /* timeout in seconds */ + uint16 start_seqnum; /* starting sequence number */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_addba_req dot11_addba_req_t; +#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */ + +BWL_PRE_PACKED_STRUCT struct dot11_addba_resp { + uint8 category; /* category of action frame (3) */ + uint8 action; /* action: addba resp */ + uint8 token; /* identifier */ + uint16 status; /* status of add request */ + uint16 addba_param_set; /* negotiated parameter set */ + uint16 timeout; /* negotiated timeout in seconds */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_addba_resp dot11_addba_resp_t; +#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */ + +/* DELBA action parameters */ +#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 /* initiator mask */ +#define DOT11_DELBA_PARAM_INIT_SHIFT 11 /* initiator shift */ +#define DOT11_DELBA_PARAM_TID_MASK 0xf000 /* tid mask */ +#define DOT11_DELBA_PARAM_TID_SHIFT 12 /* tid shift */ + +BWL_PRE_PACKED_STRUCT struct dot11_delba { + uint8 category; /* category of action frame (3) */ + uint8 action; /* action: delba */ + uint16 delba_param_set; /* parameter set */ + uint16 reason; /* reason for delba */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_delba dot11_delba_t; +#define DOT11_DELBA_LEN 6 /* length of delba frame */ + +/* SA Query action field value */ +#define SA_QUERY_REQUEST 0 +#define SA_QUERY_RESPONSE 1 + +/* ************* 802.11r related definitions. 
************* */ + +/** Over-the-DS Fast Transition Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_req { + uint8 category; /* category of action frame (6) */ + uint8 action; /* action: ft req */ + uint8 sta_addr[ETHER_ADDR_LEN]; + uint8 tgt_ap_addr[ETHER_ADDR_LEN]; + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_req dot11_ft_req_t; +#define DOT11_FT_REQ_FIXED_LEN 14 + +/** Over-the-DS Fast Transition Response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_res { + uint8 category; /* category of action frame (6) */ + uint8 action; /* action: ft resp */ + uint8 sta_addr[ETHER_ADDR_LEN]; + uint8 tgt_ap_addr[ETHER_ADDR_LEN]; + uint16 status; /* status code */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_res dot11_ft_res_t; +#define DOT11_FT_RES_FIXED_LEN 16 + +/** RDE RIC Data Element. */ +BWL_PRE_PACKED_STRUCT struct dot11_rde_ie { + uint8 id; /* 11r, DOT11_MNG_RDE_ID */ + uint8 length; + uint8 rde_id; /* RDE identifier. */ + uint8 rd_count; /* Resource Descriptor Count. */ + uint16 status; /* Status Code. */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rde_ie dot11_rde_ie_t; + +/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */ +#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t) + + +/* ************* 802.11k related definitions. ************* */ + +/* Radio measurements enabled capability ie */ +#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */ +#define RCPI_IE_LEN 1 +#define RSNI_IE_LEN 1 +BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie { + uint8 cap[DOT11_RRM_CAP_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; + +/* Bitmap definitions for cap ie */ +#define DOT11_RRM_CAP_LINK 0 +#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1 +#define DOT11_RRM_CAP_PARALLEL 2 +#define DOT11_RRM_CAP_REPEATED 3 +#define DOT11_RRM_CAP_BCN_PASSIVE 4 +#define DOT11_RRM_CAP_BCN_ACTIVE 5 +#define DOT11_RRM_CAP_BCN_TABLE 6 +#define DOT11_RRM_CAP_BCN_REP_COND 7 +#define DOT11_RRM_CAP_FM 8 +#define DOT11_RRM_CAP_CLM 9 +#define DOT11_RRM_CAP_NHM 10 +#define DOT11_RRM_CAP_SM 11 +#define DOT11_RRM_CAP_LCIM 12 +#define DOT11_RRM_CAP_LCIA 13 +#define DOT11_RRM_CAP_TSCM 14 +#define DOT11_RRM_CAP_TTSCM 15 +#define DOT11_RRM_CAP_AP_CHANREP 16 +#define DOT11_RRM_CAP_RMMIB 17 +/* bit18-bit26, not used for RRM_IOVAR */ +#define DOT11_RRM_CAP_MPTI 27 +#define DOT11_RRM_CAP_NBRTSFO 28 +#define DOT11_RRM_CAP_RCPI 29 +#define DOT11_RRM_CAP_RSNI 30 +#define DOT11_RRM_CAP_BSSAAD 31 +#define DOT11_RRM_CAP_BSSAAC 32 +#define DOT11_RRM_CAP_AI 33 +#define DOT11_RRM_CAP_FTM_RANGE 34 +#define DOT11_RRM_CAP_CIVIC_LOC 35 +#define DOT11_RRM_CAP_LAST 35 + +/* Operating Class (formerly "Regulatory Class") definitions */ +#define DOT11_OP_CLASS_NONE 255 + +BWL_PRE_PACKED_STRUCT struct do11_ap_chrep { + uint8 id; + uint8 len; + uint8 reg; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct do11_ap_chrep dot11_ap_chrep_t; + +/* Radio Measurements action ids */ +#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */ +#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */ +#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */ +#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */ +#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */ +#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */ + +/** Generic radio measurement action frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_rm_action { + uint8 category; /* category of 
action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_action dot11_rm_action_t; +#define DOT11_RM_ACTION_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint16 reps; /* no. of repetitions */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq dot11_rmreq_t; +#define DOT11_RMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_rm_ie { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_ie dot11_rm_ie_t; +#define DOT11_RM_IE_LEN 5 + +/* Definitions for "mode" bits in rm req */ +#define DOT11_RMREQ_MODE_PARALLEL 1 +#define DOT11_RMREQ_MODE_ENABLE 2 +#define DOT11_RMREQ_MODE_REQUEST 4 +#define DOT11_RMREQ_MODE_REPORT 8 +#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */ + +/* Definitions for "mode" bits in rm rep */ +#define DOT11_RMREP_MODE_LATE 1 +#define DOT11_RMREP_MODE_INCAPABLE 2 +#define DOT11_RMREP_MODE_REFUSED 4 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 bcn_mode; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; +#define DOT11_RMREQ_BCN_LEN 18 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 frame_info; + uint8 rcpi; + uint8 rsni; + struct ether_addr bssid; + uint8 antenna_id; + uint32 parent_tsf; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; +#define DOT11_RMREP_BCN_LEN 26 + +/* Beacon request measurement mode */ +#define DOT11_RMREQ_BCN_PASSIVE 0 +#define DOT11_RMREQ_BCN_ACTIVE 1 +#define DOT11_RMREQ_BCN_TABLE 2 + +/* Sub-element IDs for Beacon Request */ +#define DOT11_RMREQ_BCN_SSID_ID 0 +#define DOT11_RMREQ_BCN_REPINFO_ID 1 +#define DOT11_RMREQ_BCN_REPDET_ID 2 +#define DOT11_RMREQ_BCN_REQUEST_ID 10 +#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID + +/* Reporting Detail element definition */ +#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */ +#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */ +#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */ + +/* Reporting Information (reporting condition) element definition */ +#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */ +#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */ + +/* Sub-element IDs for Beacon Report */ +#define DOT11_RMREP_BCN_FRM_BODY 1 +#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */ + +/* Sub-element IDs for Frame Report */ +#define DOT11_RMREP_FRAME_COUNT_REPORT 1 + +/* Statistics Group Report: Group IDs */ +#define DOT11_RRM_STATS_GRP_ID_0 0 + +/* Statistics Group Report: Group Data length */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 + +/* Channel load request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t; +#define DOT11_RMREQ_CHANLOAD_LEN 11 + +/** Channel load report */ 
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 channel_load; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t; +#define DOT11_RMREP_CHANLOAD_LEN 13 + +/** Noise histogram request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_noise dot11_rmreq_noise_t; +#define DOT11_RMREQ_NOISE_LEN 11 + +/** Noise histogram report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 antid; + uint8 anpi; + uint8 ipi0_dens; + uint8 ipi1_dens; + uint8 ipi2_dens; + uint8 ipi3_dens; + uint8 ipi4_dens; + uint8 ipi5_dens; + uint8 ipi6_dens; + uint8 ipi7_dens; + uint8 ipi8_dens; + uint8 ipi9_dens; + uint8 ipi10_dens; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_noise dot11_rmrep_noise_t; +#define DOT11_RMREP_NOISE_LEN 25 + +/** Frame request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 req_type; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_frame dot11_rmreq_frame_t; +#define DOT11_RMREQ_FRAME_LEN 18 + +/** Frame report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frame dot11_rmrep_frame_t; +#define DOT11_RMREP_FRAME_LEN 12 + +/** Frame report entry */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry { + struct ether_addr ta; + struct ether_addr bssid; + uint8 phy_type; + uint8 avg_rcpi; + uint8 last_rsni; + uint8 last_rcpi; + uint8 ant_id; + uint16 frame_cnt; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t; +#define DOT11_RMREP_FRMENTRY_LEN 19 + +/** STA statistics request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + struct ether_addr peer; + uint16 interval; + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_stat dot11_rmreq_stat_t; +#define DOT11_RMREQ_STAT_LEN 16 + +/** STA statistics report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat { + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_stat dot11_rmrep_stat_t; + +/** Transmit stream/category measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 interval; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 bin0_range; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t; + +/** Transmit stream/category measurement report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { + uint32 starttime[2]; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 reason; + uint32 txmsdu_cnt; + uint32 msdu_discarded_cnt; + uint32 msdufailed_cnt; + uint32 msduretry_cnt; + uint32 cfpolls_lost_cnt; + uint32 avrqueue_delay; + uint32 avrtx_delay; + uint8 bin0_range; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} BWL_POST_PACKED_STRUCT; 
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t; + +enum { + DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */ + DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */ + DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */ +}; + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + + /* Following 3 fields are unused. Keep for ROM compatibility. */ + uint8 lat_res; + uint8 lon_res; + uint8 alt_res; + + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 lci_sub_id; + uint8 lci_sub_len; + /* optional LCI field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t; + +#define DOT11_FTM_LCI_SUBELEM_ID 0 +#define DOT11_FTM_LCI_SUBELEM_LEN 2 +#define DOT11_FTM_LCI_FIELD_LEN 16 +#define DOT11_FTM_LCI_UNKNOWN_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 civloc_type; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 civloc_type; + uint8 civloc_sub_id; + uint8 civloc_sub_len; + /* optional location civic field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t; + +#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0 +#define DOT11_FTM_CIVIC_SUBELEM_ID 0 +#define DOT11_FTM_CIVIC_SUBELEM_LEN 2 +#define DOT11_FTM_CIVIC_LOC_SI_NONE 0 +#define DOT11_FTM_CIVIC_TYPE_LEN 1 +#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel { + uint8 id; + uint8 len; + uint16 max_age; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t; +#define DOT11_FTM_RANGE_SUBELEM_ID 4 +#define DOT11_FTM_RANGE_SUBELEM_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + /* neighbor report sub-elements */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t; +#define DOT11_RMREQ_FTM_RANGE_LEN 8 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint16 range; + uint16 max_err; + uint8 rsvd; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t; +#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15 + +enum { + DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 3, + DOT11_FTM_RANGE_ERROR_AP_FAILED = 4, + DOT11_FTM_RANGE_ERROR_TX_FAILED = 8, + DOT11_FTM_RANGE_ERROR_MAX +}; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 code; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t; +#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range { + uint8 id; + uint8 len; + 
uint8 token; + uint8 mode; + uint8 type; + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t; + +#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */ +#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15 +#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11 +#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */ +/** Measurement pause request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 pause_time; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t; + + +/* Neighbor Report subelements ID (11k & 11v) */ +#define DOT11_NGBR_TSF_INFO_SE_ID 1 +#define DOT11_NGBR_CCS_SE_ID 2 +#define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3 +#define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4 +#define DOT11_NGBR_BEARING_SE_ID 5 + +/** Neighbor Report, BSS Transition Candidate Preference subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se { + uint8 sub_id; + uint8 len; + uint8 preference; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t; +#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1 + +/** Neighbor Report, BSS Termination Duration subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se { + uint8 sub_id; + uint8 len; + uint8 tsf[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t; +#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN 10 + +/* Neighbor Report BSSID Information Field */ +#define DOT11_NGBR_BI_REACHABILTY_UNKN 0x0002 +#define DOT11_NGBR_BI_REACHABILTY 0x0003 +#define DOT11_NGBR_BI_SEC 0x0004 +#define DOT11_NGBR_BI_KEY_SCOPE 0x0008 +#define DOT11_NGBR_BI_CAP 0x03f0 +#define DOT11_NGBR_BI_CAP_SPEC_MGMT 0x0010 +#define DOT11_NGBR_BI_CAP_QOS 0x0020 +#define DOT11_NGBR_BI_CAP_APSD 0x0040 +#define DOT11_NGBR_BI_CAP_RDIO_MSMT 0x0080 +#define DOT11_NGBR_BI_CAP_DEL_BA 0x0100 +#define DOT11_NGBR_BI_CAP_IMM_BA 0x0200 +#define DOT11_NGBR_BI_MOBILITY 0x0400 +#define DOT11_NGBR_BI_HT 0x0800 +#define DOT11_NGBR_BI_VHT 0x1000 +#define DOT11_NGBR_BI_FTM 0x2000 + +/** Neighbor Report element (11k & 11v) */ +BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; /* Operating class */ + uint8 channel; + uint8 phytype; + uint8 data[1]; /* Variable size subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t; +#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13 + + +/* MLME Enumerations */ +#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */ +#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */ +#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */ +#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */ +#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */ +#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */ + +/** Link Measurement */ +BWL_PRE_PACKED_STRUCT struct dot11_lmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 txpwr; /* Transmit Power Used */ + uint8 maxtxpwr; /* Max Transmit Power */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmreq dot11_lmreq_t; +#define DOT11_LMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT 
struct dot11_lmrep { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + dot11_tpc_rep_t tpc; /* TPC element */ + uint8 rxant; /* Receive Antenna ID */ + uint8 txant; /* Transmit Antenna ID */ + uint8 rcpi; /* RCPI */ + uint8 rsni; /* RSNI */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmrep dot11_lmrep_t; +#define DOT11_LMREP_LEN 11 + +/* 802.11 BRCM "Compromise" Pre N constants */ +#define PREN_PREAMBLE 24 /* green field preamble time */ +#define PREN_MM_EXT 12 /* extra mixed mode preamble time */ +#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */ + +/* 802.11N PHY constants */ +#define RIFS_11N_TIME 2 /* NPHY RIFS time */ + +/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3 + * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2 + */ +/* HT-SIG1 */ +#define HT_SIG1_MCS_MASK 0x00007F +#define HT_SIG1_CBW 0x000080 +#define HT_SIG1_HT_LENGTH 0xFFFF00 + +/* HT-SIG2 */ +#define HT_SIG2_SMOOTHING 0x000001 +#define HT_SIG2_NOT_SOUNDING 0x000002 +#define HT_SIG2_RESERVED 0x000004 +#define HT_SIG2_AGGREGATION 0x000008 +#define HT_SIG2_STBC_MASK 0x000030 +#define HT_SIG2_STBC_SHIFT 4 +#define HT_SIG2_FEC_CODING 0x000040 +#define HT_SIG2_SHORT_GI 0x000080 +#define HT_SIG2_ESS_MASK 0x000300 +#define HT_SIG2_ESS_SHIFT 8 +#define HT_SIG2_CRC 0x03FC00 +#define HT_SIG2_TAIL 0x1C0000 + +/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */ +#define HT_T_LEG_PREAMBLE 16 +#define HT_T_L_SIG 4 +#define HT_T_SIG 8 +#define HT_T_LTF1 4 +#define HT_T_GF_LTF1 8 +#define HT_T_LTFs 4 +#define HT_T_STF 4 +#define HT_T_GF_STF 8 +#define HT_T_SYML 4 + +#define HT_N_SERVICE 16 /* bits in SERVICE field */ +#define HT_N_TAIL 6 /* tail bits per BCC encoder */ + +/* 802.11 A PHY constants */ +#define APHY_SLOT_TIME 9 /* APHY slot time */ +#define APHY_SIFS_TIME 16 /* APHY SIFS time */ +#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */ +#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */ +#define APHY_SIGNAL_TIME 4 /* APHY signal time */ +#define APHY_SYMBOL_TIME 4 /* APHY symbol time */ +#define APHY_SERVICE_NBITS 16 /* APHY service nbits */ +#define APHY_TAIL_NBITS 6 /* APHY tail nbits */ +#define APHY_CWMIN 15 /* APHY cwmin */ +#define APHY_PHYHDR_DUR 20 /* APHY PHY Header Duration */ + +/* 802.11 B PHY constants */ +#define BPHY_SLOT_TIME 20 /* BPHY slot time */ +#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */ +#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */ +#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */ +#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */ +#define BPHY_CWMIN 31 /* BPHY cwmin */ +#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */ +#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */ + +/* 802.11 G constants */ +#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */ + +#define PHY_CWMAX 1023 /* PHY cwmax */ + +#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */ + +/* 802.11 VHT constants */ + +typedef int vht_group_id_t; + +/* for VHT-A1 */ +/* SIG-A1 reserved bits */ +#define VHT_SIGA1_CONST_MASK 0x800004 + +#define VHT_SIGA1_BW_MASK 0x000003 +#define VHT_SIGA1_20MHZ_VAL 0x000000 +#define VHT_SIGA1_40MHZ_VAL 0x000001 +#define VHT_SIGA1_80MHZ_VAL 0x000002 +#define VHT_SIGA1_160MHZ_VAL 0x000003 + +#define VHT_SIGA1_STBC 0x000008 + +#define VHT_SIGA1_GID_MASK 0x0003f0 +#define VHT_SIGA1_GID_SHIFT 4 +#define VHT_SIGA1_GID_TO_AP 0x00 +#define VHT_SIGA1_GID_NOT_TO_AP 
0x3f +#define VHT_SIGA1_GID_MAX_GID 0x3f + +#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00 +#define VHT_SIGA1_NSTS_SHIFT 10 +#define VHT_SIGA1_MAX_USERPOS 3 + +#define VHT_SIGA1_PARTIAL_AID_MASK 0x3fe000 +#define VHT_SIGA1_PARTIAL_AID_SHIFT 13 + +#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED 0x400000 + +/* for VHT-A2 */ +#define VHT_SIGA2_GI_NONE 0x000000 +#define VHT_SIGA2_GI_SHORT 0x000001 +#define VHT_SIGA2_GI_W_MOD10 0x000002 +#define VHT_SIGA2_CODING_LDPC 0x000004 +#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM 0x000008 +#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100 +#define VHT_SIGA2_MCS_SHIFT 4 + +#define VHT_SIGA2_B9_RESERVED 0x000200 +#define VHT_SIGA2_TAIL_MASK 0xfc0000 +#define VHT_SIGA2_TAIL_VALUE 0x000000 + +/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */ +#define VHT_T_LEG_PREAMBLE 16 +#define VHT_T_L_SIG 4 +#define VHT_T_SIG_A 8 +#define VHT_T_LTF 4 +#define VHT_T_STF 4 +#define VHT_T_SIG_B 4 +#define VHT_T_SYML 4 + +#define VHT_N_SERVICE 16 /* bits in SERVICE field */ +#define VHT_N_TAIL 6 /* tail bits per BCC encoder */ + + +/** dot11Counters Table - 802.11 spec., Annex D */ +typedef struct d11cnt { + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount */ + uint32 rxundec; /* dot11WEPUndecryptableCount */ +} d11cnt_t; + +#define BRCM_PROP_OUI "\x00\x90\x4C" + + +/* Action frame type for FTM Initiator Report */ +#define BRCM_FTM_VS_AF_TYPE 14 +enum { + BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ + BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ +}; + +/* Action frame type for RWL */ +#define RWL_WIFI_DEFAULT 0 +#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */ +#define RWL_WIFI_FOUND_PEER 10 /* Server response to the client */ +#define RWL_ACTION_WIFI_FRAG_TYPE 85 /* Fragment indicator for receiver */ + +#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */ +#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */ + + + +/* brcm syscap_ie cap */ +#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */ + +#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */ + +/** BRCM info element */ +BWL_PRE_PACKED_STRUCT struct brcm_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 ver; /* type/ver of this IE */ + uint8 assoc; /* # of assoc STAs */ + uint8 flags; /* misc flags */ + uint8 flags1; /* misc flags */ + uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */ +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_ie brcm_ie_t; +#define BRCM_IE_LEN 11 /* BRCM IE length */ +#define BRCM_IE_VER 2 /* BRCM IE version */ +#define BRCM_IE_LEGACY_AES_VER 1 /* BRCM IE legacy AES version */ + +/* brcm_ie flags */ +#define BRF_ABCAP 0x1 /* afterburner is obsolete, defined for backward compat */ +#define BRF_ABRQRD 0x2 /* afterburner is obsolete, defined for backward compat */ +#define BRF_LZWDS 0x4 /* lazy wds enabled */ +#define BRF_BLOCKACK 0x8 /* BlockACK capable */ +#define BRF_ABCOUNTER_MASK 0xf0 /* 
afterburner is obsolete, defined for backward compat */ +#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */ +#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */ + +#define GET_BRF_PROP_11N_MCS(brcm_ie) \ + (!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS)) + +/* brcm_ie flags1 */ +#define BRF1_AMSDU 0x1 /* A-MSDU capable */ +#define BRF1_WNM 0x2 /* WNM capable */ +#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */ +#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */ +#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */ +#define BRF1_RFAWARE_DCS 0x20 /* RFAWARE dynamic channel selection (DCS) */ +#define BRF1_SOFTAP 0x40 /* Configure as Broadcom SOFTAP */ +#define BRF1_DWDS 0x80 /* DWDS capable */ + +/** Vendor IE structure */ +BWL_PRE_PACKED_STRUCT struct vndr_ie { + uchar id; + uchar len; + uchar oui [3]; + uchar data [1]; /* Variable size data */ +} BWL_POST_PACKED_STRUCT; +typedef struct vndr_ie vndr_ie_t; + +#define VNDR_IE_HDR_LEN 2 /* id + len field */ +#define VNDR_IE_MIN_LEN 3 /* size of the oui field */ +#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN) + +#define VNDR_IE_MAX_LEN 255 /* vendor IE max length, without ID and len */ + +/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */ +BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie { + uchar id; + uchar len; + uchar oui[3]; + uint8 type; /* type indicates what follows */ + struct ether_addr ea; /* Device Primary MAC Adrress */ +} BWL_POST_PACKED_STRUCT; +typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t; + +#define MEMBER_OF_BRCM_PROP_IE_LEN 10 /* IE max length */ +#define MEMBER_OF_BRCM_PROP_IE_HDRLEN (sizeof(member_of_brcm_prop_ie_t)) +#define MEMBER_OF_BRCM_PROP_IE_TYPE 54 + +/** BRCM Reliable Multicast IE */ +BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + struct ether_addr ea; /* The ack sender's MAC Adrress */ + struct ether_addr mcast_ea; /* The multicast MAC address */ + uint8 updtmo; /* time interval(second) for client to send null packet to report its rssi */ +} BWL_POST_PACKED_STRUCT; +typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t; + +/* IE length */ +/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */ +#define RELMCAST_BRCM_PROP_IE_LEN (sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8))) + +#define RELMCAST_BRCM_PROP_IE_TYPE 55 + +/* BRCM BTC IE */ +BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; /* type inidicates what follows */ + uint32 info; +} BWL_POST_PACKED_STRUCT; +typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t; + +#define BTC_INFO_BRCM_PROP_IE_TYPE 90 +#define BRCM_BTC_INFO_TYPE_LEN (sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8))) + +/* ************* HT definitions. ************* */ +#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */ +#define MAX_MCS_NUM (128) /* max mcs number = 128 */ + +BWL_PRE_PACKED_STRUCT struct ht_cap_ie { + uint16 cap; + uint8 params; + uint8 supp_mcs[MCSSET_LEN]; + uint16 ext_htcap; + uint32 txbf_cap; + uint8 as_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_cap_ie ht_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie { + uint8 id; + uint8 len; + ht_cap_ie_t ht_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t; + +/* CAP IE: HT 1.0 spec. 
simply stole a 802.11 IE, we use our prop. IE until this is resolved */ +/* the capability IE is primarily used to convey this nodes abilities */ +BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + ht_cap_ie_t cap_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; + +#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */ +#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */ +#define HT_CAP_IE_TYPE 51 + +#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */ +#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */ +#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */ +#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */ +#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */ +#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ +#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ +#define HT_CAP_GF 0x0010 /* Greenfield preamble support */ +#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ +#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ +#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */ +#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ +#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ +#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */ +#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ + +#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ +#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ +#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ +#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ + +#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */ +#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */ +#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ + + +#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1 +#define HT_CAP_TXBF_CAP_NDP_RX 0x8 +#define HT_CAP_TXBF_CAP_NDP_TX 0x10 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI 0x100 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING 0x200 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING 0x400 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK 0x1800 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT 11 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK 0x6000 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT 13 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK 0x18000 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT 15 +#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT 19 +#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT 21 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT 23 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK 0x1800000 + +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT 27 +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK 0x18000000 + +#define HT_CAP_TXBF_FB_TYPE_NONE 0 +#define HT_CAP_TXBF_FB_TYPE_DELAYED 1 +#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 2 +#define HT_CAP_TXBF_FB_TYPE_BOTH 3 + +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK 0x400 +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT 10 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15 + +#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */ +#define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */ +/* Max 
AMSDU len - per spec */ +#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA) + +#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */ +#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */ + +#define HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */ +#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */ +#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */ + +/* HT/AMPDU specific define */ +#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/4 usec units */ +#define AMPDU_DENSITY_NONE 0 /* No density requirement */ +#define AMPDU_DENSITY_1over4_US 1 /* 1/4 us density */ +#define AMPDU_DENSITY_1over2_US 2 /* 1/2 us density */ +#define AMPDU_DENSITY_1_US 3 /* 1 us density */ +#define AMPDU_DENSITY_2_US 4 /* 2 us density */ +#define AMPDU_DENSITY_4_US 5 /* 4 us density */ +#define AMPDU_DENSITY_8_US 6 /* 8 us density */ +#define AMPDU_DENSITY_16_US 7 /* 16 us density */ +#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */ +#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */ +#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */ +#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */ + +/* AMPDU RX factors for VHT rates */ +#define AMPDU_RX_FACTOR_128K 4 /* max rcv ampdu len (128kb) */ +#define AMPDU_RX_FACTOR_256K 5 /* max rcv ampdu len (256kb) */ +#define AMPDU_RX_FACTOR_512K 6 /* max rcv ampdu len (512kb) */ +#define AMPDU_RX_FACTOR_1024K 7 /* max rcv ampdu len (1024kb) */ + +#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */ +#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */ + +#define AMPDU_DELIMITER_LEN 4 /* length of ampdu delimiter */ +#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */ + +#define HT_CAP_EXT_PCO 0x0001 +#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006 +#define HT_CAP_EXT_PCO_TTIME_SHIFT 1 +#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300 +#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8 +#define HT_CAP_EXT_HTC 0x0400 +#define HT_CAP_EXT_RD_RESP 0x0800 + +/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */ +BWL_PRE_PACKED_STRUCT struct ht_add_ie { + uint8 ctl_ch; /* control channel number */ + uint8 byte1; /* ext ch,rec. ch. width, RIFS support */ + uint16 opmode; /* operation mode */ + uint16 misc_bits; /* misc bits */ + uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */ +} BWL_POST_PACKED_STRUCT; +typedef struct ht_add_ie ht_add_ie_t; + +/* ADD IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. 
IE until this is resolved */ +/* the additional IE is primarily used to convey the current BSS configuration */ +BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* indicates what follows */ + ht_add_ie_t add_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_add_ie ht_prop_add_ie_t; + +#define HT_ADD_IE_LEN 22 +#define HT_ADD_IE_TYPE 52 + +/* byte1 defn's */ +#define HT_BW_ANY 0x04 /* set, STA can use 20 or 40MHz */ +#define HT_RIFS_PERMITTED 0x08 /* RIFS allowed */ + +/* opmode defn's */ +#define HT_OPMODE_MASK 0x0003 /* protection mode mask */ +#define HT_OPMODE_SHIFT 0 /* protection mode shift */ +#define HT_OPMODE_PURE 0x0000 /* protection mode PURE */ +#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */ +#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */ +#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */ +#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */ +#define DOT11N_TXBURST 0x0008 /* Tx burst limit */ +#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */ + +/* misc_bites defn's */ +#define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */ +#define HT_DUAL_STBC_PROT 0x0080 /* Dual STBC Protection */ +#define HT_SECOND_BCN 0x0100 /* Secondary beacon support */ +#define HT_LSIG_TXOP 0x0200 /* L-SIG TXOP Protection full support */ +#define HT_PCO_ACTIVE 0x0400 /* PCO active */ +#define HT_PCO_PHASE 0x0800 /* PCO phase */ +#define HT_DUALCTS_PROTECTION 0x0080 /* DUAL CTS protection needed */ + +/* Tx Burst Limits */ +#define DOT11N_2G_TXBURST_LIMIT 6160 /* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */ +#define DOT11N_5G_TXBURST_LIMIT 3080 /* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */ + +/* Macros for opmode */ +#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + >> HT_OPMODE_SHIFT) +#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_MIXED) /* mixed mode present */ +#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_HT20IN40) /* 20MHz HT present */ +#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_OPTIONAL) /* Optional protection present */ +#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \ + HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */ +#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \ + == HT_OPMODE_NONGF) /* non-GF present */ +#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \ + == DOT11N_TXBURST) /* Tx Burst present */ +#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \ + == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */ + +BWL_PRE_PACKED_STRUCT struct obss_params { + uint16 passive_dwell; + uint16 active_dwell; + uint16 bss_widthscan_interval; + uint16 passive_total; + uint16 active_total; + uint16 chanwidth_transition_dly; + uint16 activity_threshold; +} BWL_POST_PACKED_STRUCT; +typedef struct obss_params obss_params_t; + +BWL_PRE_PACKED_STRUCT struct dot11_obss_ie { + uint8 id; + uint8 len; + obss_params_t obss_params; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_ie dot11_obss_ie_t; +#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) /* HT OBSS len (based on 802.11n d3.0) */ + +/* HT control field */ +#define HT_CTRL_LA_TRQ 0x00000002 /* sounding request */ +#define 
HT_CTRL_LA_MAI 0x0000003C /* MCS request or antenna selection indication */ +#define HT_CTRL_LA_MAI_SHIFT 2 +#define HT_CTRL_LA_MAI_MRQ 0x00000004 /* MCS request */ +#define HT_CTRL_LA_MAI_MSI 0x00000038 /* MCS request sequence identifier */ +#define HT_CTRL_LA_MFSI 0x000001C0 /* MFB sequence identifier */ +#define HT_CTRL_LA_MFSI_SHIFT 6 +#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 /* MCS feedback, antenna selection command/data */ +#define HT_CTRL_LA_MFB_ASELC_SH 9 +#define HT_CTRL_LA_ASELC_CMD 0x00000C00 /* ASEL command */ +#define HT_CTRL_LA_ASELC_DATA 0x0000F000 /* ASEL data */ +#define HT_CTRL_CAL_POS 0x00030000 /* Calibration position */ +#define HT_CTRL_CAL_SEQ 0x000C0000 /* Calibration sequence */ +#define HT_CTRL_CSI_STEERING 0x00C00000 /* CSI/Steering */ +#define HT_CTRL_CSI_STEER_SHIFT 22 +#define HT_CTRL_CSI_STEER_NFB 0 /* no fedback required */ +#define HT_CTRL_CSI_STEER_CSI 1 /* CSI, H matrix */ +#define HT_CTRL_CSI_STEER_NCOM 2 /* non-compressed beamforming */ +#define HT_CTRL_CSI_STEER_COM 3 /* compressed beamforming */ +#define HT_CTRL_NDP_ANNOUNCE 0x01000000 /* NDP announcement */ +#define HT_CTRL_AC_CONSTRAINT 0x40000000 /* AC Constraint */ +#define HT_CTRL_RDG_MOREPPDU 0x80000000 /* RDG/More PPDU */ + +/* ************* VHT definitions. ************* */ + +/** + * VHT Capabilites IE (sec 8.4.2.160) + */ + +BWL_PRE_PACKED_STRUCT struct vht_cap_ie { + uint32 vht_cap_info; + /* supported MCS set - 64 bit field */ + uint16 rx_mcs_map; + uint16 rx_max_rate; + uint16 tx_mcs_map; + uint16 tx_max_rate; +} BWL_POST_PACKED_STRUCT; +typedef struct vht_cap_ie vht_cap_ie_t; + +/* 4B cap_info + 8B supp_mcs */ +#define VHT_CAP_IE_LEN 12 + +/* VHT Capabilities Info field - 32bit - in VHT Cap IE */ +#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003 +#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c +#define VHT_CAP_INFO_LDPC 0x00000010 +#define VHT_CAP_INFO_SGI_80MHZ 0x00000020 +#define VHT_CAP_INFO_SGI_160MHZ 0x00000040 +#define VHT_CAP_INFO_TX_STBC 0x00000080 +#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700 +#define VHT_CAP_INFO_RX_STBC_SHIFT 8 +#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800 +#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000 +#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000 +#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13 +#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000 +#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16 +#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000 +#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000 +#define VHT_CAP_INFO_TXOPPS 0x00200000 +#define VHT_CAP_INFO_HTCVHT 0x00400000 +#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000 +#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23 +#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000 +#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26 + +/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */ +#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff +#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0 + +#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff +#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0 + +#define VHT_CAP_MCS_MAP_0_7 0 +#define VHT_CAP_MCS_MAP_0_8 1 +#define VHT_CAP_MCS_MAP_0_9 2 +#define VHT_CAP_MCS_MAP_NONE 3 +#define VHT_CAP_MCS_MAP_S 2 /* num bits for 1-stream */ +#define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */ +/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */ +#define VHT_CAP_MCS_MAP_NONE_ALL 0xffff + +/* VHT rates bitmap */ +#define VHT_CAP_MCS_0_7_RATEMAP 0x00ff +#define VHT_CAP_MCS_0_8_RATEMAP 0x01ff +#define VHT_CAP_MCS_0_9_RATEMAP 0x03ff +#define VHT_CAP_MCS_FULL_RATEMAP 
VHT_CAP_MCS_0_9_RATEMAP + +#define VHT_PROP_MCS_MAP_10_11 0 +#define VHT_PROP_MCS_MAP_UNUSED1 1 +#define VHT_PROP_MCS_MAP_UNUSED2 2 +#define VHT_PROP_MCS_MAP_NONE 3 +#define VHT_PROP_MCS_MAP_NONE_ALL 0xffff + +/* VHT prop rates bitmap */ +#define VHT_PROP_MCS_10_11_RATEMAP 0x0c00 +#define VHT_PROP_MCS_FULL_RATEMAP VHT_PROP_MCS_10_11_RATEMAP + +#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3) +/* mcsmap with MCS0-9 for Nss = 3 */ +#define VHT_CAP_MCS_MAP_0_9_NSS3 \ + ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \ + (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \ + (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3))) +#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */ + +#define VHT_CAP_MCS_MAP_NSS_MAX 8 + +/* get mcsmap with given mcs for given nss streams */ +#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \ + do { \ + int i; \ + for (i = 1; i <= nss; i++) { \ + VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \ + } \ + } while (0) + +/* Map the mcs code to mcs bit map */ +#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \ + ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \ + (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \ + (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0) + +#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \ + ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0) + +/* Map the mcs bit map to mcs code */ +#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \ + ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \ + (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \ + (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE) + +#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \ + (((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE) + +/** VHT Capabilities Supported Channel Width */ +typedef enum vht_cap_chan_width { + VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00, + VHT_CAP_CHAN_WIDTH_SUPPORT_160 = 0x04, + VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080 = 0x08 +} vht_cap_chan_width_t; + +/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */ +typedef enum vht_cap_max_mpdu_len { + VHT_CAP_MPDU_MAX_4K = 0x00, + VHT_CAP_MPDU_MAX_8K = 0x01, + VHT_CAP_MPDU_MAX_11K = 0x02 +} vht_cap_max_mpdu_len_t; + +/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */ +#define VHT_MPDU_LIMIT_4K 3895 +#define VHT_MPDU_LIMIT_8K 7991 +#define VHT_MPDU_LIMIT_11K 11454 + + +/** + * VHT Operation IE (sec 8.4.2.161) + */ + +BWL_PRE_PACKED_STRUCT struct vht_op_ie { + uint8 chan_width; + uint8 chan1; + uint8 chan2; + uint16 supp_mcs; /* same def as above in vht cap */ +} BWL_POST_PACKED_STRUCT; +typedef struct vht_op_ie vht_op_ie_t; + +/* 3B VHT Op info + 2B Basic MCS */ +#define VHT_OP_IE_LEN 5 + +typedef enum vht_op_chan_width { + VHT_OP_CHAN_WIDTH_20_40 = 0, + VHT_OP_CHAN_WIDTH_80 = 1, + VHT_OP_CHAN_WIDTH_160 = 2, + VHT_OP_CHAN_WIDTH_80_80 = 3 +} vht_op_chan_width_t; + +/* AID length */ +#define AID_IE_LEN 2 +/** + * BRCM vht features IE header + * The header if the fixed part of the IE + * On the 5GHz band this is the entire IE, + * on 2.4GHz the VHT IEs as defined in the 802.11ac + * specification follows + * + * + * VHT features rates bitmap. 
+ * Bit0: 5G MCS 0-9 BW 160MHz + * Bit1: 5G MCS 0-9 support BW 80MHz + * Bit2: 5G MCS 0-9 support BW 20MHz + * Bit3: 2.4G MCS 0-9 support BW 20MHz + * Bits:4-7 Reserved for future use + * + */ +#define VHT_FEATURES_IE_TYPE 0x4 +BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr { + uint8 oui[3]; + uint8 type; /* type of this IE = 4 */ + uint8 rate_mask; /* VHT rate mask */ +} BWL_POST_PACKED_STRUCT; +typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; + +/* Def for rx & tx basic mcs maps - ea ss num has 2 bits of info */ +#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S) +#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \ + (((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M) +#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \ + do { \ + (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \ + (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \ + } while (0) +#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \ + (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE) + + +/* ************* WPA definitions. ************* */ +#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ +#define WPA_OUI_LEN 3 /* WPA OUI length */ +#define WPA_OUI_TYPE 1 +#define WPA_VERSION 1 /* WPA version */ +#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */ +#define WPA2_OUI_LEN 3 /* WPA2 OUI length */ +#define WPA2_VERSION 1 /* WPA2 version */ +#define WPA2_VERSION_LEN 2 /* WAP2 version length */ + +/* ************* WPS definitions. ************* */ +#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */ +#define WPS_OUI_LEN 3 /* WPS OUI length */ +#define WPS_OUI_TYPE 4 + +/* ************* WFA definitions. ************* */ + +#ifdef P2P_IE_OVRD +#define WFA_OUI MAC_OUI +#else +#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */ +#endif /* P2P_IE_OVRD */ +#define WFA_OUI_LEN 3 /* WFA OUI length */ +#ifdef P2P_IE_OVRD +#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P +#else +#define WFA_OUI_TYPE_TPC 8 +#define WFA_OUI_TYPE_P2P 9 +#endif + +#define WFA_OUI_TYPE_TPC 8 +#ifdef WLTDLS +#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */ +#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */ +#define WFA_OUI_TYPE_WFD 10 +#endif /* WTDLS */ +#define WFA_OUI_TYPE_HS20 0x10 +#define WFA_OUI_TYPE_OSEN 0x12 +#define WFA_OUI_TYPE_NAN 0x13 + +/* RSN authenticated key managment suite */ +#define RSN_AKM_NONE 0 /* None (IBSS) */ +#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */ +#define RSN_AKM_PSK 2 /* Pre-shared Key */ +#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */ +#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */ +/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more + * Just kept here to avoid build issue in BISON/CARIBOU branch + */ +#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ + +/* OSEN authenticated key managment suite */ +#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */ + +/* Key related defines */ +#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */ +#define DOT11_MAX_IGTK_KEYS 2 +#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */ +#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */ +#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 bytes */ +#define 
DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */ + +#define WEP1_KEY_SIZE 5 /* max size of any WEP key */ +#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */ +#define WEP128_KEY_SIZE 13 /* max size of any WEP key */ +#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. */ +#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */ +#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */ +#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */ +#define TKIP_KEY_SIZE 32 /* size of any TKIP key, includs MIC keys */ +#define TKIP_TK_SIZE 16 +#define TKIP_MIC_KEY_SIZE 8 +#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */ +#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */ +#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */ +#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */ +#define AES_KEY_SIZE 16 /* size of AES key */ +#define AES_MIC_SIZE 8 /* size of AES MIC */ +#define BIP_KEY_SIZE 16 /* size of BIP key */ +#define BIP_MIC_SIZE 8 /* sizeof BIP MIC */ + +#define AES_GCM_MIC_SIZE 16 /* size of MIC for 128-bit GCM - .11adD9 */ + +#define AES256_KEY_SIZE 32 /* size of AES 256 key - .11acD5 */ +#define AES256_MIC_SIZE 16 /* size of MIC for 256 bit keys, incl BIP */ + +/* WCN */ +#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */ +#define WCN_TYPE 4 /* WCN type */ + + +/* 802.11r protocol definitions */ + +/** Mobility Domain IE */ +BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie { + uint8 id; + uint8 len; + uint16 mdid; /* Mobility Domain Id */ + uint8 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mdid_ie dot11_mdid_ie_t; + +#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */ +#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */ + +/** Fast Bss Transition IE */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_ie { + uint8 id; + uint8 len; + uint16 mic_control; /* Mic Control */ + uint8 mic[16]; + uint8 anonce[32]; + uint8 snonce[32]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_ie dot11_ft_ie_t; + +#define TIE_TYPE_RESERVED 0 +#define TIE_TYPE_REASSOC_DEADLINE 1 +#define TIE_TYPE_KEY_LIEFTIME 2 +#define TIE_TYPE_ASSOC_COMEBACK 3 +BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie { + uint8 id; + uint8 len; + uint8 type; /* timeout interval type */ + uint32 value; /* timeout interval value */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timeout_ie dot11_timeout_ie_t; + +/** GTK ie */ +BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie { + uint8 id; + uint8 len; + uint16 key_info; + uint8 key_len; + uint8 rsc[8]; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_gtk_ie dot11_gtk_ie_t; + +/** Management MIC ie */ +BWL_PRE_PACKED_STRUCT struct mmic_ie { + uint8 id; /* IE ID: DOT11_MNG_MMIE_ID */ + uint8 len; /* IE length */ + uint16 key_id; /* key id */ + uint8 ipn[6]; /* ipn */ + uint8 mic[16]; /* mic */ +} BWL_POST_PACKED_STRUCT; +typedef struct mmic_ie mmic_ie_t; + +/* 802.11r-2008, 11A.10.3 - RRB frame format */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame { + uint8 frame_type; /* 1 for RRB */ + uint8 packet_type; /* 0 for Request 1 for Response */ + uint16 len; + uint8 cur_ap_addr[ETHER_ADDR_LEN]; + uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t; + +#define DOT11_FT_RRB_FIXED_LEN 10 +#define DOT11_FT_REMOTE_FRAME_TYPE 1 +#define DOT11_FT_PACKET_REQ 0 +#define DOT11_FT_PACKET_RESP 1 + +#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00" +#define BSSID_BROADCAST 
"\xFF\xFF\xFF\xFF\xFF\xFF" + + +/* ************* WMM Parameter definitions. ************* */ +#define WMM_OUI "\x00\x50\xF2" /* WNN OUI */ +#define WMM_OUI_LEN 3 /* WMM OUI length */ +#define WMM_OUI_TYPE 2 /* WMM OUT type */ +#define WMM_VERSION 1 +#define WMM_VERSION_LEN 1 + +/* WMM OUI subtype */ +#define WMM_OUI_SUBTYPE_PARAMETER 1 +#define WMM_PARAMETER_IE_LEN 24 + +/** Link Identifier Element */ +BWL_PRE_PACKED_STRUCT struct link_id_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + struct ether_addr tdls_init_mac; + struct ether_addr tdls_resp_mac; +} BWL_POST_PACKED_STRUCT; +typedef struct link_id_ie link_id_ie_t; +#define TDLS_LINK_ID_IE_LEN 18 + +/** Link Wakeup Schedule Element */ +BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie { + uint8 id; + uint8 len; + uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */ + uint32 interval; /* in ms bwtween the start of 2 Awake Windows */ + uint32 awake_win_slots; /* in backof slots, duration of Awake Window */ + uint32 max_wake_win; /* in ms, max duration of Awake Window */ + uint16 idle_cnt; /* number of consecutive Awake Windows */ +} BWL_POST_PACKED_STRUCT; +typedef struct wakeup_sch_ie wakeup_sch_ie_t; +#define TDLS_WAKEUP_SCH_IE_LEN 18 + +/** Channel Switch Timing Element */ +BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie { + uint8 id; + uint8 len; + uint16 switch_time; /* in ms, time to switch channels */ + uint16 switch_timeout; /* in ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct channel_switch_timing_ie channel_switch_timing_ie_t; +#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4 + +/** PTI Control Element */ +BWL_PRE_PACKED_STRUCT struct pti_control_ie { + uint8 id; + uint8 len; + uint8 tid; + uint16 seq_control; +} BWL_POST_PACKED_STRUCT; +typedef struct pti_control_ie pti_control_ie_t; +#define TDLS_PTI_CONTROL_IE_LEN 3 + +/** PU Buffer Status Element */ +BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie { + uint8 id; + uint8 len; + uint8 status; +} BWL_POST_PACKED_STRUCT; +typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; +#define TDLS_PU_BUFFER_STATUS_IE_LEN 1 +#define TDLS_PU_BUFFER_STATUS_AC_BK 1 +#define TDLS_PU_BUFFER_STATUS_AC_BE 2 +#define TDLS_PU_BUFFER_STATUS_AC_VI 4 +#define TDLS_PU_BUFFER_STATUS_AC_VO 8 + +/* TDLS Action Field Values */ +#define TDLS_SETUP_REQ 0 +#define TDLS_SETUP_RESP 1 +#define TDLS_SETUP_CONFIRM 2 +#define TDLS_TEARDOWN 3 +#define TDLS_PEER_TRAFFIC_IND 4 +#define TDLS_CHANNEL_SWITCH_REQ 5 +#define TDLS_CHANNEL_SWITCH_RESP 6 +#define TDLS_PEER_PSM_REQ 7 +#define TDLS_PEER_PSM_RESP 8 +#define TDLS_PEER_TRAFFIC_RESP 9 +#define TDLS_DISCOVERY_REQ 10 + +/* 802.11z TDLS Public Action Frame action field */ +#define TDLS_DISCOVERY_RESP 14 + +/* 802.11u GAS action frames */ +#define GAS_REQUEST_ACTION_FRAME 10 +#define GAS_RESPONSE_ACTION_FRAME 11 +#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12 +#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13 + +/* FTM - fine timing measurement public action frames */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_req { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (32) */ + uint8 trigger; /* trigger/continue? 
*/ + /* optional lci, civic loc, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_req dot11_ftm_req_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (33) */ + uint8 dialog; /* dialog token */ + uint8 follow_up; /* follow up dialog token */ + uint8 tod[6]; /* t1 - last depart timestamp */ + uint8 toa[6]; /* t4 - last ack arrival timestamp */ + uint8 tod_err[2]; /* t1 error */ + uint8 toa_err[2]; /* t4 error */ + /* optional lci report, civic loc report, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm dot11_ftm_t; + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7 +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1 +#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\ + ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = _val2 >> 8 & 0xff; \ +} while (0) + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_params { + uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */ + uint8 len; + uint8 info[9]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_params dot11_ftm_params_t; +#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2) + +#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift)) +#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\ + uint8 _ptmp = (_p)->info[_off] & ~(_mask); \ + (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \ +} while (0) + +#define FTM_PARAMS_STATUS_OFFSET 0 +#define FTM_PARAMS_STATUS_MASK 0x03 +#define FTM_PARAMS_STATUS_SHIFT 0 +#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \ + FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT) +#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status) + +#define FTM_PARAMS_VALUE_OFFSET 0 +#define FTM_PARAMS_VALUE_MASK 0x7c +#define FTM_PARAMS_VALUE_SHIFT 2 +#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \ + FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT) +#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value) +#define FTM_PARAMS_MAX_VALUE 32 + +#define FTM_PARAMS_NBURSTEXP_OFFSET 1 +#define FTM_PARAMS_NBURSTEXP_MASK 0x0f +#define FTM_PARAMS_NBURSTEXP_SHIFT 0 +#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \ + FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT) +#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \ + _bexp) + +#define 
FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p)) + +enum { + FTM_PARAMS_BURSTTMO_NOPREF = 15 +}; + +#define FTM_PARAMS_BURSTTMO_OFFSET 1 +#define FTM_PARAMS_BURSTTMO_MASK 0xf0 +#define FTM_PARAMS_BURSTTMO_SHIFT 4 +#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \ + FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT) +/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */ +#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2) + +#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250) +#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \ + (_val) == FTM_PARAMS_BURSTTMO_NOPREF) +#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */ +#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */ + +#define FTM_PARAMS_MINDELTA_OFFSET 2 +#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100) +#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \ + (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3]) +#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \ + (_p)->info[3] = (_partial_tsf) & 0xff; \ + (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL +#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10 +#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16 +#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff + +#define FTM_PARAMS_ASAP_OFFSET 5 +#define FTM_PARAMS_ASAP_MASK 0x4 +#define FTM_PARAMS_ASAP_SHIFT 2 +#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \ + FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT) +#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap) + +#define FTM_PARAMS_FTM1_OFFSET 5 +#define FTM_PARAMS_FTM1_MASK 0x02 +#define FTM_PARAMS_FTM1_SHIFT 1 +#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \ + FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT) +#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1) + +#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5 +#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8 +#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3 +#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \ + FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT) +#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \ + FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms) + +#define FTM_PARAMS_CHAN_INFO_OFFSET 6 +#define FTM_PARAMS_CHAN_INFO_MASK 0xfc +#define FTM_PARAMS_CHAN_INFO_SHIFT 2 +#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \ + FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT) +#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci) + +/* burst period - units of 100ms */ +#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7]) +#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\ + (_p)->info[7] = (_bp) & 0xff; \ + (_p)->info[8] = ((_bp) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_BURST_PERIOD_MS(_p) 
(FTM_PARAMS_BURST_PERIOD(_p) * 100) + +/* FTM status values - last updated from 11mcD4.0 */ +enum { + FTM_PARAMS_STATUS_RESERVED = 0, + FTM_PARAMS_STATUS_SUCCESSFUL = 1, + FTM_PARAMS_STATUS_INCAPABLE = 2, + FTM_PARAMS_STATUS_FAILED = 3, + /* Below are obsolte */ + FTM_PARAMS_STATUS_OVERRIDDEN = 4, + FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5, + FTM_PARAMS_STATUS_ASAP_FAILED = 6, + /* rest are reserved */ +}; + +enum { + FTM_PARAMS_CHAN_INFO_NO_PREF = 0, + FTM_PARAMS_CHAN_INFO_RESERVE1 = 1, + FTM_PARAMS_CHAN_INFO_RESERVE2 = 2, + FTM_PARAMS_CHAN_INFO_RESERVE3 = 3, + FTM_PARAMS_CHAN_INFO_NON_HT_5 = 4, + FTM_PARAMS_CHAN_INFO_RESERVE5 = 5, + FTM_PARAMS_CHAN_INFO_NON_HT_10 = 6, + FTM_PARAMS_CHAN_INFO_RESERVE7 = 7, + FTM_PARAMS_CHAN_INFO_NON_HT_20 = 8, /* excludes 2.4G, and High rate DSSS */ + FTM_PARAMS_CHAN_INFO_HT_MF_20 = 9, + FTM_PARAMS_CHAN_INFO_VHT_20 = 10, + FTM_PARAMS_CHAN_INFO_HT_MF_40 = 11, + FTM_PARAMS_CHAN_INFO_VHT_40 = 12, + FTM_PARAMS_CHAN_INFO_VHT_80 = 13, + FTM_PARAMS_CHAN_INFO_VHT_80_80 = 14, + FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS = 15, + FTM_PARAMS_CHAN_INFO_VHT_160 = 16, + /* Reserved from 17 - 30 */ + FTM_PARAMS_CHAN_INFO_DMG_2160 = 31, + /* Reserved from 32 - 63 */ + FTM_PARAMS_CHAN_INFO_MAX = 63 +}; + +/* 802.11u interworking access network options */ +#define IW_ANT_MASK 0x0f +#define IW_INTERNET_MASK 0x10 +#define IW_ASRA_MASK 0x20 +#define IW_ESR_MASK 0x40 +#define IW_UESA_MASK 0x80 + +/* 802.11u interworking access network type */ +#define IW_ANT_PRIVATE_NETWORK 0 +#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1 +#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2 +#define IW_ANT_FREE_PUBLIC_NETWORK 3 +#define IW_ANT_PERSONAL_DEVICE_NETWORK 4 +#define IW_ANT_EMERGENCY_SERVICES_NETWORK 5 +#define IW_ANT_TEST_NETWORK 14 +#define IW_ANT_WILDCARD_NETWORK 15 + +/* 802.11u advertisement protocol */ +#define ADVP_ANQP_PROTOCOL_ID 0 +#define ADVP_MIH_PROTOCOL_ID 1 + +/* 802.11u advertisement protocol masks */ +#define ADVP_QRL_MASK 0x7f +#define ADVP_PAME_BI_MASK 0x80 + +/* 802.11u advertisement protocol values */ +#define ADVP_QRL_REQUEST 0x00 +#define ADVP_QRL_RESPONSE 0x7f +#define ADVP_PAME_BI_DEPENDENT 0x00 +#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK + +/* 802.11u ANQP information ID */ +#define ANQP_ID_QUERY_LIST 256 +#define ANQP_ID_CAPABILITY_LIST 257 +#define ANQP_ID_VENUE_NAME_INFO 258 +#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259 +#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO 260 +#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261 +#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO 262 +#define ANQP_ID_NAI_REALM_LIST 263 +#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264 +#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265 +#define ANQP_ID_AP_CIVIC_LOCATION 266 +#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267 +#define ANQP_ID_DOMAIN_NAME_LIST 268 +#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269 +#define ANQP_ID_EMERGENCY_NAI 271 +#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797 + +/* 802.11u ANQP OUI */ +#define ANQP_OUI_SUBTYPE 9 + +/* 802.11u venue name */ +#define VENUE_LANGUAGE_CODE_SIZE 3 +#define VENUE_NAME_SIZE 255 + +/* 802.11u venue groups */ +#define VENUE_UNSPECIFIED 0 +#define VENUE_ASSEMBLY 1 +#define VENUE_BUSINESS 2 +#define VENUE_EDUCATIONAL 3 +#define VENUE_FACTORY 4 +#define VENUE_INSTITUTIONAL 5 +#define VENUE_MERCANTILE 6 +#define VENUE_RESIDENTIAL 7 +#define VENUE_STORAGE 8 +#define VENUE_UTILITY 9 +#define VENUE_VEHICULAR 10 +#define VENUE_OUTDOOR 11 + +/* 802.11u network authentication type indicator */ +#define NATI_UNSPECIFIED -1 +#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0 
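/*
 * Illustrative sketch (editor's example, not part of the original header):
 * decoding the 802.11u Interworking "Access Network Options" octet with the
 * IW_* masks defined above. The function name and the uint8 argument
 * convention are assumptions.
 */
static int
iw_ano_is_free_public_with_internet(uint8 ano)
{
	/* low nibble carries the access network type, bit 4 the Internet flag */
	return ((ano & IW_ANT_MASK) == IW_ANT_FREE_PUBLIC_NETWORK) &&
	       ((ano & IW_INTERNET_MASK) != 0);
}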
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1 +#define NATI_HTTP_HTTPS_REDIRECTION 2 +#define NATI_DNS_REDIRECTION 3 + +/* 802.11u IP address type availability - IPv6 */ +#define IPA_IPV6_SHIFT 0 +#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT) +#define IPA_IPV6_NOT_AVAILABLE 0x00 +#define IPA_IPV6_AVAILABLE 0x01 +#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02 + +/* 802.11u IP address type availability - IPv4 */ +#define IPA_IPV4_SHIFT 2 +#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT) +#define IPA_IPV4_NOT_AVAILABLE 0x00 +#define IPA_IPV4_PUBLIC 0x01 +#define IPA_IPV4_PORT_RESTRICT 0x02 +#define IPA_IPV4_SINGLE_NAT 0x03 +#define IPA_IPV4_DOUBLE_NAT 0x04 +#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05 +#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06 +#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07 + +/* 802.11u NAI realm encoding */ +#define REALM_ENCODING_RFC4282 0 +#define REALM_ENCODING_UTF8 1 + +/* 802.11u IANA EAP method type numbers */ +#define REALM_EAP_TLS 13 +#define REALM_EAP_LEAP 17 +#define REALM_EAP_SIM 18 +#define REALM_EAP_TTLS 21 +#define REALM_EAP_AKA 23 +#define REALM_EAP_PEAP 25 +#define REALM_EAP_FAST 43 +#define REALM_EAP_PSK 47 +#define REALM_EAP_AKAP 50 +#define REALM_EAP_EXPANDED 254 + +/* 802.11u authentication ID */ +#define REALM_EXPANDED_EAP 1 +#define REALM_NON_EAP_INNER_AUTHENTICATION 2 +#define REALM_INNER_AUTHENTICATION_EAP 3 +#define REALM_EXPANDED_INNER_EAP 4 +#define REALM_CREDENTIAL 5 +#define REALM_TUNNELED_EAP_CREDENTIAL 6 +#define REALM_VENDOR_SPECIFIC_EAP 221 + +/* 802.11u non-EAP inner authentication type */ +#define REALM_RESERVED_AUTH 0 +#define REALM_PAP 1 +#define REALM_CHAP 2 +#define REALM_MSCHAP 3 +#define REALM_MSCHAPV2 4 + +/* 802.11u credential type */ +#define REALM_SIM 1 +#define REALM_USIM 2 +#define REALM_NFC 3 +#define REALM_HARDWARE_TOKEN 4 +#define REALM_SOFTOKEN 5 +#define REALM_CERTIFICATE 6 +#define REALM_USERNAME_PASSWORD 7 +#define REALM_SERVER_SIDE 8 +#define REALM_RESERVED_CRED 9 +#define REALM_VENDOR_SPECIFIC_CRED 10 + +/* 802.11u 3GPP PLMN */ +#define G3PP_GUD_VERSION 0 +#define G3PP_PLMN_LIST_IE 0 + +/** hotspot2.0 indication element (vendor specific) */ +BWL_PRE_PACKED_STRUCT struct hs20_ie { + uint8 oui[3]; + uint8 type; + uint8 config; +} BWL_POST_PACKED_STRUCT; +typedef struct hs20_ie hs20_ie_t; +#define HS20_IE_LEN 5 /* HS20 IE length */ + +/** IEEE 802.11 Annex E */ +typedef enum { + DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */ + DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */ + DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */ + DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */ + DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */ + DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */ + DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */ + DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */ + DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */ + DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */ + DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */ + DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */ + DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */ + DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */ + DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */ + DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */ +} dot11_op_class_t; + +/* QoS map */ +#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */ + +#define BCM_AIBSS_IE_TYPE 56 + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* _802_11_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h new file mode 100644 index 000000000000..f1da1c174491 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h @@ -0,0 +1,48 @@ +/* + * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11_bta.h 518342 2014-12-01 23:21:41Z $ +*/ + +#ifndef _802_11_BTA_H_ +#define _802_11_BTA_H_ + +#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58" + +/* BT-AMP 802.11 PAL Protocols */ +#define BTA_PROT_L2CAP 1 +#define BTA_PROT_ACTIVITY_REPORT 2 +#define BTA_PROT_SECURITY 3 +#define BTA_PROT_LINK_SUPERVISION_REQUEST 4 +#define BTA_PROT_LINK_SUPERVISION_REPLY 5 + +/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */ +#define BTA_TYPE_ID_MAC_ADDRESS 1 +#define BTA_TYPE_ID_PREFERRED_CHANNELS 2 +#define BTA_TYPE_ID_CONNECTED_CHANNELS 3 +#define BTA_TYPE_ID_CAPABILITIES 4 +#define BTA_TYPE_ID_VERSION 5 +#endif /* _802_11_bta_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h new file mode 100644 index 000000000000..ccfa9656b83b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h @@ -0,0 +1,135 @@ +/* + * 802.11e protocol header file + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11e.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _802_11e_H_ +#define _802_11e_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* WME Traffic Specification (TSPEC) element */ +#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */ +#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */ + +#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */ +#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */ +#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */ +#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */ + +BWL_PRE_PACKED_STRUCT struct tsinfo { + uint8 octets[3]; +} BWL_POST_PACKED_STRUCT; + +typedef struct tsinfo tsinfo_t; + +/* 802.11e TSPEC IE */ +typedef BWL_PRE_PACKED_STRUCT struct tspec { + uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */ + uint8 type; /* WME_TYPE */ + uint8 subtype; /* WME_SUBTYPE_TSPEC */ + uint8 version; /* WME_VERSION */ + tsinfo_t tsinfo; /* TS Info bit field */ + uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /* Minimum Service Interval (us) */ + uint32 max_srv_interval; /* Maximum Service Interval (us) */ + uint32 inactivity_interval; /* Inactivity Interval (us) */ + uint32 suspension_interval; /* Suspension Interval (us) */ + uint32 srv_start_time; /* Service Start Time (us) */ + uint32 min_data_rate; /* Minimum Data Rate (bps) */ + uint32 mean_data_rate; /* Mean Data Rate (bps) */ + uint32 peak_data_rate; /* Peak Data Rate (bps) */ + uint32 max_burst_size; /* Maximum Burst Size (bytes) */ + uint32 delay_bound; /* Delay Bound (us) */ + uint32 min_phy_rate; /* Minimum PHY Rate (bps) */ + uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */ + uint16 medium_time; /* Medium Time (32 us/s periods) */ +} BWL_POST_PACKED_STRUCT tspec_t; + +#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2-bytes of header */ + +/* ts_info */ +/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */ +#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */ +#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */ +#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */ +#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */ +#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */ +#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */ +#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */ +#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */ +#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */ +#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */ +#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */ +#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */ +/* TS info. 
user priority mask */ +#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT) + +/* Macro to get/set bit(s) field in TSINFO */ +#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT) +#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \ + TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT) +#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT) +#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \ + TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT) + +#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \ + ((id) << TS_INFO_TID_SHIFT)) +#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \ + ((prio) << TS_INFO_USER_PRIO_SHIFT)) + +/* 802.11e QBSS Load IE */ +#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */ +#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */ + +#define CAC_ADDTS_RESP_TIMEOUT 1000 /* default ADDTS response timeout in ms */ + /* DEFVAL dot11ADDTSResponseTimeout = 1s */ + +/* 802.11e ADDTS status code */ +#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */ +#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */ +#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */ +#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */ + +/* 802.11e DELTS status code */ +#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */ +#define DOT11E_STATUS_END_TS 37 /* END TS */ +#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */ +#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */ + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _802_11e_CAC_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h new file mode 100644 index 000000000000..9610b550467a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h @@ -0,0 +1,53 @@ +/* + * Fundamental types and constants relating to 802.1D + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: 802.1d.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _802_1_D_ +#define _802_1_D_ + +/* 802.1D priority defines */ +#define PRIO_8021D_NONE 2 /* None = - */ +#define PRIO_8021D_BK 1 /* BK - Background */ +#define PRIO_8021D_BE 0 /* BE - Best-effort */ +#define PRIO_8021D_EE 3 /* EE - Excellent-effort */ +#define PRIO_8021D_CL 4 /* CL - Controlled Load */ +#define PRIO_8021D_VI 5 /* Vi - Video */ +#define PRIO_8021D_VO 6 /* Vo - Voice */ +#define PRIO_8021D_NC 7 /* NC - Network Control */ +#define MAXPRIO 7 /* 0-7 */ +#define NUMPRIO (MAXPRIO + 1) + +#define ALLPRIO -1 /* All prioirty */ + +/* Converts prio to precedence since the numerical value of + * PRIO_8021D_BE and PRIO_8021D_NONE are swapped. + */ +#define PRIO2PREC(prio) \ + (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio)) + +#endif /* _802_1_D__ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.3.h b/drivers/net/wireless/bcmdhd/include/proto/802.3.h new file mode 100644 index 000000000000..9f108c888a2e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.3.h @@ -0,0 +1,55 @@ +/* + * Fundamental constants relating to 802.3 + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.3.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _802_3_h_ +#define _802_3_h_ + +/* This marks the start of a packed structure section. */ +#include + +#define SNAP_HDR_LEN 6 /* 802.3 SNAP header length */ +#define DOT3_OUI_LEN 3 /* 802.3 oui length */ + +BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */ + uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */ + uint16 length; /* frame length incl header */ + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[DOT3_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 type; /* ethertype */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* #ifndef _802_3_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h new file mode 100644 index 000000000000..5e51979ce393 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h @@ -0,0 +1,80 @@ +/* + * Fundamental constants relating to DHCP Protocol + * + * Copyright (C) 2016, Broadcom Corporation + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; + * the contents of this file may not be disclosed to third parties, copied + * or duplicated in any form, in whole or in part, without the prior + * written permission of Broadcom Corporation. + * + * + * <> + * + * $Id: bcmdhcp.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmdhcp_h_ +#define _bcmdhcp_h_ + +/* DHCP params */ +#define DHCP_TYPE_OFFSET 0 /* DHCP type (request|reply) offset */ +#define DHCP_TID_OFFSET 4 /* DHCP transition id offset */ +#define DHCP_FLAGS_OFFSET 10 /* DHCP flags offset */ +#define DHCP_CIADDR_OFFSET 12 /* DHCP client IP address offset */ +#define DHCP_YIADDR_OFFSET 16 /* DHCP your IP address offset */ +#define DHCP_GIADDR_OFFSET 24 /* DHCP relay agent IP address offset */ +#define DHCP_CHADDR_OFFSET 28 /* DHCP client h/w address offset */ +#define DHCP_OPT_OFFSET 236 /* DHCP options offset */ + +#define DHCP_OPT_MSGTYPE 53 /* DHCP message type */ +#define DHCP_OPT_MSGTYPE_REQ 3 +#define DHCP_OPT_MSGTYPE_ACK 5 /* DHCP message type - ACK */ + +#define DHCP_OPT_CODE_OFFSET 0 /* Option identifier */ +#define DHCP_OPT_LEN_OFFSET 1 /* Option data length */ +#define DHCP_OPT_DATA_OFFSET 2 /* Option data */ + +#define DHCP_OPT_CODE_CLIENTID 61 /* Option identifier */ + +#define DHCP_TYPE_REQUEST 1 /* DHCP request (discover|request) */ +#define DHCP_TYPE_REPLY 2 /* DHCP reply (offset|ack) */ + +#define DHCP_PORT_SERVER 67 /* DHCP server UDP port */ +#define DHCP_PORT_CLIENT 68 /* DHCP client UDP port */ + +#define DHCP_FLAG_BCAST 0x8000 /* DHCP broadcast flag */ + +#define DHCP_FLAGS_LEN 2 /* DHCP flags field length */ + +#define DHCP6_TYPE_SOLICIT 1 /* DHCP6 solicit */ +#define DHCP6_TYPE_ADVERTISE 2 /* DHCP6 advertise */ +#define DHCP6_TYPE_REQUEST 3 /* DHCP6 request */ +#define DHCP6_TYPE_CONFIRM 4 /* DHCP6 confirm */ +#define DHCP6_TYPE_RENEW 5 /* DHCP6 renew */ +#define DHCP6_TYPE_REBIND 6 /* DHCP6 rebind */ +#define DHCP6_TYPE_REPLY 7 /* DHCP6 reply */ +#define DHCP6_TYPE_RELEASE 8 /* DHCP6 release */ +#define DHCP6_TYPE_DECLINE 9 /* DHCP6 decline */ +#define DHCP6_TYPE_RECONFIGURE 10 /* DHCP6 reconfigure */ +#define DHCP6_TYPE_INFOREQ 11 /* DHCP6 information request */ +#define DHCP6_TYPE_RELAYFWD 12 /* DHCP6 relay forward */ +#define DHCP6_TYPE_RELAYREPLY 13 /* DHCP6 relay reply */ + +#define DHCP6_TYPE_OFFSET 0 /* DHCP6 type offset */ + +#define DHCP6_MSG_OPT_OFFSET 4 /* Offset of options in client server messages */ +#define DHCP6_RELAY_OPT_OFFSET 34 /* Offset of options in relay messages */ + +#define DHCP6_OPT_CODE_OFFSET 0 /* Option identifier */ +#define DHCP6_OPT_LEN_OFFSET 2 /* Option data length */ +#define DHCP6_OPT_DATA_OFFSET 4 /* Option data */ + +#define DHCP6_OPT_CODE_CLIENTID 1 /* DHCP6 CLIENTID option */ +#define DHCP6_OPT_CODE_SERVERID 2 /* DHCP6 SERVERID option */ + +#define DHCP6_PORT_SERVER 547 /* DHCP6 server UDP port */ +#define DHCP6_PORT_CLIENT 546 /* DHCP6 client UDP port */ + +#endif /* #ifndef _bcmdhcp_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h 
new file mode 100644 index 000000000000..7ad453dbad0d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h @@ -0,0 +1,115 @@ +/* + * Broadcom Ethernettype protocol definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmeth.h 518342 2014-12-01 23:21:41Z $ + */ + +/* + * Broadcom Ethernet protocol defines + */ + +#ifndef _BCMETH_H_ +#define _BCMETH_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + +/* ETHER_TYPE_BRCM is defined in ethernet.h */ + +/* + * Following the 2byte BRCM ether_type is a 16bit BRCM subtype field + * in one of two formats: (only subtypes 32768-65535 are in use now) + * + * subtypes 0-32767: + * 8 bit subtype (0-127) + * 8 bit length in bytes (0-255) + * + * subtypes 32768-65535: + * 16 bit big-endian subtype + * 16 bit big-endian length in bytes (0-65535) + * + * length is the number of additional bytes beyond the 4 or 6 byte header + * + * Reserved values: + * 0 reserved + * 5-15 reserved for iLine protocol assignments + * 17-126 reserved, assignable + * 127 reserved + * 32768 reserved + * 32769-65534 reserved, assignable + * 65535 reserved + */ + +/* + * While adding the subtypes and their specific processing code make sure + * bcmeth_bcm_hdr_t is the first data structure in the user specific data structure definition + */ + +#define BCMILCP_SUBTYPE_RATE 1 +#define BCMILCP_SUBTYPE_LINK 2 +#define BCMILCP_SUBTYPE_CSA 3 +#define BCMILCP_SUBTYPE_LARQ 4 +#define BCMILCP_SUBTYPE_VENDOR 5 +#define BCMILCP_SUBTYPE_FLH 17 + +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 +#define BCMILCP_SUBTYPE_CERT 32770 +#define BCMILCP_SUBTYPE_SES 32771 + + +#define BCMILCP_BCM_SUBTYPE_RESERVED 0 +#define BCMILCP_BCM_SUBTYPE_EVENT 1 +#define BCMILCP_BCM_SUBTYPE_SES 2 +/* + * The EAPOL type is not used anymore. 
Instead EAPOL messages are now embedded + * within BCMILCP_BCM_SUBTYPE_EVENT type messages + */ +/* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */ +#define BCMILCP_BCM_SUBTYPE_DPT 4 + +#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 +#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 + +/* These fields are stored in network order */ +typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr +{ + uint16 subtype; /* Vendor specific..32769 */ + uint16 length; + uint8 version; /* Version is 0 */ + uint8 oui[3]; /* Broadcom OUI */ + /* user specific Data */ + uint16 usr_subtype; +} BWL_POST_PACKED_STRUCT bcmeth_hdr_t; + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _BCMETH_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h new file mode 100644 index 000000000000..6c30d57bfbbc --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h @@ -0,0 +1,791 @@ +/* + * Broadcom Event protocol definitions + * + * Dependencies: proto/bcmeth.h + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmevent.h 555154 2015-05-07 20:46:07Z $ + * + */ + +/* + * Broadcom Ethernet Events protocol defines + * + */ + +#ifndef _BCMEVENT_H_ +#define _BCMEVENT_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif +/* #include -- TODO: req., excluded to overwhelming coupling (break up ethernet.h) */ +#include + +/* This marks the start of a packed structure section. 
*/ +#include + +#define BCM_EVENT_MSG_VERSION 2 /* wl_event_msg_t struct version */ +#define BCM_MSG_IFNAME_MAX 16 /* max length of interface name */ + +/* flags */ +#define WLC_EVENT_MSG_LINK 0x01 /* link is up */ +#define WLC_EVENT_MSG_FLUSHTXQ 0x02 /* flush tx queue on MIC error */ +#define WLC_EVENT_MSG_GROUP 0x04 /* group MIC error */ +#define WLC_EVENT_MSG_UNKBSS 0x08 /* unknown source bsscfg */ +#define WLC_EVENT_MSG_UNKIF 0x10 /* unknown source OS i/f */ + +/* these fields are stored in network order */ + +/* version 1 */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; /* see flags below */ + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 auth_type; /* WLC_E_AUTH */ + uint32 datalen; /* data buf */ + struct ether_addr addr; /* Station address (if applicable) */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ +} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t; + +/* the current version */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; /* see flags below */ + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 auth_type; /* WLC_E_AUTH */ + uint32 datalen; /* data buf */ + struct ether_addr addr; /* Station address (if applicable) */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ + uint8 ifidx; /* destination OS i/f index */ + uint8 bsscfgidx; /* source bsscfg index */ +} BWL_POST_PACKED_STRUCT wl_event_msg_t; + +/* used by driver msgs */ +typedef BWL_PRE_PACKED_STRUCT struct bcm_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + wl_event_msg_t event; + /* data portion follows */ +} BWL_POST_PACKED_STRUCT bcm_event_t; + +#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) + +/* Event messages */ +#define WLC_E_SET_SSID 0 /* indicates status of set SSID */ +#define WLC_E_JOIN 1 /* differentiates join IBSS from found (WLC_E_START) IBSS */ +#define WLC_E_START 2 /* STA founded an IBSS or AP started a BSS */ +#define WLC_E_AUTH 3 /* 802.11 AUTH request */ +#define WLC_E_AUTH_IND 4 /* 802.11 AUTH indication */ +#define WLC_E_DEAUTH 5 /* 802.11 DEAUTH request */ +#define WLC_E_DEAUTH_IND 6 /* 802.11 DEAUTH indication */ +#define WLC_E_ASSOC 7 /* 802.11 ASSOC request */ +#define WLC_E_ASSOC_IND 8 /* 802.11 ASSOC indication */ +#define WLC_E_REASSOC 9 /* 802.11 REASSOC request */ +#define WLC_E_REASSOC_IND 10 /* 802.11 REASSOC indication */ +#define WLC_E_DISASSOC 11 /* 802.11 DISASSOC request */ +#define WLC_E_DISASSOC_IND 12 /* 802.11 DISASSOC indication */ +#define WLC_E_QUIET_START 13 /* 802.11h Quiet period started */ +#define WLC_E_QUIET_END 14 /* 802.11h Quiet period ended */ +#define WLC_E_BEACON_RX 15 /* BEACONS received/lost indication */ +#define WLC_E_LINK 16 /* generic link indication */ +#define WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */ +#define WLC_E_NDIS_LINK 18 /* NDIS style link indication */ +#define WLC_E_ROAM 19 /* roam attempt occurred: indicate status & reason */ +#define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */ +#define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */ +#define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */ +#define WLC_E_PRUNE 23 /* AP was pruned from join list for reason */ +#define WLC_E_AUTOAUTH 24 /* report AutoAuth table entry match for 
join attempt */ +#define WLC_E_EAPOL_MSG 25 /* Event encapsulating an EAPOL message */ +#define WLC_E_SCAN_COMPLETE 26 /* Scan results are ready or scan was aborted */ +#define WLC_E_ADDTS_IND 27 /* indicate to host addts fail/success */ +#define WLC_E_DELTS_IND 28 /* indicate to host delts fail/success */ +#define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */ +#define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */ +#define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */ +#define WLC_E_ROAM_PREP 32 /* before attempting to roam */ +#define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */ +#define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */ +#define WLC_E_RESET_COMPLETE 35 +#define WLC_E_JOIN_START 36 +#define WLC_E_ROAM_START 37 +#define WLC_E_ASSOC_START 38 +#define WLC_E_IBSS_ASSOC 39 +#define WLC_E_RADIO 40 +#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */ +#define WLC_E_PROBREQ_MSG 44 /* probe request received */ +#define WLC_E_SCAN_CONFIRM_IND 45 +#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */ +#define WLC_E_COUNTRY_CODE_CHANGED 47 +#define WLC_E_EXCEEDED_MEDIUM_TIME 48 /* WMMAC excedded medium time */ +#define WLC_E_ICV_ERROR 49 /* WEP ICV error occurred */ +#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */ +#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */ +#define WLC_E_TRACE 52 +#define WLC_E_IF 54 /* I/F change (for dongle host notification) */ +#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */ +#define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */ +#define WLC_E_PFN_BEST_BATCHING 57 /* PFN best network batching event */ +#define WLC_E_EXTLOG_MSG 58 +#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */ +#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */ +#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */ +#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */ +#define WLC_E_CHANNEL_ADOPTED 63 +#define WLC_E_AP_STARTED 64 /* AP started */ +#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */ +#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */ +#define WLC_E_WAI_STA_EVENT 67 /* WAI stations event */ +#define WLC_E_WAI_MSG 68 /* event encapsulating an WAI message */ +#define WLC_E_ESCAN_RESULT 69 /* escan result event */ +#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 /* action frame off channel complete */ +#define WLC_E_PROBRESP_MSG 71 /* probe response received */ +#define WLC_E_P2P_PROBREQ_MSG 72 /* P2P Probe request received */ +#define WLC_E_DCS_REQUEST 73 +#define WLC_E_FIFO_CREDIT_MAP 74 /* credits for D11 FIFOs. 
[AC0,AC1,AC2,AC3,BC_MC,ATIM] */ +#define WLC_E_ACTION_FRAME_RX 75 /* Received action frame event WITH + * wl_event_rx_frame_data_t header + */ +#define WLC_E_WAKE_EVENT 76 /* Wake Event timer fired, used for wake WLAN test mode */ +#define WLC_E_RM_COMPLETE 77 /* Radio measurement complete */ +#define WLC_E_HTSFSYNC 78 /* Synchronize TSF with the host */ +#define WLC_E_OVERLAY_REQ 79 /* request an overlay IOCTL/iovar from the host */ +#define WLC_E_CSA_COMPLETE_IND 80 /* 802.11 CHANNEL SWITCH ACTION completed */ +#define WLC_E_EXCESS_PM_WAKE_EVENT 81 /* excess PM Wake Event to inform host */ +#define WLC_E_PFN_SCAN_NONE 82 /* no PFN networks around */ +/* PFN BSSID network found event, conflict/share with WLC_E_PFN_SCAN_NONE */ +#define WLC_E_PFN_BSSID_NET_FOUND 82 +#define WLC_E_PFN_SCAN_ALLGONE 83 /* last found PFN network gets lost */ +/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */ +#define WLC_E_PFN_BSSID_NET_LOST 83 +#define WLC_E_GTK_PLUMBED 84 +#define WLC_E_ASSOC_IND_NDIS 85 /* 802.11 ASSOC indication for NDIS only */ +#define WLC_E_REASSOC_IND_NDIS 86 /* 802.11 REASSOC indication for NDIS only */ +#define WLC_E_ASSOC_REQ_IE 87 +#define WLC_E_ASSOC_RESP_IE 88 +#define WLC_E_ASSOC_RECREATED 89 /* association recreated on resume */ +#define WLC_E_ACTION_FRAME_RX_NDIS 90 /* rx action frame event for NDIS only */ +#define WLC_E_AUTH_REQ 91 /* authentication request received */ +#define WLC_E_TDLS_PEER_EVENT 92 /* discovered peer, connected/disconnected peer */ +#define WLC_E_SPEEDY_RECREATE_FAIL 93 /* fast assoc recreation failed */ +#define WLC_E_NATIVE 94 /* port-specific event and payload (e.g. NDIS) */ +#define WLC_E_PKTDELAY_IND 95 /* event for tx pkt delay suddently jump */ +#define WLC_E_PSTA_PRIMARY_INTF_IND 99 /* psta primary interface indication */ +#define WLC_E_NAN 100 /* NAN event */ +#define WLC_E_BEACON_FRAME_RX 101 +#define WLC_E_SERVICE_FOUND 102 /* desired service found */ +#define WLC_E_GAS_FRAGMENT_RX 103 /* GAS fragment received */ +#define WLC_E_GAS_COMPLETE 104 /* GAS sessions all complete */ +#define WLC_E_P2PO_ADD_DEVICE 105 /* New device found by p2p offload */ +#define WLC_E_P2PO_DEL_DEVICE 106 /* device has been removed by p2p offload */ +#define WLC_E_WNM_STA_SLEEP 107 /* WNM event to notify STA enter sleep mode */ +#define WLC_E_TXFAIL_THRESH 108 /* Indication of MAC tx failures (exhaustion of + * 802.11 retries) exceeding threshold(s) + */ +#define WLC_E_PROXD 109 /* Proximity Detection event */ +#define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */ +#define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, re using event 110 */ +#define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */ +#define WLC_E_MSCH 120 /* Multiple channel scheduler event */ +#define WLC_E_CSA_START_IND 121 +#define WLC_E_CSA_DONE_IND 122 +#define WLC_E_CSA_FAILURE_IND 123 +#define WLC_E_CCA_CHAN_QUAL 124 /* CCA based channel quality report */ +#define WLC_E_BSSID 125 /* to report change in BSSID while roaming */ +#define WLC_E_TX_STAT_ERROR 126 /* tx error indication */ +#define WLC_E_BCMC_CREDIT_SUPPORT 127 /* credit check for BCMC supported */ +#define WLC_E_BT_WIFI_HANDOVER_REQ 130 /* Handover Request Initiated */ +#define WLC_E_SPW_TXINHIBIT 131 /* Southpaw TxInhibit notification */ +#define WLC_E_FBT_AUTH_REQ_IND 132 /* FBT Authentication Request Indication */ +#define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */ +#define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */ +#define WLC_E_PFN_SWC 135 
/* Significant change in rssi of bssids being tracked */
+#define WLC_E_AUTHORIZED 136 /* a STA has been authorized for traffic */
+#define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */
+#define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */
+#define WLC_E_RMC_EVENT 139 /* RMC Event */
+#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */
+#define WLC_E_RRM 141 /* RRM Event */
+#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */
+#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */
+#define WLC_E_LAST 144 /* highest val + 1 for range checking */
+#if (WLC_E_LAST > 144)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 144."
+#endif /* WLC_E_LAST */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern void wl_event_to_network_order(wl_event_msg_t * evt);
+
+/* conversion between host and network order for events */
+void wl_event_to_host_order(wl_event_msg_t * evt);
+void wl_event_to_network_order(wl_event_msg_t * evt);
+
+
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */
+#define WLC_E_STATUS_FAIL 1 /* operation failed */
+#define WLC_E_STATUS_TIMEOUT 2 /* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS 3 /* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT 4 /* operation was aborted */
+#define WLC_E_STATUS_NO_ACK 5 /* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED 6 /* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT 7 /* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL 8 /* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN 9 /* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC 10 /* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */
+#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */
+#define WLC_E_STATUS_ERROR 16 /* request failed due to error */
+#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables.
*/ + +/* roam reason codes */ +#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ +#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ +#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */ +#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */ +#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */ + +/* Roam codes used primarily by CCX */ +#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */ +#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */ +#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */ +#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */ +#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */ +#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */ +/* retained for precommit auto-merging errors; remove once all branches are synced */ +#define WLC_E_REASON_REQUESTED_ROAM 11 +#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */ + +/* prune reason codes */ +#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ +#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */ +#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny list */ +#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */ +#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */ +#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */ +#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */ +#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */ +#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */ +#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */ +#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */ +#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */ +#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */ +#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */ +#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */ +#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */ + +/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ +#define WLC_E_SUP_OTHER 0 /* Other reason */ +#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */ +#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */ +#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */ +#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */ +#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */ +#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */ +#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA IE mismatch in key message 3 */ +#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */ +#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */ +#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */ +#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */ +#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */ +#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */ +#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */ +#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */ + +/* Event data for events that include frames received over the air */ +/* 
WLC_E_PROBRESP_MSG + * WLC_E_P2P_PROBREQ_MSG + * WLC_E_ACTION_FRAME_RX + */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data { + uint16 version; + uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ + int32 rssi; + uint32 mactime; + uint32 rate; +} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t; + +#define BCM_RX_FRAME_DATA_VERSION 1 + +/* WLC_E_IF event data */ +typedef struct wl_event_data_if { + uint8 ifidx; /* RTE virtual device index (for dongle) */ + uint8 opcode; /* see I/F opcode */ + uint8 reserved; /* bit mask (WLC_E_IF_FLAGS_XXX ) */ + uint8 bssidx; /* bsscfg index */ + uint8 role; /* see I/F role */ +} wl_event_data_if_t; + +/* opcode in WLC_E_IF event */ +#define WLC_E_IF_ADD 1 /* bsscfg add */ +#define WLC_E_IF_DEL 2 /* bsscfg delete */ +#define WLC_E_IF_CHANGE 3 /* bsscfg role change */ + +/* I/F role code in WLC_E_IF event */ +#define WLC_E_IF_ROLE_STA 0 /* Infra STA */ +#define WLC_E_IF_ROLE_AP 1 /* Access Point */ +#define WLC_E_IF_ROLE_WDS 2 /* WDS link */ +#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ +#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ +#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ + +/* WLC_E_RSSI event data */ +typedef struct wl_event_data_rssi { + int32 rssi; + int32 snr; + int32 noise; +} wl_event_data_rssi_t; + +/* WLC_E_IF flag */ +#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */ + +/* Reason codes for LINK */ +#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ +#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ +#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ +#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ + + +/* WLC_E_NDIS_LINK event data */ +typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { + struct ether_addr peer_mac; /* 6 bytes */ + uint16 chanspec; /* 2 bytes */ + uint32 link_speed; /* current datarate in units of 500 Kbit/s */ + uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */ + int32 rssi; /* average rssi */ +} BWL_POST_PACKED_STRUCT ndis_link_parms_t; + +/* reason codes for WLC_E_OVERLAY_REQ event */ +#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */ +#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */ + +/* reason codes for WLC_E_TDLS_PEER_EVENT event */ +#define WLC_E_TDLS_PEER_DISCOVERED 0 /* peer is ready to establish TDLS */ +#define WLC_E_TDLS_PEER_CONNECTED 1 +#define WLC_E_TDLS_PEER_DISCONNECTED 2 + +/* reason codes for WLC_E_RMC_EVENT event */ +#define WLC_E_REASON_RMC_NONE 0 +#define WLC_E_REASON_RMC_AR_LOST 1 +#define WLC_E_REASON_RMC_AR_NO_ACK 2 + +#ifdef WLTDLS +/* TDLS Action Category code */ +#define TDLS_AF_CATEGORY 12 +/* Wi-Fi Display (WFD) Vendor Specific Category */ +/* used for WFD Tunneled Probe Request and Response */ +#define TDLS_VENDOR_SPECIFIC 127 +/* TDLS Action Field Values */ +#define TDLS_ACTION_SETUP_REQ 0 +#define TDLS_ACTION_SETUP_RESP 1 +#define TDLS_ACTION_SETUP_CONFIRM 2 +#define TDLS_ACTION_TEARDOWN 3 +#define WLAN_TDLS_SET_PROBE_WFD_IE 11 +#define WLAN_TDLS_SET_SETUP_WFD_IE 12 +#define WLAN_TDLS_SET_WFD_ENABLED 13 +#define WLAN_TDLS_SET_WFD_DISABLED 14 +#endif + + +/* GAS event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas { + uint16 channel; /* channel of GAS protocol */ + uint8 dialog_token; /* GAS dialog token */ + uint8 fragment_id; /* fragment id */ + uint16 status_code; /* status code on GAS completion */ + uint16 data_len; /* length of data to follow */ + uint8 
data[1]; /* variable length specified by data_len */ +} BWL_POST_PACKED_STRUCT wl_event_gas_t; + +/* service discovery TLV */ +typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv { + uint16 length; /* length of response_data */ + uint8 protocol; /* service protocol type */ + uint8 transaction_id; /* service transaction id */ + uint8 status_code; /* status code */ + uint8 data[1]; /* response data */ +} BWL_POST_PACKED_STRUCT wl_sd_tlv_t; + +/* service discovery event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd { + uint16 channel; /* channel */ + uint8 count; /* number of tlvs */ + wl_sd_tlv_t tlv[1]; /* service discovery TLV */ +} BWL_POST_PACKED_STRUCT wl_event_sd_t; + +/* Note: proxd has a new API (ver 3.0) deprecates the following */ + +/* Reason codes for WLC_E_PROXD */ +#define WLC_E_PROXD_FOUND 1 /* Found a proximity device */ +#define WLC_E_PROXD_GONE 2 /* Lost a proximity device */ +#define WLC_E_PROXD_START 3 /* used by: target */ +#define WLC_E_PROXD_STOP 4 /* used by: target */ +#define WLC_E_PROXD_COMPLETED 5 /* used by: initiator completed */ +#define WLC_E_PROXD_ERROR 6 /* used by both initiator and target */ +#define WLC_E_PROXD_COLLECT_START 7 /* used by: target & initiator */ +#define WLC_E_PROXD_COLLECT_STOP 8 /* used by: target */ +#define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */ +#define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */ +#define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */ +#define WLC_E_PROXD_TS_RESULTS 12 /* used by: initiator completed */ + +/* proxd_event data */ +typedef struct ftm_sample { + uint32 value; /* RTT in ns */ + int8 rssi; /* RSSI */ +} ftm_sample_t; + +typedef struct ts_sample { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} ts_sample_t; + +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint8 OFDM_frame_type; /* legacy or VHT */ + uint8 bandwidth; /* Bandwidth is 20, 40,80, MHZ */ + struct ether_addr peer_mac; /* (e.g for tgt:initiator's */ + uint32 distance; /* dst to tgt, units meter */ + uint32 meanrtt; /* mean delta */ + uint32 modertt; /* Mode delta */ + uint32 medianrtt; /* median RTT */ + uint32 sdrtt; /* Standard deviation of RTT */ + int32 gdcalcresult; /* Software or Hardware Kind of redundant, but if */ + /* frame type is VHT, then we should do it by hardware */ + int16 avg_rssi; /* avg rssi accroos the ftm frames */ + int16 validfrmcnt; /* Firmware's valid frame counts */ + int32 peer_router_info; /* Peer router information if available in TLV, */ + /* We will add this field later */ + int32 var1; /* average of group delay */ + int32 var2; /* average of threshold crossing */ + int32 var3; /* difference between group delay and threshold crossing */ + /* raw Fine Time Measurements (ftm) data */ + uint16 ftm_unit; /* ftm cnt resolution in picoseconds , 6250ps - default */ + uint16 ftm_cnt; /* num of rtd measurments/length in the ftm buffer */ + ftm_sample_t ftm_buff[1]; /* 1 ... 
ftm_cnt */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint16 ts_cnt; /* number of timestamp measurements */ + ts_sample_t ts_buff[1]; /* Timestamps */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t; + + +/* Video Traffic Interference Monitor Event */ +#define INTFER_EVENT_VERSION 1 +#define INTFER_STREAM_TYPE_NONTCP 1 +#define INTFER_STREAM_TYPE_TCP 2 +#define WLINTFER_STATS_NSMPLS 4 +typedef struct wl_intfer_event { + uint16 version; /* version */ + uint16 status; /* status */ + uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */ +} wl_intfer_event_t; + +/* WLC_E_PSTA_PRIMARY_INTF_IND event data */ +typedef struct wl_psta_primary_intf_event { + struct ether_addr prim_ea; /* primary intf ether addr */ +} wl_psta_primary_intf_event_t; + +/* WLC_E_DPSTA_INTF_IND event data */ +typedef enum { + WL_INTF_PSTA = 1, + WL_INTF_DWDS = 2 +} wl_dpsta_intf_type; + +typedef struct wl_dpsta_intf_event { + wl_dpsta_intf_type intf_type; /* dwds/psta intf register */ +} wl_dpsta_intf_event_t; + +/* ********** NAN protocol events/subevents ********** */ +#define NAN_EVENT_BUFFER_SIZE 512 /* max size */ +/* nan application events to the host driver */ +typedef enum nan_app_events { + WL_NAN_EVENT_START = 1, /* NAN cluster started */ + WL_NAN_EVENT_JOIN = 2, /* Joined to a NAN cluster */ + WL_NAN_EVENT_ROLE = 3, /* Role or State changed */ + WL_NAN_EVENT_SCAN_COMPLETE = 4, + WL_NAN_EVENT_DISCOVERY_RESULT = 5, + WL_NAN_EVENT_REPLIED = 6, + WL_NAN_EVENT_TERMINATED = 7, /* the instance ID will be present in the ev data */ + WL_NAN_EVENT_RECEIVE = 8, + WL_NAN_EVENT_STATUS_CHG = 9, /* generated on any change in nan_mac status */ + WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ + WL_NAN_EVENT_STOP = 11, /* NAN stopped */ + WL_NAN_EVENT_P2P = 12, /* NAN P2P EVENT */ + WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Event for begin of P2P further availability window */ + WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, + WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, + WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, + WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ + WL_NAN_EVENT_INVALID /* delimiter for max value */ +} nan_app_events_e; + +#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0) +/* ******************* end of NAN section *************** */ + +#define MSCH_EVENTS_BUFFER_SIZE 2048 + +/* Reason codes for WLC_E_MSCH */ +#define WLC_E_MSCH_START 0 /* start event check */ +#define WLC_E_MSCH_EXIT 1 /* exit event check */ +#define WLC_E_MSCH_REQ 2 /* request event */ +#define WLC_E_MSCH_CALLBACK 3 /* call back event */ +#define WLC_E_MSCH_MESSAGE 4 /* message event */ +#define WLC_E_MSCH_PROFILE_START 5 +#define WLC_E_MSCH_PROFILE_END 6 +#define WLC_E_MSCH_REQ_HANDLE 7 +#define WLC_E_MSCH_REQ_ENTITY 8 +#define WLC_E_MSCH_CHAN_CTXT 9 +#define WLC_E_MSCH_TIMESLOT 10 +#define WLC_E_MSCH_REQ_TIMING 11 + +typedef BWL_PRE_PACKED_STRUCT struct msch_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; +} BWL_POST_PACKED_STRUCT msch_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_start_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 status; +} BWL_POST_PACKED_STRUCT msch_start_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_message_event_data { + uint32 time_lo; /* 
Request time */ + uint32 time_hi; + char message[1]; /* message */ +} BWL_POST_PACKED_STRUCT msch_message_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_param_event_data { + uint16 flags; /* Describe various request properties */ + uint8 req_type; /* Describe start and end time flexiblilty */ + uint8 priority; /* Define the request priority */ + uint32 start_time_l; /* Requested start time offset in us unit */ + uint32 start_time_h; + uint32 duration; /* Requested duration in us unit */ + uint32 interval; /* Requested periodic interval in us unit, + * 0 means non-periodic + */ + union { + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + struct { + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time*/ + uint32 lo_prio_time_l; + uint32 lo_prio_time_h; + uint32 lo_prio_interval; /* repeated low priority interval */ + uint32 hi_prio_time_l; + uint32 hi_prio_time_h; + uint32 hi_prio_interval; /* repeated high priority interval */ + } bf; + } flex; +} BWL_POST_PACKED_STRUCT msch_req_param_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_timeslot_event_data { + uint32 p_timeslot; + uint32 p_prev; + uint32 p_next; + uint32 timeslot_id; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 sch_dur_l; + uint32 sch_dur_h; + uint32 p_chan_ctxt; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 state; +} BWL_POST_PACKED_STRUCT msch_timeslot_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_timing_event_data { + uint32 p_req_timing; + uint32 p_prev; + uint32 p_next; + uint16 flags; + uint16 timeslot_ptr; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 start_time_l; + uint32 start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 p_timeslot; +} BWL_POST_PACKED_STRUCT msch_req_timing_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_chan_ctxt_event_data { + uint32 p_chan_ctxt; + uint32 p_prev; + uint32 p_next; + uint16 chanspec; + uint16 bf_sch_pending; + uint32 bf_link_prev; + uint32 bf_link_next; + uint32 onchan_time_l; + uint32 onchan_time_h; + uint32 actual_onchan_dur_l; + uint32 actual_onchan_dur_h; + uint32 pend_onchan_dur_l; + uint32 pend_onchan_dur_h; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 bf_entity_list_cnt; + uint16 bf_entity_list_ptr; +} BWL_POST_PACKED_STRUCT msch_chan_ctxt_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_prio_event_data { + uint32 is_lo; + uint32 time_l; + uint32 time_h; + uint32 p_entity; +} BWL_POST_PACKED_STRUCT msch_prio_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_entity_event_data { + uint32 p_req_entity; + uint32 req_hdl_link_prev; + uint32 req_hdl_link_next; + uint32 chan_ctxt_link_prev; + uint32 chan_ctxt_link_next; + uint32 rt_specific_link_prev; + uint32 rt_specific_link_next; + uint16 chanspec; + uint16 req_param_ptr; + uint16 cur_slot_ptr; + uint16 pend_slot_ptr; + msch_prio_event_data_t lo_event; + msch_prio_event_data_t hi_event; + uint32 ts_change_dur_flex; + uint16 ts_change_flags; + uint16 chan_ctxt_ptr; + uint32 p_chan_ctxt; + uint32 p_req_hdl; + uint32 hi_cnt_l; + uint32 hi_cnt_h; + uint32 bf_last_serv_time_l; + uint32 bf_last_serv_time_h; +} BWL_POST_PACKED_STRUCT msch_req_entity_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_handle_event_data { + uint32 p_req_handle; + uint32 p_prev; + uint32 p_next; + 
uint32 cb_func; + uint32 cb_ctxt; + uint16 req_param_ptr; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 chan_cnt; + uint16 schd_chan_cnt; + uint16 chanspec_list_cnt; + uint16 chanspec_list_ptr; + uint16 pad; +} BWL_POST_PACKED_STRUCT msch_req_handle_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_profile_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 free_req_hdl_list; + uint32 free_req_entity_list; + uint32 free_chan_ctxt_list; + uint32 free_timeslot_list; + uint32 free_chanspec_list; + uint16 cur_msch_timeslot_ptr; + uint16 pad; + uint32 p_cur_msch_timeslot; + uint32 cur_armed_timeslot; + uint32 cur_armed_req_timing; + uint32 ts_id; + uint32 service_interval; + uint32 max_lo_prio_interval; + uint16 flex_list_cnt; + uint16 msch_chanspec_alloc_cnt; + uint16 msch_req_entity_alloc_cnt; + uint16 msch_req_hdl_alloc_cnt; + uint16 msch_chan_ctxt_alloc_cnt; + uint16 msch_timeslot_alloc_cnt; + uint16 msch_req_hdl_list_cnt; + uint16 msch_req_hdl_list_ptr; + uint16 msch_chan_ctxt_list_cnt; + uint16 msch_chan_ctxt_list_ptr; + uint16 msch_timeslot_list_cnt; + uint16 msch_timeslot_list_ptr; + uint16 msch_req_timing_list_cnt; + uint16 msch_req_timing_list_ptr; + uint16 msch_start_flex_list_cnt; + uint16 msch_start_flex_list_ptr; + uint16 msch_both_flex_list_cnt; + uint16 msch_both_flex_list_ptr; +} BWL_POST_PACKED_STRUCT msch_profile_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 chanspec_cnt; + uint16 chanspec_ptr; + uint16 req_param_ptr; + uint16 pad; +} BWL_POST_PACKED_STRUCT msch_req_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_callback_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 type; /* callback type */ + uint16 chanspec; /* actual chanspec, may different with requested one */ + uint32 pre_start_time_l; /* time slot prestart time low 32bit */ + uint32 pre_start_time_h; /* time slot prestart time high 32bit */ + uint32 end_time_l; /* time slot end time low 32 bit */ + uint32 end_time_h; /* time slot end time high 32 bit */ + uint32 timeslot_id; /* unique time slot id */ +} BWL_POST_PACKED_STRUCT msch_callback_event_data_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _BCMEVENT_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h new file mode 100644 index 000000000000..eaa679c38948 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h @@ -0,0 +1,248 @@ +/* + * Fundamental constants relating to IP Protocol + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmip.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmip_h_ +#define _bcmip_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* IPV4 and IPV6 common */ +#define IP_VER_OFFSET 0x0 /* offset to version field */ +#define IP_VER_MASK 0xf0 /* version mask */ +#define IP_VER_SHIFT 4 /* version shift */ +#define IP_VER_4 4 /* version number for IPV4 */ +#define IP_VER_6 6 /* version number for IPV6 */ + +#define IP_VER(ip_body) \ + ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT) + +#define IP_PROT_ICMP 0x1 /* ICMP protocol */ +#define IP_PROT_IGMP 0x2 /* IGMP protocol */ +#define IP_PROT_TCP 0x6 /* TCP protocol */ +#define IP_PROT_UDP 0x11 /* UDP protocol type */ +#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */ + +/* IPV4 field offsets */ +#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */ +#define IPV4_TOS_OFFSET 1 /* type of service offset */ +#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */ +#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */ +#define IPV4_PROT_OFFSET 9 /* protocol type offset */ +#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */ +#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */ +#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */ +#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */ +#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */ + +/* IPV4 field decodes */ +#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */ +#define IPV4_VER_SHIFT 4 /* IPV4 version shift */ + +#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */ +#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK)) + +#define IPV4_ADDR_LEN 4 /* IPV4 address length */ + +#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \ + ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0) + +#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \ + ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff) + +#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */ +#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */ + +#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET]) + +#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */ +#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */ + +#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */ +#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */ +#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */ + +#define IPV4_TOS_ROUTINE 0 +#define IPV4_TOS_PRIORITY 1 +#define IPV4_TOS_IMMEDIATE 2 +#define IPV4_TOS_FLASH 3 +#define IPV4_TOS_FLASHOVERRIDE 4 +#define IPV4_TOS_CRITICAL 5 +#define IPV4_TOS_INETWORK_CTRL 6 +#define IPV4_TOS_NETWORK_CTRL 7 + +#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET]) + +#define IPV4_FRAG_RESV 0x8000 /* Reserved */ +#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */ +#define IPV4_FRAG_MORE 0x2000 /* More fragments */ +#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */ + +#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */ + +/* IPV4 packet 
formats */ +BWL_PRE_PACKED_STRUCT struct ipv4_addr { + uint8 addr[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv4_hdr { + uint8 version_ihl; /* Version and Internet Header Length */ + uint8 tos; /* Type Of Service */ + uint16 tot_len; /* Number of bytes in packet (max 65535) */ + uint16 id; + uint16 frag; /* 3 flag bits and fragment offset */ + uint8 ttl; /* Time To Live */ + uint8 prot; /* Protocol */ + uint16 hdr_chksum; /* IP header checksum */ + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ +} BWL_POST_PACKED_STRUCT; + +/* IPV6 field offsets */ +#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */ +#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */ +#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */ +#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */ +#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */ + +/* IPV6 field decodes */ +#define IPV6_TRAFFIC_CLASS(ipv6_body) \ + (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ + ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4)) + +#define IPV6_FLOW_LABEL(ipv6_body) \ + (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ + (((uint8 *)(ipv6_body))[2] << 8) | \ + (((uint8 *)(ipv6_body))[3])) + +#define IPV6_PAYLOAD_LEN(ipv6_body) \ + ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ + ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) + +#define IPV6_NEXT_HDR(ipv6_body) \ + (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) + +#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) + +#define IPV6_ADDR_LEN 16 /* IPV6 address length */ + +/* IPV4 TOS or IPV6 Traffic Classifier or 0 */ +#define IP_TOS46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0) + +#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT); + +/* IPV4 or IPV6 Protocol Classifier or 0 */ +#define IP_PROT46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? 
IPV6_PROT(ip_body) : 0) + +/* IPV6 extension headers (options) */ +#define IPV6_EXTHDR_HOP 0 +#define IPV6_EXTHDR_ROUTING 43 +#define IPV6_EXTHDR_FRAGMENT 44 +#define IPV6_EXTHDR_AUTH 51 +#define IPV6_EXTHDR_NONE 59 +#define IPV6_EXTHDR_DEST 60 + +#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \ + ((prot) == IPV6_EXTHDR_ROUTING) || \ + ((prot) == IPV6_EXTHDR_FRAGMENT) || \ + ((prot) == IPV6_EXTHDR_AUTH) || \ + ((prot) == IPV6_EXTHDR_NONE) || \ + ((prot) == IPV6_EXTHDR_DEST)) + +#define IPV6_MIN_HLEN 40 + +#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3) + +BWL_PRE_PACKED_STRUCT struct ipv6_exthdr { + uint8 nexthdr; + uint8 hdrlen; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag { + uint8 nexthdr; + uint8 rsvd; + uint16 frag_off; + uint32 ident; +} BWL_POST_PACKED_STRUCT; + +static INLINE int32 +ipv6_exthdr_len(uint8 *h, uint8 *proto) +{ + uint16 len = 0, hlen; + struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h; + + while (IPV6_EXTHDR(eh->nexthdr)) { + if (eh->nexthdr == IPV6_EXTHDR_NONE) + return -1; + else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) + hlen = 8; + else if (eh->nexthdr == IPV6_EXTHDR_AUTH) + hlen = (eh->hdrlen + 2) << 2; + else + hlen = IPV6_EXTHDR_LEN(eh); + + len += hlen; + eh = (struct ipv6_exthdr *)(h + len); + } + + *proto = eh->nexthdr; + return len; +} + +#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000) + +#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \ +{ \ + ether[0] = 0x01; \ + ether[1] = 0x00; \ + ether[2] = 0x5E; \ + ether[3] = (ipv4 & 0x7f0000) >> 16; \ + ether[4] = (ipv4 & 0xff00) >> 8; \ + ether[5] = (ipv4 & 0xff); \ +} + +/* This marks the end of a packed structure section. */ +#include + +#define IPV4_ADDR_STR "%d.%d.%d.%d" +#define IPV4_ADDR_TO_STR(addr) ((uint32)addr & 0xff000000) >> 24, \ + ((uint32)addr & 0x00ff0000) >> 16, \ + ((uint32)addr & 0x0000ff00) >> 8, \ + ((uint32)addr & 0x000000ff) + +#endif /* _bcmip_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h new file mode 100644 index 000000000000..fbab037b2f32 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h @@ -0,0 +1,163 @@ +/* + * Fundamental constants relating to Neighbor Discovery Protocol + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmipv6.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmipv6_h_ +#define _bcmipv6_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + +/* Extension headers */ +#define IPV6_EXT_HOP 0 +#define IPV6_EXT_ROUTE 43 +#define IPV6_EXT_FRAG 44 +#define IPV6_EXT_DEST 60 +#define IPV6_EXT_ESEC 50 +#define IPV6_EXT_AUTH 51 + +/* Minimum size (extension header "word" length) */ +#define IPV6_EXT_WORD 8 + +/* Offsets for most extension headers */ +#define IPV6_EXT_NEXTHDR 0 +#define IPV6_EXT_HDRLEN 1 + +/* Constants specific to fragmentation header */ +#define IPV6_FRAG_MORE_MASK 0x0001 +#define IPV6_FRAG_MORE_SHIFT 0 +#define IPV6_FRAG_OFFS_MASK 0xfff8 +#define IPV6_FRAG_OFFS_SHIFT 3 + +/* For icmpv6 */ +#define ICMPV6_HEADER_TYPE 0x3A +#define ICMPV6_PKT_TYPE_RA 134 +#define ICMPV6_PKT_TYPE_NS 135 +#define ICMPV6_PKT_TYPE_NA 136 + +#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2 +#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1 + +#define ICMPV6_ND_OPT_LEN_LINKADDR 1 + +#define ICMPV6_ND_OPT_LEN_LINKADDR 1 + +#define IPV6_VERSION 6 +#define IPV6_HOP_LIMIT 255 + +#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \ + a[5] | a[6] | a[7] | a[8] | a[9] | \ + a[10] | a[11] | a[12] | a[13] | \ + a[14] | a[15]) == 0) + +#define IPV6_ADDR_LOCAL(a) (((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE) + +/* IPV6 address */ +BWL_PRE_PACKED_STRUCT struct ipv6_addr { + uint8 addr[16]; +} BWL_POST_PACKED_STRUCT; + + +/* ICMPV6 Header */ +BWL_PRE_PACKED_STRUCT struct icmp6_hdr { + uint8 icmp6_type; + uint8 icmp6_code; + uint16 icmp6_cksum; + BWL_PRE_PACKED_STRUCT union { + uint32 reserved; + BWL_PRE_PACKED_STRUCT struct nd_advt { + uint32 reserved1:5, + override:1, + solicited:1, + router:1, + reserved2:24; + } BWL_POST_PACKED_STRUCT nd_advt; + } BWL_POST_PACKED_STRUCT opt; +} BWL_POST_PACKED_STRUCT; + +/* Ipv6 Header Format */ +BWL_PRE_PACKED_STRUCT struct ipv6_hdr { + uint8 priority:4, + version:4; + uint8 flow_lbl[3]; + uint16 payload_len; + uint8 nexthdr; + uint8 hop_limit; + struct ipv6_addr saddr; + struct ipv6_addr daddr; +} BWL_POST_PACKED_STRUCT; + +/* Neighbor Advertisement/Solicitation Packet Structure */ +BWL_PRE_PACKED_STRUCT struct bcm_nd_msg { + struct icmp6_hdr icmph; + struct ipv6_addr target; +} BWL_POST_PACKED_STRUCT; + + +/* Neighibor Solicitation/Advertisement Optional Structure */ +BWL_PRE_PACKED_STRUCT struct nd_msg_opt { + uint8 type; + uint8 len; + uint8 mac_addr[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +/* Ipv6 Fragmentation Header */ +BWL_PRE_PACKED_STRUCT struct ipv6_frag { + uint8 nexthdr; + uint8 reserved; + uint16 frag_offset; + uint32 ident; +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. 
*/ +#include + +static const struct ipv6_addr all_node_ipv6_maddr = { + { 0xff, 0x2, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 1 + }}; + +#define IPV6_ISMULTI(a) (a[0] == 0xff) + +#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \ +{ \ + ether[0] = 0x33; \ + ether[1] = 0x33; \ + ether[2] = ipv6[12]; \ + ether[3] = ipv6[13]; \ + ether[4] = ipv6[14]; \ + ether[5] = ipv6[15]; \ +} + +#endif /* !defined(_bcmipv6_h_) */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h new file mode 100644 index 000000000000..661e1f84d2ae --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h @@ -0,0 +1,93 @@ +/* + * Fundamental constants relating to TCP Protocol + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmtcp.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmtcp_h_ +#define _bcmtcp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. 
*/ +#include + + +#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */ +#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */ +#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */ +#define TCP_ACK_NUM_OFFSET 8 /* TCP acknowledgement number offset */ +#define TCP_HLEN_OFFSET 12 /* HLEN and reserved bits offset */ +#define TCP_FLAGS_OFFSET 13 /* FLAGS and reserved bits offset */ +#define TCP_CHKSUM_OFFSET 16 /* TCP body checksum offset */ + +#define TCP_PORT_LEN 2 /* TCP port field length */ + +/* 8bit TCP flag field */ +#define TCP_FLAG_URG 0x20 +#define TCP_FLAG_ACK 0x10 +#define TCP_FLAG_PSH 0x08 +#define TCP_FLAG_RST 0x04 +#define TCP_FLAG_SYN 0x02 +#define TCP_FLAG_FIN 0x01 + +#define TCP_HLEN_MASK 0xf000 +#define TCP_HLEN_SHIFT 12 + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint32 seq_num; /* TCP Sequence Number */ + uint32 ack_num; /* TCP Sequence Number */ + uint16 hdrlen_rsvd_flags; /* Header length, reserved bits and flags */ + uint16 tcpwin; /* TCP window */ + uint16 chksum; /* Segment checksum with pseudoheader */ + uint16 urg_ptr; /* Points to seq-num of byte following urg data */ +} BWL_POST_PACKED_STRUCT; + +#define TCP_MIN_HEADER_LEN 20 + +#define TCP_HDRLEN_MASK 0xf0 +#define TCP_HDRLEN_SHIFT 4 +#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT) + +#define TCP_FLAGS_MASK 0x1f +#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK) + +/* This marks the end of a packed structure section. */ +#include + +/* To address round up by 32bit. */ +#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31)) /* a >= b */ +#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31)) /* a =< b */ +#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b) /* a > b */ +#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b) /* a < b */ + +#endif /* #ifndef _bcmtcp_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h new file mode 100644 index 000000000000..97cf815db76c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h @@ -0,0 +1,49 @@ +/* + * Fundamental constants relating to UDP Protocol + * + * Copyright (C) 2016, Broadcom Corporation + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; + * the contents of this file may not be disclosed to third parties, copied + * or duplicated in any form, in whole or in part, without the prior + * written permission of Broadcom Corporation. + * + * + * <> + * + * $Id: bcmudp.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmudp_h_ +#define _bcmudp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* UDP header */ +#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */ +#define UDP_LEN_OFFSET 4 /* UDP length offset */ +#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */ + +#define UDP_HDR_LEN 8 /* UDP header length */ +#define UDP_PORT_LEN 2 /* UDP port length */ + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmudp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint16 len; /* Number of bytes in datagram including header */ + uint16 chksum; /* entire datagram checksum with pseudoheader */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* #ifndef _bcmudp_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h new file mode 100644 index 000000000000..4e948d24dfc1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h @@ -0,0 +1,444 @@ +/* + * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bt_amp_hci.h 518342 2014-12-01 23:21:41Z $ +*/ + +#ifndef _bt_amp_hci_h +#define _bt_amp_hci_h + +/* This marks the start of a packed structure section. */ +#include + + +/* AMP HCI CMD packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd { + uint16 opcode; + uint8 plen; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT amp_hci_cmd_t; + +#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms) +#define HCI_CMD_DATA_SIZE 255 + +/* AMP HCI CMD opcode layout */ +#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF)) +#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F)) +#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF) + +/* AMP HCI command opcodes */ +#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001) +#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002) +#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003) +#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009) +#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A) +#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B) +#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035) +#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036) +#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037) +#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038) +#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039) +#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A) +#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B) +#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C) +#define HCI_Write_Flow_Control_Mode HCI_CMD_OPCODE(0x01, 0x0067) +#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069) +#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A) +#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B) +#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003) +#define HCI_Read_Connection_Accept_Timeout 
HCI_CMD_OPCODE(0x03, 0x0015) +#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016) +#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036) +#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037) +#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F) +#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061) +#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062) +#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063) +#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064) +#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065) +#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001) +#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002) +#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005) +#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A) + +/* AMP HCI command parameters */ +typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms { + uint8 plh; + uint8 offset[2]; /* length so far */ + uint8 max_remote[2]; +} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms { + uint8 plh; + uint8 offset[2]; + uint8 len[2]; + uint8 frag[1]; +} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms { + uint8 plh; + uint8 key_length; + uint8 key_type; + uint8 key[1]; +} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms { + uint8 plh; + uint8 reason; +} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms { + uint8 plh; + uint8 txflow[16]; + uint8 rxflow[16]; +} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec { + uint8 id; + uint8 service_type; + uint8 max_sdu[2]; + uint8 sdu_ia_time[4]; + uint8 access_latency[4]; + uint8 flush_timeout[4]; +} BWL_POST_PACKED_STRUCT ext_flow_spec_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms { + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms { + uint8 llh[2]; + uint8 txflow[16]; + uint8 rxflow[16]; +} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct plh_pad { + uint8 plh; + uint8 pad; +} BWL_POST_PACKED_STRUCT plh_pad_t; + +typedef BWL_PRE_PACKED_STRUCT union hci_handle { + uint16 bredr; + plh_pad_t amp; +} BWL_POST_PACKED_STRUCT hci_handle_t; + +typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms { + hci_handle_t handle; + uint8 timeout[2]; +} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms { + uint8 llh[2]; + uint8 befto[4]; +} BWL_POST_PACKED_STRUCT befto_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms { + uint8 plh; + uint8 srm; +} BWL_POST_PACKED_STRUCT srm_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms { + uint8 ld_aware; + uint8 ld[2]; + uint8 ld_opts; + uint8 l_opts; +} BWL_POST_PACKED_STRUCT ld_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms { + uint8 llh[2]; + uint8 packet_type; +} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t; + +/* Generic AMP extended flow spec service types */ +#define EFS_SVCTYPE_NO_TRAFFIC 0 +#define EFS_SVCTYPE_BEST_EFFORT 1 +#define EFS_SVCTYPE_GUARANTEED 2 + +/* AMP HCI event packet format */ +typedef BWL_PRE_PACKED_STRUCT struct 
amp_hci_event { + uint8 ecode; + uint8 plen; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT amp_hci_event_t; + +#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms) + +/* AMP HCI event codes */ +#define HCI_Command_Complete 0x0E +#define HCI_Command_Status 0x0F +#define HCI_Flush_Occurred 0x11 +#define HCI_Enhanced_Flush_Complete 0x39 +#define HCI_Physical_Link_Complete 0x40 +#define HCI_Channel_Select 0x41 +#define HCI_Disconnect_Physical_Link_Complete 0x42 +#define HCI_Logical_Link_Complete 0x45 +#define HCI_Disconnect_Logical_Link_Complete 0x46 +#define HCI_Flow_Spec_Modify_Complete 0x47 +#define HCI_Number_of_Completed_Data_Blocks 0x48 +#define HCI_Short_Range_Mode_Change_Complete 0x4C +#define HCI_Status_Change_Event 0x4D +#define HCI_Vendor_Specific 0xFF + +/* AMP HCI event mask bit positions */ +#define HCI_Physical_Link_Complete_Event_Mask 0x0001 +#define HCI_Channel_Select_Event_Mask 0x0002 +#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004 +#define HCI_Logical_Link_Complete_Event_Mask 0x0020 +#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040 +#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080 +#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100 +#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000 +#define HCI_Status_Change_Event_Mask 0x2000 +#define HCI_All_Event_Mask 0x31e7 +/* AMP HCI event parameters */ +typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms { + uint8 status; + uint8 cmdpkts; + uint16 opcode; +} BWL_POST_PACKED_STRUCT cmd_status_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms { + uint8 cmdpkts; + uint16 opcode; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT cmd_complete_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms { + uint16 handle; +} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms { + uint8 status; + uint8 plh; +} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms { + uint8 status; + uint8 plh; + uint16 len; + uint8 frag[1]; +} BWL_POST_PACKED_STRUCT read_local_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms { + uint8 status; + uint8 AMP_status; + uint32 bandwidth; + uint32 gbandwidth; + uint32 latency; + uint32 PDU_size; + uint8 ctrl_type; + uint16 PAL_cap; + uint16 AMP_ASSOC_len; + uint32 max_flush_timeout; + uint32 be_flush_timeout; +} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms { + uint8 status; + uint16 llh; + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms { + uint8 status; + uint16 llh; + uint8 reason; +} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms { + uint8 status; + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms { + uint8 status; + uint16 llh; +} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms { + uint8 status; + uint8 plh; +} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms { + uint8 status; + uint8 plh; + uint8 reason; +} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms { + uint8 status; 
+ hci_handle_t handle; + uint16 timeout; +} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms { + uint8 status; + uint16 timeout; +} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms { + uint8 status; + uint16 ACL_pkt_len; + uint16 data_block_len; + uint16 data_block_num; +} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct data_blocks { + uint16 handle; + uint16 pkts; + uint16 blocks; +} BWL_POST_PACKED_STRUCT data_blocks_t; + +typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms { + uint16 num_blocks; + uint8 num_handles; + data_blocks_t completed[1]; +} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms { + uint8 status; + uint32 befto; +} BWL_POST_PACKED_STRUCT befto_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms { + uint8 status; + uint8 plh; + uint8 srm; +} BWL_POST_PACKED_STRUCT srm_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms { + uint8 status; + uint8 llh[2]; + uint16 counter; +} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms { + uint8 status; + uint8 llh[2]; +} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms { + uint8 status; + hci_handle_t handle; + uint8 link_quality; +} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms { + uint8 status; + uint8 ld_aware; + uint8 ld[2]; + uint8 ld_opts; + uint8 l_opts; +} BWL_POST_PACKED_STRUCT ld_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms { + uint16 handle; +} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms { + uint8 len; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms { + uint8 status; + uint8 hci_version; + uint16 hci_revision; + uint8 pal_version; + uint16 mfg_name; + uint16 pal_subversion; +} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t; + +#define MAX_SUPPORTED_CMD_BYTE 64 +typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms { + uint8 status; + uint8 cmd[MAX_SUPPORTED_CMD_BYTE]; +} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms { + uint8 status; + uint8 amp_status; +} BWL_POST_PACKED_STRUCT status_change_evt_parms_t; + +/* AMP HCI error codes */ +#define HCI_SUCCESS 0x00 +#define HCI_ERR_ILLEGAL_COMMAND 0x01 +#define HCI_ERR_NO_CONNECTION 0x02 +#define HCI_ERR_MEMORY_FULL 0x07 +#define HCI_ERR_CONNECTION_TIMEOUT 0x08 +#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09 +#define HCI_ERR_CONNECTION_EXISTS 0x0B +#define HCI_ERR_CONNECTION_DISALLOWED 0x0C +#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10 +#define HCI_ERR_UNSUPPORTED_VALUE 0x11 +#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12 +#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16 +#define HCI_ERR_UNSPECIFIED 0x1F +#define HCI_ERR_UNIT_KEY_USED 0x26 +#define HCI_ERR_QOS_REJECTED 0x2D +#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30 +#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39 +#define HCI_ERR_CHANNEL_MOVE 0xFF + +/* AMP HCI ACL Data packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data { + 
uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */ + uint16 dlen; /* data total length */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t; + +#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data) + +#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14) +#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12) + +#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff) +#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12) + +/* AMP Activity Report packet formats */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report { + uint8 ScheduleKnown; + uint8 NumReports; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t; + +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple { + uint32 StartTime; + uint32 Duration; + uint32 Periodicity; +} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t; + +#define HCI_AR_SCHEDULE_KNOWN 0x01 + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _bt_amp_hci_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h new file mode 100644 index 000000000000..be4ef5358fa5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h @@ -0,0 +1,215 @@ +/* + * 802.1x EAPOL definitions + * + * See + * IEEE Std 802.1X-2001 + * IEEE 802.1X RADIUS Usage Guidelines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: eapol.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _eapol_h_ +#define _eapol_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. 
*/ +#include + +#include + +/* EAPOL for 802.3/Ethernet */ +typedef BWL_PRE_PACKED_STRUCT struct { + struct ether_header eth; /* 802.3/Ethernet header */ + unsigned char version; /* EAPOL protocol version */ + unsigned char type; /* EAPOL type */ + unsigned short length; /* Length of body */ + unsigned char body[1]; /* Body (optional) */ +} BWL_POST_PACKED_STRUCT eapol_header_t; + +#define EAPOL_HEADER_LEN 18 + +typedef struct { + unsigned char version; /* EAPOL protocol version */ + unsigned char type; /* EAPOL type */ + unsigned short length; /* Length of body */ +} eapol_hdr_t; + +#define EAPOL_HDR_LEN 4 + +/* EAPOL version */ +#define WPA2_EAPOL_VERSION 2 +#define WPA_EAPOL_VERSION 1 +#define LEAP_EAPOL_VERSION 1 +#define SES_EAPOL_VERSION 1 + +/* EAPOL types */ +#define EAP_PACKET 0 +#define EAPOL_START 1 +#define EAPOL_LOGOFF 2 +#define EAPOL_KEY 3 +#define EAPOL_ASF 4 + +/* EAPOL-Key types */ +#define EAPOL_RC4_KEY 1 +#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */ +#define EAPOL_WPA_KEY 254 /* WPA */ + +/* RC4 EAPOL-Key header field sizes */ +#define EAPOL_KEY_REPLAY_LEN 8 +#define EAPOL_KEY_IV_LEN 16 +#define EAPOL_KEY_SIG_LEN 16 + +/* RC4 EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short length; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */ + unsigned char index; /* Key Flags & Index */ + unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */ + unsigned char key[1]; /* Key (optional) */ +} BWL_POST_PACKED_STRUCT eapol_key_header_t; + +#define EAPOL_KEY_HEADER_LEN 44 + +/* RC4 EAPOL-Key flags */ +#define EAPOL_KEY_FLAGS_MASK 0x80 +#define EAPOL_KEY_BROADCAST 0 +#define EAPOL_KEY_UNICAST 0x80 + +/* RC4 EAPOL-Key index */ +#define EAPOL_KEY_INDEX_MASK 0x7f + +/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */ +#define EAPOL_WPA_KEY_REPLAY_LEN 8 +#define EAPOL_WPA_KEY_NONCE_LEN 32 +#define EAPOL_WPA_KEY_IV_LEN 16 +#define EAPOL_WPA_KEY_RSC_LEN 8 +#define EAPOL_WPA_KEY_ID_LEN 8 +#define EAPOL_WPA_KEY_MIC_LEN 16 +#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN) +#define EAPOL_WPA_MAX_KEY_SIZE 32 + +/* WPA EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short key_info; /* Key Information (unaligned) */ + unsigned short key_len; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */ + unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */ + unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */ + unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */ + unsigned short data_len; /* Key Data Length */ + unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */ +} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t; + +#define EAPOL_WPA_KEY_LEN 95 + +/* WPA/802.11i/WPA2 KEY KEY_INFO bits */ +#define WPA_KEY_DESC_OSEN 0x0 +#define WPA_KEY_DESC_V1 0x01 +#define WPA_KEY_DESC_V2 0x02 +#define WPA_KEY_DESC_V3 0x03 +#define WPA_KEY_PAIRWISE 0x08 +#define WPA_KEY_INSTALL 0x40 +#define WPA_KEY_ACK 0x80 +#define WPA_KEY_MIC 0x100 +#define WPA_KEY_SECURE 0x200 +#define WPA_KEY_ERROR 0x400 +#define WPA_KEY_REQ 0x800 + +#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2 + +/* WPA-only KEY KEY_INFO bits */ +#define WPA_KEY_INDEX_0 0x00 +#define WPA_KEY_INDEX_1 0x10 
+#define WPA_KEY_INDEX_2 0x20 +#define WPA_KEY_INDEX_3 0x30 +#define WPA_KEY_INDEX_MASK 0x30 +#define WPA_KEY_INDEX_SHIFT 0x04 + +/* 802.11i/WPA2-only KEY KEY_INFO bits */ +#define WPA_KEY_ENCRYPTED_DATA 0x1000 + +/* Key Data encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 type; + uint8 length; + uint8 oui[3]; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t; + +#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6 + +#define WPA2_KEY_DATA_SUBTYPE_GTK 1 +#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2 +#define WPA2_KEY_DATA_SUBTYPE_MAC 3 +#define WPA2_KEY_DATA_SUBTYPE_PMKID 4 +#define WPA2_KEY_DATA_SUBTYPE_IGTK 9 + +/* GTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 flags; + uint8 reserved; + uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t; + +#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2 + +#define WPA2_GTK_INDEX_MASK 0x03 +#define WPA2_GTK_INDEX_SHIFT 0x00 + +#define WPA2_GTK_TRANSMIT 0x04 + +/* IGTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 key_id; + uint8 ipn[6]; + uint8 key[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t; + +#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8 + +/* STAKey encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 reserved[2]; + uint8 mac[ETHER_ADDR_LEN]; + uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t; + +#define WPA2_KEY_DATA_PAD 0xdd + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _eapol_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h new file mode 100644 index 000000000000..022fee41a196 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h @@ -0,0 +1,227 @@ +/* + * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: ethernet.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */ +#define _NET_ETHERNET_H_ + +#ifndef _TYPEDEFS_H_ +#include "typedefs.h" +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* + * The number of bytes in an ethernet (MAC) address. + */ +#define ETHER_ADDR_LEN 6 + +/* + * The number of bytes in the type field. 
+ */ +#define ETHER_TYPE_LEN 2 + +/* + * The number of bytes in the trailing CRC field. + */ +#define ETHER_CRC_LEN 4 + +/* + * The length of the combined header. + */ +#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) + +/* + * The minimum packet length. + */ +#define ETHER_MIN_LEN 64 + +/* + * The minimum packet user data length. + */ +#define ETHER_MIN_DATA 46 + +/* + * The maximum packet length. + */ +#define ETHER_MAX_LEN 1518 + +/* + * The maximum packet user data length. + */ +#define ETHER_MAX_DATA 1500 + +/* ether types */ +#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */ +#define ETHER_TYPE_IP 0x0800 /* IP */ +#define ETHER_TYPE_ARP 0x0806 /* ARP */ +#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */ +#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */ +#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */ +#define ETHER_TYPE_802_1X 0x888e /* 802.1x */ +#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */ +#define ETHER_TYPE_WAI 0x88b4 /* WAI */ +#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */ +#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */ + +#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */ + +#define ETHER_TYPE_IAPP_L2_UPDATE 0x6 /* IAPP L2 update frame */ + +/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */ +#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */ + +/* ether header */ +#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */ +#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */ +#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */ + +/* + * A macro to validate a length with + */ +#define ETHER_IS_VALID_LEN(foo) \ + ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) + +#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \ + ((uint8 *)ea)[0] = 0x01; \ + ((uint8 *)ea)[1] = 0x00; \ + ((uint8 *)ea)[2] = 0x5e; \ + ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \ + ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \ + ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \ +} + +#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */ +/* + * Structure of a 10Mb/s Ethernet header. + */ +BWL_PRE_PACKED_STRUCT struct ether_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 ether_type; +} BWL_POST_PACKED_STRUCT; + +/* + * Structure of a 48-bit Ethernet address. + */ +BWL_PRE_PACKED_STRUCT struct ether_addr { + uint8 octet[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; +#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */ + +/* + * Takes a pointer, set, test, clear, toggle locally admininistered + * address bit in the 48-bit Ethernet address. 
+ */ +#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2)) +#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2) +#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd)) +#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2)) + +/* Takes a pointer, marks unicast address bit in the MAC address */ +#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1)) + +/* + * Takes a pointer, returns true if a 48-bit multicast address + * (including broadcast, since it is all ones) + */ +#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1) + + +/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */ +#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \ + (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \ + (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2])) + +#define ether_cmp(a, b) eacmp(a, b) + +/* copy an ethernet address - assumes the pointers can be referenced as shorts */ +#define eacopy(s, d) \ +do { \ + ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \ + ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \ + ((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \ +} while (0) + +#define ether_copy(s, d) eacopy(s, d) + +/* Copy an ethernet address in reverse order */ +#define ether_rcopy(s, d) \ +do { \ + ((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \ + ((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \ + ((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \ +} while (0) + +/* Copy 14B ethernet header: 32bit aligned source and destination. */ +#define ehcopy32(s, d) \ +do { \ + ((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \ + ((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \ + ((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \ + ((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \ +} while (0) + + +static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}}; +static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}}; +static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}}; + +#define ETHER_ISBCAST(ea) ((((const uint8 *)(ea))[0] & \ + ((const uint8 *)(ea))[1] & \ + ((const uint8 *)(ea))[2] & \ + ((const uint8 *)(ea))[3] & \ + ((const uint8 *)(ea))[4] & \ + ((const uint8 *)(ea))[5]) == 0xff) +#define ETHER_ISNULLADDR(ea) ((((const uint8 *)(ea))[0] | \ + ((const uint8 *)(ea))[1] | \ + ((const uint8 *)(ea))[2] | \ + ((const uint8 *)(ea))[3] | \ + ((const uint8 *)(ea))[4] | \ + ((const uint8 *)(ea))[5]) == 0) + +#define ETHER_ISNULLDEST(da) ((((const uint16 *)(da))[0] | \ + ((const uint16 *)(da))[1] | \ + ((const uint16 *)(da))[2]) == 0) +#define ETHER_ISNULLSRC(sa) ETHER_ISNULLDEST(sa) + +#define ETHER_MOVE_HDR(d, s) \ +do { \ + struct ether_header t; \ + t = *(struct ether_header *)(s); \ + *(struct ether_header *)(d) = t; \ +} while (0) + +#define ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0) + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* _NET_ETHERNET_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/event_log_set.h b/drivers/net/wireless/bcmdhd/include/proto/event_log_set.h new file mode 100644 index 000000000000..910cbcf169af --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/event_log_set.h @@ -0,0 +1,45 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + */ + +#ifndef _EVENT_LOG_SET_H_ +#define _EVENT_LOG_SET_H_ + +/* Set a maximum number of sets here. It is not dynamic for + * efficiency of the EVENT_LOG calls. + */ +#define NUM_EVENT_LOG_SETS 8 + +/* Define new event log sets here */ +#define EVENT_LOG_SET_BUS 0 +#define EVENT_LOG_SET_WL 1 +#define EVENT_LOG_SET_PSM 2 +#define EVENT_LOG_SET_ERROR 3 +#define EVENT_LOG_SET_MEM_API 4 + +#endif /* _EVENT_LOG_SET_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/event_log_tag.h b/drivers/net/wireless/bcmdhd/include/proto/event_log_tag.h new file mode 100644 index 000000000000..25acbc7420e1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/event_log_tag.h @@ -0,0 +1,157 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + */ + +#ifndef _EVENT_LOG_TAG_H_ +#define _EVENT_LOG_TAG_H_ + +#include + +/* Define new event log tags here */ +#define EVENT_LOG_TAG_NULL 0 /* Special null tag */ +#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */ +#define EVENT_LOG_TAG_BUS_OOB 2 +#define EVENT_LOG_TAG_BUS_STATE 3 +#define EVENT_LOG_TAG_BUS_PROTO 4 +#define EVENT_LOG_TAG_BUS_CTL 5 +#define EVENT_LOG_TAG_BUS_EVENT 6 +#define EVENT_LOG_TAG_BUS_PKT 7 +#define EVENT_LOG_TAG_BUS_FRAME 8 +#define EVENT_LOG_TAG_BUS_DESC 9 +#define EVENT_LOG_TAG_BUS_SETUP 10 +#define EVENT_LOG_TAG_BUS_MISC 11 +#define EVENT_LOG_TAG_SRSCAN 22 +#define EVENT_LOG_TAG_PWRSTATS_INFO 23 +#define EVENT_LOG_TAG_UCODE_WATCHDOG 26 +#define EVENT_LOG_TAG_UCODE_FIFO 27 +#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28 +#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29 +#define EVENT_LOG_TAG_SCAN_ERROR 30 +#define EVENT_LOG_TAG_SCAN_WARN 31 +#define EVENT_LOG_TAG_MPF_ERR 32 +#define EVENT_LOG_TAG_MPF_WARN 33 +#define EVENT_LOG_TAG_MPF_INFO 34 +#define EVENT_LOG_TAG_MPF_DEBUG 35 +#define EVENT_LOG_TAG_EVENT_INFO 36 +#define EVENT_LOG_TAG_EVENT_ERR 37 +#define EVENT_LOG_TAG_PWRSTATS_ERROR 38 +#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39 +#define EVENT_LOG_TAG_IOCTL_LOG 40 +#define EVENT_LOG_TAG_PFN_ERR 41 +#define EVENT_LOG_TAG_PFN_WARN 42 +#define EVENT_LOG_TAG_PFN_INFO 43 +#define EVENT_LOG_TAG_PFN_DEBUG 44 +#define EVENT_LOG_TAG_BEACON_LOG 45 +#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46 +#define EVENT_LOG_TAG_TRACE_CHANSW 47 +#define EVENT_LOG_TAG_PCI_ERROR 48 +#define EVENT_LOG_TAG_PCI_TRACE 49 +#define EVENT_LOG_TAG_PCI_WARN 50 +#define EVENT_LOG_TAG_PCI_INFO 51 +#define EVENT_LOG_TAG_PCI_DBG 52 +#define EVENT_LOG_TAG_PCI_DATA 53 +#define EVENT_LOG_TAG_PCI_RING 54 +#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55 +#define EVENT_LOG_TAG_WL_ERROR 56 +#define EVENT_LOG_TAG_PHY_ERROR 57 +#define EVENT_LOG_TAG_OTP_ERROR 58 +#define EVENT_LOG_TAG_NOTIF_ERROR 59 +#define EVENT_LOG_TAG_MPOOL_ERROR 60 +#define EVENT_LOG_TAG_OBJR_ERROR 61 +#define EVENT_LOG_TAG_DMA_ERROR 62 +#define EVENT_LOG_TAG_PMU_ERROR 63 +#define EVENT_LOG_TAG_BSROM_ERROR 64 +#define EVENT_LOG_TAG_SI_ERROR 65 +#define EVENT_LOG_TAG_ROM_PRINTF 66 +#define EVENT_LOG_TAG_RATE_CNT 67 +#define EVENT_LOG_TAG_CTL_MGT_CNT 68 +#define EVENT_LOG_TAG_AMPDU_DUMP 69 +#define EVENT_LOG_TAG_MEM_ALLOC_SUCC 70 +#define EVENT_LOG_TAG_MEM_ALLOC_FAIL 71 +#define EVENT_LOG_TAG_MEM_FREE 72 +#define EVENT_LOG_TAG_WL_ASSOC_LOG 73 +#define EVENT_LOG_TAG_WL_PS_LOG 74 +#define EVENT_LOG_TAG_WL_ROAM_LOG 75 +#define EVENT_LOG_TAG_WL_MPC_LOG 76 +#define EVENT_LOG_TAG_WL_WSEC_LOG 77 +#define EVENT_LOG_TAG_WL_WSEC_DUMP 78 +#define EVENT_LOG_TAG_WL_MCNX_LOG 79 +#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80 +#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81 +#define EVENT_LOG_TAG_ECOUNTERS_ERROR 82 +#define EVENT_LOG_TAG_WL_COUNTERS 83 +#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS 84 +#define EVENT_LOG_TAG_WL_P2P_LOG 85 +#define EVENT_LOG_TAG_SDIO_ERROR 86 +#define EVENT_LOG_TAG_SDIO_TRACE 87 +#define EVENT_LOG_TAG_SDIO_DBG 88 +#define EVENT_LOG_TAG_SDIO_PRHDRS 89 +#define EVENT_LOG_TAG_SDIO_PRPKT 90 +#define EVENT_LOG_TAG_SDIO_INFORM 91 +#define EVENT_LOG_TAG_MIMO_PS_ERROR 92 +#define EVENT_LOG_TAG_MIMO_PS_TRACE 93 +#define EVENT_LOG_TAG_MIMO_PS_INFO 94 +#define EVENT_LOG_TAG_BTCX_STATS 95 +#define EVENT_LOG_TAG_LEAKY_AP_STATS 96 +#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97 +#define EVENT_LOG_TAG_MIMO_PS_STATS 98 +#define EVENT_LOG_TAG_PWRSTATS_PHY 99 +#define EVENT_LOG_TAG_PWRSTATS_SCAN 
100 +#define EVENT_LOG_TAG_PWRSTATS_AWDL 101 +#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102 +#define EVENT_LOG_TAG_LQM 103 +#define EVENT_LOG_TAG_TRACE_WL_INFO 104 +#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105 +#define EVENT_LOG_TAG_MAX 105 /* Set to the same value of last tag, not last tag + 1 */ +/* Note: New event should be added/reserved in trunk before adding it to branches */ + + +#define SD_PRHDRS(i, s, h, p, n, l) +#define SD_PRPKT(m, b, n) +#define SD_INFORM(args) + +/* Flags for tag control */ +#define EVENT_LOG_TAG_FLAG_NONE 0 +#define EVENT_LOG_TAG_FLAG_LOG 0x80 +#define EVENT_LOG_TAG_FLAG_PRINT 0x40 +#define EVENT_LOG_TAG_FLAG_SET_MASK 0x3f + +/* Each event log entry has a type. The type is the LAST word of the + * event log. The printing code walks the event entries in reverse + * order to find the first entry. + */ +typedef union event_log_hdr { + struct { + uint8 tag; /* Event_log entry tag */ + uint8 count; /* Count of 4-byte entries */ + uint16 fmt_num; /* Format number */ + }; + uint32 t; /* Type cheat */ +} event_log_hdr_t; + +#endif /* _EVENT_LOG_TAG_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h new file mode 100644 index 000000000000..91f5147f54d0 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h @@ -0,0 +1,713 @@ +/* + * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: p2p.h 536785 2015-02-24 08:35:00Z $ + */ + +#ifndef _P2P_H_ +#define _P2P_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif +#include +#include + +/* This marks the start of a packed structure section. 
*/ +#include + + +/* WiFi P2P OUI values */ +#define P2P_OUI WFA_OUI /* WiFi P2P OUI */ +#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */ + +#define P2P_IE_ID 0xdd /* P2P IE element ID */ + +/* WiFi P2P IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie { + uint8 id; /* IE ID: 0xDD */ + uint8 len; /* IE length */ + uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */ + uint8 oui_type; /* Identifies P2P version: P2P_VER */ + uint8 subelts[1]; /* variable length subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ie wifi_p2p_ie_t; + +#define P2P_IE_FIXED_LEN 6 + +#define P2P_ATTR_ID_OFF 0 +#define P2P_ATTR_LEN_OFF 1 +#define P2P_ATTR_DATA_OFF 3 + +#define P2P_ATTR_ID_LEN 1 /* ID filed length */ +#define P2P_ATTR_LEN_LEN 2 /* length field length */ +#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */ + +#define P2P_WFDS_HASH_LEN 6 +#define P2P_WFDS_MAX_SVC_NAME_LEN 32 + +/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */ +#define P2P_SEID_STATUS 0 /* Status */ +#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */ +#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */ +#define P2P_SEID_DEV_ID 3 /* P2P Device ID */ +#define P2P_SEID_INTENT 4 /* Group Owner Intent */ +#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */ +#define P2P_SEID_CHANNEL 6 /* Listen channel */ +#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */ +#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */ +#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */ +#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */ +#define P2P_SEID_CHAN_LIST 11 /* Channel List */ +#define P2P_SEID_ABSENCE 12 /* Notice of Absence */ +#define P2P_SEID_DEV_INFO 13 /* Device Info */ +#define P2P_SEID_GROUP_INFO 14 /* Group Info */ +#define P2P_SEID_GROUP_ID 15 /* Group ID */ +#define P2P_SEID_P2P_IF 16 /* P2P Interface */ +#define P2P_SEID_OP_CHANNEL 17 /* Operating Channel */ +#define P2P_SEID_INVITE_FLAGS 18 /* Invitation Flags */ +#define P2P_SEID_SERVICE_HASH 21 /* Service hash */ +#define P2P_SEID_SESSION 22 /* Session information */ +#define P2P_SEID_CONNECT_CAP 23 /* Connection capability */ +#define P2P_SEID_ADVERTISE_ID 24 /* Advertisement ID */ +#define P2P_SEID_ADVERTISE_SERVICE 25 /* Advertised service */ +#define P2P_SEID_SESSION_ID 26 /* Session ID */ +#define P2P_SEID_FEATURE_CAP 27 /* Feature capability */ +#define P2P_SEID_PERSISTENT_GROUP 28 /* Persistent group */ +#define P2P_SEID_SESSION_INFO_RESP 29 /* Session Information Response */ +#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */ + +#define P2P_SE_VS_ID_SERVICES 0x1b + + +/* WiFi P2P IE subelement: P2P Capability (capabilities info) */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 dev; /* Device Capability Bitmap */ + uint8 group; /* Group Capability Bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t; + +/* P2P Capability subelement's Device Capability Bitmap bit values */ +#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */ +#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */ +#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */ +#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */ +#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */ +#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */ + +/* P2P Capability subelement's Group Capability Bitmap bit 
values */ +#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */ +#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */ +#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */ +#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */ +#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */ +#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */ +#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */ + + +/* WiFi P2P IE subelement: Group Owner Intent */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTENT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t; + +/* WiFi P2P IE subelement: Configuration Timeout */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 go_tmo; /* GO config timeout in units of 10 ms */ + uint8 client_tmo; /* Client config timeout in units of 10 ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t; + +/* WiFi P2P IE subelement: Listen Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 op_class; /* Operating Class */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t; + +/* WiFi P2P IE subelement: P2P Group BSSID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GRP_BSSID */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P group bssid */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t; + +/* WiFi P2P IE subelement: P2P Group ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_ID */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P device address */ + uint8 ssid[1]; /* ssid. device id. 
variable length */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t; + +/* WiFi P2P IE subelement: P2P Interface */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_IF */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P device address */ + uint8 ifaddrs; /* P2P Interface Address count */ + uint8 ifaddr[1][6]; /* P2P Interface Address list */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t; + +/* WiFi P2P IE subelement: Status */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 status; /* Status Code: P2P_STATSE_* */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t; + +/* Status subelement Status Code definitions */ +#define P2P_STATSE_SUCCESS 0 + /* Success */ +#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1 + /* Failed, information currently unavailable */ +#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL + /* Old name for above in P2P spec 1.08 and older */ +#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2 + /* Failed, incompatible parameters */ +#define P2P_STATSE_FAIL_LIMIT_REACHED 3 + /* Failed, limit reached */ +#define P2P_STATSE_FAIL_INVALID_PARAMS 4 + /* Failed, invalid parameters */ +#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5 + /* Failed, unable to accommodate request */ +#define P2P_STATSE_FAIL_PROTO_ERROR 6 + /* Failed, previous protocol error or disruptive behaviour */ +#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7 + /* Failed, no common channels */ +#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8 + /* Failed, unknown P2P Group */ +#define P2P_STATSE_FAIL_INTENT 9 + /* Failed, both peers indicated Intent 15 in GO Negotiation */ +#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10 + /* Failed, incompatible provisioning method */ +#define P2P_STATSE_FAIL_USER_REJECT 11 + /* Failed, rejected by user */ +#define P2P_STATSE_SUCCESS_USER_ACCEPT 12 + /* Success, accepted by user */ + +/* WiFi P2P IE attribute: Extended Listen Timing */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s { + uint8 eltId; /* ID: P2P_SEID_XT_TIMING */ + uint8 len[2]; /* length not including eltId, len fields */ + uint8 avail[2]; /* availability period */ + uint8 interval[2]; /* availability interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t; + +#define P2P_EXT_MIN 10 /* minimum 10ms */ + +/* WiFi P2P IE subelement: Intended P2P Interface Address */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* intended P2P interface MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t; + +/* WiFi P2P IE subelement: Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 band; /* Regulatory Class (band) */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t; + + +/* Channel Entry structure within the Channel List SE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s { + uint8 band; /* Regulatory Class (band) */ + uint8 num_channels; /* # of channels in the channel list */ + uint8 channels[WL_NUMCHANNELS]; /* Channel List */ 
+} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t; +#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2 + +/* WiFi P2P IE subelement: Channel List */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 num_entries; /* # of channel entries */ + wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES]; + /* Channel Entry List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t; + +/* WiFi Primary Device Type structure */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s { + uint16 cat_id; /* Category ID */ + uint8 OUI[3]; /* WFA OUI: 0x0050F2 */ + uint8 oui_type; /* WPS_OUI_TYPE */ + uint16 sub_cat_id; /* Sub Category ID */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t; + +/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category + * maximum values for each category + */ +#define P2P_DISE_SUBCATEGORY_MINVAL 1 +#define P2P_DISE_CATEGORY_COMPUTER 1 +#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL 8 +#define P2P_DISE_CATEGORY_INPUT_DEVICE 2 +#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL 9 +#define P2P_DISE_CATEGORY_PRINTER 3 +#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL 5 +#define P2P_DISE_CATEGORY_CAMERA 4 +#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL 4 +#define P2P_DISE_CATEGORY_STORAGE 5 +#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL 1 +#define P2P_DISE_CATEGORY_NETWORK_INFRA 6 +#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL 4 +#define P2P_DISE_CATEGORY_DISPLAY 7 +#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL 4 +#define P2P_DISE_CATEGORY_MULTIMEDIA 8 +#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL 6 +#define P2P_DISE_CATEGORY_GAMING 9 +#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL 5 +#define P2P_DISE_CATEGORY_TELEPHONE 10 +#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL 5 +#define P2P_DISE_CATEGORY_AUDIO 11 +#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL 6 + +/* WiFi P2P IE's Device Info subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_DEVINFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P Device MAC address */ + uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pri_devtype[8]; /* Primary Device Type */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t; + +#define P2P_DEV_TYPE_LEN 8 + +/* WiFi P2P IE's Group Info subelement Client Info Descriptor */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s { + uint8 len; + uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */ + uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */ + uint8 devcap; /* Device Capability */ + uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */ + uint8 secdts; /* Number of Secondary Device Types */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t; + +/* WiFi P2P IE's Device ID subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s { + uint8 eltId; + uint8 len[2]; + struct ether_addr addr; /* P2P Device MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t; + +/* WiFi P2P IE subelement: P2P Manageability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */ 
+ uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mg_bitmap; /* manageability bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t; +/* mg_bitmap field bit values */ +#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */ + +/* WiFi P2P IE subelement: Group Info */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t; + +/* WiFi IE subelement: Operating Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_OP_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 op_class; /* Operating Class */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t; + +/* WiFi IE subelement: INVITATION FLAGS */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INVITE_FLAGS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 flags; /* Flags */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t; + +/* WiFi P2P IE subelement: Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SERVICE_HASH */ + uint8 len[2]; /* SE length not including eltId, len fields + * in multiple of 6 Bytes + */ + uint8 hash[1]; /* Variable length - SHA256 hash of + * service names (can be more than one hashes) + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t; + +/* WiFi P2P IE subelement: Service Instance Data */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SESSION */ + uint8 len[2]; /* SE length not including eltId, len */ + uint8 ssn_info[1]; /* Variable length - Session information as specified by + * the service layer, type matches serv. 
name + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t; + + +/* WiFi P2P IE subelement: Connection capability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CONNECT_CAP */ + uint8 len[2]; /* SE length not including eltId, len */ + uint8 conn_cap; /* 1-byte capability as specified by the + * service layer, valid bitmask/values + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t; + + +/* WiFi P2P IE subelement: Advertisement ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_ID */ + uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */ + uint8 advt_id[4]; /* 4-byte Advertisement ID of the peer device sent in + * PROV Disc in Network byte order + */ + uint8 advt_mac[6]; /* P2P device address of the service advertiser */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t; + + +/* WiFi P2P IE subelement: Advertise Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s { + uint8 advt_id[4]; /* SE Advertise ID for the service */ + uint16 nw_cfg_method; /* SE Network Config method for the service */ + uint8 serv_name_len; /* SE length of the service name */ + uint8 serv_name[1]; /* Variable length service name field */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t; + + +/* WiFi P2P IE subelement: Advertise Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s { + uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_SERVICE */ + uint8 len[2]; /* SE length not including eltId, len fields; a multiple of the + * wifi_p2p_adv_serv_info_t entry length + */ + wifi_p2p_adv_serv_info_t p_advt_serv_info[1]; /* Variable length + of multiple instances + of the advertise service info + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t; + + +/* WiFi P2P IE subelement: Session ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SESSION_ID */ + uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */ + uint8 ssn_id[4]; /* 4-byte Session ID of the peer device sent in + * PROV Disc in Network byte order + */ + uint8 ssn_mac[6]; /* P2P device address of the seeker - session mac */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t; + + +#define P2P_ADVT_SERV_SE_FIXED_LEN 3 /* Includes only the element ID and len */ +#define P2P_ADVT_SERV_INFO_FIXED_LEN 7 /* Per ADV Service Instance advt_id + + * nw_config_method + serv_name_len + */ + +/* WiFi P2P Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame { + uint8 category; /* P2P_AF_CATEGORY */ + uint8 OUI[3]; /* OUI - P2P_OUI */ + uint8 type; /* OUI Type - P2P_VER */ + uint8 subtype; /* OUI Subtype - P2P_AF_* */ + uint8 dialog_token; /* nonzero, identifies req/resp transaction */ + uint8 elts[1]; /* Variable length information elements. 
Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t; +#define P2P_AF_CATEGORY 0x7f + +#define P2P_AF_FIXED_LEN 7 + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + + +/* WiFi P2P Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame { + uint8 category; /* P2P_PUB_AF_CATEGORY */ + uint8 action; /* P2P_PUB_AF_ACTION */ + uint8 oui[3]; /* P2P_OUI */ + uint8 oui_type; /* OUI type - P2P_VER */ + uint8 subtype; /* OUI subtype - P2P_TYPE_* */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 elts[1]; /* Variable length information elements. Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t; +#define P2P_PUB_AF_FIXED_LEN 8 +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ +#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */ + +/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */ +#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ +#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP +#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF + +/* WiFi P2P IE subelement: Notice of Absence */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc { + uint8 cnt_type; /* Count/Type */ + uint32 duration; /* Duration */ + uint32 interval; /* Interval */ + uint32 start; /* Start Time */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t; + +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se { + uint8 eltId; /* Subelement ID */ + uint8 len[2]; /* Length */ + uint8 index; /* Index */ + uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */ + wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t; + +#define P2P_NOA_SE_FIXED_LEN 5 + +#define P2P_NOA_SE_MAX_DESC 2 /* max NoA descriptors in presence request */ + +/* cnt_type field values */ +#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */ +#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */ +#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */ +#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */ + +/* ctw_ops_parms field values */ +#define P2P_NOA_CTW_MASK 0x7f +#define P2P_NOA_OPS_MASK 0x80 +#define P2P_NOA_OPS_SHIFT 7 + +#define P2P_CTW_MIN 10 /* minimum 10TU */ + +/* + * P2P Service Discovery related + */ +#define P2PSD_ACTION_CATEGORY 0x04 + /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a + /* Action value for GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b + /* Action 
value for GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c + /* Action value for GAS Comeback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d + /* Action value for GAS Comeback Response AF */ +#define P2PSD_AD_EID 0x6c + /* Advertisement Protocol IE ID */ +#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00 + /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */ +#define P2PSD_ADP_PROTO_ID 0x00 + /* Advertisement Protocol ID. Always 0 for P2P SD */ +#define P2PSD_GAS_OUI P2P_OUI + /* WFA OUI */ +#define P2PSD_GAS_OUI_SUBTYPE P2P_VER + /* OUI Subtype for GAS IE */ +#define P2PSD_GAS_NQP_INFOID 0xDDDD + /* NQP Query Info ID: 56797 */ +#define P2PSD_GAS_COMEBACKDEALY 0x00 + /* Not used in the Native GAS protocol */ + +/* Service Protocol Type */ +typedef enum p2psd_svc_protype { + SVC_RPOTYPE_ALL = 0, + SVC_RPOTYPE_BONJOUR = 1, + SVC_RPOTYPE_UPNP = 2, + SVC_RPOTYPE_WSD = 3, + SVC_RPOTYPE_WFDS = 11, + SVC_RPOTYPE_VENDOR = 255 +} p2psd_svc_protype_t; + +/* Service Discovery response status code */ +typedef enum { + P2PSD_RESP_STATUS_SUCCESS = 0, + P2PSD_RESP_STATUS_PROTYPE_NA = 1, + P2PSD_RESP_STATUS_DATA_NA = 2, + P2PSD_RESP_STATUS_BAD_REQUEST = 3 +} p2psd_resp_status_t; + +/* Advertisement Protocol IE tuple field */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl { + uint8 llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus + * Pre-Associated Message Exchange BSSID Independent bit 7, set to 0 + */ + uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t; + +/* Advertisement Protocol IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie { + uint8 id; /* IE ID: 0x6c - 108 */ + uint8 len; /* IE length */ + wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. 
Only one + * tuple is defined for P2P Service Discovery + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t; + +/* NQP Vendor-specific Content */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc { + uint8 oui_subtype; /* OUI Subtype: 0x09 */ + uint16 svc_updi; /* Service Update Indicator */ + uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request, + * wifi_p2psd_qresp_tlv_t type for service response + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t; + +/* Service Request TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t; + +/* Query Request Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service request TLV, 5 plus the size of request data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t; + +/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame { + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qreq_len; /* Query Request Length */ + uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t; + +/* Service Response TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 status; /* Value defined in Table 57 of P2P spec. 
*/ + uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t; + +/* Query Response Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Lenth of service response TLV, 6 plus the size of resp data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t; + +/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t; + +/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint8 fragment_id; /* Fragmentation ID */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t; + +/* Wi-Fi GAS Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame { + uint8 category; /* 0x04 Public Action Frame */ + uint8 action; /* 0x6c Advertisement Protocol */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t + * or wifi_p2psd_gas_iresp_frame_t format + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _P2P_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h new file mode 100644 index 000000000000..a1d7ac937cf3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h @@ -0,0 +1,78 @@ +/* + * SD-SPI Protocol Standard + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sdspi.h 518342 2014-12-01 23:21:41Z $ + */ +#ifndef _SD_SPI_H +#define _SD_SPI_H + +#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */ +#define SPI_START_S 31 +#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */ +#define SPI_DIR_S 30 +#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */ +#define SPI_CMD_INDEX_S 24 +#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */ +#define SPI_RW_S 23 +#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */ +#define SPI_FUNC_S 20 +#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */ +#define SPI_RAW_S 19 +#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */ +#define SPI_STUFF_S 18 +#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */ +#define SPI_BLKMODE_S 19 +#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */ +#define SPI_OPCODE_S 18 +#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */ +#define SPI_ADDR_S 1 +#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */ +#define SPI_STUFF0_S 0 + +#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */ +#define SPI_RSP_START_S 7 +#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */ +#define SPI_RSP_PARAM_ERR_S 6 +#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */ +#define SPI_RSP_RFU5_S 5 +#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */ +#define SPI_RSP_FUNC_ERR_S 4 +#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */ +#define SPI_RSP_CRC_ERR_S 3 +#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */ +#define SPI_RSP_ILL_CMD_S 2 +#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */ +#define SPI_RSP_RFU1_S 1 +#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */ +#define SPI_RSP_IDLE_S 0 + +/* SD-SPI Protocol Definitions */ +#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */ +#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */ +#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */ +#define SDSPI_START_BIT_MASK 0x80 + +#endif /* _SD_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h new file mode 100644 index 000000000000..77b1458b3683 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h @@ -0,0 +1,98 @@ +/* + * 802.1Q VLAN protocol definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: vlan.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _vlan_h_ +#define _vlan_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + +#ifndef VLAN_VID_MASK +#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */ +#endif + +#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */ +#define VLAN_PRI_SHIFT 13 /* user priority */ + +#define VLAN_PRI_MASK 7 /* 3 bits of priority */ + +#define VLAN_TPID_OFFSET 12 /* offset of tag protocol id field */ +#define VLAN_TCI_OFFSET 14 /* offset of tag ctrl info field */ + +#define VLAN_TAG_LEN 4 +#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) /* offset in Ethernet II packet only */ + +#define VLAN_TPID 0x8100 /* VLAN ethertype/Tag Protocol ID */ + +struct vlan_header { + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ +}; + +struct ethervlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ + uint16 ether_type; +}; + +struct dot3_mac_llc_snapvlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */ + uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */ + uint16 length; /* frame length incl header */ + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[3]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ + uint16 ether_type; /* ethertype */ +}; + +#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN) + + +/* This marks the end of a packed structure section. */ +#include + +#define ETHERVLAN_MOVE_HDR(d, s) \ +do { \ + struct ethervlan_header t; \ + t = *(struct ethervlan_header *)(s); \ + *(struct ethervlan_header *)(d) = t; \ +} while (0) + +#endif /* _vlan_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h new file mode 100644 index 000000000000..ef5d664dabee --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h @@ -0,0 +1,182 @@ +/* + * Fundamental types and constants relating to WPA + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wpa.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _proto_wpa_h_ +#define _proto_wpa_h_ + +#include +#include + + +/* This marks the start of a packed structure section. */ +#include + +/* Reason Codes */ + +/* 13 through 23 taken from IEEE Std 802.11i-2004 */ +#define DOT11_RC_INVALID_WPA_IE 13 /* Invalid info. element */ +#define DOT11_RC_MIC_FAILURE 14 /* Michael failure */ +#define DOT11_RC_4WH_TIMEOUT 15 /* 4-way handshake timeout */ +#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 /* Group key update timeout */ +#define DOT11_RC_WPA_IE_MISMATCH 17 /* WPA IE in 4-way handshake differs from + * (re-)assoc. request/probe response + */ +#define DOT11_RC_INVALID_MC_CIPHER 18 /* Invalid multicast cipher */ +#define DOT11_RC_INVALID_UC_CIPHER 19 /* Invalid unicast cipher */ +#define DOT11_RC_INVALID_AKMP 20 /* Invalid authenticated key management protocol */ +#define DOT11_RC_BAD_WPA_VERSION 21 /* Unsupported WPA version */ +#define DOT11_RC_INVALID_WPA_CAP 22 /* Invalid WPA IE capabilities */ +#define DOT11_RC_8021X_AUTH_FAIL 23 /* 802.1X authentication failure */ + +#define WPA2_PMKID_LEN 16 + +/* WPA IE fixed portion */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 tag; /* TAG */ + uint8 length; /* TAG length */ + uint8 oui[3]; /* IE OUI */ + uint8 oui_type; /* OUI type */ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; /* IE version */ +} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t; +#define WPA_IE_OUITYPE_LEN 4 +#define WPA_IE_FIXED_LEN 8 +#define WPA_IE_TAG_FIXED_LEN 6 + +#define BIP_OUI_TYPE WPA2_OUI "\x06" + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 tag; /* TAG */ + uint8 length; /* TAG length */ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; /* IE version */ +} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t; +#define WPA_RSN_IE_FIXED_LEN 4 +#define WPA_RSN_IE_TAG_FIXED_LEN 2 +typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN]; + +#define WFA_OSEN_IE_FIXED_LEN 6 + +/* WPA suite/multicast suite */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 oui[3]; + uint8 type; +} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t; +#define WPA_SUITE_LEN 4 + +/* WPA unicast suite list/key management suite list */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_suite_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t; +#define WPA_IE_SUITE_COUNT_LEN 2 +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_pmkid_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t; + +/* WPA cipher suites */ +#define WPA_CIPHER_NONE 0 /* None */ +#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */ +#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */ +#define WPA_CIPHER_AES_OCB 3 /* AES (OCB) */ +#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */ +#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */ +#define WPA_CIPHER_BIP 6 /* WEP (104-bit) */ +#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */ + + +#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ + (cipher) == WPA_CIPHER_WEP_40 || \ + (cipher) == WPA_CIPHER_WEP_104 || \ + (cipher) == WPA_CIPHER_TKIP 
|| \ + (cipher) == WPA_CIPHER_AES_OCB || \ + (cipher) == WPA_CIPHER_AES_CCM || \ + (cipher) == WPA_CIPHER_TPK) + + +/* WPA TKIP countermeasures parameters */ +#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */ +#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */ + +/* RSN IE defines */ +#define RSN_CAP_LEN 2 /* Length of RSN capabilities field (2 octets) */ + +/* RSN Capabilities defined in 802.11i */ +#define RSN_CAP_PREAUTH 0x0001 +#define RSN_CAP_NOPAIRWISE 0x0002 +#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C +#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2 +#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030 +#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4 +#define RSN_CAP_1_REPLAY_CNTR 0 +#define RSN_CAP_2_REPLAY_CNTRS 1 +#define RSN_CAP_4_REPLAY_CNTRS 2 +#define RSN_CAP_16_REPLAY_CNTRS 3 +#define RSN_CAP_MFPR 0x0040 +#define RSN_CAP_MFPC 0x0080 +#define RSN_CAP_SPPC 0x0400 +#define RSN_CAP_SPPR 0x0800 + +/* WPA capabilities defined in 802.11i */ +#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS +#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS +#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT +#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK + +/* WPA capabilities defined in 802.11zD9.0 */ +#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1) /* bit 9 */ + +/* WPA Specific defines */ +#define WPA_CAP_LEN RSN_CAP_LEN /* Length of RSN capabilities in RSN IE (2 octets) */ +#define WPA_PMKID_CNT_LEN 2 /* Length of RSN PMKID count (2 octests) */ + +#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH + +#define WPA2_PMKID_COUNT_LEN 2 + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _proto_wpa_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/wps.h b/drivers/net/wireless/bcmdhd/include/proto/wps.h new file mode 100644 index 000000000000..495d7f181fd3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/wps.h @@ -0,0 +1,389 @@ +/* + * WPS IE definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id$ + */ + +#ifndef _WPS_ +#define _WPS_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Data Element Definitions */ +#define WPS_ID_AP_CHANNEL 0x1001 +#define WPS_ID_ASSOC_STATE 0x1002 +#define WPS_ID_AUTH_TYPE 0x1003 +#define WPS_ID_AUTH_TYPE_FLAGS 0x1004 +#define WPS_ID_AUTHENTICATOR 0x1005 +#define WPS_ID_CONFIG_METHODS 0x1008 +#define WPS_ID_CONFIG_ERROR 0x1009 +#define WPS_ID_CONF_URL4 0x100A +#define WPS_ID_CONF_URL6 0x100B +#define WPS_ID_CONN_TYPE 0x100C +#define WPS_ID_CONN_TYPE_FLAGS 0x100D +#define WPS_ID_CREDENTIAL 0x100E +#define WPS_ID_DEVICE_NAME 0x1011 +#define WPS_ID_DEVICE_PWD_ID 0x1012 +#define WPS_ID_E_HASH1 0x1014 +#define WPS_ID_E_HASH2 0x1015 +#define WPS_ID_E_SNONCE1 0x1016 +#define WPS_ID_E_SNONCE2 0x1017 +#define WPS_ID_ENCR_SETTINGS 0x1018 +#define WPS_ID_ENCR_TYPE 0x100F +#define WPS_ID_ENCR_TYPE_FLAGS 0x1010 +#define WPS_ID_ENROLLEE_NONCE 0x101A +#define WPS_ID_FEATURE_ID 0x101B +#define WPS_ID_IDENTITY 0x101C +#define WPS_ID_IDENTITY_PROOF 0x101D +#define WPS_ID_KEY_WRAP_AUTH 0x101E +#define WPS_ID_KEY_IDENTIFIER 0x101F +#define WPS_ID_MAC_ADDR 0x1020 +#define WPS_ID_MANUFACTURER 0x1021 +#define WPS_ID_MSG_TYPE 0x1022 +#define WPS_ID_MODEL_NAME 0x1023 +#define WPS_ID_MODEL_NUMBER 0x1024 +#define WPS_ID_NW_INDEX 0x1026 +#define WPS_ID_NW_KEY 0x1027 +#define WPS_ID_NW_KEY_INDEX 0x1028 +#define WPS_ID_NEW_DEVICE_NAME 0x1029 +#define WPS_ID_NEW_PWD 0x102A +#define WPS_ID_OOB_DEV_PWD 0x102C +#define WPS_ID_OS_VERSION 0x102D +#define WPS_ID_POWER_LEVEL 0x102F +#define WPS_ID_PSK_CURRENT 0x1030 +#define WPS_ID_PSK_MAX 0x1031 +#define WPS_ID_PUBLIC_KEY 0x1032 +#define WPS_ID_RADIO_ENABLED 0x1033 +#define WPS_ID_REBOOT 0x1034 +#define WPS_ID_REGISTRAR_CURRENT 0x1035 +#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036 +#define WPS_ID_REGISTRAR_LIST 0x1037 +#define WPS_ID_REGISTRAR_MAX 0x1038 +#define WPS_ID_REGISTRAR_NONCE 0x1039 +#define WPS_ID_REQ_TYPE 0x103A +#define WPS_ID_RESP_TYPE 0x103B +#define WPS_ID_RF_BAND 0x103C +#define WPS_ID_R_HASH1 0x103D +#define WPS_ID_R_HASH2 0x103E +#define WPS_ID_R_SNONCE1 0x103F +#define WPS_ID_R_SNONCE2 0x1040 +#define WPS_ID_SEL_REGISTRAR 0x1041 +#define WPS_ID_SERIAL_NUM 0x1042 +#define WPS_ID_SC_STATE 0x1044 +#define WPS_ID_SSID 0x1045 +#define WPS_ID_TOT_NETWORKS 0x1046 +#define WPS_ID_UUID_E 0x1047 +#define WPS_ID_UUID_R 0x1048 +#define WPS_ID_VENDOR_EXT 0x1049 +#define WPS_ID_VERSION 0x104A +#define WPS_ID_X509_CERT_REQ 0x104B +#define WPS_ID_X509_CERT 0x104C +#define WPS_ID_EAP_IDENTITY 0x104D +#define WPS_ID_MSG_COUNTER 0x104E +#define WPS_ID_PUBKEY_HASH 0x104F +#define WPS_ID_REKEY_KEY 0x1050 +#define WPS_ID_KEY_LIFETIME 0x1051 +#define WPS_ID_PERM_CFG_METHODS 0x1052 +#define WPS_ID_SEL_REG_CFG_METHODS 0x1053 +#define WPS_ID_PRIM_DEV_TYPE 0x1054 +#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055 +#define WPS_ID_PORTABLE_DEVICE 0x1056 +#define WPS_ID_AP_SETUP_LOCKED 0x1057 +#define WPS_ID_APP_LIST 0x1058 +#define WPS_ID_EAP_TYPE 0x1059 +#define WPS_ID_INIT_VECTOR 0x1060 +#define WPS_ID_KEY_PROVIDED_AUTO 0x1061 +#define WPS_ID_8021X_ENABLED 0x1062 +#define WPS_ID_WEP_TRANSMIT_KEY 0x1064 +#define WPS_ID_REQ_DEV_TYPE 0x106A + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WFA_VENDOR_EXT_ID "\x00\x37\x2A" +#define WPS_WFA_SUBID_VERSION2 0x00 +#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02 +#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04 +#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05 + + +/* WCN-NET Windows Rally Vertical Pairing 
Vendor Extensions */ +#define MS_VENDOR_EXT_ID "\x00\x01\x37" +#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */ +#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */ + +/* Vertical Pairing Identifier TLV Definitions */ +#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */ +#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */ +#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */ +#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */ +#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested. + * Not supported in Windows 7 + */ +#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */ + +/* sizes of the fixed size elements */ +#define WPS_ID_AP_CHANNEL_S 2 +#define WPS_ID_ASSOC_STATE_S 2 +#define WPS_ID_AUTH_TYPE_S 2 +#define WPS_ID_AUTH_TYPE_FLAGS_S 2 +#define WPS_ID_AUTHENTICATOR_S 8 +#define WPS_ID_CONFIG_METHODS_S 2 +#define WPS_ID_CONFIG_ERROR_S 2 +#define WPS_ID_CONN_TYPE_S 1 +#define WPS_ID_CONN_TYPE_FLAGS_S 1 +#define WPS_ID_DEVICE_PWD_ID_S 2 +#define WPS_ID_ENCR_TYPE_S 2 +#define WPS_ID_ENCR_TYPE_FLAGS_S 2 +#define WPS_ID_FEATURE_ID_S 4 +#define WPS_ID_MAC_ADDR_S 6 +#define WPS_ID_MSG_TYPE_S 1 +#define WPS_ID_SC_STATE_S 1 +#define WPS_ID_RF_BAND_S 1 +#define WPS_ID_OS_VERSION_S 4 +#define WPS_ID_VERSION_S 1 +#define WPS_ID_SEL_REGISTRAR_S 1 +#define WPS_ID_SEL_REG_CFG_METHODS_S 2 +#define WPS_ID_REQ_TYPE_S 1 +#define WPS_ID_RESP_TYPE_S 1 +#define WPS_ID_AP_SETUP_LOCKED_S 1 + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WPS_WFA_SUBID_VERSION2_S 1 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1 +#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1 +#define WPS_WFA_SUBID_REG_CFG_METHODS_S 2 + +/* Association states */ +#define WPS_ASSOC_NOT_ASSOCIATED 0 +#define WPS_ASSOC_CONN_SUCCESS 1 +#define WPS_ASSOC_CONFIG_FAIL 2 +#define WPS_ASSOC_ASSOC_FAIL 3 +#define WPS_ASSOC_IP_FAIL 4 + +/* Authentication types */ +#define WPS_AUTHTYPE_OPEN 0x0001 +#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA2 0x0010 +#define WPS_AUTHTYPE_WPA2PSK 0x0020 + +/* Config methods */ +#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_LABEL 0x0004 +#define WPS_CONFMET_DISPLAY 0x0008 +#define WPS_CONFMET_EXT_NFC_TOK 0x0010 +#define WPS_CONFMET_INT_NFC_TOK 0x0020 +#define WPS_CONFMET_NFC_INTF 0x0040 +#define WPS_CONFMET_PBC 0x0080 +#define WPS_CONFMET_KEYPAD 0x0100 +/* WSC 2.0 */ +#define WPS_CONFMET_VIRT_PBC 0x0280 +#define WPS_CONFMET_PHY_PBC 0x0480 +#define WPS_CONFMET_VIRT_DISPLAY 0x2008 +#define WPS_CONFMET_PHY_DISPLAY 0x4008 + +/* WPS error messages */ +#define WPS_ERROR_NO_ERROR 0 +#define WPS_ERROR_OOB_INT_READ_ERR 1 +#define WPS_ERROR_DECRYPT_CRC_FAIL 2 +#define WPS_ERROR_CHAN24_NOT_SUPP 3 +#define WPS_ERROR_CHAN50_NOT_SUPP 4 +#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAIL_CONN_REGISTRAR 11 +#define WPS_ERROR_MULTI_PBC_DETECTED 12 +#define 
WPS_ERROR_ROGUE_SUSPECTED 13 +#define WPS_ERROR_DEVICE_BUSY 14 +#define WPS_ERROR_SETUP_LOCKED 15 +#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18 +#define WPS_ERROR_60GHZ_NOT_SUPPORT 19 +#define WPS_ERROR_PKH_MISMATCH 20 /* Public Key Hash Mismatch */ + +/* Connection types */ +#define WPS_CONNTYPE_ESS 0x01 +#define WPS_CONNTYPE_IBSS 0x02 + +/* Device password ID */ +#define WPS_DEVICEPWDID_DEFAULT 0x0000 +#define WPS_DEVICEPWDID_USER_SPEC 0x0001 +#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002 +#define WPS_DEVICEPWDID_REKEY 0x0003 +#define WPS_DEVICEPWDID_PUSH_BTN 0x0004 +#define WPS_DEVICEPWDID_REG_SPEC 0x0005 +#define WPS_DEVICEPWDID_IBSS 0x0006 +#define WPS_DEVICEPWDID_NFC_CHO 0x0007 /* NFC-Connection-Handover */ +#define WPS_DEVICEPWDID_WFDS 0x0008 /* Wi-Fi Direct Services Specification */ + +/* Encryption type */ +#define WPS_ENCRTYPE_NONE 0x0001 +#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only + * be advertised on the AP when Mixed Mode + * is enabled (Encryption Type is 0x000c). + */ +#define WPS_ENCRTYPE_AES 0x0008 + + +/* WPS Message Types */ +#define WPS_ID_BEACON 0x01 +#define WPS_ID_PROBE_REQ 0x02 +#define WPS_ID_PROBE_RESP 0x03 +#define WPS_ID_MESSAGE_M1 0x04 +#define WPS_ID_MESSAGE_M2 0x05 +#define WPS_ID_MESSAGE_M2D 0x06 +#define WPS_ID_MESSAGE_M3 0x07 +#define WPS_ID_MESSAGE_M4 0x08 +#define WPS_ID_MESSAGE_M5 0x09 +#define WPS_ID_MESSAGE_M6 0x0A +#define WPS_ID_MESSAGE_M7 0x0B +#define WPS_ID_MESSAGE_M8 0x0C +#define WPS_ID_MESSAGE_ACK 0x0D +#define WPS_ID_MESSAGE_NACK 0x0E +#define WPS_ID_MESSAGE_DONE 0x0F + +/* WSP private ID for local use */ +#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1) +#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2) +#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3) +#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4) +#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5) +#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6) + + +/* Device Type categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_CAT_COMPUTER 1 +#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2 +#define WPS_DEVICE_TYPE_CAT_PRINTER 3 +#define WPS_DEVICE_TYPE_CAT_CAMERA 4 +#define WPS_DEVICE_TYPE_CAT_STORAGE 5 +#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6 +#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7 +#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8 +#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9 +#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10 +#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */ + +/* Device Type sub categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */ +#define 
WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1 +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */ + + +/* Device request/response type */ +#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00 +#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01 +#define WPS_MSGTYPE_REGISTRAR 0x02 +#define WPS_MSGTYPE_AP_WLAN_MGR 0x03 + +/* RF Band */ +#define WPS_RFBAND_24GHZ 0x01 +#define WPS_RFBAND_50GHZ 0x02 + +/* Simple Config state */ +#define WPS_SCSTATE_UNCONFIGURED 0x01 +#define WPS_SCSTATE_CONFIGURED 0x02 +#define WPS_SCSTATE_OFF 11 + +/* WPS Vendor extension key */ +#define WPS_OUI_HEADER_LEN 2 +#define WPS_OUI_HEADER_SIZE 4 +#define WPS_OUI_FIXED_HEADER_OFF 16 +#define WPS_WFA_SUBID_V2_OFF 3 +#define WPS_WFA_V2_OFF 5 + +#ifdef __cplusplus +} +#endif + +#endif /* _WPS_ */ diff --git a/drivers/net/wireless/bcmdhd/include/rte_ioctl.h b/drivers/net/wireless/bcmdhd/include/rte_ioctl.h new file mode 100644 index 000000000000..9c214ae704ac --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/rte_ioctl.h @@ -0,0 +1,85 @@ +/* + * HND Run Time Environment ioctl. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: rte_ioctl.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _rte_ioctl_h_ +#define _rte_ioctl_h_ + +/* RTE IOCTL definitions for generic ether devices */ +#define RTEGHWADDR 0x8901 +#define RTESHWADDR 0x8902 +#define RTEGMTU 0x8903 +#define RTEGSTATS 0x8904 +#define RTEGALLMULTI 0x8905 +#define RTESALLMULTI 0x8906 +#define RTEGPROMISC 0x8907 +#define RTESPROMISC 0x8908 +#define RTESMULTILIST 0x8909 +#define RTEGUP 0x890A +#define RTEGPERMADDR 0x890B +#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */ +#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */ + +#define RTE_IOCTL_QUERY 0x00 +#define RTE_IOCTL_SET 0x01 +#define RTE_IOCTL_OVL_IDX_MASK 0x1e +#define RTE_IOCTL_OVL_RSV 0x20 +#define RTE_IOCTL_OVL 0x40 +#define RTE_IOCTL_OVL_IDX_SHIFT 1 + +enum hnd_ioctl_cmd { + HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */ + + /* PCIEDEV specific wl <--> bus ioctls */ + BUS_GET_VAR = 2, + BUS_SET_VAR = 3, + BUS_FLUSH_RXREORDER_Q = 4, + BUS_SET_LTR_STATE = 5, + BUS_FLUSH_CHAINED_PKTS = 6, + BUS_SET_COPY_COUNT = 7 +}; + +#define SDPCMDEV_SET_MAXTXPKTGLOM 1 + +typedef struct memuse_info { + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + uint32 tot; /* Total memory */ + uint32 text_len; /* Size of Text segment memory */ + uint32 data_len; /* Size of Data segment memory */ + uint32 bss_len; /* Size of BSS segment memory */ + + uint32 arena_size; /* Total Heap size */ + uint32 arena_free; /* Heap memory available or free */ + uint32 inuse_size; /* Heap memory currently in use */ + uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */ + uint32 inuse_overhead; /* tally of allocated mem_t blocks */ + uint32 inuse_total; /* Heap in-use + Heap overhead memory */ +} memuse_info_t; + +#endif /* _rte_ioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h new file mode 100644 index 000000000000..0f8c52e1c220 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h @@ -0,0 +1,3761 @@ +/* + * SiliconBackplane Chipcommon core hardware definitions. + * + * The chipcommon core provides chip identification, SB control, + * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, + * GPIO interface, extbus, and support for serial and parallel flashes. 
+ * + * $Id: sbchipc.h 574579 2015-07-27 15:36:37Z $ + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + */ + +#ifndef _SBCHIPC_H +#define _SBCHIPC_H + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/** + * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the + * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to + * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead + * be assigned their respective chipc-specific address space and connected to the Always On + * Backplane via the APB interface. 
+ */ +typedef volatile struct { + uint32 PAD[384]; + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 PAD; + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 PAD[3]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 PAD[20]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 PAD[8]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 PAD[15]; + uint32 pmuintctrl0; /* 0x780 */ +} pmuregs_t; + +typedef struct eci_prerev35 { + uint32 eci_output; + uint32 eci_control; + uint32 eci_inputlo; + uint32 eci_inputmi; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolaritymi; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskmi; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventmi; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskmi; + uint32 eci_eventmaskhi; + uint32 PAD[3]; +} eci_prerev35_t; + +typedef struct eci_rev35 { + uint32 eci_outputlo; + uint32 eci_outputhi; + uint32 eci_controllo; + uint32 eci_controlhi; + uint32 eci_inputlo; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskhi; + uint32 eci_auxtx; + uint32 eci_auxrx; + uint32 eci_datatag; + uint32 eci_uartescvalue; + uint32 eci_autobaudctr; + uint32 eci_uartfifolevel; +} eci_rev35_t; + +typedef struct flash_config { + uint32 PAD[19]; + /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */ + uint32 flashstrconfig; +} flash_config_t; + +typedef volatile struct { + uint32 chipid; /* 0x0 */ + uint32 capabilities; + uint32 corecontrol; /* corerev >= 1 */ + uint32 bist; + + /* OTP */ + uint32 otpstatus; /* 0x10, corerev >= 10 */ + uint32 otpcontrol; + uint32 otpprog; + uint32 otplayout; /* corerev >= 23 */ + + /* Interrupt control */ + uint32 intstatus; /* 0x20 */ + uint32 intmask; + + /* Chip specific regs */ + uint32 chipcontrol; /* 0x28, rev >= 11 */ + uint32 chipstatus; /* 0x2c, rev >= 11 */ + + /* Jtag Master */ + uint32 jtagcmd; /* 0x30, rev >= 10 */ + uint32 jtagir; + uint32 jtagdr; + uint32 jtagctrl; + + /* serial flash interface registers */ + uint32 flashcontrol; /* 0x40 */ + uint32 flashaddress; + uint32 flashdata; + uint32 otplayoutextension; /* rev >= 35 */ + + /* Silicon backplane configuration broadcast control */ + uint32 broadcastaddress; /* 0x50 */ + uint32 broadcastdata; + + /* gpio - cleared only by power-on-reset */ + uint32 gpiopullup; /* 0x58, corerev >= 20 */ + uint32 gpiopulldown; /* 0x5c, corerev >= 20 */ + uint32 gpioin; /* 0x60 */ + uint32 gpioout; /* 0x64 */ + uint32 gpioouten; /* 0x68 
*/ + uint32 gpiocontrol; /* 0x6C */ + uint32 gpiointpolarity; /* 0x70 */ + uint32 gpiointmask; /* 0x74 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioevent; + uint32 gpioeventintmask; + + /* Watchdog timer */ + uint32 watchdog; /* 0x80 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioeventintpolarity; + + /* GPIO based LED powersave registers corerev >= 16 */ + uint32 gpiotimerval; /* 0x88 */ + uint32 gpiotimeroutmask; + + /* clock control */ + uint32 clockcontrol_n; /* 0x90 */ + uint32 clockcontrol_sb; /* aka m0 */ + uint32 clockcontrol_pci; /* aka m1 */ + uint32 clockcontrol_m2; /* mii/uart/mipsref */ + uint32 clockcontrol_m3; /* cpu */ + uint32 clkdiv; /* corerev >= 3 */ + uint32 gpiodebugsel; /* corerev >= 28 */ + uint32 capabilities_ext; /* 0xac */ + + /* pll delay registers (corerev >= 4) */ + uint32 pll_on_delay; /* 0xb0 */ + uint32 fref_sel_delay; + uint32 slow_clk_ctl; /* 5 < corerev < 10 */ + uint32 PAD; + + /* Instaclock registers (corerev >= 10) */ + uint32 system_clk_ctl; /* 0xc0 */ + uint32 clkstatestretch; + uint32 PAD[2]; + + /* Indirect backplane access (corerev >= 22) */ + uint32 bp_addrlow; /* 0xd0 */ + uint32 bp_addrhigh; + uint32 bp_data; + uint32 PAD; + uint32 bp_indaccess; + /* SPI registers, corerev >= 37 */ + uint32 gsioctrl; + uint32 gsioaddress; + uint32 gsiodata; + + /* More clock dividers (corerev >= 32) */ + uint32 clkdiv2; + /* FAB ID (corerev >= 40) */ + uint32 otpcontrol1; + uint32 fabid; /* 0xf8 */ + + /* In AI chips, pointer to erom */ + uint32 eromptr; /* 0xfc */ + + /* ExtBus control registers (corerev >= 3) */ + uint32 pcmcia_config; /* 0x100 */ + uint32 pcmcia_memwait; + uint32 pcmcia_attrwait; + uint32 pcmcia_iowait; + uint32 ide_config; + uint32 ide_memwait; + uint32 ide_attrwait; + uint32 ide_iowait; + uint32 prog_config; + uint32 prog_waitcount; + uint32 flash_config; + uint32 flash_waitcount; + uint32 SECI_config; /* 0x130 SECI configuration */ + uint32 SECI_status; + uint32 SECI_statusmask; + uint32 SECI_rxnibchanged; + + uint32 PAD[20]; + + /* SROM interface (corerev >= 32) */ + uint32 sromcontrol; /* 0x190 */ + uint32 sromaddress; + uint32 sromdata; + uint32 PAD[1]; /* 0x19C */ + /* NAND flash registers for BCM4706 (corerev = 31) */ + uint32 nflashctrl; /* 0x1a0 */ + uint32 nflashconf; + uint32 nflashcoladdr; + uint32 nflashrowaddr; + uint32 nflashdata; + uint32 nflashwaitcnt0; /* 0x1b4 */ + uint32 PAD[2]; + + uint32 seci_uart_data; /* 0x1C0 */ + uint32 seci_uart_bauddiv; + uint32 seci_uart_fcr; + uint32 seci_uart_lcr; + uint32 seci_uart_mcr; + uint32 seci_uart_lsr; + uint32 seci_uart_msr; + uint32 seci_uart_baudadj; + /* Clock control and hardware workarounds (corerev >= 20) */ + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 hw_war; + uint32 PAD[70]; + + /* UARTs */ + uint8 uart0data; /* 0x300 */ + uint8 uart0imr; + uint8 uart0fcr; + uint8 uart0lcr; + uint8 uart0mcr; + uint8 uart0lsr; + uint8 uart0msr; + uint8 uart0scratch; + uint8 PAD[248]; /* corerev >= 1 */ + + uint8 uart1data; /* 0x400 */ + uint8 uart1imr; + uint8 uart1fcr; + uint8 uart1lcr; + uint8 uart1mcr; + uint8 uart1lsr; + uint8 uart1msr; + uint8 uart1scratch; /* 0x407 */ + uint32 PAD[62]; + + /* save/restore, corerev >= 48 */ + uint32 sr_capability; /* 0x500 */ + uint32 sr_control0; /* 0x504 */ + uint32 sr_control1; /* 0x508 */ + uint32 gpio_control; /* 0x50C */ + uint32 PAD[60]; + + /* PMU registers (corerev >= 20) */ + /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP. + * The CPU must read them twice, compare, and retry if different. 
+ */ + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 PAD; + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 PAD[3]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 PAD[20]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 PAD[8]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 PAD[15]; + uint32 pmuintctrl0; /* 0x780 */ + uint32 PAD[31]; + uint16 sromotp[512]; /* 0x800 */ +#ifdef CCNFLASH_SUPPORT + /* Nand flash MLC controller registers (corerev >= 38) */ + uint32 nand_revision; /* 0xC00 */ + uint32 nand_cmd_start; + uint32 nand_cmd_addr_x; + uint32 nand_cmd_addr; + uint32 nand_cmd_end_addr; + uint32 nand_cs_nand_select; + uint32 nand_cs_nand_xor; + uint32 PAD; + uint32 nand_spare_rd0; + uint32 nand_spare_rd4; + uint32 nand_spare_rd8; + uint32 nand_spare_rd12; + uint32 nand_spare_wr0; + uint32 nand_spare_wr4; + uint32 nand_spare_wr8; + uint32 nand_spare_wr12; + uint32 nand_acc_control; + uint32 PAD; + uint32 nand_config; + uint32 PAD; + uint32 nand_timing_1; + uint32 nand_timing_2; + uint32 nand_semaphore; + uint32 PAD; + uint32 nand_devid; + uint32 nand_devid_x; + uint32 nand_block_lock_status; + uint32 nand_intfc_status; + uint32 nand_ecc_corr_addr_x; + uint32 nand_ecc_corr_addr; + uint32 nand_ecc_unc_addr_x; + uint32 nand_ecc_unc_addr; + uint32 nand_read_error_count; + uint32 nand_corr_stat_threshold; + uint32 PAD[2]; + uint32 nand_read_addr_x; + uint32 nand_read_addr; + uint32 nand_page_program_addr_x; + uint32 nand_page_program_addr; + uint32 nand_copy_back_addr_x; + uint32 nand_copy_back_addr; + uint32 nand_block_erase_addr_x; + uint32 nand_block_erase_addr; + uint32 nand_inv_read_addr_x; + uint32 nand_inv_read_addr; + uint32 PAD[2]; + uint32 nand_blk_wr_protect; + uint32 PAD[3]; + uint32 nand_acc_control_cs1; + uint32 nand_config_cs1; + uint32 nand_timing_1_cs1; + uint32 nand_timing_2_cs1; + uint32 PAD[20]; + uint32 nand_spare_rd16; + uint32 nand_spare_rd20; + uint32 nand_spare_rd24; + uint32 nand_spare_rd28; + uint32 nand_cache_addr; + uint32 nand_cache_data; + uint32 nand_ctrl_config; + uint32 nand_ctrl_status; +#endif /* CCNFLASH_SUPPORT */ + uint32 gci_corecaps0; /* GCI starting at 0xC00 */ + uint32 gci_corecaps1; + uint32 gci_corecaps2; + uint32 gci_corectrl; + uint32 gci_corestat; /* 0xC10 */ + uint32 gci_intstat; /* 0xC14 */ + uint32 gci_intmask; /* 0xC18 */ + uint32 gci_wakemask; /* 0xC1C */ + uint32 gci_levelintstat; /* 0xC20 */ + uint32 gci_eventintstat; /* 0xC24 */ + uint32 PAD[6]; + uint32 gci_indirect_addr; /* 0xC40 */ + uint32 gci_gpioctl; /* 0xC44 */ + uint32 gci_gpiostatus; + uint32 gci_gpiomask; /* 0xC4C */ + uint32 PAD; + uint32 gci_miscctl; /* 0xC54 */ + uint32 gci_gpiointmask; + uint32 gci_gpiowakemask; + uint32 
gci_input[32]; /* C60 */ + uint32 gci_event[32]; /* CE0 */ + uint32 gci_output[4]; /* D60 */ + uint32 gci_control_0; /* 0xD70 */ + uint32 gci_control_1; /* 0xD74 */ + uint32 gci_intpolreg; /* 0xD78 */ + uint32 gci_levelintmask; /* 0xD7C */ + uint32 gci_eventintmask; /* 0xD80 */ + uint32 PAD[3]; + uint32 gci_inbandlevelintmask; /* 0xD90 */ + uint32 gci_inbandeventintmask; /* 0xD94 */ + uint32 PAD[2]; + uint32 gci_seciauxtx; /* 0xDA0 */ + uint32 gci_seciauxrx; /* 0xDA4 */ + uint32 gci_secitx_datatag; /* 0xDA8 */ + uint32 gci_secirx_datatag; /* 0xDAC */ + uint32 gci_secitx_datamask; /* 0xDB0 */ + uint32 gci_seciusef0tx_reg; /* 0xDB4 */ + uint32 gci_secif0tx_offset; /* 0xDB8 */ + uint32 gci_secif0rx_offset; /* 0xDBC */ + uint32 gci_secif1tx_offset; /* 0xDC0 */ + uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */ + uint32 gci_rxfifoctrl; /* 0xDC8 */ + uint32 gci_uartreadid; /* DCC */ + uint32 gci_uartescval; /* DD0 */ + uint32 PAD; + uint32 gci_secififolevel; /* DD8 */ + uint32 gci_seciuartdata; /* DDC */ + uint32 gci_secibauddiv; /* DE0 */ + uint32 gci_secifcr; /* DE4 */ + uint32 gci_secilcr; /* DE8 */ + uint32 gci_secimcr; /* DEC */ + uint32 gci_secilsr; /* DF0 */ + uint32 gci_secimsr; /* DF4 */ + uint32 gci_baudadj; /* DF8 */ + uint32 PAD; + uint32 gci_chipctrl; /* 0xE00 */ + uint32 gci_chipsts; /* 0xE04 */ + uint32 gci_gpioout; /* 0xE08 */ + uint32 gci_gpioout_read; /* 0xE0C */ + uint32 gci_mpwaketx; /* 0xE10 */ + uint32 gci_mpwakedetect; /* 0xE14 */ + uint32 gci_seciin_ctrl; /* 0xE18 */ + uint32 gci_seciout_ctrl; /* 0xE1C */ + uint32 gci_seciin_auxfifo_en; /* 0xE20 */ + uint32 gci_seciout_txen_txbr; /* 0xE24 */ + uint32 gci_seciin_rxbrstatus; /* 0xE28 */ + uint32 gci_seciin_rxerrstatus; /* 0xE2C */ + uint32 gci_seciin_fcstatus; /* 0xE30 */ + uint32 gci_seciout_txstatus; /* 0xE34 */ + uint32 gci_seciout_txbrstatus; /* 0xE38 */ +} chipcregs_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + + +#define CC_CHIPID 0 +#define CC_CAPABILITIES 4 +#define CC_CHIPST 0x2c +#define CC_EROMPTR 0xfc + +#define CC_OTPST 0x10 +#define CC_INTSTATUS 0x20 +#define CC_INTMASK 0x24 +#define CC_JTAGCMD 0x30 +#define CC_JTAGIR 0x34 +#define CC_JTAGDR 0x38 +#define CC_JTAGCTRL 0x3c +#define CC_GPIOPU 0x58 +#define CC_GPIOPD 0x5c +#define CC_GPIOIN 0x60 +#define CC_GPIOOUT 0x64 +#define CC_GPIOOUTEN 0x68 +#define CC_GPIOCTRL 0x6c +#define CC_GPIOPOL 0x70 +#define CC_GPIOINTM 0x74 +#define CC_GPIOEVENT 0x78 +#define CC_GPIOEVENTMASK 0x7c +#define CC_WATCHDOG 0x80 +#define CC_GPIOEVENTPOL 0x84 +#define CC_CLKC_N 0x90 +#define CC_CLKC_M0 0x94 +#define CC_CLKC_M1 0x98 +#define CC_CLKC_M2 0x9c +#define CC_CLKC_M3 0xa0 +#define CC_CLKDIV 0xa4 +#define CC_CAP_EXT 0xac +#define CC_SYS_CLK_CTL 0xc0 +#define CC_CLKDIV2 0xf0 +#define CC_CLK_CTL_ST SI_CLK_CTL_ST +#define PMU_CTL 0x600 +#define PMU_CAP 0x604 +#define PMU_ST 0x608 +#define PMU_RES_STATE 0x60c +#define PMU_RES_PENDING 0x610 +#define PMU_TIMER 0x614 +#define PMU_MIN_RES_MASK 0x618 +#define PMU_MAX_RES_MASK 0x61c +#define CC_CHIPCTL_ADDR 0x650 +#define CC_CHIPCTL_DATA 0x654 +#define PMU_REG_CONTROL_ADDR 0x658 +#define PMU_REG_CONTROL_DATA 0x65C +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 +#define CC_SROM_CTRL 0x190 +#define CC_SROM_OTP 0x800 /* SROM/OTP address space */ +#define CC_GCI_INDIRECT_ADDR_REG 0xC40 +#define CC_GCI_CHIP_CTRL_REG 0xE00 +#define CC_GCI_CC_OFFSET_2 2 +#define CC_GCI_CC_OFFSET_5 5 +#define CC_SWD_CTRL 0x380 +#define CC_SWD_REQACK 0x384 +#define CC_SWD_DATA 0x388 + + +#define CHIPCTRLREG0 0x0 +#define CHIPCTRLREG1 
0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define CHIPCTRLREG6 0x6 +#define REGCTRLREG4 0x4 +#define REGCTRLREG5 0x5 +#define REGCTRLREG6 0x6 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define PMU_RES_DEP_MASK 0x624 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c +#define PMUREG_RESREQ_TIMER 0x688 +#define PMUREG_RESREQ_MASK1 0x6f4 +#define PMUREG_RESREQ_TIMER1 0x6f0 +#define EXT_LPO_AVAIL 0x100 +#define LPO_SEL (1 << 0) +#define CC_EXT_LPO_PU 0x200000 +#define GC_EXT_LPO_PU 0x2 +#define CC_INT_LPO_PU 0x100000 +#define GC_INT_LPO_PU 0x1 +#define EXT_LPO_SEL 0x8 +#define INT_LPO_SEL 0x4 +#define ENABLE_FINE_CBUCK_CTRL (1 << 30) +#define REGCTRL5_PWM_AUTO_CTRL_MASK 0x007e0000 +#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17 +#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000 +#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16 +#define CC_BP_IND_ACCESS_START_SHIFT 9 +#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT) +#define CC_BP_IND_ACCESS_RDWR_SHIFT 8 +#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT) +#define CC_BP_IND_ACCESS_ERROR_SHIFT 10 +#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT) + +#ifdef SR_DEBUG +#define SUBCORE_POWER_ON 0x0001 +#define PHY_POWER_ON 0x0010 +#define VDDM_POWER_ON 0x0100 +#define MEMLPLDO_POWER_ON 0x1000 +#define SUBCORE_POWER_ON_CHK 0x00040000 +#define PHY_POWER_ON_CHK 0x00080000 +#define VDDM_POWER_ON_CHK 0x00100000 +#define MEMLPLDO_POWER_ON_CHK 0x00200000 +#endif /* SR_DEBUG */ + +#ifdef CCNFLASH_SUPPORT +/* NAND flash support */ +#define CC_NAND_REVISION 0xC00 +#define CC_NAND_CMD_START 0xC04 +#define CC_NAND_CMD_ADDR 0xC0C +#define CC_NAND_SPARE_RD_0 0xC20 +#define CC_NAND_SPARE_RD_4 0xC24 +#define CC_NAND_SPARE_RD_8 0xC28 +#define CC_NAND_SPARE_RD_C 0xC2C +#define CC_NAND_CONFIG 0xC48 +#define CC_NAND_DEVID 0xC60 +#define CC_NAND_DEVID_EXT 0xC64 +#define CC_NAND_INTFC_STATUS 0xC6C +#endif /* CCNFLASH_SUPPORT */ + +/* chipid */ +#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */ +#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */ +#define CID_REV_SHIFT 16 /**< Chip Revision shift */ +#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */ +#define CID_PKG_SHIFT 20 /**< Package Option shift */ +#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */ +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */ +#define CID_TYPE_SHIFT 28 + +/* capabilities */ +#define CC_CAP_UARTS_MASK 0x00000003 /**< Number of UARTs */ +#define CC_CAP_MIPSEB 0x00000004 /**< MIPS is in big-endian mode */ +#define CC_CAP_UCLKSEL 0x00000018 /**< UARTs clock select */ +#define CC_CAP_UINTCLK 0x00000008 /**< UARTs are driven by internal divided clock */ +#define CC_CAP_UARTGPIO 0x00000020 /**< UARTs own GPIOs 15:12 */ +#define CC_CAP_EXTBUS_MASK 0x000000c0 /**< External bus mask */ +#define CC_CAP_EXTBUS_NONE 0x00000000 /**< No ExtBus present */ +#define CC_CAP_EXTBUS_FULL 0x00000040 /**< ExtBus: PCMCIA, IDE & Prog */ +#define CC_CAP_EXTBUS_PROG 0x00000080 /**< ExtBus: ProgIf only */ +#define CC_CAP_FLASH_MASK 0x00000700 /**< Type of flash */ +#define CC_CAP_PLL_MASK 0x00038000 /**< Type of PLL */ +#define CC_CAP_PWR_CTL 0x00040000 /**< Power control */ +#define CC_CAP_OTPSIZE 0x00380000 /**< OTP Size (0 = none) */ +#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */ +#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */ 
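As an aside on how the CID_* fields above are typically consumed: the chipid register packs the chip number, revision, package option, core count and chip type into the masks/shifts just defined. A minimal sketch, not part of the header being added (the function name is hypothetical; uint32 follows this file's own typedef conventions):

/* Illustration only: unpack the chipcommon chipid register using the
 * CID_* masks/shifts defined above.  The function name is hypothetical.
 */
static void cc_parse_chipid(uint32 chipid, uint32 *chip, uint32 *rev, uint32 *pkg)
{
	*chip = chipid & CID_ID_MASK;
	*rev  = (chipid & CID_REV_MASK) >> CID_REV_SHIFT;
	*pkg  = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT;
}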
+#define CC_CAP_JTAGP 0x00400000 /**< JTAG Master Present */ +#define CC_CAP_ROM 0x00800000 /**< Internal boot rom active */ +#define CC_CAP_BKPLN64 0x08000000 /**< 64-bit backplane */ +#define CC_CAP_PMU 0x10000000 /**< PMU Present, rev >= 20 */ +#define CC_CAP_ECI 0x20000000 /**< ECI Present, rev >= 21 */ +#define CC_CAP_SROM 0x40000000 /**< Srom Present, rev >= 32 */ +#define CC_CAP_NFLASH 0x80000000 /**< Nand flash present, rev >= 35 */ + +#define CC_CAP2_SECI 0x00000001 /**< SECI Present, rev >= 36 */ +#define CC_CAP2_GSIO 0x00000002 /**< GSIO (spi/i2c) present, rev >= 37 */ + +/* capabilities extension */ +#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI present */ +#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */ +#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */ +#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */ +#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD present */ + +/* WL Channel Info to BT via GCI - bits 40 - 47 */ +#define GCI_WL_CHN_INFO_MASK (0xFF00) +/* bits [51:48] - reserved for wlan TX pwr index */ +/* bits [55:52] btc mode indication */ +#define GCI_WL_BTC_MODE_SHIFT (20) +#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT) +#define GCI_WL_ANT_BIT_MASK (0x00c0) +#define GCI_WL_ANT_SHIFT_BITS (6) +/* PLL type */ +#define PLL_NONE 0x00000000 +#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */ +#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */ +#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */ +#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */ +#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */ + +/* ILP clock */ +#define ILP_CLOCK 32000 + +/* ALP clock on pre-PMU chips */ +#define ALP_CLOCK 20000000 + +#ifdef CFG_SIM +#define NS_ALP_CLOCK 84922 +#define NS_SLOW_ALP_CLOCK 84922 +#define NS_CPU_CLOCK 534500 +#define NS_SLOW_CPU_CLOCK 534500 +#define NS_SI_CLOCK 271750 +#define NS_SLOW_SI_CLOCK 271750 +#define NS_FAST_MEM_CLOCK 271750 +#define NS_MEM_CLOCK 271750 +#define NS_SLOW_MEM_CLOCK 271750 +#else +#define NS_ALP_CLOCK 125000000 +#define NS_SLOW_ALP_CLOCK 100000000 +#define NS_CPU_CLOCK 1000000000 +#define NS_SLOW_CPU_CLOCK 800000000 +#define NS_SI_CLOCK 250000000 +#define NS_SLOW_SI_CLOCK 200000000 +#define NS_FAST_MEM_CLOCK 800000000 +#define NS_MEM_CLOCK 533000000 +#define NS_SLOW_MEM_CLOCK 400000000 +#endif /* CFG_SIM */ + +/* HT clock */ +#define HT_CLOCK 80000000 + +/* corecontrol */ +#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */ +#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */ +#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */ +#define CC_UARTCLKEN 0x00000008 /**< enable UART Clock (corerev > = 21 */ + +/* retention_ctl */ +#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29 +#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT) + +/* 4321 chipcontrol */ +#define CHIPCTRL_4321A0_DEFAULT 0x3a4 +#define CHIPCTRL_4321A1_DEFAULT 0x0a4 +#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */ + +/* Fields in the otpstatus register in rev >= 21 */ +#define OTPS_OL_MASK 0x000000ff +#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */ +#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */ +#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */ +#define OTPS_OL_GU 0x00000008 /**< general use region is locked */ +#define 
OTPS_GUP_MASK 0x00000f00 +#define OTPS_GUP_SHIFT 8 +#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */ +#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */ +#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */ +#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is programmed */ +#define OTPS_READY 0x00001000 +#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */ +#define OTPS_RV_MASK 0x0fff0000 +#define OTPS_PROGOK 0x40000000 + +/* Fields in the otpcontrol register in rev >= 21 */ +#define OTPC_PROGSEL 0x00000001 +#define OTPC_PCOUNT_MASK 0x0000000e +#define OTPC_PCOUNT_SHIFT 1 +#define OTPC_VSEL_MASK 0x000000f0 +#define OTPC_VSEL_SHIFT 4 +#define OTPC_TMM_MASK 0x00000700 +#define OTPC_TMM_SHIFT 8 +#define OTPC_ODM 0x00000800 +#define OTPC_PROGEN 0x80000000 + +/* Fields in the 40nm otpcontrol register in rev >= 40 */ +#define OTPC_40NM_PROGSEL_SHIFT 0 +#define OTPC_40NM_PCOUNT_SHIFT 1 +#define OTPC_40NM_PCOUNT_WR 0xA +#define OTPC_40NM_PCOUNT_V1X 0xB +#define OTPC_40NM_REGCSEL_SHIFT 5 +#define OTPC_40NM_REGCSEL_DEF 0x4 +#define OTPC_40NM_PROGIN_SHIFT 8 +#define OTPC_40NM_R2X_SHIFT 10 +#define OTPC_40NM_ODM_SHIFT 11 +#define OTPC_40NM_DF_SHIFT 15 +#define OTPC_40NM_VSEL_SHIFT 16 +#define OTPC_40NM_VSEL_WR 0xA +#define OTPC_40NM_VSEL_V1X 0xA +#define OTPC_40NM_VSEL_R1X 0x5 +#define OTPC_40NM_COFAIL_SHIFT 30 + +#define OTPC1_CPCSEL_SHIFT 0 +#define OTPC1_CPCSEL_DEF 6 +#define OTPC1_TM_SHIFT 8 +#define OTPC1_TM_WR 0x84 +#define OTPC1_TM_V1X 0x84 +#define OTPC1_TM_R1X 0x4 +#define OTPC1_CLK_EN_MASK 0x00020000 +#define OTPC1_CLK_DIV_MASK 0x00FC0000 + +/* Fields in otpprog in rev >= 21 and HND OTP */ +#define OTPP_COL_MASK 0x000000ff +#define OTPP_COL_SHIFT 0 +#define OTPP_ROW_MASK 0x0000ff00 +#define OTPP_ROW_MASK9 0x0001ff00 /* for ccrev >= 49 */ +#define OTPP_ROW_SHIFT 8 +#define OTPP_OC_MASK 0x0f000000 +#define OTPP_OC_SHIFT 24 +#define OTPP_READERR 0x10000000 +#define OTPP_VALUE_MASK 0x20000000 +#define OTPP_VALUE_SHIFT 29 +#define OTPP_START_BUSY 0x80000000 +#define OTPP_READ 0x40000000 /* HND OTP */ + +/* Fields in otplayout register */ +#define OTPL_HWRGN_OFF_MASK 0x00000FFF +#define OTPL_HWRGN_OFF_SHIFT 0 +#define OTPL_WRAP_REVID_MASK 0x00F80000 +#define OTPL_WRAP_REVID_SHIFT 19 +#define OTPL_WRAP_TYPE_MASK 0x00070000 +#define OTPL_WRAP_TYPE_SHIFT 16 +#define OTPL_WRAP_TYPE_65NM 0 +#define OTPL_WRAP_TYPE_40NM 1 +#define OTPL_ROW_SIZE_MASK 0x0000F000 +#define OTPL_ROW_SIZE_SHIFT 12 + +/* otplayout reg corerev >= 36 */ +#define OTP_CISFORMAT_NEW 0x80000000 + +/* Opcodes for OTPP_OC field */ +#define OTPPOC_READ 0 +#define OTPPOC_BIT_PROG 1 +#define OTPPOC_VERIFY 3 +#define OTPPOC_INIT 4 +#define OTPPOC_SET 5 +#define OTPPOC_RESET 6 +#define OTPPOC_OCST 7 +#define OTPPOC_ROW_LOCK 8 +#define OTPPOC_PRESCN_TEST 9 + +/* Opcodes for OTPP_OC field (40NM) */ +#define OTPPOC_READ_40NM 0 +#define OTPPOC_PROG_ENABLE_40NM 1 +#define OTPPOC_PROG_DISABLE_40NM 2 +#define OTPPOC_VERIFY_40NM 3 +#define OTPPOC_WORD_VERIFY_1_40NM 4 +#define OTPPOC_ROW_LOCK_40NM 5 +#define OTPPOC_STBY_40NM 6 +#define OTPPOC_WAKEUP_40NM 7 +#define OTPPOC_WORD_VERIFY_0_40NM 8 +#define OTPPOC_PRESCN_TEST_40NM 9 +#define OTPPOC_BIT_PROG_40NM 10 +#define OTPPOC_WORDPROG_40NM 11 +#define OTPPOC_BURNIN_40NM 12 +#define OTPPOC_AUTORELOAD_40NM 13 +#define OTPPOC_OVST_READ_40NM 14 +#define OTPPOC_OVST_PROG_40NM 15 + +/* Fields in otplayoutextension */ +#define OTPLAYOUTEXT_FUSE_MASK 0x3FF + + +/* Jtagm characteristics that appeared at a given corerev */ +#define 
JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */ +#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */ +#define JTAGM_CREV_RTI 28 /**< Able to do return-to-idle */ + +/* jtagcmd */ +#define JCMD_START 0x80000000 +#define JCMD_BUSY 0x80000000 +#define JCMD_STATE_MASK 0x60000000 +#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */ +#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */ +#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */ +#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */ +#define JCMD0_ACC_MASK 0x0000f000 +#define JCMD0_ACC_IRDR 0x00000000 +#define JCMD0_ACC_DR 0x00001000 +#define JCMD0_ACC_IR 0x00002000 +#define JCMD0_ACC_RESET 0x00003000 +#define JCMD0_ACC_IRPDR 0x00004000 +#define JCMD0_ACC_PDR 0x00005000 +#define JCMD0_IRW_MASK 0x00000f00 +#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */ +#define JCMD_ACC_IRDR 0x00000000 +#define JCMD_ACC_DR 0x00010000 +#define JCMD_ACC_IR 0x00020000 +#define JCMD_ACC_RESET 0x00030000 +#define JCMD_ACC_IRPDR 0x00040000 +#define JCMD_ACC_PDR 0x00050000 +#define JCMD_ACC_PIR 0x00060000 +#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */ +#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */ +#define JCMD_IRW_MASK 0x00001f00 +#define JCMD_IRW_SHIFT 8 +#define JCMD_DRW_MASK 0x0000003f + +/* jtagctrl */ +#define JCTRL_FORCE_CLK 4 /**< Force clock */ +#define JCTRL_EXT_EN 2 /**< Enable external targets */ +#define JCTRL_EN 1 /**< Enable Jtag master */ +#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */ + +/* swdmasterctrl */ +#define SWDCTRL_INT_EN 8 /**< Enable internal targets */ +#define SWDCTRL_FORCE_CLK 4 /**< Force clock */ +#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */ +#define SWDCTRL_EN 1 /**< Enable Jtag master */ + +/* Fields in clkdiv */ +#define CLKD_SFLASH 0x1f000000 +#define CLKD_SFLASH_SHIFT 24 +#define CLKD_OTP 0x000f0000 +#define CLKD_OTP_SHIFT 16 +#define CLKD_JTAG 0x00000f00 +#define CLKD_JTAG_SHIFT 8 +#define CLKD_UART 0x000000ff + +#define CLKD2_SROM 0x00000003 +#define CLKD2_SWD 0xf8000000 +#define CLKD2_SWD_SHIFT 27 + +/* intstatus/intmask */ +#define CI_GPIO 0x00000001 /**< gpio intr */ +#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */ +#define CI_TEMP 0x00000004 /**< temp. 
ctrl intr (corerev >= 15) */ +#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */ +#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */ +#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */ +#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */ +#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */ + +/* slow_clk_ctl */ +#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */ +#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */ +#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */ +#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */ +#define SCC_LF 0x00000200 /**< LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled, + * 0: LPO is enabled + */ +#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock, + * 0: power logic control + */ +#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors + * PLL clock disable requests from core + */ +#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't + * disable crystal when appropriate + */ +#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */ +#define SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SCC_CD_SHIFT 16 + +/* system_clk_ctl */ +#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */ +#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */ +#define SYCC_FP 0x00000004 /**< ForcePLLOn */ +#define SYCC_AR 0x00000008 /**< Force ALP (or HT if ALPen is not set */ +#define SYCC_HR 0x00000010 /**< Force HT */ +#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1)) */ +#define SYCC_CD_SHIFT 16 + +/* Indirect backplane access */ +#define BPIA_BYTEEN 0x0000000f +#define BPIA_SZ1 0x00000001 +#define BPIA_SZ2 0x00000003 +#define BPIA_SZ4 0x00000007 +#define BPIA_SZ8 0x0000000f +#define BPIA_WRITE 0x00000100 +#define BPIA_START 0x00000200 +#define BPIA_BUSY 0x00000200 +#define BPIA_ERROR 0x00000400 + +/* pcmcia/prog/flash_config */ +#define CF_EN 0x00000001 /**< enable */ +#define CF_EM_MASK 0x0000000e /**< mode */ +#define CF_EM_SHIFT 1 +#define CF_EM_FLASH 0 /**< flash/asynchronous mode */ +#define CF_EM_SYNC 2 /**< synchronous mode */ +#define CF_EM_PCMCIA 4 /**< pcmcia mode */ +#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */ +#define CF_BS 0x00000020 /**< byteswap */ +#define CF_CD_MASK 0x000000c0 /**< clock divider */ +#define CF_CD_SHIFT 6 +#define CF_CD_DIV2 0x00000000 /**< backplane/2 */ +#define CF_CD_DIV3 0x00000040 /**< backplane/3 */ +#define CF_CD_DIV4 0x00000080 /**< backplane/4 */ +#define CF_CE 0x00000100 /**< clock enable */ +#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define PM_W0_MASK 0x0000003f /**< waitcount0 */ +#define PM_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PM_W1_SHIFT 8 +#define PM_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PM_W2_SHIFT 16 +#define PM_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PM_W3_SHIFT 24 + +/* pcmcia_attrwait */ +#define PA_W0_MASK 0x0000003f /**< waitcount0 */ +#define PA_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PA_W1_SHIFT 8 +#define PA_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PA_W2_SHIFT 16 +#define PA_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PA_W3_SHIFT 24 + +/* pcmcia_iowait */ +#define PI_W0_MASK 0x0000003f /**< waitcount0 */ +#define PI_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PI_W1_SHIFT 8 +#define PI_W2_MASK 
0x001f0000 /**< waitcount2 */ +#define PI_W2_SHIFT 16 +#define PI_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PI_W3_SHIFT 24 + +/* prog_waitcount */ +#define PW_W0_MASK 0x0000001f /**< waitcount0 */ +#define PW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PW_W1_SHIFT 8 +#define PW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PW_W2_SHIFT 16 +#define PW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PW_W3_SHIFT 24 + +#define PW_W0 0x0000000c +#define PW_W1 0x00000a00 +#define PW_W2 0x00020000 +#define PW_W3 0x01000000 + +/* flash_waitcount */ +#define FW_W0_MASK 0x0000003f /**< waitcount0 */ +#define FW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define FW_W1_SHIFT 8 +#define FW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define FW_W2_SHIFT 16 +#define FW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define FW_W3_SHIFT 24 + +/* When Srom support present, fields in sromcontrol */ +#define SRC_START 0x80000000 +#define SRC_BUSY 0x80000000 +#define SRC_OPCODE 0x60000000 +#define SRC_OP_READ 0x00000000 +#define SRC_OP_WRITE 0x20000000 +#define SRC_OP_WRDIS 0x40000000 +#define SRC_OP_WREN 0x60000000 +#define SRC_OTPSEL 0x00000010 +#define SRC_OTPPRESENT 0x00000020 +#define SRC_LOCK 0x00000008 +#define SRC_SIZE_MASK 0x00000006 +#define SRC_SIZE_1K 0x00000000 +#define SRC_SIZE_4K 0x00000002 +#define SRC_SIZE_16K 0x00000004 +#define SRC_SIZE_SHIFT 1 +#define SRC_PRESENT 0x00000001 + +/* Fields in pmucontrol */ +#define PCTL_ILP_DIV_MASK 0xffff0000 +#define PCTL_ILP_DIV_SHIFT 16 +#define PCTL_LQ_REQ_EN 0x00008000 +#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */ +#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */ +#define PCTL_HT_REQ_EN 0x00000100 +#define PCTL_ALP_REQ_EN 0x00000080 +#define PCTL_XTALFREQ_MASK 0x0000007c +#define PCTL_XTALFREQ_SHIFT 2 +#define PCTL_ILP_DIV_EN 0x00000002 +#define PCTL_LPO_SEL 0x00000001 + +/* Retention Control */ +#define PMU_RCTL_CLK_DIV_SHIFT 0 +#define PMU_RCTL_CHAIN_LEN_SHIFT 12 +#define PMU_RCTL_MACPHY_DISABLE_SHIFT 26 +#define PMU_RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define PMU_RCTL_LOGIC_DISABLE_SHIFT 27 +#define PMU_RCTL_LOGIC_DISABLE_MASK (1 << 27) +#define PMU_RCTL_MEMSLP_LOG_SHIFT 28 +#define PMU_RCTL_MEMSLP_LOG_MASK (1 << 28) +#define PMU_RCTL_MEMRETSLP_LOG_SHIFT 29 +#define PMU_RCTL_MEMRETSLP_LOG_MASK (1 << 29) + +/* Retention Group Control */ +#define PMU_RCTLGRP_CHAIN_LEN_SHIFT 0 +#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT 14 +#define PMU_RCTLGRP_RMODE_ENABLE_MASK (1 << 14) +#define PMU_RCTLGRP_DFT_ENABLE_SHIFT 15 +#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15) +#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16 +#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16) +/* Retention Group Control special for 4334 */ +#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0 338 +#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1 315 +/* Retention Group Control special for 43341 */ +#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0 366 +#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1 330 + +/* Fields in clkstretch */ +#define CSTRETCH_HT 0xffff0000 +#define CSTRETCH_ALP 0x0000ffff + +/* gpiotimerval */ +#define GPIO_ONTIME_SHIFT 16 + +/* clockcontrol_n */ +#define CN_N1_MASK 0x3f /**< n1 control */ +#define CN_N2_MASK 0x3f00 /**< n2 control */ +#define CN_N2_SHIFT 8 +#define CN_PLLC_MASK 0xf0000 /**< pll control */ +#define CN_PLLC_SHIFT 16 + +/* clockcontrol_sb/pci/uart */ +#define CC_M1_MASK 0x3f /**< m1 control */ +#define CC_M2_MASK 0x3f00 /**< m2 control */ +#define CC_M2_SHIFT 8 +#define CC_M3_MASK 0x3f0000 /**< m3 control */ +#define CC_M3_SHIFT 16 +#define CC_MC_MASK 0x1f000000 /**< mux control */ +#define 
CC_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define CC_F6_2 0x02 /**< A factor of 2 in */ +#define CC_F6_3 0x03 /**< 6-bit fields like */ +#define CC_F6_4 0x05 /**< N1, M1 or M3 */ +#define CC_F6_5 0x09 +#define CC_F6_6 0x11 +#define CC_F6_7 0x21 + +#define CC_F5_BIAS 5 /**< 5-bit fields get this added */ + +#define CC_MC_BYPASS 0x08 +#define CC_MC_M1 0x04 +#define CC_MC_M1M2 0x02 +#define CC_MC_M1M2M3 0x01 +#define CC_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */ +#define CC_T2M2_BIAS 3 /**< m2 bias */ + +#define CC_T2MC_M1BYP 1 +#define CC_T2MC_M2BYP 2 +#define CC_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define CC_T6_MMASK 1 /**< bits of interest in m */ +#define CC_T6_M0 120000000 /**< sb clock for m = 0 */ +#define CC_T6_M1 100000000 /**< sb clock for m = 1 */ +#define SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define CC_CLOCK_BASE1 24000000 /**< Half the clock freq */ +#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */ + +/* Clock control values for 200MHz in 5350 */ +#define CLKC_5350_N 0x0311 +#define CLKC_5350_M 0x04020009 + +/* Flash types in the chipcommon capabilities register */ +#define FLASH_NONE 0x000 /**< No flash */ +#define SFLASH_ST 0x100 /**< ST serial flash */ +#define SFLASH_AT 0x200 /**< Atmel serial flash */ +#define NFLASH 0x300 +#define PFLASH 0x700 /**< Parallel flash */ +#define QSPIFLASH_ST 0x800 +#define QSPIFLASH_AT 0x900 + +/* Bits in the ExtBus config registers */ +#define CC_CFG_EN 0x0001 /**< Enable */ +#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */ +#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */ +#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */ +#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */ +#define CC_CFG_EM_IDE 0x0006 /**< IDE */ +#define CC_CFG_DS 0x0010 /**< Data size, 0=8bit, 1=16bit */ +#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */ +#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */ +#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */ +#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */ + +/* ExtBus address space */ +#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */ +#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */ +#define CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */ +#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */ +#define CC_EB_IDE 0x1a800000 /**< IDE memory base */ +#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */ +#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */ +#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */ +#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */ + + +/* Start/busy bit in flashcontrol */ +#define SFLASH_OPCODE 0x000000ff +#define SFLASH_ACTION 0x00000700 +#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */ +#define SFLASH_START 0x80000000 +#define SFLASH_BUSY SFLASH_START + +/* flashcontrol action codes */ +#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */ +#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */ +#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */ +#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */ +#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */ +#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */ 
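To show how the flashcontrol opcode/action encoding above is meant to be driven, here is a sketch, not part of this header: a command word is the action code OR'd with the flash opcode, started with SFLASH_START and polled via SFLASH_BUSY (the function name is hypothetical; chipcregs_t is the volatile register map defined earlier in this file).

/* Illustration only: issue a one-data-byte serial flash command (the ST
 * RDSR opcode 0x05, i.e. the SFLASH_ST_RDSR encoding below) and poll for
 * completion.  'cc' is assumed to point at the mapped chipcommon core.
 */
static uint32 sflash_read_status(volatile chipcregs_t *cc)
{
	cc->flashcontrol = SFLASH_START | SFLASH_ACT_OP1D | 0x05;
	while (cc->flashcontrol & SFLASH_BUSY)
		;				/* spin until the start/busy bit clears */
	return cc->flashdata & 0xff;		/* status byte returned by the flash */
}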
+#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */ + +/* flashcontrol action+opcodes for ST flashes */ +#define SFLASH_ST_WREN 0x0006 /**< Write Enable */ +#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */ +#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */ +#define SFLASH_ST_WRSR 0x0101 /**< Write Status Register */ +#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */ +#define SFLASH_ST_PP 0x0302 /**< Page Program */ +#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */ +#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */ +#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */ +#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */ +#define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */ +#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */ + +#define SFLASH_MXIC_RDID 0x0390 /**< Read Manufacture ID */ +#define SFLASH_MXIC_MFID 0xc2 /**< MXIC Manufacture ID */ + +/* Status register bits for ST flashes */ +#define SFLASH_ST_WIP 0x01 /**< Write In Progress */ +#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */ +#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */ +#define SFLASH_ST_BP_SHIFT 2 +#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */ + +/* flashcontrol action+opcodes for Atmel flashes */ +#define SFLASH_AT_READ 0x07e8 +#define SFLASH_AT_PAGE_READ 0x07d2 +#define SFLASH_AT_BUF1_READ +#define SFLASH_AT_BUF2_READ +#define SFLASH_AT_STATUS 0x01d7 +#define SFLASH_AT_BUF1_WRITE 0x0384 +#define SFLASH_AT_BUF2_WRITE 0x0387 +#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 +#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 +#define SFLASH_AT_BUF1_PROGRAM 0x0288 +#define SFLASH_AT_BUF2_PROGRAM 0x0289 +#define SFLASH_AT_PAGE_ERASE 0x0281 +#define SFLASH_AT_BLOCK_ERASE 0x0250 +#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define SFLASH_AT_BUF1_LOAD 0x0253 +#define SFLASH_AT_BUF2_LOAD 0x0255 +#define SFLASH_AT_BUF1_COMPARE 0x0260 +#define SFLASH_AT_BUF2_COMPARE 0x0261 +#define SFLASH_AT_BUF1_REPROGRAM 0x0258 +#define SFLASH_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SFLASH_AT_READY 0x80 +#define SFLASH_AT_MISMATCH 0x40 +#define SFLASH_AT_ID_MASK 0x38 +#define SFLASH_AT_ID_SHIFT 3 + +/* SPI register bits, corerev >= 37 */ +#define GSIO_START 0x80000000 +#define GSIO_BUSY GSIO_START + +/* + * These are the UART port assignments, expressed as offsets from the base + * register. These assignments should hold for any serial port based on + * a 8250, 16450, or 16550(A). 
+ */ + +#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */ +#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */ +#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */ +#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */ +#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */ +#define UART_IIR 2 /**< In: Interrupt Identity Register */ +#define UART_FCR 2 /**< Out: FIFO Control Register */ +#define UART_LCR 3 /**< Out: Line Control Register */ +#define UART_MCR 4 /**< Out: Modem Control Register */ +#define UART_LSR 5 /**< In: Line Status Register */ +#define UART_MSR 6 /**< In: Modem Status Register */ +#define UART_SCR 7 /**< I/O: Scratch Register */ +#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */ +#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */ +#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */ +#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */ +#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */ +#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */ +#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */ +#define UART_LSR_BREAK 0x10 /**< Break interrupt */ +#define UART_LSR_FRAMING 0x08 /**< Framing error */ +#define UART_LSR_PARITY 0x04 /**< Parity error */ +#define UART_LSR_OVERRUN 0x02 /**< Overrun error */ +#define UART_LSR_RXRDY 0x01 /**< Receiver ready */ +#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */ + +/* Interrupt Identity Register (IIR) bits */ +#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */ +#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */ +#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */ +#define UART_IIR_NOINT 0x1 /**< No interrupt pending */ +#define UART_IIR_THRE 0x2 /**< THR empty */ +#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */ +#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */ +#define UART_IIR_CHAR_TIME 0xc /**< Character time */ + +/* Interrupt Enable Register (IER) bits */ +#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */ +#define UART_IER_EDSSI 8 /**< enable modem status interrupt */ +#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */ +#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */ +#define UART_IER_ERBFI 1 /**< enable data available interrupt */ + +/* pmustatus */ +#define PST_SLOW_WR_PENDING 0x0400 +#define PST_EXTLPOAVAIL 0x0100 +#define PST_WDRESET 0x0080 +#define PST_INTPEND 0x0040 +#define PST_SBCLKST 0x0030 +#define PST_SBCLKST_ILP 0x0010 +#define PST_SBCLKST_ALP 0x0020 +#define PST_SBCLKST_HT 0x0030 +#define PST_ALPAVAIL 0x0008 +#define PST_HTAVAIL 0x0004 +#define PST_RESINIT 0x0003 + +/* pmucapabilities */ +#define PCAP_REV_MASK 0x000000ff +#define PCAP_RC_MASK 0x00001f00 +#define PCAP_RC_SHIFT 8 +#define PCAP_TC_MASK 0x0001e000 +#define PCAP_TC_SHIFT 13 +#define PCAP_PC_MASK 0x001e0000 +#define PCAP_PC_SHIFT 17 +#define PCAP_VC_MASK 0x01e00000 +#define PCAP_VC_SHIFT 21 +#define PCAP_CC_MASK 0x1e000000 +#define PCAP_CC_SHIFT 25 +#define PCAP5_PC_MASK 0x003e0000 /**< PMU corerev >= 5 */ +#define PCAP5_PC_SHIFT 17 +#define PCAP5_VC_MASK 0x07c00000 +#define PCAP5_VC_SHIFT 22 +#define PCAP5_CC_MASK 0xf8000000 +#define PCAP5_CC_SHIFT 27 + +/* PMU Resource Request Timer registers */ +/* This is based on PmuRev0 */ +#define PRRT_TIME_MASK 0x03ff +#define PRRT_INTEN 0x0400 +#define PRRT_REQ_ACTIVE 0x0800 +#define PRRT_ALP_REQ 0x1000 +#define PRRT_HT_REQ 0x2000 +#define 
PRRT_HQ_REQ 0x4000 + +/* PMU Int Control register bits */ +#define PMU_INTC_ALP_REQ 0x1 +#define PMU_INTC_HT_REQ 0x2 +#define PMU_INTC_HQ_REQ 0x4 + +/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */ +#define RSRC_INTR_MASK_TIMER_INT_0 1 + +/* PMU resource bit position */ +#define PMURES_BIT(bit) (1 << (bit)) + +/* PMU resource number limit */ +#define PMURES_MAX_RESNUM 30 + +/* PMU chip control0 register */ +#define PMU_CHIPCTL0 0 +#define PMU43143_CC0_SDIO_DRSTR_OVR (1 << 31) /* sdio drive strength override enable */ + +/* clock req types */ +#define PMU_CC1_CLKREQ_TYPE_SHIFT 19 +#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT) + +#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0 +#define CLKREQ_TYPE_CONFIG_PUSHPULL 1 + +/* PMU chip control1 register */ +#define PMU_CHIPCTL1 1 +#define PMU_CC1_RXC_DLL_BYPASS 0x00010000 +#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN 0x00000010 + +#define PMU_CC1_IF_TYPE_MASK 0x00000030 +#define PMU_CC1_IF_TYPE_RMII 0x00000000 +#define PMU_CC1_IF_TYPE_MII 0x00000010 +#define PMU_CC1_IF_TYPE_RGMII 0x00000020 + +#define PMU_CC1_SW_TYPE_MASK 0x000000c0 +#define PMU_CC1_SW_TYPE_EPHY 0x00000000 +#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040 +#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 +#define PMU_CC1_SW_TYPE_RGMII 0x000000c0 + +#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080 +#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000 + +/* PMU chip control2 register */ +#define PMU_CHIPCTL2 2 +#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18) +#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19) +#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20) +#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21) +#define PMU_CC2_MASK_WL_DEV_WAKE (1 << 22) +#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25) + + +/* PMU chip control3 register */ +#define PMU_CHIPCTL3 3 +#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19 +#define PMU_CC3_ENABLE_RF_SHIFT 22 +#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23 + +/* PMU chip control4 register */ +#define PMU_CHIPCTL4 4 + +/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */ +#define PMU_CC4_IF_TYPE_MASK 0x00003000 +#define PMU_CC4_IF_TYPE_RMII 0x00000000 +#define PMU_CC4_IF_TYPE_MII 0x00001000 +#define PMU_CC4_IF_TYPE_RGMII 0x00002000 + +#define PMU_CC4_SW_TYPE_MASK 0x0000c000 +#define PMU_CC4_SW_TYPE_EPHY 0x00000000 +#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000 +#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000 +#define PMU_CC4_SW_TYPE_RGMII 0x0000c000 + +/* PMU chip control5 register */ +#define PMU_CHIPCTL5 5 + +/* PMU chip control6 register */ +#define PMU_CHIPCTL6 6 +#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4) +#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6) + +/* PMU chip control7 register */ +#define PMU_CHIPCTL7 7 +#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25) +#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27) +/* 53537 series have gmca1 gmac_if_type in cc7 [7:6](defalut 0b01) */ +#define PMU_CC7_IF_TYPE_MASK 0x000000c0 +#define PMU_CC7_IF_TYPE_RMII 0x00000000 +#define PMU_CC7_IF_TYPE_MII 0x00000040 +#define PMU_CC7_IF_TYPE_RGMII 0x00000080 + + +/* PMU corerev and chip specific PLL controls. + * PMU_PLL_XX where is PMU corerev and is an arbitrary number + * to differentiate different PLLs controlled by the same PMU rev. 
+ */ +/* pllcontrol registers */ +/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */ +#define PMU0_PLL0_PLLCTL0 0 +#define PMU0_PLL0_PC0_PDIV_MASK 1 +#define PMU0_PLL0_PC0_PDIV_FREQ 25000 +#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 +#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 +#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 + +/* PC0_DIV_ARM for PLLOUT_ARM */ +#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 +#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 +#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 +#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */ +#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 +#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 +#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 +#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 + +/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */ +#define PMU0_PLL0_PLLCTL1 1 +#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 +#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 +#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 +#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 +#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 + +/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */ +#define PMU0_PLL0_PLLCTL2 2 +#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 + +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU1_PLL0_PLLCTL0 0 +#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000 +#define PMU1_PLL0_PC0_P1DIV_SHIFT 20 +#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 +#define PMU1_PLL0_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU1_PLL0_PLLCTL1 1 +#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff +#define PMU1_PLL0_PC1_M1DIV_SHIFT 0 +#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC1_M2DIV_SHIFT 8 +#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 +#define PMU1_PLL0_PC1_M3DIV_SHIFT 16 +#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 +#define PMU1_PLL0_PC1_M4DIV_SHIFT 24 +#define PMU1_PLL0_PC1_M4DIV_BY_9 9 +#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12 +#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24 +#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C + +#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 +#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) +#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU1_PLL0_PLLCTL2 2 +#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff +#define PMU1_PLL0_PC2_M5DIV_SHIFT 0 +#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc +#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC2_M6DIV_SHIFT 8 +#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 +#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 +#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /**< recommended for 4319 */ +#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU1_PLL0_PLLCTL3 3 +#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU1_PLL0_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU1_PLL0_PLLCTL5 5 +#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 +#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 + +#define PMU1_PLL0_PLLCTL6 6 +#define PMU1_PLL0_PLLCTL7 7 +#define PMU1_PLL0_PLLCTL8 8 + +#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1) +#define PMU_PLL4350_OPENLOOP_MASK (1 << 7) + +/* PMU rev 2 
control words */ +#define PMU2_PHY_PLL_PLLCTL 4 +#define PMU2_SI_PLL_PLLCTL 10 + +/* PMU rev 2 */ +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU2_PLL_PLLCTL0 0 +#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 +#define PMU2_PLL_PC0_P1DIV_SHIFT 20 +#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 +#define PMU2_PLL_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU2_PLL_PLLCTL1 1 +#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff +#define PMU2_PLL_PC1_M1DIV_SHIFT 0 +#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC1_M2DIV_SHIFT 8 +#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 +#define PMU2_PLL_PC1_M3DIV_SHIFT 16 +#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000 +#define PMU2_PLL_PC1_M4DIV_SHIFT 24 + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU2_PLL_PLLCTL2 2 +#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff +#define PMU2_PLL_PC2_M5DIV_SHIFT 0 +#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC2_M6DIV_SHIFT 8 +#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 +#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU2_PLL_PLLCTL3 3 +#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU2_PLL_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU2_PLL_PLLCTL5 5 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 + +/* PMU rev 5 (& 6) */ +#define PMU5_PLL_P1P2_OFF 0 +#define PMU5_PLL_P1_MASK 0x0f000000 +#define PMU5_PLL_P1_SHIFT 24 +#define PMU5_PLL_P2_MASK 0x00f00000 +#define PMU5_PLL_P2_SHIFT 20 +#define PMU5_PLL_M14_OFF 1 +#define PMU5_PLL_MDIV_MASK 0x000000ff +#define PMU5_PLL_MDIV_WIDTH 8 +#define PMU5_PLL_NM5_OFF 2 +#define PMU5_PLL_NDIV_MASK 0xfff00000 +#define PMU5_PLL_NDIV_SHIFT 20 +#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000 +#define PMU5_PLL_NDIV_MODE_SHIFT 17 +#define PMU5_PLL_FMAB_OFF 3 +#define PMU5_PLL_MRAT_MASK 0xf0000000 +#define PMU5_PLL_MRAT_SHIFT 28 +#define PMU5_PLL_ABRAT_MASK 0x08000000 +#define PMU5_PLL_ABRAT_SHIFT 27 +#define PMU5_PLL_FDIV_MASK 0x07ffffff +#define PMU5_PLL_PLLCTL_OFF 4 +#define PMU5_PLL_PCHI_OFF 5 +#define PMU5_PLL_PCHI_MASK 0x0000003f + +/* pmu XtalFreqRatio */ +#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF +#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 +#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31 + +/* Divider allocation in 4716/47162/5356/5357 */ +#define PMU5_MAINPLL_CPU 1 +#define PMU5_MAINPLL_MEM 2 +#define PMU5_MAINPLL_SI 3 + +/* 4706 PMU */ +#define PMU4706_MAINPLL_PLL0 0 +#define PMU6_4706_PROCPLL_OFF 4 /**< The CPU PLL */ +#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000 +#define PMU6_4706_PROC_P2DIV_SHIFT 16 +#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000 +#define PMU6_4706_PROC_P1DIV_SHIFT 12 +#define PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8 +#define PMU6_4706_PROC_NDIV_INT_SHIFT 3 +#define PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 +#define PMU6_4706_PROC_NDIV_MODE_SHIFT 0 + +#define 
PMU7_PLL_PLLCTL7 7 +#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000 +#define PMU7_PLL_CTL7_M4DIV_SHIFT 24 +#define PMU7_PLL_CTL7_M4DIV_BY_6 6 +#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc +#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL8 8 +#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff +#define PMU7_PLL_CTL8_M5DIV_SHIFT 0 +#define PMU7_PLL_CTL8_M5DIV_BY_8 8 +#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18 +#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00 +#define PMU7_PLL_CTL8_M6DIV_SHIFT 8 +#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL11 11 +#define PMU7_PLL_PLLCTL11_MASK 0xffffff00 +#define PMU7_PLL_PLLCTL11_VAL 0x22222200 + +/* PMU rev 15 */ +#define PMU15_PLL_PLLCTL0 0 +#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + +#define PMU15_PLL_PLLCTL1 1 +#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060 +#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17 +#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000 +#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28 +#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000 +#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30 + +#define PMU15_PLL_PLLCTL2 2 +#define PMU15_PLL_PC2_CTEN_MASK 0x00000001 +#define PMU15_PLL_PC2_CTEN_SHIFT 0 + +#define PMU15_PLL_PLLCTL3 3 +#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001 +#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000 +#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3 + +#define PMU15_PLL_PLLCTL4 4 +#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007 +#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0 +#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038 +#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3 +#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0 +#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6 +#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00 +#define PMU15_PLL_PC4_DBGMODE_SHIFT 9 +#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000 +#define 
PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12 +#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000 +#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13 +#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000 +#define PMU15_PLL_PC4_DINPOL_SHIFT 20 +#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000 +#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21 +#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000 +#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22 +#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000 +#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23 +#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000 +#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24 +#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000 +#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25 +#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000 +#define PMU15_PLL_PC4_TEST_EN_SHIFT 26 + +#define PMU15_PLL_PLLCTL5 5 +#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF +#define PMU15_PLL_PC5_FREQTGT_SHIFT 0 +#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000 +#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20 +#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000 +#define PMU15_PLL_PC5_PRESCALE_SHIFT 27 + +#define PMU15_PLL_PLLCTL6 6 +#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF +#define PMU15_PLL_PC6_FREQTGT_SHIFT 0 +#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000 +#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20 +#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000 +#define PMU15_PLL_PC6_PRESCALE_SHIFT 27 + +#define PMU15_FREQTGT_480_DEFAULT 0x19AB1 +#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5 +#define PMU15_ARM_96MHZ 96000000 /**< 96 Mhz */ +#define PMU15_ARM_98MHZ 98400000 /**< 98.4 Mhz */ +#define PMU15_ARM_97MHZ 97000000 /**< 97 Mhz */ + + +#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070 +#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4 + +#define PMU17_PLLCTL2_NDIV_MODE_INT 0 +#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1 +#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2 +#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3 + +#define PMU17_PLLCTL0_BBPLL_PWRDWN 0 +#define PMU17_PLLCTL0_BBPLL_DRST 3 +#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8 + +/* PLL usage in 4716/47162 */ +#define PMU4716_MAINPLL_PLL0 12 + +/* PLL usage in 4335 */ +#define PMU4335_PLL0_PC2_P1DIV_MASK 0x000f0000 +#define PMU4335_PLL0_PC2_P1DIV_SHIFT 16 +#define PMU4335_PLL0_PC2_NDIV_INT_MASK 0xff800000 +#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT 23 +#define PMU4335_PLL0_PC1_MDIV2_MASK 0x0000ff00 +#define PMU4335_PLL0_PC1_MDIV2_SHIFT 8 + + +/* PLL usage in 5356/5357 */ +#define PMU5356_MAINPLL_PLL0 0 +#define PMU5357_MAINPLL_PLL0 0 + +/* 4716/47162 resources */ +#define RES4716_PROC_PLL_ON 0x00000040 +#define RES4716_PROC_HT_AVAIL 0x00000080 + +/* 4716/4717/4718 Chip specific ChipControl register bits */ +#define CCTRL_471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared w/ pflash */ + +/* 5357 Chip specific ChipControl register bits */ +/* 2nd - 32-bit reg */ +#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000 /* I2S pins enable */ +#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */ + +/* 5354 resources */ +#define RES5354_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES5354_BB_SWITCHER_PWM 1 /**< 0x00002 */ +#define RES5354_BB_SWITCHER_BURST 2 /**< 0x00004 */ +#define RES5354_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */ +#define RES5354_ILP_REQUEST 4 /**< 0x00010 */ +#define RES5354_RADIO_SWITCHER_PWM 5 /**< 0x00020 */ +#define RES5354_RADIO_SWITCHER_BURST 6 /**< 0x00040 */ +#define RES5354_ROM_SWITCH 7 /**< 0x00080 */ +#define RES5354_PA_REF_LDO 8 /**< 0x00100 */ +#define RES5354_RADIO_LDO 9 /**< 0x00200 */ +#define RES5354_AFE_LDO 10 /**< 0x00400 */ +#define RES5354_PLL_LDO 11 /**< 0x00800 */ 
+#define RES5354_BG_FILTBYP 12 /**< 0x01000 */ +#define RES5354_TX_FILTBYP 13 /**< 0x02000 */ +#define RES5354_RX_FILTBYP 14 /**< 0x04000 */ +#define RES5354_XTAL_PU 15 /**< 0x08000 */ +#define RES5354_XTAL_EN 16 /**< 0x10000 */ +#define RES5354_BB_PLL_FILTBYP 17 /**< 0x20000 */ +#define RES5354_RF_PLL_FILTBYP 18 /**< 0x40000 */ +#define RES5354_BB_PLL_PU 19 /**< 0x80000 */ + +/* 5357 Chip specific ChipControl register bits */ +#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */ +#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */ +#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */ + +/* 43217 Chip specific ChipControl register bits */ +#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */ +#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */ + +/* 43228 Chip specific ChipControl register bits */ +#define CCTRL43228_EXTPA_C0 (1<<14) /* core0 extPA in ChipControl 1, bit 14 */ +#define CCTRL43228_EXTPA_C1 (1<<9) /* core1 extPA in ChipControl 1, bit 9 */ + +/* 4328 resources */ +#define RES4328_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES4328_BB_SWITCHER_PWM 1 /**< 0x00002 */ +#define RES4328_BB_SWITCHER_BURST 2 /**< 0x00004 */ +#define RES4328_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */ +#define RES4328_ILP_REQUEST 4 /**< 0x00010 */ +#define RES4328_RADIO_SWITCHER_PWM 5 /**< 0x00020 */ +#define RES4328_RADIO_SWITCHER_BURST 6 /**< 0x00040 */ +#define RES4328_ROM_SWITCH 7 /**< 0x00080 */ +#define RES4328_PA_REF_LDO 8 /**< 0x00100 */ +#define RES4328_RADIO_LDO 9 /**< 0x00200 */ +#define RES4328_AFE_LDO 10 /**< 0x00400 */ +#define RES4328_PLL_LDO 11 /**< 0x00800 */ +#define RES4328_BG_FILTBYP 12 /**< 0x01000 */ +#define RES4328_TX_FILTBYP 13 /**< 0x02000 */ +#define RES4328_RX_FILTBYP 14 /**< 0x04000 */ +#define RES4328_XTAL_PU 15 /**< 0x08000 */ +#define RES4328_XTAL_EN 16 /**< 0x10000 */ +#define RES4328_BB_PLL_FILTBYP 17 /**< 0x20000 */ +#define RES4328_RF_PLL_FILTBYP 18 /**< 0x40000 */ +#define RES4328_BB_PLL_PU 19 /**< 0x80000 */ + +/* 4325 A0/A1 resources */ +#define RES4325_BUCK_BOOST_BURST 0 /**< 0x00000001 */ +#define RES4325_CBUCK_BURST 1 /**< 0x00000002 */ +#define RES4325_CBUCK_PWM 2 /**< 0x00000004 */ +#define RES4325_CLDO_CBUCK_BURST 3 /**< 0x00000008 */ +#define RES4325_CLDO_CBUCK_PWM 4 /**< 0x00000010 */ +#define RES4325_BUCK_BOOST_PWM 5 /**< 0x00000020 */ +#define RES4325_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4325_ABUCK_BURST 7 /**< 0x00000080 */ +#define RES4325_ABUCK_PWM 8 /**< 0x00000100 */ +#define RES4325_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4325_OTP_PU 10 /**< 0x00000400 */ +#define RES4325_LNLDO3_PU 11 /**< 0x00000800 */ +#define RES4325_LNLDO4_PU 12 /**< 0x00001000 */ +#define RES4325_XTAL_PU 13 /**< 0x00002000 */ +#define RES4325_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4325_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4325_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4325_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4325_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4325_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4325_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4325_HT_AVAIL 21 /**< 0x00200000 */ + +/* 4325 B0/C0 resources */ +#define RES4325B0_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4325B0_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4325B0_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4325B0_CLDO_PU 4 /**< 0x00000010 */ + +/* 4325 C1 resources */ +#define RES4325C1_LNLDO2_PU 12 /**< 0x00001000 */ + +/* 4325 chip-specific
ChipStatus register bits */ +#define CST4325_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4325_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */ +#define CST4325_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */ +#define CST4325_OTP_SEL 2 /**< OTP is powered up, no SPROM */ +#define CST4325_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */ +#define CST4325_SDIO_USB_MODE_MASK 0x00000004 +#define CST4325_SDIO_USB_MODE_SHIFT 2 +#define CST4325_RCAL_VALID_MASK 0x00000008 +#define CST4325_RCAL_VALID_SHIFT 3 +#define CST4325_RCAL_VALUE_MASK 0x000001f0 +#define CST4325_RCAL_VALUE_SHIFT 4 +#define CST4325_PMUTOP_2B_MASK 0x00000200 /**< 1 for 2b, 0 for to 2a */ +#define CST4325_PMUTOP_2B_SHIFT 9 + +#define RES4329_RESERVED0 0 /**< 0x00000001 */ +#define RES4329_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4329_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4329_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4329_CLDO_PU 4 /**< 0x00000010 */ +#define RES4329_PALDO_PU 5 /**< 0x00000020 */ +#define RES4329_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4329_RESERVED7 7 /**< 0x00000080 */ +#define RES4329_RESERVED8 8 /**< 0x00000100 */ +#define RES4329_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4329_OTP_PU 10 /**< 0x00000400 */ +#define RES4329_RESERVED11 11 /**< 0x00000800 */ +#define RES4329_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4329_XTAL_PU 13 /**< 0x00002000 */ +#define RES4329_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4329_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4329_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4329_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4329_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4329_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4329_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4329_HT_AVAIL 21 /**< 0x00200000 */ + +#define CST4329_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4329_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */ +#define CST4329_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */ +#define CST4329_OTP_SEL 2 /**< OTP is powered up, no SPROM */ +#define CST4329_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */ +#define CST4329_SPI_SDIO_MODE_MASK 0x00000004 +#define CST4329_SPI_SDIO_MODE_SHIFT 2 + +/* 4312 chip-specific ChipStatus register bits */ +#define CST4312_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4312_DEFCIS_SEL 0 /**< OTP is powered up, use def. 
CIS, no SPROM */ +#define CST4312_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */ +#define CST4312_OTP_SEL 2 /**< OTP is powered up, no SPROM */ +#define CST4312_OTP_BAD 3 /**< OTP is broken, SPROM is present */ + +/* 4312 resources (all PMU chips with little memory constraint) */ +#define RES4312_SWITCHER_BURST 0 /**< 0x00000001 */ +#define RES4312_SWITCHER_PWM 1 /**< 0x00000002 */ +#define RES4312_PA_REF_LDO 2 /**< 0x00000004 */ +#define RES4312_CORE_LDO_BURST 3 /**< 0x00000008 */ +#define RES4312_CORE_LDO_PWM 4 /**< 0x00000010 */ +#define RES4312_RADIO_LDO 5 /**< 0x00000020 */ +#define RES4312_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4312_BG_FILTBYP 7 /**< 0x00000080 */ +#define RES4312_TX_FILTBYP 8 /**< 0x00000100 */ +#define RES4312_RX_FILTBYP 9 /**< 0x00000200 */ +#define RES4312_XTAL_PU 10 /**< 0x00000400 */ +#define RES4312_ALP_AVAIL 11 /**< 0x00000800 */ +#define RES4312_BB_PLL_FILTBYP 12 /**< 0x00001000 */ +#define RES4312_RF_PLL_FILTBYP 13 /**< 0x00002000 */ +#define RES4312_HT_AVAIL 14 /**< 0x00004000 */ + +/* 4322 resources */ +#define RES4322_RF_LDO 0 +#define RES4322_ILP_REQUEST 1 +#define RES4322_XTAL_PU 2 +#define RES4322_ALP_AVAIL 3 +#define RES4322_SI_PLL_ON 4 +#define RES4322_HT_SI_AVAIL 5 +#define RES4322_PHY_PLL_ON 6 +#define RES4322_HT_PHY_AVAIL 7 +#define RES4322_OTP_PU 8 + +/* 4322 chip-specific ChipStatus register bits */ +#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020 +#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0 +#define CST4322_SPROM_OTP_SEL_SHIFT 6 +#define CST4322_NO_SPROM_OTP 0 /**< no OTP, no SPROM */ +#define CST4322_SPROM_PRESENT 1 /**< SPROM is present */ +#define CST4322_OTP_PRESENT 2 /**< OTP is present */ +#define CST4322_PCI_OR_USB 0x00000100 +#define CST4322_BOOT_MASK 0x00000600 +#define CST4322_BOOT_SHIFT 9 +#define CST4322_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST4322_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST4322_BOOT_FROM_FLASH 2 /**< boot from FLASH */ +#define CST4322_BOOT_FROM_INVALID 3 +#define CST4322_ILP_DIV_EN 0x00000800 +#define CST4322_FLASH_TYPE_MASK 0x00001000 +#define CST4322_FLASH_TYPE_SHIFT 12 +#define CST4322_FLASH_TYPE_SHIFT_ST 0 /**< ST serial FLASH */ +#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /**< ATMEL flash */ +#define CST4322_ARM_TAP_SEL 0x00002000 +#define CST4322_RES_INIT_MODE_MASK 0x0000c000 +#define CST4322_RES_INIT_MODE_SHIFT 14 +#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /**< resinitmode: ILP available */ +#define CST4322_RES_INIT_MODE_ILPREQ 1 /**< resinitmode: ILP request */ +#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /**< resinitmode: ALP available */ +#define CST4322_RES_INIT_MODE_HTAVAIL 3 /**< resinitmode: HT available */ +#define CST4322_PCIPLLCLK_GATING 0x00010000 +#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000 +#define CST4322_PCI_CARDBUS_MODE 0x00040000 + +/* 43224 chip-specific ChipControl register bits */ +#define CCTRL43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */ +#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */ +#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */ + +/* 43236 resources */ +#define RES43236_REGULATOR 0 +#define RES43236_ILP_REQUEST 1 +#define RES43236_XTAL_PU 2 +#define RES43236_ALP_AVAIL 3 +#define RES43236_SI_PLL_ON 4 +#define RES43236_HT_SI_AVAIL 5 + +/* 43236 chip-specific ChipControl register bits */ +#define CCTRL43236_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL43236_SECI (1<<1) /**< 0 SECI is disabled (JATG functional) */ +#define CCTRL43236_EXT_LNA 
(1<<2) /**< 0 disable */ +#define CCTRL43236_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */ +#define CCTRL43236_GSIO (1<<4) /**< 0 disable */ + +/* 43236 Chip specific ChipStatus register bits */ +#define CST43236_SFLASH_MASK 0x00000040 +#define CST43236_OTP_SEL_MASK 0x00000080 +#define CST43236_OTP_SEL_SHIFT 7 +#define CST43236_HSIC_MASK 0x00000100 /**< USB/HSIC */ +#define CST43236_BP_CLK 0x00000200 /**< 120/96Mbps */ +#define CST43236_BOOT_MASK 0x00001800 +#define CST43236_BOOT_SHIFT 11 +#define CST43236_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST43236_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST43236_BOOT_FROM_FLASH 2 /**< boot from FLASH */ +#define CST43236_BOOT_FROM_INVALID 3 + +/* 43237 resources */ +#define RES43237_REGULATOR 0 +#define RES43237_ILP_REQUEST 1 +#define RES43237_XTAL_PU 2 +#define RES43237_ALP_AVAIL 3 +#define RES43237_SI_PLL_ON 4 +#define RES43237_HT_SI_AVAIL 5 + +/* 43237 chip-specific ChipControl register bits */ +#define CCTRL43237_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL43237_SECI (1<<1) /**< 0 SECI is disabled (JATG functional) */ +#define CCTRL43237_EXT_LNA (1<<2) /**< 0 disable */ +#define CCTRL43237_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */ +#define CCTRL43237_GSIO (1<<4) /**< 0 disable */ + +/* 43237 Chip specific ChipStatus register bits */ +#define CST43237_SFLASH_MASK 0x00000040 +#define CST43237_OTP_SEL_MASK 0x00000080 +#define CST43237_OTP_SEL_SHIFT 7 +#define CST43237_HSIC_MASK 0x00000100 /**< USB/HSIC */ +#define CST43237_BP_CLK 0x00000200 /**< 120/96Mbps */ +#define CST43237_BOOT_MASK 0x00001800 +#define CST43237_BOOT_SHIFT 11 +#define CST43237_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST43237_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST43237_BOOT_FROM_FLASH 2 /**< boot from FLASH */ +#define CST43237_BOOT_FROM_INVALID 3 + +/* 43239 resources */ +#define RES43239_OTP_PU 9 +#define RES43239_MACPHY_CLKAVAIL 23 +#define RES43239_HT_AVAIL 24 + +/* 43239 Chip specific ChipStatus register bits */ +#define CST43239_SPROM_MASK 0x00000002 +#define CST43239_SFLASH_MASK 0x00000004 +#define CST43239_RES_INIT_MODE_SHIFT 7 +#define CST43239_RES_INIT_MODE_MASK 0x000001f0 +#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /**< SDIO || gSPI */ +#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /**< USB || USBDA */ +#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /**< SDIO */ +#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /**< gSPI */ + +/* 4324 resources */ +/* 43242 use same PMU as 4324 */ +#define RES4324_LPLDO_PU 0 +#define RES4324_RESET_PULLDN_DIS 1 +#define RES4324_PMU_BG_PU 2 +#define RES4324_HSIC_LDO_PU 3 +#define RES4324_CBUCK_LPOM_PU 4 +#define RES4324_CBUCK_PFM_PU 5 +#define RES4324_CLDO_PU 6 +#define RES4324_LPLDO2_LVM 7 +#define RES4324_LNLDO1_PU 8 +#define RES4324_LNLDO2_PU 9 +#define RES4324_LDO3P3_PU 10 +#define RES4324_OTP_PU 11 +#define RES4324_XTAL_PU 12 +#define RES4324_BBPLL_PU 13 +#define RES4324_LQ_AVAIL 14 +#define RES4324_WL_CORE_READY 17 +#define RES4324_ILP_REQ 18 +#define RES4324_ALP_AVAIL 19 +#define RES4324_PALDO_PU 20 +#define RES4324_RADIO_PU 21 +#define RES4324_SR_CLK_STABLE 22 +#define RES4324_SR_SAVE_RESTORE 23 +#define RES4324_SR_PHY_PWRSW 24 +#define RES4324_SR_PHY_PIC 25 +#define RES4324_SR_SUBCORE_PWRSW 26 +#define RES4324_SR_SUBCORE_PIC 27 +#define RES4324_SR_MEM_PM0 28 +#define RES4324_HT_AVAIL 29 +#define RES4324_MACPHY_CLKAVAIL 30 + +/* 4324 Chip specific ChipStatus register bits */ +#define 
CST4324_SPROM_MASK 0x00000080 +#define CST4324_SFLASH_MASK 0x00400000 +#define CST4324_RES_INIT_MODE_SHIFT 10 +#define CST4324_RES_INIT_MODE_MASK 0x00000c00 +#define CST4324_CHIPMODE_MASK 0x7 +#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /**< SDIO || gSPI */ +#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /**< USB || USBDA */ + +/* 43242 Chip specific ChipStatus register bits */ +#define CST43242_SFLASH_MASK 0x00000008 +#define CST43242_SR_HALT (1<<25) +#define CST43242_SR_CHIP_STATUS_2 27 /* bit 27 */ + +/* 4331 resources */ +#define RES4331_REGULATOR 0 +#define RES4331_ILP_REQUEST 1 +#define RES4331_XTAL_PU 2 +#define RES4331_ALP_AVAIL 3 +#define RES4331_SI_PLL_ON 4 +#define RES4331_HT_SI_AVAIL 5 + +/* 4331 chip-specific ChipControl register bits */ +#define CCTRL4331_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL4331_SECI (1<<1) /**< 0 SECI is disabled (JATG functional) */ +#define CCTRL4331_EXT_LNA_G (1<<2) /**< 0 disable */ +#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /**< sprom/gpio13-15 mux */ +#define CCTRL4331_EXTPA_EN (1<<4) /**< 0 ext pa disable, 1 ext pa enabled */ +#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /**< set drive out GPIO_CLK on sprom_cs pin */ +#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /**< use sprom_cs pin as PCIE mdio interface */ +#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */ +#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /**< override core control on pipe_AuxClkEnable */ +#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /**< override core control on pipe_AuxPowerDown */ +#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /**< pcie_auxclkenable */ +#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /**< pcie_pipe_pllpowerdown */ +#define CCTRL4331_EXTPA_EN2 (1<<12) /**< 0 ext pa disable, 1 ext pa enabled */ +#define CCTRL4331_EXT_LNA_A (1<<13) /**< 0 disable */ +#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /**< enable bt_shd0 at gpio4 */ +#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /**< enable bt_shd1 at gpio5 */ +#define CCTRL4331_EXTPA_ANA_EN (1<<24) /**< 0 ext pa disable, 1 ext pa enabled */ + +/* 4331 Chip specific ChipStatus register bits */ +#define CST4331_XTAL_FREQ 0x00000001 /**< crystal frequency 20/40Mhz */ +#define CST4331_SPROM_OTP_SEL_MASK 0x00000006 +#define CST4331_SPROM_OTP_SEL_SHIFT 1 +#define CST4331_SPROM_PRESENT 0x00000002 +#define CST4331_OTP_PRESENT 0x00000004 +#define CST4331_LDO_RF 0x00000008 +#define CST4331_LDO_PAR 0x00000010 + +/* 4315 resource */ +#define RES4315_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4315_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4315_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4315_CLDO_PU 4 /**< 0x00000010 */ +#define RES4315_PALDO_PU 5 /**< 0x00000020 */ +#define RES4315_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4315_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4315_OTP_PU 10 /**< 0x00000400 */ +#define RES4315_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4315_XTAL_PU 13 /**< 0x00002000 */ +#define RES4315_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4315_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4315_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4315_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4315_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4315_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4315_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4315_HT_AVAIL 21 /**< 0x00200000 */ + +/* 4315 chip-specific ChipStatus register bits */ +#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /**< gpio [7:6], SDIO CIS selection */ +#define 
CST4315_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */ +#define CST4315_SPROM_SEL 0x00000001 /**< use SPROM, OTP is powered up */ +#define CST4315_OTP_SEL 0x00000002 /**< use OTP, OTP is powered up */ +#define CST4315_OTP_PWRDN 0x00000003 /**< use SPROM, OTP is powered down */ +#define CST4315_SDIO_MODE 0x00000004 /**< gpio [8], sdio/usb mode */ +#define CST4315_RCAL_VALID 0x00000008 +#define CST4315_RCAL_VALUE_MASK 0x000001f0 +#define CST4315_RCAL_VALUE_SHIFT 4 +#define CST4315_PALDO_EXTPNP 0x00000200 /**< PALDO is configured with external PNP */ +#define CST4315_CBUCK_MODE_MASK 0x00000c00 +#define CST4315_CBUCK_MODE_BURST 0x00000400 +#define CST4315_CBUCK_MODE_LPBURST 0x00000c00 + +/* 4319 resources */ +#define RES4319_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4319_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4319_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4319_CLDO_PU 4 /**< 0x00000010 */ +#define RES4319_PALDO_PU 5 /**< 0x00000020 */ +#define RES4319_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4319_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4319_OTP_PU 10 /**< 0x00000400 */ +#define RES4319_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4319_XTAL_PU 13 /**< 0x00002000 */ +#define RES4319_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4319_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4319_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4319_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4319_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4319_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4319_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4319_HT_AVAIL 21 /**< 0x00200000 */ + +/* 4319 chip-specific ChipStatus register bits */ +#define CST4319_SPI_CPULESSUSB 0x00000001 +#define CST4319_SPI_CLK_POL 0x00000002 +#define CST4319_SPI_CLK_PH 0x00000008 +#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /**< gpio [7:6], SDIO CIS selection */ +#define CST4319_SPROM_OTP_SEL_SHIFT 6 +#define CST4319_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */ +#define CST4319_SPROM_SEL 0x00000040 /**< use SPROM, OTP is powered up */ +#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */ +#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */ +#define CST4319_SDIO_USB_MODE 0x00000100 /**< gpio [8], sdio/usb mode */ +#define CST4319_REMAP_SEL_MASK 0x00000600 +#define CST4319_ILPDIV_EN 0x00000800 +#define CST4319_XTAL_PD_POL 0x00001000 +#define CST4319_LPO_SEL 0x00002000 +#define CST4319_RES_INIT_MODE 0x0000c000 +#define CST4319_PALDO_EXTPNP 0x00010000 /**< PALDO is configured with external PNP */ +#define CST4319_CBUCK_MODE_MASK 0x00060000 +#define CST4319_CBUCK_MODE_BURST 0x00020000 +#define CST4319_CBUCK_MODE_LPBURST 0x00060000 +#define CST4319_RCAL_VALID 0x01000000 +#define CST4319_RCAL_VALUE_MASK 0x3e000000 +#define CST4319_RCAL_VALUE_SHIFT 25 + +#define PMU1_PLL0_CHIPCTL0 0 +#define PMU1_PLL0_CHIPCTL1 1 +#define PMU1_PLL0_CHIPCTL2 2 +#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000 +#define CCTL_4319USB_XTAL_SEL_SHIFT 19 +#define CCTL_4319USB_48MHZ_PLL_SEL 1 +#define CCTL_4319USB_24MHZ_PLL_SEL 2 + +/* PMU resources for 4336 */ +#define RES4336_CBUCK_LPOM 0 +#define RES4336_CBUCK_BURST 1 +#define RES4336_CBUCK_LP_PWM 2 +#define RES4336_CBUCK_PWM 3 +#define RES4336_CLDO_PU 4 +#define RES4336_DIS_INT_RESET_PD 5 +#define RES4336_ILP_REQUEST 6 +#define RES4336_LNLDO_PU 7 +#define RES4336_LDO3P3_PU 8 +#define RES4336_OTP_PU 9 +#define RES4336_XTAL_PU 10 +#define RES4336_ALP_AVAIL 11 +#define RES4336_RADIO_PU 12 +#define RES4336_BG_PU 13 +#define 
RES4336_VREG1p4_PU_PU 14 +#define RES4336_AFE_PWRSW_PU 15 +#define RES4336_RX_PWRSW_PU 16 +#define RES4336_TX_PWRSW_PU 17 +#define RES4336_BB_PWRSW_PU 18 +#define RES4336_SYNTH_PWRSW_PU 19 +#define RES4336_MISC_PWRSW_PU 20 +#define RES4336_LOGEN_PWRSW_PU 21 +#define RES4336_BBPLL_PWRSW_PU 22 +#define RES4336_MACPHY_CLKAVAIL 23 +#define RES4336_HT_AVAIL 24 +#define RES4336_RSVD 25 + +/* 4336 chip-specific ChipStatus register bits */ +#define CST4336_SPI_MODE_MASK 0x00000001 +#define CST4336_SPROM_PRESENT 0x00000002 +#define CST4336_OTP_PRESENT 0x00000004 +#define CST4336_ARMREMAP_0 0x00000008 +#define CST4336_ILPDIV_EN_MASK 0x00000010 +#define CST4336_ILPDIV_EN_SHIFT 4 +#define CST4336_XTAL_PD_POL_MASK 0x00000020 +#define CST4336_XTAL_PD_POL_SHIFT 5 +#define CST4336_LPO_SEL_MASK 0x00000040 +#define CST4336_LPO_SEL_SHIFT 6 +#define CST4336_RES_INIT_MODE_MASK 0x00000180 +#define CST4336_RES_INIT_MODE_SHIFT 7 +#define CST4336_CBUCK_MODE_MASK 0x00000600 +#define CST4336_CBUCK_MODE_SHIFT 9 + +/* 4336 Chip specific PMU ChipControl register bits */ +#define PCTL_4336_SERIAL_ENAB (1 << 24) + +/* 4330 resources */ +#define RES4330_CBUCK_LPOM 0 +#define RES4330_CBUCK_BURST 1 +#define RES4330_CBUCK_LP_PWM 2 +#define RES4330_CBUCK_PWM 3 +#define RES4330_CLDO_PU 4 +#define RES4330_DIS_INT_RESET_PD 5 +#define RES4330_ILP_REQUEST 6 +#define RES4330_LNLDO_PU 7 +#define RES4330_LDO3P3_PU 8 +#define RES4330_OTP_PU 9 +#define RES4330_XTAL_PU 10 +#define RES4330_ALP_AVAIL 11 +#define RES4330_RADIO_PU 12 +#define RES4330_BG_PU 13 +#define RES4330_VREG1p4_PU_PU 14 +#define RES4330_AFE_PWRSW_PU 15 +#define RES4330_RX_PWRSW_PU 16 +#define RES4330_TX_PWRSW_PU 17 +#define RES4330_BB_PWRSW_PU 18 +#define RES4330_SYNTH_PWRSW_PU 19 +#define RES4330_MISC_PWRSW_PU 20 +#define RES4330_LOGEN_PWRSW_PU 21 +#define RES4330_BBPLL_PWRSW_PU 22 +#define RES4330_MACPHY_CLKAVAIL 23 +#define RES4330_HT_AVAIL 24 +#define RES4330_5gRX_PWRSW_PU 25 +#define RES4330_5gTX_PWRSW_PU 26 +#define RES4330_5g_LOGEN_PWRSW_PU 27 + +/* 4330 chip-specific ChipStatus register bits */ +#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /**< SDIO || gSPI */ +#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /**< USB || USBDA */ +#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /**< SDIO */ +#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /**< gSPI */ +#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /**< USB packet-oriented */ +#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /**< USB Direct Access */ +#define CST4330_OTP_PRESENT 0x00000010 +#define CST4330_LPO_AUTODET_EN 0x00000020 +#define CST4330_ARMREMAP_0 0x00000040 +#define CST4330_SPROM_PRESENT 0x00000080 /**< takes priority over OTP if both set */ +#define CST4330_ILPDIV_EN 0x00000100 +#define CST4330_LPO_SEL 0x00000200 +#define CST4330_RES_INIT_MODE_SHIFT 10 +#define CST4330_RES_INIT_MODE_MASK 0x00000c00 +#define CST4330_CBUCK_MODE_SHIFT 12 +#define CST4330_CBUCK_MODE_MASK 0x00003000 +#define CST4330_CBUCK_POWER_OK 0x00004000 +#define CST4330_BB_PLL_LOCKED 0x00008000 +#define SOCDEVRAM_BP_ADDR 0x1E000000 +#define SOCDEVRAM_ARM_ADDR 0x00800000 + +/* 4330 Chip specific PMU ChipControl register bits */ +#define PCTL_4330_SERIAL_ENAB (1 << 24) + +/* 4330 Chip specific ChipControl register bits */ +#define CCTRL_4330_GPIO_SEL 0x00000001 /* 1=select GPIOs to be muxed out */ +#define CCTRL_4330_ERCX_SEL 0x00000002 /* 1=select ERCX BT coex to be muxed out */ +#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004 /* SDIO: 1=configure GPIO0 for host wake */ +#define 
CCTRL_4330_JTAG_DISABLE 0x00000008 /* 1=disable JTAG interface on mux'd pins */ + +#define PMU_VREG0_ADDR 0 +#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2 +#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3 + +#define PMU_VREG4_ADDR 4 + +#define PMU_VREG4_CLDO_PWM_SHIFT 4 +#define PMU_VREG4_CLDO_PWM_MASK 0x7 + +#define PMU_VREG4_LPLDO1_SHIFT 15 +#define PMU_VREG4_LPLDO1_MASK 0x7 +#define PMU_VREG4_LPLDO1_1p20V 0 +#define PMU_VREG4_LPLDO1_1p15V 1 +#define PMU_VREG4_LPLDO1_1p10V 2 +#define PMU_VREG4_LPLDO1_1p25V 3 +#define PMU_VREG4_LPLDO1_1p05V 4 +#define PMU_VREG4_LPLDO1_1p00V 5 +#define PMU_VREG4_LPLDO1_0p95V 6 +#define PMU_VREG4_LPLDO1_0p90V 7 + +/* 4350/4345 VREG4 settings */ +#define PMU4350_VREG4_LPLDO1_1p10V 0 +#define PMU4350_VREG4_LPLDO1_1p15V 1 +#define PMU4350_VREG4_LPLDO1_1p21V 2 +#define PMU4350_VREG4_LPLDO1_1p24V 3 +#define PMU4350_VREG4_LPLDO1_0p90V 4 +#define PMU4350_VREG4_LPLDO1_0p96V 5 +#define PMU4350_VREG4_LPLDO1_1p01V 6 +#define PMU4350_VREG4_LPLDO1_1p04V 7 + +#define PMU_VREG4_LPLDO2_LVM_SHIFT 18 +#define PMU_VREG4_LPLDO2_LVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_HVM_SHIFT 21 +#define PMU_VREG4_LPLDO2_HVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_LVM_HVM_MASK 0x3f +#define PMU_VREG4_LPLDO2_1p00V 0 +#define PMU_VREG4_LPLDO2_1p15V 1 +#define PMU_VREG4_LPLDO2_1p20V 2 +#define PMU_VREG4_LPLDO2_1p10V 3 +#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */ + +#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27 +#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1 + +#define PMU_VREG5_ADDR 5 +#define PMU_VREG5_HSICAVDD_PD_SHIFT 6 +#define PMU_VREG5_HSICAVDD_PD_MASK 0x1 +#define PMU_VREG5_HSICDVDD_PD_SHIFT 11 +#define PMU_VREG5_HSICDVDD_PD_MASK 0x1 + +/* 4334 resources */ +#define RES4334_LPLDO_PU 0 +#define RES4334_RESET_PULLDN_DIS 1 +#define RES4334_PMU_BG_PU 2 +#define RES4334_HSIC_LDO_PU 3 +#define RES4334_CBUCK_LPOM_PU 4 +#define RES4334_CBUCK_PFM_PU 5 +#define RES4334_CLDO_PU 6 +#define RES4334_LPLDO2_LVM 7 +#define RES4334_LNLDO_PU 8 +#define RES4334_LDO3P3_PU 9 +#define RES4334_OTP_PU 10 +#define RES4334_XTAL_PU 11 +#define RES4334_WL_PWRSW_PU 12 +#define RES4334_LQ_AVAIL 13 +#define RES4334_LOGIC_RET 14 +#define RES4334_MEM_SLEEP 15 +#define RES4334_MACPHY_RET 16 +#define RES4334_WL_CORE_READY 17 +#define RES4334_ILP_REQ 18 +#define RES4334_ALP_AVAIL 19 +#define RES4334_MISC_PWRSW_PU 20 +#define RES4334_SYNTH_PWRSW_PU 21 +#define RES4334_RX_PWRSW_PU 22 +#define RES4334_RADIO_PU 23 +#define RES4334_WL_PMU_PU 24 +#define RES4334_VCO_LDO_PU 25 +#define RES4334_AFE_LDO_PU 26 +#define RES4334_RX_LDO_PU 27 +#define RES4334_TX_LDO_PU 28 +#define RES4334_HT_AVAIL 29 +#define RES4334_MACPHY_CLK_AVAIL 30 + +/* 4334 chip-specific ChipStatus register bits */ +#define CST4334_CHIPMODE_MASK 7 +#define CST4334_SDIO_MODE 0x00000000 +#define CST4334_SPI_MODE 0x00000004 +#define CST4334_HSIC_MODE 0x00000006 +#define CST4334_BLUSB_MODE 0x00000007 +#define CST4334_CHIPMODE_HSIC(cs) (((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE) +#define CST4334_OTP_PRESENT 0x00000010 +#define CST4334_LPO_AUTODET_EN 0x00000020 +#define CST4334_ARMREMAP_0 0x00000040 +#define CST4334_SPROM_PRESENT 0x00000080 +#define CST4334_ILPDIV_EN_MASK 0x00000100 +#define CST4334_ILPDIV_EN_SHIFT 8 +#define CST4334_LPO_SEL_MASK 0x00000200 +#define CST4334_LPO_SEL_SHIFT 9 +#define CST4334_RES_INIT_MODE_MASK 0x00000C00 +#define CST4334_RES_INIT_MODE_SHIFT 10 + +/* 4334 Chip specific PMU ChipControl register bits */ +#define PCTL_4334_GPIO3_ENAB (1 << 3) + +/* 4334 Chip control */ +#define CCTRL4334_PMU_WAKEUP_GPIO1 (1 << 0) +#define 
CCTRL4334_PMU_WAKEUP_HSIC (1 << 1) +#define CCTRL4334_PMU_WAKEUP_AOS (1 << 2) +#define CCTRL4334_HSIC_WAKE_MODE (1 << 3) +#define CCTRL4334_HSIC_INBAND_GPIO1 (1 << 4) +#define CCTRL4334_HSIC_LDO_PU (1 << 23) + +/* 4334 Chip control 3 */ +#define CCTRL4334_BLOCK_EXTRNL_WAKE (1 << 4) +#define CCTRL4334_SAVERESTORE_FIX (1 << 5) + +/* 43341 Chip control 3 */ +#define CCTRL43341_BLOCK_EXTRNL_WAKE (1 << 13) +#define CCTRL43341_SAVERESTORE_FIX (1 << 14) +#define CCTRL43341_BT_ISO_SEL (1 << 16) + +/* 4334 Chip specific ChipControl1 register bits */ +#define CCTRL1_4334_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4334_ERCX_SEL (1 << 1) /* 1=select ERCX BT coex to be muxed out */ +#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ +#define CCTRL1_4334_JTAG_DISABLE (1 << 3) /* 1=disable JTAG interface on mux'd pins */ +#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /**< 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */ + +/* 4324 Chip specific ChipControl1 register bits */ +#define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ + +/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */ +/* register contains strap values sampled during POR */ +#define CST43143_REMAP_TO_ROM (3 << 0) /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */ +#define CST43143_SDIO_EN (1 << 2) /* 0 = USB Enab, SDIO pins are GPIO or I2S */ +#define CST43143_SDIO_ISO (1 << 3) /* 1 = SDIO isolated */ +#define CST43143_USB_CPU_LESS (1 << 4) /* 1 = CPULess mode Enabled */ +#define CST43143_CBUCK_MODE (3 << 6) /* Indicates what controller mode CBUCK is in */ +#define CST43143_POK_CBUCK (1 << 8) /* 1 = 1.2V CBUCK voltage ready */ +#define CST43143_PMU_OVRSPIKE (1 << 9) +#define CST43143_PMU_OVRTEMP (0xF << 10) +#define CST43143_SR_FLL_CAL_DONE (1 << 14) +#define CST43143_USB_PLL_LOCKDET (1 << 15) +#define CST43143_PMU_PLL_LOCKDET (1 << 16) +#define CST43143_CHIPMODE_SDIOD(cs) (((cs) & CST43143_SDIO_EN) != 0) /* SDIO */ + +/* 43143 Chip specific ChipControl register bits */ +/* 00: SECI is disabled (JATG functional), 01: 2 wire, 10: 4 wire */ +#define CCTRL_43143_SECI (1<<0) +#define CCTRL_43143_BT_LEGACY (1<<1) +#define CCTRL_43143_I2S_MODE (1<<2) /**< 0: SDIO enabled */ +#define CCTRL_43143_I2S_MASTER (1<<3) /**< 0: I2S MCLK input disabled */ +#define CCTRL_43143_I2S_FULL (1<<4) /**< 0: I2S SDIN and SPDIF_TX inputs disabled */ +#define CCTRL_43143_GSIO (1<<5) /**< 0: sFlash enabled */ +#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /**< 0: disabled */ +#define CCTRL_43143_RF_SWCTRL_0 (1<<6) +#define CCTRL_43143_RF_SWCTRL_1 (2<<6) +#define CCTRL_43143_RF_SWCTRL_2 (4<<6) +#define CCTRL_43143_RF_XSWCTRL (1<<9) /**< 0: UART enabled */ +#define CCTRL_43143_HOST_WAKE0 (1<<11) /**< 1: SDIO separate interrupt output from GPIO4 */ +#define CCTRL_43143_HOST_WAKE1 (1<<12) /* 1: SDIO separate interrupt output from GPIO16 */ + +/* 43143 resources, based on pmu_params.xls V1.19 */ +#define RES43143_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES43143_XTAL_PU 1 /**< 0x00002 */ +#define RES43143_ILP_REQUEST 2 /**< 0x00004 */ +#define RES43143_ALP_AVAIL 3 /**< 0x00008 */ +#define RES43143_WL_CORE_READY 4 /**< 0x00010 */ +#define RES43143_BBPLL_PWRSW_PU 5 /**< 0x00020 */ +#define RES43143_HT_AVAIL 6 /**< 0x00040 */ +#define RES43143_RADIO_PU 7 /**< 0x00080 */ +#define RES43143_MACPHY_CLK_AVAIL 8 /**< 0x00100 */ +#define RES43143_OTP_PU 9 /**< 0x00200 */ +#define 
RES43143_LQ_AVAIL 10 /**< 0x00400 */ + +#define PMU43143_XTAL_CORE_SIZE_MASK 0x3F + +/* 4313 resources */ +#define RES4313_BB_PU_RSRC 0 +#define RES4313_ILP_REQ_RSRC 1 +#define RES4313_XTAL_PU_RSRC 2 +#define RES4313_ALP_AVAIL_RSRC 3 +#define RES4313_RADIO_PU_RSRC 4 +#define RES4313_BG_PU_RSRC 5 +#define RES4313_VREG1P4_PU_RSRC 6 +#define RES4313_AFE_PWRSW_RSRC 7 +#define RES4313_RX_PWRSW_RSRC 8 +#define RES4313_TX_PWRSW_RSRC 9 +#define RES4313_BB_PWRSW_RSRC 10 +#define RES4313_SYNTH_PWRSW_RSRC 11 +#define RES4313_MISC_PWRSW_RSRC 12 +#define RES4313_BB_PLL_PWRSW_RSRC 13 +#define RES4313_HT_AVAIL_RSRC 14 +#define RES4313_MACPHY_CLK_AVAIL_RSRC 15 + +/* 4313 chip-specific ChipStatus register bits */ +#define CST4313_SPROM_PRESENT 1 +#define CST4313_OTP_PRESENT 2 +#define CST4313_SPROM_OTP_SEL_MASK 0x00000002 +#define CST4313_SPROM_OTP_SEL_SHIFT 0 + +/* 4313 Chip specific ChipControl register bits */ +#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */ + +/* PMU respources for 4314 */ +#define RES4314_LPLDO_PU 0 +#define RES4314_PMU_SLEEP_DIS 1 +#define RES4314_PMU_BG_PU 2 +#define RES4314_CBUCK_LPOM_PU 3 +#define RES4314_CBUCK_PFM_PU 4 +#define RES4314_CLDO_PU 5 +#define RES4314_LPLDO2_LVM 6 +#define RES4314_WL_PMU_PU 7 +#define RES4314_LNLDO_PU 8 +#define RES4314_LDO3P3_PU 9 +#define RES4314_OTP_PU 10 +#define RES4314_XTAL_PU 11 +#define RES4314_WL_PWRSW_PU 12 +#define RES4314_LQ_AVAIL 13 +#define RES4314_LOGIC_RET 14 +#define RES4314_MEM_SLEEP 15 +#define RES4314_MACPHY_RET 16 +#define RES4314_WL_CORE_READY 17 +#define RES4314_ILP_REQ 18 +#define RES4314_ALP_AVAIL 19 +#define RES4314_MISC_PWRSW_PU 20 +#define RES4314_SYNTH_PWRSW_PU 21 +#define RES4314_RX_PWRSW_PU 22 +#define RES4314_RADIO_PU 23 +#define RES4314_VCO_LDO_PU 24 +#define RES4314_AFE_LDO_PU 25 +#define RES4314_RX_LDO_PU 26 +#define RES4314_TX_LDO_PU 27 +#define RES4314_HT_AVAIL 28 +#define RES4314_MACPHY_CLK_AVAIL 29 + +/* 4314 chip-specific ChipStatus register bits */ +#define CST4314_OTP_ENABLED 0x00200000 + +/* 43228 resources */ +#define RES43228_NOT_USED 0 +#define RES43228_ILP_REQUEST 1 +#define RES43228_XTAL_PU 2 +#define RES43228_ALP_AVAIL 3 +#define RES43228_PLL_EN 4 +#define RES43228_HT_PHY_AVAIL 5 + +/* 43228 chipstatus reg bits */ +#define CST43228_ILP_DIV_EN 0x1 +#define CST43228_OTP_PRESENT 0x2 +#define CST43228_SERDES_REFCLK_PADSEL 0x4 +#define CST43228_SDIO_MODE 0x8 +#define CST43228_SDIO_OTP_PRESENT 0x10 +#define CST43228_SDIO_RESET 0x20 + +/* 4706 chipstatus reg bits */ +#define CST4706_PKG_OPTION (1<<0) /* 0: full-featured package 1: low-cost package */ +#define CST4706_SFLASH_PRESENT (1<<1) /* 0: parallel, 1: serial flash is present */ +#define CST4706_SFLASH_TYPE (1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ +#define CST4706_MIPS_BENDIAN (1<<3) /* 0: little, 1: big endian */ +#define CST4706_PCIE1_DISABLE (1<<5) /* PCIE1 enable strap pin */ + +/* 4706 flashstrconfig reg bits */ +#define FLSTRCF4706_MASK 0x000000ff +#define FLSTRCF4706_SF1 0x00000001 /**< 2nd serial flash present */ +#define FLSTRCF4706_PF1 0x00000002 /**< 2nd parallel flash present */ +#define FLSTRCF4706_SF1_TYPE 0x00000004 /**< 2nd serial flash type : 0 : ST, 1 : Atmel */ +#define FLSTRCF4706_NF1 0x00000008 /**< 2nd NAND flash present */ +#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /**< Valid value mask */ +#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /**< 4MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /**< 8MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /**< 16MB */ 
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /**< 32MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /**< 64MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /**< 128MB */ +#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /**< 256MB */ + +/* 4360 Chip specific ChipControl register bits */ +#define CCTRL4360_I2C_MODE (1 << 0) +#define CCTRL4360_UART_MODE (1 << 1) +#define CCTRL4360_SECI_MODE (1 << 2) +#define CCTRL4360_BTSWCTRL_MODE (1 << 3) +#define CCTRL4360_DISCRETE_FEMCTRL_MODE (1 << 4) +#define CCTRL4360_DIGITAL_PACTRL_MODE (1 << 5) +#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT (1 << 6) +#define CCTRL4360_EXTRA_GPIO_MODE (1 << 7) +#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8) +#define CCTRL4360_BT_LGCY_MODE (1 << 9) +#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21) +#define CCTRL4360_SECI_ON_GPIO01 (1 << 24) + +/* 4360 Chip specific Regulator Control register bits */ +#define RCTRL4360_RFLDO_PWR_DOWN (1 << 1) + +/* 4360 PMU resources and chip status bits */ +#define RES4360_REGULATOR 0 +#define RES4360_ILP_AVAIL 1 +#define RES4360_ILP_REQ 2 +#define RES4360_XTAL_LDO_PU 3 +#define RES4360_XTAL_PU 4 +#define RES4360_ALP_AVAIL 5 +#define RES4360_BBPLLPWRSW_PU 6 +#define RES4360_HT_AVAIL 7 +#define RES4360_OTP_PU 8 +#define RES4360_AVB_PLL_PWRSW_PU 9 +#define RES4360_PCIE_TL_CLK_AVAIL 10 + +#define CST4360_XTAL_40MZ 0x00000001 +#define CST4360_SFLASH 0x00000002 +#define CST4360_SPROM_PRESENT 0x00000004 +#define CST4360_SFLASH_TYPE 0x00000004 +#define CST4360_OTP_ENABLED 0x00000008 +#define CST4360_REMAP_ROM 0x00000010 +#define CST4360_RSRC_INIT_MODE_MASK 0x00000060 +#define CST4360_RSRC_INIT_MODE_SHIFT 5 +#define CST4360_ILP_DIVEN 0x00000080 +#define CST4360_MODE_USB 0x00000100 +#define CST4360_SPROM_SIZE_MASK 0x00000600 +#define CST4360_SPROM_SIZE_SHIFT 9 +#define CST4360_BBPLL_LOCK 0x00000800 +#define CST4360_AVBBPLL_LOCK 0x00001000 +#define CST4360_USBBBPLL_LOCK 0x00002000 +#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ + CST4360_RSRC_INIT_MODE_SHIFT) + +#define CCTRL_4360_UART_SEL 0x2 +#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ + CST4360_RSRC_INIT_MODE_SHIFT) + + +/* 43602 PMU resources based on pmu_params.xls version v0.95 */ +#define RES43602_LPLDO_PU 0 +#define RES43602_REGULATOR 1 +#define RES43602_PMU_SLEEP 2 +#define RES43602_RSVD_3 3 +#define RES43602_XTALLDO_PU 4 +#define RES43602_SERDES_PU 5 +#define RES43602_BBPLL_PWRSW_PU 6 +#define RES43602_SR_CLK_START 7 +#define RES43602_SR_PHY_PWRSW 8 +#define RES43602_SR_SUBCORE_PWRSW 9 +#define RES43602_XTAL_PU 10 +#define RES43602_PERST_OVR 11 +#define RES43602_SR_CLK_STABLE 12 +#define RES43602_SR_SAVE_RESTORE 13 +#define RES43602_SR_SLEEP 14 +#define RES43602_LQ_START 15 +#define RES43602_LQ_AVAIL 16 +#define RES43602_WL_CORE_RDY 17 +#define RES43602_ILP_REQ 18 +#define RES43602_ALP_AVAIL 19 +#define RES43602_RADIO_PU 20 +#define RES43602_RFLDO_PU 21 +#define RES43602_HT_START 22 +#define RES43602_HT_AVAIL 23 +#define RES43602_MACPHY_CLKAVAIL 24 +#define RES43602_PARLDO_PU 25 +#define RES43602_RSVD_26 26 + +/* 43602 chip status bits */ +#define CST43602_SPROM_PRESENT (1<<1) +#define CST43602_SPROM_SIZE (1<<10) /* 0 = 16K, 1 = 4K */ +#define CST43602_BBPLL_LOCK (1<<11) +#define CST43602_RF_LDO_OUT_OK (1<<15) /* RF LDO output OK */ + +#define PMU43602_CC1_GPIO12_OVRD (1<<28) /* GPIO12 override */ + +#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1) /* creates gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN (1<<2) /* 
creates gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3) +#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5) /* enable pmu_wakeup to request for ALP_AVAIL */ +#define PMU43602_CC2_PERST_L_EXTEND_EN (1<<9) /* extend perst_l until rsc PERST_OVR comes up */ +#define PMU43602_CC2_FORCE_EXT_LPO (1<<19) /* 1=ext LPO clock is the final LPO clock */ +#define PMU43602_CC2_XTAL32_SEL (1<<30) /* 0=ext_clock, 1=xtal */ + +#define CC_SR1_43602_SR_ASM_ADDR (0x0) + +/* PLL CTL register values for open loop, used during S/R operation */ +#define PMU43602_PLL_CTL6_VAL 0x68000528 +#define PMU43602_PLL_CTL7_VAL 0x6 + +#define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29) + +/* 4365 PMU resources */ +#define RES4365_REGULATOR_PU 0 +#define RES4365_XTALLDO_PU 1 +#define RES4365_XTAL_PU 2 +#define RES4365_CPU_PLLLDO_PU 3 +#define RES4365_CPU_PLL_PU 4 +#define RES4365_WL_CORE_RDY 5 +#define RES4365_ILP_REQ 6 +#define RES4365_ALP_AVAIL 7 +#define RES4365_HT_AVAIL 8 +#define RES4365_BB_PLLLDO_PU 9 +#define RES4365_BB_PLL_PU 10 +#define RES4365_MINIMU_PU 11 +#define RES4365_RADIO_PU 12 +#define RES4365_MACPHY_CLK_AVAIL 13 + +/* 4349 related */ +#define RES4349_LPLDO_PU 0 +#define RES4349_BG_PU 1 +#define RES4349_PMU_SLEEP 2 +#define RES4349_PALDO3P3_PU 3 +#define RES4349_CBUCK_LPOM_PU 4 +#define RES4349_CBUCK_PFM_PU 5 +#define RES4349_COLD_START_WAIT 6 +#define RES4349_RSVD_7 7 +#define RES4349_LNLDO_PU 8 +#define RES4349_XTALLDO_PU 9 +#define RES4349_LDO3P3_PU 10 +#define RES4349_OTP_PU 11 +#define RES4349_XTAL_PU 12 +#define RES4349_SR_CLK_START 13 +#define RES4349_LQ_AVAIL 14 +#define RES4349_LQ_START 15 +#define RES4349_PERST_OVR 16 +#define RES4349_WL_CORE_RDY 17 +#define RES4349_ILP_REQ 18 +#define RES4349_ALP_AVAIL 19 +#define RES4349_MINI_PMU 20 +#define RES4349_RADIO_PU 21 +#define RES4349_SR_CLK_STABLE 22 +#define RES4349_SR_SAVE_RESTORE 23 +#define RES4349_SR_PHY_PWRSW 24 +#define RES4349_SR_VDDM_PWRSW 25 +#define RES4349_SR_SUBCORE_PWRSW 26 +#define RES4349_SR_SLEEP 27 +#define RES4349_HT_START 28 +#define RES4349_HT_AVAIL 29 +#define RES4349_MACPHY_CLKAVAIL 30 + +#define CR4_4349_RAM_BASE (0x180000) +#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) + +/* SR binary offset is at 8K */ +#define CC_SR1_4349_SR_ASM_ADDR (0x10) + +#define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ + +#define CST4349_SPROM_PRESENT 0x00000010 + +#define CC2_4349_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4349_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4349_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4349_SDIO_AOS_WAKEUP_SHIFT (24) + + +#define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4349_PMU_EN_EXT_PERST_MASK (1 << 13) +#define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN (1 << 15) +#define CC6_4349_PMU_EN_MDIO_MASK (1 << 16) +#define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25) + + + +/* 43430 PMU resources based on pmu_params.xls */ +#define RES43430_LPLDO_PU 0 +#define RES43430_BG_PU 1 +#define RES43430_PMU_SLEEP 2 +#define RES43430_RSVD_3 3 +#define RES43430_CBUCK_LPOM_PU 4 +#define RES43430_CBUCK_PFM_PU 5 +#define RES43430_COLD_START_WAIT 6 +#define RES43430_RSVD_7 7 +#define RES43430_LNLDO_PU 8 +#define RES43430_RSVD_9 9 +#define RES43430_LDO3P3_PU 10 +#define RES43430_OTP_PU 11 +#define RES43430_XTAL_PU 12 +#define RES43430_SR_CLK_START 13 +#define 
RES43430_LQ_AVAIL 14 +#define RES43430_LQ_START 15 +#define RES43430_RSVD_16 16 +#define RES43430_WL_CORE_RDY 17 +#define RES43430_ILP_REQ 18 +#define RES43430_ALP_AVAIL 19 +#define RES43430_MINI_PMU 20 +#define RES43430_RADIO_PU 21 +#define RES43430_SR_CLK_STABLE 22 +#define RES43430_SR_SAVE_RESTORE 23 +#define RES43430_SR_PHY_PWRSW 24 +#define RES43430_SR_VDDM_PWRSW 25 +#define RES43430_SR_SUBCORE_PWRSW 26 +#define RES43430_SR_SLEEP 27 +#define RES43430_HT_START 28 +#define RES43430_HT_AVAIL 29 +#define RES43430_MACPHY_CLK_AVAIL 30 + +/* 43430 chip status bits */ +#define CST43430_SDIO_MODE 0x00000001 +#define CST43430_GSPI_MODE 0x00000002 +#define CST43430_RSRC_INIT_MODE_0 0x00000080 +#define CST43430_RSRC_INIT_MODE_1 0x00000100 +#define CST43430_SEL0_SDIO 0x00000200 +#define CST43430_SEL1_SDIO 0x00000400 +#define CST43430_SEL2_SDIO 0x00000800 +#define CST43430_BBPLL_LOCKED 0x00001000 +#define CST43430_DBG_INST_DETECT 0x00004000 +#define CST43430_CLB2WL_BT_READY 0x00020000 +#define CST43430_JTAG_MODE 0x00100000 +#define CST43430_HOST_IFACE 0x00400000 +#define CST43430_TRIM_EN 0x00800000 +#define CST43430_DIN_PACKAGE_OPTION 0x10000000 + +#define PMU_MACCORE_0_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F + +#define PMU_MACCORE_1_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F + +/* defines to detect active host interface in use */ +#define CHIP_HOSTIF_PCIEMODE 0x1 +#define CHIP_HOSTIF_USBMODE 0x2 +#define CHIP_HOSTIF_SDIOMODE 0x4 +#define CHIP_HOSTIF_PCIE(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE) +#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE) +#define CHIP_HOSTIF_SDIO(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE) + +/* 4335 resources */ +#define RES4335_LPLDO_PO 0 +#define RES4335_PMU_BG_PU 1 +#define RES4335_PMU_SLEEP 2 +#define RES4335_RSVD_3 3 +#define RES4335_CBUCK_LPOM_PU 4 +#define RES4335_CBUCK_PFM_PU 5 +#define RES4335_RSVD_6 6 +#define RES4335_RSVD_7 7 +#define RES4335_LNLDO_PU 8 +#define RES4335_XTALLDO_PU 9 +#define RES4335_LDO3P3_PU 10 +#define RES4335_OTP_PU 11 +#define RES4335_XTAL_PU 12 +#define RES4335_SR_CLK_START 13 +#define RES4335_LQ_AVAIL 14 +#define RES4335_LQ_START 15 +#define RES4335_RSVD_16 16 +#define RES4335_WL_CORE_RDY 17 +#define RES4335_ILP_REQ 18 +#define RES4335_ALP_AVAIL 19 +#define RES4335_MINI_PMU 20 +#define RES4335_RADIO_PU 21 +#define RES4335_SR_CLK_STABLE 22 +#define RES4335_SR_SAVE_RESTORE 23 +#define RES4335_SR_PHY_PWRSW 24 +#define RES4335_SR_VDDM_PWRSW 25 +#define RES4335_SR_SUBCORE_PWRSW 26 +#define RES4335_SR_SLEEP 27 +#define RES4335_HT_START 28 +#define RES4335_HT_AVAIL 29 +#define RES4335_MACPHY_CLKAVAIL 30 + +/* 4335 Chip specific ChipStatus register bits */ +#define CST4335_SPROM_MASK 0x00000020 +#define CST4335_SFLASH_MASK 0x00000040 +#define CST4335_RES_INIT_MODE_SHIFT 7 +#define CST4335_RES_INIT_MODE_MASK 0x00000180 +#define CST4335_CHIPMODE_MASK 0xF +#define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ +#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ +#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /**< HSIC || USBDA */ +#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ + +/* 4335 Chip specific ChipControl1 register bits */ +#define CCTRL1_4335_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ + +/* 4335 Chip specific ChipControl2 register bits */ +#define CCTRL2_4335_AOSBLOCK (1 
<< 30) +#define CCTRL2_4335_PMUWAKE (1 << 31) +#define PATCHTBL_SIZE (0x800) +#define CR4_4335_RAM_BASE (0x180000) +#define CR4_4345_LT_C0_RAM_BASE (0x1b0000) +#define CR4_4345_GE_C0_RAM_BASE (0x198000) +#define CR4_4349_RAM_BASE (0x180000) +#define CR4_4350_RAM_BASE (0x180000) +#define CR4_4360_RAM_BASE (0x0) +#define CR4_43602_RAM_BASE (0x180000) +#define CA7_4365_RAM_BASE (0x200000) + + +/* 4335 chip OTP present & OTP select bits. */ +#define SPROM4335_OTP_SELECT 0x00000010 +#define SPROM4335_OTP_PRESENT 0x00000020 + +/* 4335 GCI specific bits. */ +#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT (1 << 24) +#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE 25 +#define CC4335_GCI_FUNC_SEL_PAD_SDIO 0x00707770 + +/* SFLASH clkdev specific bits. */ +#define CC4335_SFLASH_CLKDIV_MASK 0x1F000000 +#define CC4335_SFLASH_CLKDIV_SHIFT 25 + +/* 4335 OTP bits for SFLASH. */ +#define CC4335_SROM_OTP_SFLASH 40 +#define CC4335_SROM_OTP_SFLASH_PRESENT 0x1 +#define CC4335_SROM_OTP_SFLASH_TYPE 0x2 +#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK 0x003C +#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT 2 + +/* 4335 resources--END */ + +/* 4345 Chip specific ChipStatus register bits */ +#define CST4345_SPROM_MASK 0x00000020 +#define CST4345_SFLASH_MASK 0x00000040 +#define CST4345_RES_INIT_MODE_SHIFT 7 +#define CST4345_RES_INIT_MODE_MASK 0x00000180 +#define CST4345_CHIPMODE_MASK 0x4000F +#define CST4345_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ +#define CST4345_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ +#define CST4345_CHIPMODE_HSIC(cs) (((cs) & (1 << 2)) != 0) /* HSIC */ +#define CST4345_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ +#define CST4345_CHIPMODE_USB20D(cs) (((cs) & (1 << 18)) != 0) /* USBDA */ + +/* 4350 Chipcommon ChipStatus bits */ +#define CST4350_SDIO_MODE 0x00000001 +#define CST4350_HSIC20D_MODE 0x00000002 +#define CST4350_BP_ON_HSIC_CLK 0x00000004 +#define CST4350_PCIE_MODE 0x00000008 +#define CST4350_USB20D_MODE 0x00000010 +#define CST4350_USB30D_MODE 0x00000020 +#define CST4350_SPROM_PRESENT 0x00000040 +#define CST4350_RSRC_INIT_MODE_0 0x00000080 +#define CST4350_RSRC_INIT_MODE_1 0x00000100 +#define CST4350_SEL0_SDIO 0x00000200 +#define CST4350_SEL1_SDIO 0x00000400 +#define CST4350_SDIO_PAD_MODE 0x00000800 +#define CST4350_BBPLL_LOCKED 0x00001000 +#define CST4350_USBPLL_LOCKED 0x00002000 +#define CST4350_LINE_STATE 0x0000C000 +#define CST4350_SERDES_PIPE_PLLLOCK 0x00010000 +#define CST4350_BT_READY 0x00020000 +#define CST4350_SFLASH_PRESENT 0x00040000 +#define CST4350_CPULESS_ENABLE 0x00080000 +#define CST4350_STRAP_HOST_IFC_1 0x00100000 +#define CST4350_STRAP_HOST_IFC_2 0x00200000 +#define CST4350_STRAP_HOST_IFC_3 0x00400000 +#define CST4350_RAW_SPROM_PRESENT 0x00800000 +#define CST4350_APP_CLK_SWITCH_SEL_RDBACK 0x01000000 +#define CST4350_RAW_RSRC_INIT_MODE_0 0x02000000
+#define CST4350_SDIO_PAD_VDDIO 0x04000000 +#define CST4350_GSPI_MODE 0x08000000 +#define CST4350_PACKAGE_OPTION 0xF0000000 +#define CST4350_PACKAGE_SHIFT 28 + +/* package option for 4350 */ +#define CST4350_PACKAGE_WLCSP 0x0 +#define CST4350_PACKAGE_PCIE 0x1 +#define CST4350_PACKAGE_WLBGA 0x2 +#define CST4350_PACKAGE_DBG 0x3 +#define CST4350_PACKAGE_USB 0x4 +#define CST4350_PACKAGE_USB_HSIC 0x4 + +#define CST4350_PKG_MODE(cs) ((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT) + +#define CST4350_PKG_WLCSP(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP)) +#define CST4350_PKG_PCIE(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE)) +#define CST4350_PKG_WLBGA(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA)) +#define CST4350_PKG_USB(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB)) +#define CST4350_PKG_USB_HSIC(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC)) + +/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40mHz xtal */ +#define CST4350_PKG_USB_40M(cs) (cs & CST4350_RAW_SPROM_PRESENT) + +#define CST4350_CHIPMODE_SDIOD(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD)) +#define CST4350_CHIPMODE_USB20D(cs) ((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D)) +#define CST4350_CHIPMODE_HSIC20D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D)) +#define CST4350_CHIPMODE_HSIC30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D)) +#define CST4350_CHIPMODE_USB30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D)) +#define CST4350_CHIPMODE_USB30D_WL(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL)) +#define CST4350_CHIPMODE_PCIE(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE)) + +/* strap_host_ifc strap value */ +#define CST4350_HOST_IFC_MASK 0x00700000 +#define CST4350_HOST_IFC_SHIFT 20 + +/* host_ifc raw mode */ +#define CST4350_IFC_MODE_SDIOD 0x0 +#define CST4350_IFC_MODE_HSIC20D 0x1 +#define CST4350_IFC_MODE_HSIC30D 0x2 +#define CST4350_IFC_MODE_PCIE 0x3 +#define CST4350_IFC_MODE_USB20D 0x4 +#define CST4350_IFC_MODE_USB30D 0x5 +#define CST4350_IFC_MODE_USB30D_WL 0x6 +#define CST4350_IFC_MODE_USB30D_BT 0x7 + +#define CST4350_IFC_MODE(cs) ((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT) + +/* 4350 PMU resources */ +#define RES4350_LPLDO_PU 0 +#define RES4350_PMU_BG_PU 1 +#define RES4350_PMU_SLEEP 2 +#define RES4350_RSVD_3 3 +#define RES4350_CBUCK_LPOM_PU 4 +#define RES4350_CBUCK_PFM_PU 5 +#define RES4350_COLD_START_WAIT 6 +#define RES4350_RSVD_7 7 +#define RES4350_LNLDO_PU 8 +#define RES4350_XTALLDO_PU 9 +#define RES4350_LDO3P3_PU 10 +#define RES4350_OTP_PU 11 +#define RES4350_XTAL_PU 12 +#define RES4350_SR_CLK_START 13 +#define RES4350_LQ_AVAIL 14 +#define RES4350_LQ_START 15 +#define RES4350_PERST_OVR 16 +#define RES4350_WL_CORE_RDY 17 +#define RES4350_ILP_REQ 18 +#define RES4350_ALP_AVAIL 19 +#define RES4350_MINI_PMU 20 +#define RES4350_RADIO_PU 21 +#define RES4350_SR_CLK_STABLE 22 +#define RES4350_SR_SAVE_RESTORE 23 +#define RES4350_SR_PHY_PWRSW 24 +#define RES4350_SR_VDDM_PWRSW 25 +#define RES4350_SR_SUBCORE_PWRSW 26 +#define RES4350_SR_SLEEP 27 +#define RES4350_HT_START 28 +#define RES4350_HT_AVAIL 29 +#define RES4350_MACPHY_CLKAVAIL 30 + +#define MUXENAB4350_UART_MASK (0x0000000f) +#define MUXENAB4350_UART_SHIFT 0 +#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */ +#define MUXENAB4350_HOSTWAKE_SHIFT 4 + + +/* 4350 GCI function sel values */ +#define CC4350_FNSEL_HWDEF (0) +#define CC4350_FNSEL_SAMEASPIN (1) +#define CC4350_FNSEL_UART (2) +#define CC4350_FNSEL_SFLASH (3) 
+#define CC4350_FNSEL_SPROM (4) +#define CC4350_FNSEL_I2C (5) +#define CC4350_FNSEL_MISC0 (6) +#define CC4350_FNSEL_GCI (7) +#define CC4350_FNSEL_MISC1 (8) +#define CC4350_FNSEL_MISC2 (9) +#define CC4350_FNSEL_PWDOG (10) +#define CC4350_FNSEL_IND (12) +#define CC4350_FNSEL_PDN (13) +#define CC4350_FNSEL_PUP (14) +#define CC4350_FNSEL_TRISTATE (15) +#define CC4350C_FNSEL_UART (3) + + +/* 4350 GPIO */ +#define CC4350_PIN_GPIO_00 (0) +#define CC4350_PIN_GPIO_01 (1) +#define CC4350_PIN_GPIO_02 (2) +#define CC4350_PIN_GPIO_03 (3) +#define CC4350_PIN_GPIO_04 (4) +#define CC4350_PIN_GPIO_05 (5) +#define CC4350_PIN_GPIO_06 (6) +#define CC4350_PIN_GPIO_07 (7) +#define CC4350_PIN_GPIO_08 (8) +#define CC4350_PIN_GPIO_09 (9) +#define CC4350_PIN_GPIO_10 (10) +#define CC4350_PIN_GPIO_11 (11) +#define CC4350_PIN_GPIO_12 (12) +#define CC4350_PIN_GPIO_13 (13) +#define CC4350_PIN_GPIO_14 (14) +#define CC4350_PIN_GPIO_15 (15) + +#define CC4350_RSVD_16_SHIFT 16 + +#define CC2_4350_PHY_PWRSW_UPTIME_MASK (0xf << 0) +#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT (0) +#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK (0xf << 4) +#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT (4) +#define CC2_4350_VDDM_PWRSW_UPTIME_MASK (0xf << 8) +#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT (8) +#define CC2_4350_SBC_PWRSW_DNDELAY_MASK (0x3 << 12) +#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT (12) +#define CC2_4350_PHY_PWRSW_DNDELAY_MASK (0x3 << 14) +#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT (14) +#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK (0x3 << 16) +#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT (16) +#define CC2_4350_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4350_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK (1 << 21) +#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT (21) +#define CC2_4350_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT (24) + +/* Applies to 4335/4350/4345 */ +#define CC3_SR_CLK_SR_MEM_MASK (1 << 0) +#define CC3_SR_CLK_SR_MEM_SHIFT (0) +#define CC3_SR_BIT1_TBD_MASK (1 << 1) +#define CC3_SR_BIT1_TBD_SHIFT (1) +#define CC3_SR_ENGINE_ENABLE_MASK (1 << 2) +#define CC3_SR_ENGINE_ENABLE_SHIFT (2) +#define CC3_SR_BIT3_TBD_MASK (1 << 3) +#define CC3_SR_BIT3_TBD_SHIFT (3) +#define CC3_SR_MINDIV_FAST_CLK_MASK (0xF << 4) +#define CC3_SR_MINDIV_FAST_CLK_SHIFT (4) +#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK (1 << 8) +#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT (8) +#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK (1 << 9) +#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT (9) +#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK (1 << 10) +#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT (10) +#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK (1 << 11) +#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT (11) +#define CC3_SR_NUM_CLK_HIGH_MASK (0x7 << 12) +#define CC3_SR_NUM_CLK_HIGH_SHIFT (12) +#define CC3_SR_BIT15_TBD_MASK (1 << 15) +#define CC3_SR_BIT15_TBD_SHIFT (15) +#define CC3_SR_PHY_FUNC_PIC_MASK (1 << 16) +#define CC3_SR_PHY_FUNC_PIC_SHIFT (16) +#define CC3_SR_BIT17_19_TBD_MASK (0x7 << 17) +#define CC3_SR_BIT17_19_TBD_SHIFT (17) +#define CC3_SR_CHIP_TRIGGER_1_MASK (1 << 20) +#define CC3_SR_CHIP_TRIGGER_1_SHIFT (20) +#define CC3_SR_CHIP_TRIGGER_2_MASK (1 << 21) +#define CC3_SR_CHIP_TRIGGER_2_SHIFT (21) +#define CC3_SR_CHIP_TRIGGER_3_MASK (1 << 22) +#define CC3_SR_CHIP_TRIGGER_3_SHIFT (22) +#define CC3_SR_CHIP_TRIGGER_4_MASK (1 << 23) +#define CC3_SR_CHIP_TRIGGER_4_SHIFT (23) +#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK (1 << 24) +#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT (24) +#define CC3_SR_BIT25_26_TBD_MASK (0x3 << 25) +#define CC3_SR_BIT25_26_TBD_SHIFT (25) 
+#define CC3_SR_ALLOW_SBC_STBY_MASK (1 << 27) +#define CC3_SR_ALLOW_SBC_STBY_SHIFT (27) +#define CC3_SR_GPIO_MUX_MASK (0xF << 28) +#define CC3_SR_GPIO_MUX_SHIFT (28) + +/* Applies to 4335/4350/4345 */ +#define CC4_SR_INIT_ADDR_MASK (0x3FF0000) +#define CC4_4350_SR_ASM_ADDR (0x30) +#define CC4_4350_C0_SR_ASM_ADDR (0x0) +#define CC4_4335_SR_ASM_ADDR (0x48) +#define CC4_4345_SR_ASM_ADDR (0x48) +#define CC4_SR_INIT_ADDR_SHIFT (16) + +#define CC4_4350_EN_SR_CLK_ALP_MASK (1 << 30) +#define CC4_4350_EN_SR_CLK_ALP_SHIFT (30) +#define CC4_4350_EN_SR_CLK_HT_MASK (1 << 31) +#define CC4_4350_EN_SR_CLK_HT_SHIFT (31) + +#define VREG4_4350_MEMLPDO_PU_MASK (1 << 31) +#define VREG4_4350_MEMLPDO_PU_SHIFT 31 + +#define VREG6_4350_SR_EXT_CLKDIR_MASK (1 << 20) +#define VREG6_4350_SR_EXT_CLKDIR_SHIFT 20 +#define VREG6_4350_SR_EXT_CLKDIV_MASK (0x3 << 21) +#define VREG6_4350_SR_EXT_CLKDIV_SHIFT 21 +#define VREG6_4350_SR_EXT_CLKEN_MASK (1 << 23) +#define VREG6_4350_SR_EXT_CLKEN_SHIFT 23 + +#define CC5_4350_PMU_EN_ASSERT_MASK (1 << 13) +#define CC5_4350_PMU_EN_ASSERT_SHIFT (13) + +#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4350_PMU_EN_EXT_PERST_MASK (1 << 17) +#define CC6_4350_PMU_EN_EXT_PERST_SHIFT (17) +#define CC6_4350_PMU_EN_WAKEUP_MASK (1 << 18) +#define CC6_4350_PMU_EN_WAKEUP_SHIFT (18) + +#define CC7_4350_PMU_EN_ASSERT_L2_MASK (1 << 26) +#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT (26) +#define CC7_4350_PMU_EN_MDIO_MASK (1 << 27) +#define CC7_4350_PMU_EN_MDIO_SHIFT (27) + +#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK (1 << 13) +#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF (13) +#define CC6_4345_PMU_EN_L2_DEASSERT_MASK (1 << 14) +#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF (14) +#define CC6_4345_PMU_EN_ASSERT_L2_MASK (1 << 15) +#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT (15) +#define CC6_4345_PMU_EN_MDIO_MASK (1 << 24) +#define CC6_4345_PMU_EN_MDIO_SHIFT (24) + +/* GCI chipcontrol register indices */ +#define CC_GCI_CHIPCTRL_00 (0) +#define CC_GCI_CHIPCTRL_01 (1) +#define CC_GCI_CHIPCTRL_02 (2) +#define CC_GCI_CHIPCTRL_03 (3) +#define CC_GCI_CHIPCTRL_04 (4) +#define CC_GCI_CHIPCTRL_05 (5) +#define CC_GCI_CHIPCTRL_06 (6) +#define CC_GCI_CHIPCTRL_07 (7) +#define CC_GCI_CHIPCTRL_08 (8) +#define CC_GCI_CHIPCTRL_11 (11) +#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12) + +#define CC_GCI_06_JTAG_SEL_SHIFT 4 +#define CC_GCI_06_JTAG_SEL_MASK (1 << 4) + +#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8) + +/* 4345 PMU resources */ +#define RES4345_LPLDO_PU 0 +#define RES4345_PMU_BG_PU 1 +#define RES4345_PMU_SLEEP 2 +#define RES4345_HSICLDO_PU 3 +#define RES4345_CBUCK_LPOM_PU 4 +#define RES4345_CBUCK_PFM_PU 5 +#define RES4345_COLD_START_WAIT 6 +#define RES4345_RSVD_7 7 +#define RES4345_LNLDO_PU 8 +#define RES4345_XTALLDO_PU 9 +#define RES4345_LDO3P3_PU 10 +#define RES4345_OTP_PU 11 +#define RES4345_XTAL_PU 12 +#define RES4345_SR_CLK_START 13 +#define RES4345_LQ_AVAIL 14 +#define RES4345_LQ_START 15 +#define RES4345_PERST_OVR 16 +#define RES4345_WL_CORE_RDY 17 +#define RES4345_ILP_REQ 18 +#define RES4345_ALP_AVAIL 19 +#define RES4345_MINI_PMU 20 +#define RES4345_RADIO_PU 21 +#define RES4345_SR_CLK_STABLE 22 +#define RES4345_SR_SAVE_RESTORE 23 +#define RES4345_SR_PHY_PWRSW 24 +#define RES4345_SR_VDDM_PWRSW 25 +#define RES4345_SR_SUBCORE_PWRSW 26 +#define RES4345_SR_SLEEP 27 +#define RES4345_HT_START 28 +#define RES4345_HT_AVAIL 29 +#define RES4345_MACPHY_CLK_AVAIL 
30 + +/* 4335 pins +* note: only the values set as default/used are added here. +*/ +#define CC4335_PIN_GPIO_00 (0) +#define CC4335_PIN_GPIO_01 (1) +#define CC4335_PIN_GPIO_02 (2) +#define CC4335_PIN_GPIO_03 (3) +#define CC4335_PIN_GPIO_04 (4) +#define CC4335_PIN_GPIO_05 (5) +#define CC4335_PIN_GPIO_06 (6) +#define CC4335_PIN_GPIO_07 (7) +#define CC4335_PIN_GPIO_08 (8) +#define CC4335_PIN_GPIO_09 (9) +#define CC4335_PIN_GPIO_10 (10) +#define CC4335_PIN_GPIO_11 (11) +#define CC4335_PIN_GPIO_12 (12) +#define CC4335_PIN_GPIO_13 (13) +#define CC4335_PIN_GPIO_14 (14) +#define CC4335_PIN_GPIO_15 (15) +#define CC4335_PIN_SDIO_CLK (16) +#define CC4335_PIN_SDIO_CMD (17) +#define CC4335_PIN_SDIO_DATA0 (18) +#define CC4335_PIN_SDIO_DATA1 (19) +#define CC4335_PIN_SDIO_DATA2 (20) +#define CC4335_PIN_SDIO_DATA3 (21) +#define CC4335_PIN_RF_SW_CTRL_6 (22) +#define CC4335_PIN_RF_SW_CTRL_7 (23) +#define CC4335_PIN_RF_SW_CTRL_8 (24) +#define CC4335_PIN_RF_SW_CTRL_9 (25) +/* Last GPIO Pad */ +#define CC4335_PIN_GPIO_LAST (31) + +/* 4335 GCI function sel values +*/ +#define CC4335_FNSEL_HWDEF (0) +#define CC4335_FNSEL_SAMEASPIN (1) +#define CC4335_FNSEL_GPIO0 (2) +#define CC4335_FNSEL_GPIO1 (3) +#define CC4335_FNSEL_GCI0 (4) +#define CC4335_FNSEL_GCI1 (5) +#define CC4335_FNSEL_UART (6) +#define CC4335_FNSEL_SFLASH (7) +#define CC4335_FNSEL_SPROM (8) +#define CC4335_FNSEL_MISC0 (9) +#define CC4335_FNSEL_MISC1 (10) +#define CC4335_FNSEL_MISC2 (11) +#define CC4335_FNSEL_IND (12) +#define CC4335_FNSEL_PDN (13) +#define CC4335_FNSEL_PUP (14) +#define CC4335_FNSEL_TRI (15) + +/* GCI Core Control Reg */ +#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */ +#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */ +#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */ +#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */ +#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */ +#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */ +#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */ + +/* 4345 pins +* note: only the values set as default/used are added here. 
+*/ +#define CC4345_PIN_GPIO_00 (0) +#define CC4345_PIN_GPIO_01 (1) +#define CC4345_PIN_GPIO_02 (2) +#define CC4345_PIN_GPIO_03 (3) +#define CC4345_PIN_GPIO_04 (4) +#define CC4345_PIN_GPIO_05 (5) +#define CC4345_PIN_GPIO_06 (6) +#define CC4345_PIN_GPIO_07 (7) +#define CC4345_PIN_GPIO_08 (8) +#define CC4345_PIN_GPIO_09 (9) +#define CC4345_PIN_GPIO_10 (10) +#define CC4345_PIN_GPIO_11 (11) +#define CC4345_PIN_GPIO_12 (12) +#define CC4345_PIN_GPIO_13 (13) +#define CC4345_PIN_GPIO_14 (14) +#define CC4345_PIN_GPIO_15 (15) +#define CC4345_PIN_GPIO_16 (16) +#define CC4345_PIN_SDIO_CLK (17) +#define CC4345_PIN_SDIO_CMD (18) +#define CC4345_PIN_SDIO_DATA0 (19) +#define CC4345_PIN_SDIO_DATA1 (20) +#define CC4345_PIN_SDIO_DATA2 (21) +#define CC4345_PIN_SDIO_DATA3 (22) +#define CC4345_PIN_RF_SW_CTRL_0 (23) +#define CC4345_PIN_RF_SW_CTRL_1 (24) +#define CC4345_PIN_RF_SW_CTRL_2 (25) +#define CC4345_PIN_RF_SW_CTRL_3 (26) +#define CC4345_PIN_RF_SW_CTRL_4 (27) +#define CC4345_PIN_RF_SW_CTRL_5 (28) +#define CC4345_PIN_RF_SW_CTRL_6 (29) +#define CC4345_PIN_RF_SW_CTRL_7 (30) +#define CC4345_PIN_RF_SW_CTRL_8 (31) +#define CC4345_PIN_RF_SW_CTRL_9 (32) + +/* 4345 GCI function sel values +*/ +#define CC4345_FNSEL_HWDEF (0) +#define CC4345_FNSEL_SAMEASPIN (1) +#define CC4345_FNSEL_GPIO0 (2) +#define CC4345_FNSEL_GPIO1 (3) +#define CC4345_FNSEL_GCI0 (4) +#define CC4345_FNSEL_GCI1 (5) +#define CC4345_FNSEL_UART (6) +#define CC4345_FNSEL_SFLASH (7) +#define CC4345_FNSEL_SPROM (8) +#define CC4345_FNSEL_MISC0 (9) +#define CC4345_FNSEL_MISC1 (10) +#define CC4345_FNSEL_MISC2 (11) +#define CC4345_FNSEL_IND (12) +#define CC4345_FNSEL_PDN (13) +#define CC4345_FNSEL_PUP (14) +#define CC4345_FNSEL_TRI (15) + +#define MUXENAB4345_UART_MASK (0x0000000f) +#define MUXENAB4345_UART_SHIFT 0 +#define MUXENAB4345_HOSTWAKE_MASK (0x000000f0) +#define MUXENAB4345_HOSTWAKE_SHIFT 4 + +/* 4349 Group (4349, 4355, 4359) GCI AVS function sel values */ +#define CC4349_GRP_GCI_AVS_CTRL_MASK (0xffe00000) +#define CC4349_GRP_GCI_AVS_CTRL_SHIFT (21) +#define CC4349_GRP_GCI_AVS_CTRL_ENAB (1 << 5) + +/* 4345 GCI AVS function sel values */ +#define CC4345_GCI_AVS_CTRL_MASK (0xfc) +#define CC4345_GCI_AVS_CTRL_SHIFT (2) +#define CC4345_GCI_AVS_CTRL_ENAB (1 << 5) + +/* GCI GPIO for function sel GCI-0/GCI-1 */ +#define CC_GCI_GPIO_0 (0) +#define CC_GCI_GPIO_1 (1) +#define CC_GCI_GPIO_2 (2) +#define CC_GCI_GPIO_3 (3) +#define CC_GCI_GPIO_4 (4) +#define CC_GCI_GPIO_5 (5) +#define CC_GCI_GPIO_6 (6) +#define CC_GCI_GPIO_7 (7) +#define CC_GCI_GPIO_8 (8) +#define CC_GCI_GPIO_9 (9) +#define CC_GCI_GPIO_10 (10) +#define CC_GCI_GPIO_11 (11) +#define CC_GCI_GPIO_12 (12) +#define CC_GCI_GPIO_13 (13) +#define CC_GCI_GPIO_14 (14) +#define CC_GCI_GPIO_15 (15) + + +/* indicates Invalid GPIO, e.g. 
when PAD GPIO doesn't map to GCI GPIO */ +#define CC_GCI_GPIO_INVALID 0xFF + +/* find the 4 bit mask given the bit position */ +#define GCIMASK(pos) (((uint32)0xF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL(val, pos) ((((uint32)val) << pos) & GCIMASK(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL(val, pos) ((val >> pos) & 0xF) + + +/* find the 8 bit mask given the bit position */ +#define GCIMASK_8B(pos) (((uint32)0xFF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL_8B(val, pos) ((((uint32)val) << pos) & GCIMASK_8B(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL_8B(val, pos) ((val >> pos) & 0xFF) + +/* find the 4 bit mask given the bit position */ +#define GCIMASK_4B(pos) (((uint32)0xF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL_4B(val, pos) ((((uint32)val) << pos) & GCIMASK_4B(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF) + + +/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */ +#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */ + +/* 4335 GCI IntMask Register bits. */ +#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ + +/* 4335 GCI WakeMask Register bits. 
*/ +#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ + +#define GCI_WAKE_ON_GCI_GPIO1 1 +#define GCI_WAKE_ON_GCI_GPIO2 2 +#define GCI_WAKE_ON_GCI_GPIO3 3 +#define GCI_WAKE_ON_GCI_GPIO4 4 +#define GCI_WAKE_ON_GCI_GPIO5 5 +#define GCI_WAKE_ON_GCI_GPIO6 6 +#define GCI_WAKE_ON_GCI_GPIO7 7 +#define GCI_WAKE_ON_GCI_GPIO8 8 +#define GCI_WAKE_ON_GCI_SECI_IN 9 + +/* 4335 MUX options. each nibble belongs to a setting. Non-zero value specifies a logic +* for now only UART for bootloader. +*/ +#define MUXENAB4335_UART_MASK (0x0000000f) + +#define MUXENAB4335_UART_SHIFT 0 +#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */ +#define MUXENAB4335_HOSTWAKE_SHIFT 4 +#define MUXENAB4335_GETIX(val, name) \ + ((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1) + +/* +* Maximum delay for the PMU state transition in us. +* This is an upper bound intended for spinwaits etc. +*/ +#define PMU_MAX_TRANSITION_DLY 15000 + +/* PMU resource up transition time in ILP cycles */ +#define PMURES_UP_TRANSITION 2 + + +/* SECI configuration */ +#define SECI_MODE_UART 0x0 +#define SECI_MODE_SECI 0x1 +#define SECI_MODE_LEGACY_3WIRE_BT 0x2 +#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3 +#define SECI_MODE_HALF_SECI 0x4 + +#define SECI_RESET (1 << 0) +#define SECI_RESET_BAR_UART (1 << 1) +#define SECI_ENAB_SECI_ECI (1 << 2) +#define SECI_ENAB_SECIOUT_DIS (1 << 3) +#define SECI_MODE_MASK 0x7 +#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */ +#define SECI_UPD_SECI (1 << 7) + +#define SECI_SLIP_ESC_CHAR 0xDB +#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR +#define SECI_SIGNOFF_1 0 +#define SECI_REFRESH_REQ 0xDA + +/* seci clk_ctl_st bits */ +#define CLKCTL_STS_SECI_CLK_REQ (1 << 8) +#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24) + +#define SECI_UART_MSR_CTS_STATE (1 << 0) +#define SECI_UART_MSR_RTS_STATE (1 << 1) +#define SECI_UART_SECI_IN_STATE (1 << 2) +#define SECI_UART_SECI_IN2_STATE (1 << 3) + +/* GCI RX FIFO Control Register */ +#define GCI_RXF_LVL_MASK (0xFF << 0) +#define GCI_RXF_TIMEOUT_MASK (0xFF << 8) + +/* GCI UART Registers' Bit definitions */ +/* Seci Fifo Level Register */ +#define SECI_TXF_LVL_MASK (0x3F << 8) +#define TXF_AE_LVL_DEFAULT 0x4 +#define SECI_RXF_LVL_FC_MASK (0x3F << 16) + +/* SeciUARTFCR Bit definitions */ +#define SECI_UART_FCR_RFR (1 << 0) +#define SECI_UART_FCR_TFR (1 << 1) +#define SECI_UART_FCR_SR (1 << 2) +#define SECI_UART_FCR_THP (1 << 3) +#define SECI_UART_FCR_AB (1 << 4) +#define SECI_UART_FCR_ATOE (1 << 5) +#define SECI_UART_FCR_ARTSOE (1 << 6) +#define SECI_UART_FCR_ABV (1 << 7) +#define SECI_UART_FCR_ALM (1 << 8) + +/* SECI UART LCR register bits */ +#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */ +#define SECI_UART_LCR_PARITY_EN (1 << 1) +#define 
SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */ +#define SECI_UART_LCR_RX_EN (1 << 3) +#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */ +#define SECI_UART_LCR_TXO_EN (1 << 5) +#define SECI_UART_LCR_RTSO_EN (1 << 6) +#define SECI_UART_LCR_SLIPMODE_EN (1 << 7) +#define SECI_UART_LCR_RXCRC_CHK (1 << 8) +#define SECI_UART_LCR_TXCRC_INV (1 << 9) +#define SECI_UART_LCR_TXCRC_LSBF (1 << 10) +#define SECI_UART_LCR_TXCRC_EN (1 << 11) +#define SECI_UART_LCR_RXSYNC_EN (1 << 12) + +#define SECI_UART_MCR_TX_EN (1 << 0) +#define SECI_UART_MCR_PRTS (1 << 1) +#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2) +#define SECI_UART_MCR_HIGHRATE_EN (1 << 3) +#define SECI_UART_MCR_LOOPBK_EN (1 << 4) +#define SECI_UART_MCR_AUTO_RTS (1 << 5) +#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6) +#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7) +#define SECI_UART_MCR_XONOFF_RPT (1 << 9) + +/* SeciUARTLSR Bit Mask */ +#define SECI_UART_LSR_RXOVR_MASK (1 << 0) +#define SECI_UART_LSR_RFF_MASK (1 << 1) +#define SECI_UART_LSR_TFNE_MASK (1 << 2) +#define SECI_UART_LSR_TI_MASK (1 << 3) +#define SECI_UART_LSR_TPR_MASK (1 << 4) +#define SECI_UART_LSR_TXHALT_MASK (1 << 5) + +/* SeciUARTMSR Bit Mask */ +#define SECI_UART_MSR_CTSS_MASK (1 << 0) +#define SECI_UART_MSR_RTSS_MASK (1 << 1) +#define SECI_UART_MSR_SIS_MASK (1 << 2) +#define SECI_UART_MSR_SIS2_MASK (1 << 3) + +/* SeciUARTData Bits */ +#define SECI_UART_DATA_RF_NOT_EMPTY_BIT (1 << 12) +#define SECI_UART_DATA_RF_FULL_BIT (1 << 13) +#define SECI_UART_DATA_RF_OVRFLOW_BIT (1 << 14) +#define SECI_UART_DATA_FIFO_PTR_MASK 0xFF +#define SECI_UART_DATA_RF_RD_PTR_SHIFT 16 +#define SECI_UART_DATA_RF_WR_PTR_SHIFT 24 + +/* LTECX: ltecxmux */ +#define LTECX_EXTRACT_MUX(val, idx) (getbit4(&(val), (idx))) + +/* LTECX: ltecxmux MODE */ +#define LTECX_MUX_MODE_IDX 0 +#define LTECX_MUX_MODE_WCI2 0x0 +#define LTECX_MUX_MODE_GPIO 0x1 + + +/* LTECX GPIO Information Index */ +#define LTECX_NVRAM_FSYNC_IDX 0 +#define LTECX_NVRAM_LTERX_IDX 1 +#define LTECX_NVRAM_LTETX_IDX 2 +#define LTECX_NVRAM_WLPRIO_IDX 3 + +/* LTECX WCI2 Information Index */ +#define LTECX_NVRAM_WCI2IN_IDX 0 +#define LTECX_NVRAM_WCI2OUT_IDX 1 + +/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */ +#define LTECX_EXTRACT_PADNUM(val, idx) (getbit8(&(val), (idx))) +#define LTECX_EXTRACT_FNSEL(val, idx) (getbit4(&(val), (idx))) +#define LTECX_EXTRACT_GCIGPIO(val, idx) (getbit4(&(val), (idx))) + +/* WLAN channel numbers - used from wifi.h */ + +/* WLAN BW */ +#define ECI_BW_20 0x0 +#define ECI_BW_25 0x1 +#define ECI_BW_30 0x2 +#define ECI_BW_35 0x3 +#define ECI_BW_40 0x4 +#define ECI_BW_45 0x5 +#define ECI_BW_50 0x6 +#define ECI_BW_ALL 0x7 + +/* WLAN - number of antenna */ +#define WLAN_NUM_ANT1 TXANT_0 +#define WLAN_NUM_ANT2 TXANT_1 + +/* otpctrl1 0xF4 */ +#define OTPC_FORCE_PWR_OFF 0x02000000 +/* chipcommon s/r registers introduced with cc rev >= 48 */ +#define CC_SR_CTL0_ENABLE_MASK 0x1 +#define CC_SR_CTL0_ENABLE_SHIFT 0 +#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */ +#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to sr_engine */ +#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk in sr_engine */ +#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 /* Allow Subcore mem StandBy? 
*/ +#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18 +#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19 +#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power domains */ +#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25 +#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30 + +#define CC_SR_CTL1_SR_INIT_MASK 0x3FF +#define CC_SR_CTL1_SR_INIT_SHIFT 0 + +#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */ +#define ECI_INLO_PKTDUR_SHIFT 4 + +/* gci chip control bits */ +#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT 0 +#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT 1 +#define GCI_GPIO_CHIPCTRL_INVERT_BIT 2 +#define GCI_GPIO_CHIPCTRL_PULLUP_BIT 3 +#define GCI_GPIO_CHIPCTRL_PULLDN_BIT 4 +#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT 5 +#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT 6 +#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT 7 + +/* gci GPIO input status bits */ +#define GCI_GPIO_STS_VALUE_BIT 0 +#define GCI_GPIO_STS_POS_EDGE_BIT 1 +#define GCI_GPIO_STS_NEG_EDGE_BIT 2 +#define GCI_GPIO_STS_FAST_EDGE_BIT 3 +#define GCI_GPIO_STS_CLEAR 0xF + +#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT) + +#endif /* _SBCHIPC_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h new file mode 100644 index 000000000000..53e26ae4e320 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h @@ -0,0 +1,285 @@ +/* + * Broadcom SiliconBackplane hardware register definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbconfig.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _SBCONFIG_H +#define _SBCONFIG_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +/* enumeration in SB is based on the premise that cores are contiguos in the + * enumeration space. + */ +#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */ +#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE) +#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */ + +/* + * Sonics Configuration Space Registers. 
+ */ +#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256bytes of regs */ +#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */ + +#define SBIPSFLAG 0x08 +#define SBTPSFLAG 0x18 +#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */ +#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */ +#define SBADMATCH3 0x60 +#define SBADMATCH2 0x68 +#define SBADMATCH1 0x70 +#define SBIMSTATE 0x90 +#define SBINTVEC 0x94 +#define SBTMSTATELOW 0x98 +#define SBTMSTATEHIGH 0x9c +#define SBBWA0 0xa0 +#define SBIMCONFIGLOW 0xa8 +#define SBIMCONFIGHIGH 0xac +#define SBADMATCH0 0xb0 +#define SBTMCONFIGLOW 0xb8 +#define SBTMCONFIGHIGH 0xbc +#define SBBCONFIG 0xc0 +#define SBBSTATE 0xc8 +#define SBACTCNFG 0xd8 +#define SBFLAGST 0xe8 +#define SBIDLOW 0xf8 +#define SBIDHIGH 0xfc + +/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have + * a few registers *below* that line. I think it would be very confusing to try + * and change the value of SBCONFIGOFF, so I'm definig them as absolute offsets here, + */ + +#define SBIMERRLOGA 0xea8 +#define SBIMERRLOG 0xeb0 +#define SBTMPORTCONNID0 0xed8 +#define SBTMPORTLOCK0 0xef8 + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _sbconfig { + uint32 PAD[2]; + uint32 sbipsflag; /**< initiator port ocp slave flag */ + uint32 PAD[3]; + uint32 sbtpsflag; /**< target port ocp slave flag */ + uint32 PAD[11]; + uint32 sbtmerrloga; /**< (sonics >= 2.3) */ + uint32 PAD; + uint32 sbtmerrlog; /**< (sonics >= 2.3) */ + uint32 PAD[3]; + uint32 sbadmatch3; /**< address match3 */ + uint32 PAD; + uint32 sbadmatch2; /**< address match2 */ + uint32 PAD; + uint32 sbadmatch1; /**< address match1 */ + uint32 PAD[7]; + uint32 sbimstate; /**< initiator agent state */ + uint32 sbintvec; /**< interrupt mask */ + uint32 sbtmstatelow; /**< target state */ + uint32 sbtmstatehigh; /**< target state */ + uint32 sbbwa0; /**< bandwidth allocation table0 */ + uint32 PAD; + uint32 sbimconfiglow; /**< initiator configuration */ + uint32 sbimconfighigh; /**< initiator configuration */ + uint32 sbadmatch0; /**< address match0 */ + uint32 PAD; + uint32 sbtmconfiglow; /**< target configuration */ + uint32 sbtmconfighigh; /**< target configuration */ + uint32 sbbconfig; /**< broadcast configuration */ + uint32 PAD; + uint32 sbbstate; /**< broadcast state */ + uint32 PAD[3]; + uint32 sbactcnfg; /**< activate configuration */ + uint32 PAD[3]; + uint32 sbflagst; /**< current sbflags */ + uint32 PAD[3]; + uint32 sbidlow; /**< identification */ + uint32 sbidhigh; /**< identification */ +} sbconfig_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* sbipsflag */ +#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */ +#define SBIPS_INT1_SHIFT 0 +#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */ +#define SBIPS_INT2_SHIFT 8 +#define SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */ +#define SBIPS_INT3_SHIFT 16 +#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */ +#define SBIPS_INT4_SHIFT 24 + +/* sbtpsflag */ +#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */ +#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */ + +/* sbtmerrlog */ +#define SBTMEL_CM 0x00000007 /**< command */ +#define SBTMEL_CI 0x0000ff00 /**< connection id */ +#define SBTMEL_EC 0x0f000000 /**< error code */ +#define SBTMEL_ME 0x80000000 /**< multiple error */ + +/* sbimstate */ +#define SBIM_PC 0xf /**< pipecount 
*/ +#define SBIM_AP_MASK 0x30 /**< arbitration policy */ +#define SBIM_AP_BOTH 0x00 /**< use both timeslaces and token */ +#define SBIM_AP_TS 0x10 /**< use timesliaces only */ +#define SBIM_AP_TK 0x20 /**< use token only */ +#define SBIM_AP_RSV 0x30 /**< reserved */ +#define SBIM_IBE 0x20000 /**< inbanderror */ +#define SBIM_TO 0x40000 /**< timeout */ +#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */ +#define SBIM_RJ 0x02000000 /**< reject (sonics >= 2.3) */ + +/* sbtmstatelow */ +#define SBTML_RESET 0x0001 /**< reset */ +#define SBTML_REJ_MASK 0x0006 /**< reject field */ +#define SBTML_REJ 0x0002 /**< reject */ +#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */ + +#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */ + +/* sbtmstatehigh */ +#define SBTMH_SERR 0x0001 /**< serror */ +#define SBTMH_INT 0x0002 /**< interrupt */ +#define SBTMH_BUSY 0x0004 /**< busy */ +#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */ + +#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */ + +/* sbbwa0 */ +#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */ +#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */ +#define SBBWA_TAB1_SHIFT 16 + +/* sbimconfiglow */ +#define SBIMCL_STO_MASK 0x7 /**< service timeout */ +#define SBIMCL_RTO_MASK 0x70 /**< request timeout */ +#define SBIMCL_RTO_SHIFT 4 +#define SBIMCL_CID_MASK 0xff0000 /**< connection id */ +#define SBIMCL_CID_SHIFT 16 + +/* sbimconfighigh */ +#define SBIMCH_IEM_MASK 0xc /**< inband error mode */ +#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */ +#define SBIMCH_TEM_SHIFT 4 +#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */ +#define SBIMCH_BEM_SHIFT 6 + +/* sbadmatch0 */ +#define SBAM_TYPE_MASK 0x3 /**< address type */ +#define SBAM_AD64 0x4 /**< reserved */ +#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */ +#define SBAM_ADINT0_SHIFT 3 +#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */ +#define SBAM_ADINT1_SHIFT 3 +#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */ +#define SBAM_ADINT2_SHIFT 3 +#define SBAM_ADEN 0x400 /**< enable */ +#define SBAM_ADNEG 0x800 /**< negative decode */ +#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */ +#define SBAM_BASE0_SHIFT 8 +#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */ +#define SBAM_BASE1_SHIFT 12 +#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */ +#define SBAM_BASE2_SHIFT 16 + +/* sbtmconfiglow */ +#define SBTMCL_CD_MASK 0xff /**< clock divide */ +#define SBTMCL_CO_MASK 0xf800 /**< clock offset */ +#define SBTMCL_CO_SHIFT 11 +#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */ +#define SBTMCL_IF_SHIFT 18 +#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */ +#define SBTMCL_IM_SHIFT 24 + +/* sbtmconfighigh */ +#define SBTMCH_BM_MASK 0x3 /**< busy mode */ +#define SBTMCH_RM_MASK 0x3 /**< retry mode */ +#define SBTMCH_RM_SHIFT 2 +#define SBTMCH_SM_MASK 0x30 /**< stop mode */ +#define SBTMCH_SM_SHIFT 4 +#define SBTMCH_EM_MASK 0x300 /**< sb error mode */ +#define SBTMCH_EM_SHIFT 8 +#define SBTMCH_IM_MASK 0xc00 /**< int mode */ +#define SBTMCH_IM_SHIFT 10 + +/* sbbconfig */ +#define SBBC_LAT_MASK 0x3 /**< sb latency */ +#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */ +#define SBBC_MAX0_SHIFT 16 +#define SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */ +#define SBBC_MAX1_SHIFT 20 + +/* sbbstate */ +#define SBBS_SRD 0x1 /**< st reg disable */ +#define SBBS_HRD 0x2 /**< hold reg disable */ + +/* sbidlow */ +#define SBIDL_CS_MASK 0x3 /**< config space */ 
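+/*
+ * Note on the identification registers: the SBIDL_RV field (below) carries
+ * the Sonics backplane revision (2.2 vs 2.3), which is what gates the
+ * "sonics >= 2.3" registers noted earlier, while the core revision is split
+ * across the sbidhigh RC and RCE fields and is reassembled by the
+ * SBCOREREV() macro defined further down.
+ */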
+#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */ +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 /**< sync */ +#define SBIDL_INIT 0x80 /**< initiator */ +#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */ +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */ +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 /**< this initiator is first */ +#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */ +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 /**< target ports */ +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */ +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */ +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */ +#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 */ + +/* sbidhigh */ +#define SBIDH_RC_MASK 0x000f /**< revision code */ +#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */ +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 /**< core code */ +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */ +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 /**< update buffered registers value */ + +/* vendor codes */ +#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */ + +#endif /* _SBCONFIG_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h new file mode 100644 index 000000000000..5692ea954b35 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h @@ -0,0 +1,420 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbhnddma.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + +/* DMA structure: + * support two DMA engines: 32 bits address or 64 bit addressing + * basic DMA register set is per channel(transmit or receive) + * a pair of channels is defined for convenience + */ + + +/* 32 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 addr; /**< descriptor ring base address (4K aligned) */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 status; /**< current active descriptor, et al */ +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; /**< dma tx channel */ + dma32regs_t rcv; /**< dma rx channel */ +} dma32regp_t; + +typedef volatile struct { /* diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma32diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl; /**< misc control bits & bufcount */ + uint32 addr; /**< data buffer address */ +} dma32dd_t; + +/** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. */ +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) + +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + +/* transmit channel control */ +#define XC_XE ((uint32)1 << 0) /**< transmit enable */ +#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */ +#define XC_LE ((uint32)1 << 2) /**< loopback enable */ +#define XC_FL ((uint32)1 << 4) /**< flush request */ +#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define XC_MR_SHIFT 6 +#define XC_PD ((uint32)1 << 11) /**< parity check disable */ +#define XC_AE ((uint32)3 << 16) /**< address extension bits */ +#define XC_AE_SHIFT 16 +#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define XC_BL_SHIFT 18 +#define XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define XC_PC_SHIFT 21 +#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define XC_PT_SHIFT 24 + +/** Multiple outstanding reads */ +#define DMA_MR_1 0 +#define DMA_MR_2 1 +#define DMA_MR_4 2 +#define DMA_MR_8 3 +#define DMA_MR_12 4 +#define DMA_MR_16 5 +#define DMA_MR_20 6 +#define DMA_MR_32 7 + +/** DMA Burst Length in bytes */ +#define DMA_BL_16 0 +#define DMA_BL_32 1 +#define DMA_BL_64 2 +#define DMA_BL_128 3 +#define DMA_BL_256 4 +#define DMA_BL_512 5 +#define DMA_BL_1024 6 + +/** Prefetch control */ +#define DMA_PC_0 0 +#define DMA_PC_4 1 +#define DMA_PC_8 2 +#define DMA_PC_16 3 +/* others: reserved */ + +/** Prefetch threshold */ +#define DMA_PT_1 0 +#define DMA_PT_2 1 +#define DMA_PT_4 2 +#define DMA_PT_8 3 + +/* transmit descriptor table pointer */ +#define XP_LD_MASK 0xfff /**< last valid descriptor */ + +/* transmit channel status */ +#define XS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define XS_XS_MASK 0xf000 /**< transmit state */ +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 /**< disabled */ +#define XS_XS_ACTIVE 0x1000 /**< active */ +#define XS_XS_IDLE 0x2000 /**< idle wait */ +#define XS_XS_STOPPED 0x3000 /**< stopped */ +#define XS_XS_SUSP 0x4000 /**< suspend pending */ +#define XS_XE_MASK 0xf0000 /**< transmit errors */ +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 /**< no error */ +#define XS_XE_DPE 
0x10000 /**< descriptor protocol error */ +#define XS_XE_DFU 0x20000 /**< data fifo underrun */ +#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */ +#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */ +#define XS_AD_MASK 0xfff00000 /**< active descriptor */ +#define XS_AD_SHIFT 20 + +/* receive channel control */ +#define RC_RE ((uint32)1 << 0) /**< receive enable */ +#define RC_RO_MASK 0xfe /**< receive frame offset */ +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */ +#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */ +#define RC_OC ((uint32)1 << 10) /**< overflow continue */ +#define RC_PD ((uint32)1 << 11) /**< parity check disable */ +#define RC_AE ((uint32)3 << 16) /**< address extension bits */ +#define RC_AE_SHIFT 16 +#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define RC_BL_SHIFT 18 +#define RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define RC_PC_SHIFT 21 +#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define RC_PT_SHIFT 24 + +/* receive descriptor table pointer */ +#define RP_LD_MASK 0xfff /**< last valid descriptor */ + +/* receive channel status */ +#define RS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define RS_RS_MASK 0xf000 /**< receive state */ +#define RS_RS_SHIFT 12 +#define RS_RS_DISABLED 0x0000 /**< disabled */ +#define RS_RS_ACTIVE 0x1000 /**< active */ +#define RS_RS_IDLE 0x2000 /**< idle wait */ +#define RS_RS_STOPPED 0x3000 /**< reserved */ +#define RS_RE_MASK 0xf0000 /**< receive errors */ +#define RS_RE_SHIFT 16 +#define RS_RE_NOERR 0x00000 /**< no error */ +#define RS_RE_DPE 0x10000 /**< descriptor protocol error */ +#define RS_RE_DFO 0x20000 /**< data fifo overflow */ +#define RS_RE_BEBW 0x30000 /**< bus error on buffer write */ +#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */ +#define RS_AD_MASK 0xfff00000 /**< active descriptor */ +#define RS_AD_SHIFT 20 + +/* fifoaddr */ +#define FA_OFF_MASK 0xffff /**< offset */ +#define FA_SEL_MASK 0xf0000 /**< select */ +#define FA_SEL_SHIFT 16 +#define FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define FA_SEL_RDD 0x40000 /**< receive dma data */ +#define FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ +#define FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags */ +#define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must <= 4KB */ +#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */ +#define CTRL_AE_SHIFT 16 +#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */ +#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */ +#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define CTRL_CORE_MASK 0x0ff00000 + +/* 64 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 addrlow; /**< descriptor ring base 
address low 32-bits (8K aligned) */ + uint32 addrhigh; /**< descriptor ring base address bits 63:32 (8K aligned) */ + uint32 status0; /**< current descriptor, xmt state */ + uint32 status1; /**< active descriptor, xmt error */ +} dma64regs_t; + +typedef volatile struct { + dma64regs_t tx; /**< dma64 tx channel */ + dma64regs_t rx; /**< dma64 rx channel */ +} dma64regp_t; + +typedef volatile struct { /**< diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma64diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl1; /**< misc control bits */ + uint32 ctrl2; /**< buffer count and address extension */ + uint32 addrlow; /**< memory address of the date buffer, bits 31:0 */ + uint32 addrhigh; /**< memory address of the date buffer, bits 63:32 */ +} dma64dd_t; + +/** + * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss. + */ +#define D64RINGALIGN_BITS 13 +#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) +#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS) + +#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) + +/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */ +#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t)) + +/** + * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross + * 64K boundary + */ +#define D64RINGBOUNDARY_LARGE (1 << 16) + +/* + * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11. + * When this field contains the value N, the burst length is 2**(N + 4) bytes. + */ +#define D64_DEF_USBBURSTLEN 2 +#define D64_DEF_SDIOBURSTLEN 1 + + +#ifndef D64_USBBURSTLEN +#define D64_USBBURSTLEN DMA_BL_64 +#endif +#ifndef D64_SDIOBURSTLEN +#define D64_SDIOBURSTLEN DMA_BL_32 +#endif + +/* transmit channel control */ +#define D64_XC_XE 0x00000001 /**< transmit enable */ +#define D64_XC_SE 0x00000002 /**< transmit suspend request */ +#define D64_XC_LE 0x00000004 /**< loopback enable */ +#define D64_XC_FL 0x00000010 /**< flush request */ +#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define D64_XC_MR_SHIFT 6 +#define D64_XC_PD 0x00000800 /**< parity check disable */ +#define D64_XC_AE 0x00030000 /**< address extension bits */ +#define D64_XC_AE_SHIFT 16 +#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_XC_BL_SHIFT 18 +#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_XC_PC_SHIFT 21 +#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_XC_PT_SHIFT 24 + +/* transmit descriptor table pointer */ +#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* transmit channel status */ +#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */ +#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */ +#define D64_XS0_XS_SHIFT 28 +#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */ +#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */ +#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */ +#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */ +#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */ +#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */ +#define D64_XS1_XE_SHIFT 28 +#define D64_XS1_XE_NOERR 0x00000000 /**< no error */ +#define 
D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */ +#define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */ +#define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */ +#define D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */ +#define D64_XS1_XE_COREE 0x50000000 /**< core error */ + +/* receive channel control */ +#define D64_RC_RE 0x00000001 /**< receive enable */ +#define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */ +#define D64_RC_RO_SHIFT 1 +#define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */ +#define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */ +#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */ +#define D64_RC_OC 0x00000400 /**< overflow continue */ +#define D64_RC_PD 0x00000800 /**< parity check disable */ +#define D64_RC_SA 0x00002000 /**< select active */ +#define D64_RC_GE 0x00004000 /**< Glom enable */ +#define D64_RC_AE 0x00030000 /**< address extension bits */ +#define D64_RC_AE_SHIFT 16 +#define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_RC_BL_SHIFT 18 +#define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_RC_PC_SHIFT 21 +#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_RC_PT_SHIFT 24 + +/* flags for dma controller */ +#define DMA_CTRL_PEN (1 << 0) /**< partity enable */ +#define DMA_CTRL_ROC (1 << 1) /**< rx overflow continue */ +#define DMA_CTRL_RXMULTI (1 << 2) /**< allow rx scatter to multiple descriptors */ +#define DMA_CTRL_UNFRAMED (1 << 3) /**< Unframed Rx/Tx data */ +#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4) +#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /**< DMA avoidance WAR for 4331 */ +#define DMA_CTRL_RXSINGLE (1 << 6) /**< always single buffer */ +#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glome is enabled */ + +/* receive descriptor table pointer */ +#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* receive channel status */ +#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */ +#define D64_RS0_RS_MASK 0xf0000000 /**< receive state */ +#define D64_RS0_RS_SHIFT 28 +#define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */ +#define D64_RS0_RS_ACTIVE 0x10000000 /**< active */ +#define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */ +#define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */ +#define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_RS1_AD_MASK 0x0001ffff /**< active descriptor */ +#define D64_RS1_RE_MASK 0xf0000000 /**< receive errors */ +#define D64_RS1_RE_SHIFT 28 +#define D64_RS1_RE_NOERR 0x00000000 /**< no error */ +#define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */ +#define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */ +#define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */ +#define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */ +#define D64_RS1_RE_COREE 0x50000000 /**< core error */ + +/* fifoaddr */ +#define D64_FA_OFF_MASK 0xffff /**< offset */ +#define D64_FA_SEL_MASK 0xf0000 /**< select */ +#define D64_FA_SEL_SHIFT 16 +#define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define D64_FA_SEL_RDD 0x40000 /**< receive dma data */ +#define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers 
*/ +#define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags 1 */ +#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */ +#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< buirst size control */ +#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */ +#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */ + +/* descriptor control flags 2 */ +#define D64_CTRL2_BC_MASK 0x00007fff /**< buffer byte count. real data len must <= 16KB */ +#define D64_CTRL2_AE 0x00030000 /**< address extension bits */ +#define D64_CTRL2_AE_SHIFT 16 +#define D64_CTRL2_PARITY 0x00040000 /* parity bit */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define D64_CTRL_CORE_MASK 0x0ff00000 + +#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */ +#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */ +#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */ +#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */ + +/** receive frame status */ +typedef volatile struct { + uint16 len; + uint16 flags; +} dma_rxh_t; + +#endif /* _sbhnddma_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h new file mode 100644 index 000000000000..d2e42ffffdbe --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h @@ -0,0 +1,116 @@ +/* + * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbpcmcia.h 521344 2014-12-17 10:03:55Z $ + */ + +#ifndef _SBPCMCIA_H +#define _SBPCMCIA_H + +/* All the addresses that are offsets in attribute space are divided + * by two to account for the fact that odd bytes are invalid in + * attribute space and our read/write routines make the space appear + * as if they didn't exist. Still we want to show the original numbers + * as documented in the hnd_pcmcia core manual. 
+ */ + +/* PCMCIA Function Configuration Registers */ +#define PCMCIA_FCR (0x700 / 2) + +#define FCR0_OFF 0 +#define FCR1_OFF (0x40 / 2) +#define FCR2_OFF (0x80 / 2) +#define FCR3_OFF (0xc0 / 2) + +#define PCMCIA_FCR0 (0x700 / 2) +#define PCMCIA_FCR1 (0x740 / 2) +#define PCMCIA_FCR2 (0x780 / 2) +#define PCMCIA_FCR3 (0x7c0 / 2) + +/* Standard PCMCIA FCR registers */ + +#define PCMCIA_COR 0 + +#define COR_RST 0x80 +#define COR_LEV 0x40 +#define COR_IRQEN 0x04 +#define COR_BLREN 0x01 +#define COR_FUNEN 0x01 + + +#define PCICIA_FCSR (2 / 2) +#define PCICIA_PRR (4 / 2) +#define PCICIA_SCR (6 / 2) +#define PCICIA_ESR (8 / 2) + + +#define PCM_MEMOFF 0x0000 +#define F0_MEMOFF 0x1000 +#define F1_MEMOFF 0x2000 +#define F2_MEMOFF 0x3000 +#define F3_MEMOFF 0x4000 + +/* Memory base in the function fcr's */ +#define MEM_ADDR0 (0x728 / 2) +#define MEM_ADDR1 (0x72a / 2) +#define MEM_ADDR2 (0x72c / 2) + +/* PCMCIA base plus Srom access in fcr0: */ +#define PCMCIA_ADDR0 (0x072e / 2) +#define PCMCIA_ADDR1 (0x0730 / 2) +#define PCMCIA_ADDR2 (0x0732 / 2) + +#define MEM_SEG (0x0734 / 2) +#define SROM_CS (0x0736 / 2) +#define SROM_DATAL (0x0738 / 2) +#define SROM_DATAH (0x073a / 2) +#define SROM_ADDRL (0x073c / 2) +#define SROM_ADDRH (0x073e / 2) +#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */ +#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */ + +/* Values for srom_cs: */ +#define SROM_IDLE 0 +#define SROM_WRITE 1 +#define SROM_READ 2 +#define SROM_WEN 4 +#define SROM_WDS 7 +#define SROM_DONE 8 + +/* Fields in srom_info: */ +#define SRI_SZ_MASK 0x03 +#define SRI_BLANK 0x04 +#define SRI_OTP 0x80 + + +/* sbtmstatelow */ +#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */ +#define SBTML_INT_EN 0x20000 /* enable sb interrupt */ + +/* sbtmstatehigh */ +#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */ + +#endif /* _SBPCMCIA_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h new file mode 100644 index 000000000000..f4760a22c077 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h @@ -0,0 +1,189 @@ +/* + * SDIO device core hardware definitions. + * sdio is a portion of the pcmcia core in core rev 3 - rev 8 + * + * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbsdio.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SBSDIO_H +#define _SBSDIO_H + +#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */ + +/* function 1 miscellaneous registers */ +#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */ +#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */ +#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */ +#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */ +#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */ +#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 0 */ +#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */ +#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */ +#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */ +#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */ + +/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */ +#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */ +#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */ +#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */ +#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */ +#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */ +#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */ +#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */ +#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D /* MesBusyCtl at 0x1001D (rev 11) */ + +#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */ +#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */ + +/* Sdio Core Rev 12 */ +#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E +#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1 +#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0 +#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2 +#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1 +#define SBSDIO_FUNC1_SLEEPCSR 0x1001F +#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1 +#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0 +#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1 +#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2 +#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1 + +/* SBSDIO_SPROM_CS */ +#define SBSDIO_SPROM_IDLE 0 +#define SBSDIO_SPROM_WRITE 1 +#define SBSDIO_SPROM_READ 2 +#define SBSDIO_SPROM_WEN 4 +#define SBSDIO_SPROM_WDS 7 +#define SBSDIO_SPROM_DONE 8 + +/* SBSDIO_SPROM_INFO */ +#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */ +#define SROM_BLANK 0x04 /* depreciated in corerev 6 */ +#define SROM_OTP 0x80 /* OTP present */ + +/* SBSDIO_CHIP_CTRL */ +#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu, + * 1: power on oscillator + * (for 4318 only) + */ +/* SBSDIO_WATERMARK */ +#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device + * to wait before sending data to host + */ + +/* SBSDIO_MESBUSYCTRL */ +/* When RX FIFO has less entries than this & MBE is set + * => busy signal is asserted between data blocks. 
+*/ +#define SBSDIO_MESBUSYCTRL_MASK 0x7f +#define SBSDIO_MESBUSYCTRL_ENAB 0x80 /* Enable busy capability for MES access */ + +/* SBSDIO_DEVICE_CTL */ +#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when + * receiving CMD53 + */ +#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is + * synchronous to the sdio clock + */ +#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host + * except the chipActive (rev 8) + */ +#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put + * external pads in tri-state; requires + * sdio bus power cycle to clear (rev 9) + */ +#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10 /* Enable function 2 tx for each block */ +#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Enable F2 Watermark */ +#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 0x20 /* Isolate sdio clk and cmd (non-data) */ + +/* SBSDIO_FUNC1_CHIPCLKCSR */ +#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */ +#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */ +#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */ +#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */ +/* In rev8, actual avail bits followed original docs */ +#define SBSDIO_Rev8_HT_AVAIL 0x40 +#define SBSDIO_Rev8_ALP_AVAIL 0x80 +#define SBSDIO_CSR_MASK 0x1F + +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) +#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \ + (alponly ? 
1 : SBSDIO_HTAV(regval))) + +/* SBSDIO_FUNC1_SDIOPULLUP */ +#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */ +#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */ +#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */ +#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */ +#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */ + +/* function 1 OCP space */ +#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */ +#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000 +#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */ + +/* some duplication with sbsdpcmdev.h here */ +/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ +#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */ +#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */ +#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */ +#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */ + +/* direct(mapped) cis space */ +#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */ +#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ +#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */ + +#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */ + +#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple, + * link bytes + */ + +/* indirect cis access (in sprom) */ +#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from + * 8th byte + */ + +#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one + * data command + */ + +#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */ + +#endif /* _SBSDIO_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h new file mode 100644 index 000000000000..c0c889e5316f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h @@ -0,0 +1,298 @@ +/* + * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific + * device core support + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent.
+ * + * + * <> + * + * $Id: sbsdpcmdev.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _sbsdpcmdev_h_ +#define _sbsdpcmdev_h_ + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + + +typedef volatile struct { + dma64regs_t xmt; /* dma tx */ + uint32 PAD[2]; + dma64regs_t rcv; /* dma rx */ + uint32 PAD[2]; +} dma64p_t; + +/* dma64 sdiod corerev >= 1 */ +typedef volatile struct { + dma64p_t dma64regs[2]; + dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */ + uint32 PAD[92]; +} sdiodma64_t; + +/* dma32 sdiod corerev == 0 */ +typedef volatile struct { + dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */ + uint32 PAD[108]; +} sdiodma32_t; + +/* dma32 regs for pcmcia core */ +typedef volatile struct { + dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */ + uint32 PAD[116]; +} pcmdma32_t; + +/* core registers */ +typedef volatile struct { + uint32 corecontrol; /* CoreControl, 0x000, rev8 */ + uint32 corestatus; /* CoreStatus, 0x004, rev8 */ + uint32 PAD[1]; + uint32 biststatus; /* BistStatus, 0x00c, rev8 */ + + /* PCMCIA access */ + uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */ + uint16 PAD[1]; + uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */ + uint16 PAD[1]; + uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */ + uint16 PAD[1]; + uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */ + uint16 PAD[1]; + + /* interrupt */ + uint32 intstatus; /* IntStatus, 0x020, rev8 */ + uint32 hostintmask; /* IntHostMask, 0x024, rev8 */ + uint32 intmask; /* IntSbMask, 0x028, rev8 */ + uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */ + uint32 sbintmask; /* SBIntMask, 0x030, rev8 */ + uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */ + uint32 PAD[2]; + uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */ + uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */ + uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */ + uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */ + + /* synchronized access to registers in SDIO clock domain */ + uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */ + uint32 PAD[3]; + + /* PCMCIA frame control */ + uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */ + uint8 PAD[3]; + uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */ + uint8 PAD[155]; + + /* interrupt batching control */ + uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */ + uint32 PAD[3]; + + /* counters */ + uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */ + uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */ + uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */ + uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */ + uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */ + uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */ + uint32 writeterm; /* WriteTermCount,
0x13c, rev8, SDIO: wr frm terminates */ + uint32 PAD[40]; + uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */ + uint32 PAD[7]; + + /* DMA engines */ + volatile union { + pcmdma32_t pcm32; + sdiodma32_t sdiod32; + sdiodma64_t sdiod64; + } dma; + + /* SDIO/PCMCIA CIS region */ + char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */ + + /* PCMCIA function control registers */ + char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */ + uint16 PAD[55]; + + /* PCMCIA backplane access */ + uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */ + uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */ + uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */ + uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */ + uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */ + uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */ + uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */ + uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */ + uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */ + uint16 PAD[31]; + + /* sprom "size" & "blank" info */ + uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */ + uint32 PAD[464]; + + /* Sonics SiliconBackplane registers */ + sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */ +} sdpcmd_regs_t; + +/* corecontrol */ +#define CC_CISRDY (1 << 0) /* CIS Ready */ +#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */ +#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */ +#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */ +#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */ +#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */ + +/* corestatus */ +#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */ +#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */ +#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */ + +#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */ +#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */ +#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */ +#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */ + +/* intstatus */ +#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ +#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ +#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */ +#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */ +#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ +#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ +#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */ +#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */ +#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */ +#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */ +#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ +#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ +#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */ +#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */ +#define I_PC (1 << 10) /* descriptor error */ +#define I_PD (1 << 11) /* data error */ +#define I_DE (1 << 12) /* Descriptor protocol Error */ +#define I_RU (1 << 13) /* Receive descriptor Underflow */ +#define I_RO (1 << 14) /* Receive fifo Overflow */ +#define I_XU (1 << 15) /* Transmit fifo Underflow */ +#define I_RI (1 << 16) /* Receive Interrupt */ +#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */ +#define I_XMTDATA_AVAIL (1 << 23) /* 
bits in fifo */ +#define I_XI (1 << 24) /* Transmit Interrupt */ +#define I_RF_TERM (1 << 25) /* Read Frame Terminate */ +#define I_WF_TERM (1 << 26) /* Write Frame Terminate */ +#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */ +#define I_SBINT (1 << 28) /* sbintstatus Interrupt */ +#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */ +#define I_SRESET (1 << 30) /* CCCR RES interrupt */ +#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */ +#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */ +#define I_DMA (I_RI | I_XI | I_ERRORS) + +/* sbintstatus */ +#define I_SB_SERR (1 << 8) /* Backplane SError (write) */ +#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */ +#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */ + +/* sdioaccess */ +#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */ +#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */ +#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */ +#define SDA_WRITE 0x01000000 /* Write bit */ +#define SDA_READ 0x00000000 /* Write bit cleared for Read */ +#define SDA_BUSY 0x80000000 /* Busy bit */ + +/* sdioaccess-accessible register address spaces */ +#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */ +#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */ +#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */ +#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */ + +/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */ +#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */ +#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */ +#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */ +#define SDA_DEVICECONTROL 0x009 /* DeviceControl */ +#define SDA_SBADDRLOW 0x00a /* SbAddrLow */ +#define SDA_SBADDRMID 0x00b /* SbAddrMid */ +#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */ +#define SDA_FRAMECTRL 0x00d /* FrameCtrl */ +#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */ +#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */ +#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */ +#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */ +#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */ +#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */ + +/* SDA_F2WATERMARK */ +#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */ + +/* SDA_SBADDRLOW */ +#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */ + +/* SDA_SBADDRMID */ +#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */ + +/* SDA_SBADDRHIGH */ +#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */ + +/* SDA_FRAMECTRL */ +#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */ +#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */ +#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */ + +/* pcmciaframectrl */ +#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */ + +/* intrcvlazy */ +#define IRL_TO_MASK 0x00ffffff /* timeout */ +#define IRL_FC_MASK 0xff000000 /* frame count */ +#define IRL_FC_SHIFT 24 /* frame count */ + +/* rx header */ +typedef volatile struct { + uint16 len; + uint16 flags; +} sdpcmd_rxh_t; + +/* rx header flags */ +#define RXF_CRC 0x0001 /* CRC error detected */ +#define RXF_WOOS 0x0002 /* write frame out of sync */ +#define RXF_WF_TERM 0x0004 /* write frame terminated */ +#define RXF_ABORT 
0x0008 /* write frame aborted */ +#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */ + +/* HW frame tag */ +#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */ + +#define SDPCM_HWEXT_LEN 8 + +#endif /* _sbsdpcmdev_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h new file mode 100644 index 000000000000..cfe12e1bb09a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h @@ -0,0 +1,203 @@ +/* + * BCM47XX Sonics SiliconBackplane embedded ram core + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbsocram.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SBSOCRAM_H +#define _SBSOCRAM_H + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* Memcsocram core registers */ +typedef volatile struct sbsocramregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; /* rev 6 */ + uint32 errlogaddr; /* rev 6 */ + /* used for patching rev 3 & 5 */ + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; /* corerev 8 */ + uint32 bankpda; + uint32 PAD[14]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; /* corerev >= 2 */ + uint32 PAD[133]; + uint32 sr_control; /* corerev >= 15 */ + uint32 sr_status; /* corerev >= 15 */ + uint32 sr_address; /* corerev >= 15 */ + uint32 sr_data; /* corerev >= 15 */ +} sbsocramregs_t; + +#endif /* _LANGUAGE_ASSEMBLY */ + +/* Register offsets */ +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + +/* Coreinfo register */ +#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */ +#define SRCI_PT_SHIFT 16 +/* port types : SRCI_PT__ */ +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define
SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 +/* corerev >= 3 */ +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 + +/* In corerev 0, the memory size is 2 to the power of the + * base plus 16 plus to the contents of the memsize field plus 1. + */ +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + +/* + * In corerev 1 the bank size is 2 ^ the bank size field plus 14, + * the memory size is number of banks times bank size. + * The same applies to rom size. + */ +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SR_BSZ_BASE 14 + +/* Standby control register */ +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */ +#define SRSC_SBYEN_SHIFT 24 + +/* Power control register */ +#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */ +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + +/* Extra core capability register */ +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + +/* CAM bank patch control */ +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + +/* CAM bank command reg */ +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + +/* bankidx and bankinfo reg defines corerev >= 8 */ +#define SOCRAM_BANKINFO_SZMASK 0x7f +#define SOCRAM_BANKIDX_ROM_MASK 0x100 + +#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 +/* socram bankinfo memtype */ +#define SOCRAM_MEMTYPE_RAM 0 +#define SOCRAM_MEMTYPE_R0M 1 +#define SOCRAM_MEMTYPE_DEVRAM 2 + +#define SOCRAM_BANKINFO_REG 0x40 +#define SOCRAM_BANKIDX_REG 0x10 +#define SOCRAM_BANKINFO_STDBY_MASK 0x400 +#define SOCRAM_BANKINFO_STDBY_TIMER 0x800 + +/* bankinfo rev >= 10 */ +#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13 +#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000 +#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14 +#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000 +#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15 +#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000 +#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16 +#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 +#define SOCRAM_BANKINFO_PDASZ_SHIFT 17 +#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000 +#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24 +#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 + +/* extracoreinfo register */ +#define SOCRAM_DEVRAMBANK_MASK 0xF000 +#define SOCRAM_DEVRAMBANK_SHIFT 12 + +/* bank info to calculate bank size */ +#define SOCRAM_BANKINFO_SZBASE 8192 +#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */ + + +#endif /* _SBSOCRAM_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsysmem.h b/drivers/net/wireless/bcmdhd/include/sbsysmem.h new file mode 100644 index 000000000000..99a810c434e8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsysmem.h @@ -0,0 +1,200 @@ +/* + * SiliconBackplane System Memory core + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written 
software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbsysmem.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SBSYSMEM_H +#define _SBSYSMEM_H + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* sysmem core registers */ +typedef volatile struct sysmemregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; + uint32 errlogaddr; + + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; + uint32 PAD[15]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; + uint32 PAD[133]; + uint32 sr_control; + uint32 sr_status; + uint32 sr_address; + uint32 sr_data; +} sysmemregs_t; + +#endif /* _LANGUAGE_ASSEMBLY */ + +/* Register offsets */ +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + +/* Coreinfo register */ +#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */ +#define SRCI_PT_SHIFT 16 +/* port types : SRCI_PT__ */ +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 + +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 + +/* In corerev 0, the memory size is 2 to the power of the + * base plus 16 plus to the contents of the memsize field plus 1. + */ +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + +/* + * In corerev 1 the bank size is 2 ^ the bank size field plus 14, + * the memory size is number of banks times bank size. + * The same applies to rom size. 
+ */ +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SR_BSZ_BASE 14 + +/* Standby control register */ +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 +#define SRSC_SBYEN_SHIFT 24 + +/* Power control register */ +#define SRPC_PMU_STBYDIS_MASK 0x00000010 +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + +/* Extra core capability register */ +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + +/* CAM bank patch control */ +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + +/* CAM bank command reg */ +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + +/* bankidx and bankinfo reg defines */ +#define SYSMEM_BANKINFO_SZMASK 0x7f +#define SYSMEM_BANKIDX_ROM_MASK 0x100 + +#define SYSMEM_BANKIDX_MEMTYPE_SHIFT 8 +/* sysmem bankinfo memtype */ +#define SYSMEM_MEMTYPE_RAM 0 +#define SYSMEM_MEMTYPE_R0M 1 +#define SYSMEM_MEMTYPE_DEVRAM 2 + +#define SYSMEM_BANKINFO_REG 0x40 +#define SYSMEM_BANKIDX_REG 0x10 +#define SYSMEM_BANKINFO_STDBY_MASK 0x400 +#define SYSMEM_BANKINFO_STDBY_TIMER 0x800 + +#define SYSMEM_BANKINFO_DEVRAMSEL_SHIFT 13 +#define SYSMEM_BANKINFO_DEVRAMSEL_MASK 0x2000 +#define SYSMEM_BANKINFO_DEVRAMPRO_SHIFT 14 +#define SYSMEM_BANKINFO_DEVRAMPRO_MASK 0x4000 +#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 15 +#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x8000 +#define SYSMEM_BANKINFO_RETNTRAM_SHIFT 16 +#define SYSMEM_BANKINFO_RETNTRAM_MASK 0x00010000 +#define SYSMEM_BANKINFO_PDASZ_SHIFT 17 +#define SYSMEM_BANKINFO_PDASZ_MASK 0x003E0000 +#define SYSMEM_BANKINFO_DEVRAMREMAP_SHIFT 24 +#define SYSMEM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 + +/* extracoreinfo register */ +#define SYSMEM_DEVRAMBANK_MASK 0xF000 +#define SYSMEM_DEVRAMBANK_SHIFT 12 + +/* bank info to calculate bank size */ +#define SYSMEM_BANKINFO_SZBASE 8192 +#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */ + +#endif /* _SBSYSMEM_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h new file mode 100644 index 000000000000..ca53afbcf3e9 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdio.h @@ -0,0 +1,625 @@ +/* + * SDIO spec header file + * Protocol and standard (common) device definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the 
terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sdio.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SDIO_H +#define _SDIO_H + +#ifdef BCMSDIO + +/* CCCR structure for function 0 */ +typedef volatile struct { + uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */ + uint8 sd_rev; /* RO, sd spec revision */ + uint8 io_en; /* I/O enable */ + uint8 io_rdy; /* I/O ready reg */ + uint8 intr_ctl; /* Master and per function interrupt enable control */ + uint8 intr_status; /* RO, interrupt pending status */ + uint8 io_abort; /* read/write abort or reset all functions */ + uint8 bus_inter; /* bus interface control */ + uint8 capability; /* RO, card capability */ + + uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */ + uint8 cis_base_mid; + uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */ + + /* suspend/resume registers */ + uint8 bus_suspend; /* 0xC */ + uint8 func_select; /* 0xD */ + uint8 exec_flag; /* 0xE */ + uint8 ready_flag; /* 0xF */ + + uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */ + + uint8 power_control; /* 0x12 (SDIO version 1.10) */ + + uint8 speed_control; /* 0x13 */ +} sdio_regs_t; + +/* SDIO Device CCCR offsets */ +#define SDIOD_CCCR_REV 0x00 +#define SDIOD_CCCR_SDREV 0x01 +#define SDIOD_CCCR_IOEN 0x02 +#define SDIOD_CCCR_IORDY 0x03 +#define SDIOD_CCCR_INTEN 0x04 +#define SDIOD_CCCR_INTPEND 0x05 +#define SDIOD_CCCR_IOABORT 0x06 +#define SDIOD_CCCR_BICTRL 0x07 +#define SDIOD_CCCR_CAPABLITIES 0x08 +#define SDIOD_CCCR_CISPTR_0 0x09 +#define SDIOD_CCCR_CISPTR_1 0x0A +#define SDIOD_CCCR_CISPTR_2 0x0B +#define SDIOD_CCCR_BUSSUSP 0x0C +#define SDIOD_CCCR_FUNCSEL 0x0D +#define SDIOD_CCCR_EXECFLAGS 0x0E +#define SDIOD_CCCR_RDYFLAGS 0x0F +#define SDIOD_CCCR_BLKSIZE_0 0x10 +#define SDIOD_CCCR_BLKSIZE_1 0x11 +#define SDIOD_CCCR_POWER_CONTROL 0x12 +#define SDIOD_CCCR_SPEED_CONTROL 0x13 +#define SDIOD_CCCR_UHSI_SUPPORT 0x14 +#define SDIOD_CCCR_DRIVER_STRENGTH 0x15 +#define SDIOD_CCCR_INTR_EXTN 0x16 + +/* Broadcom extensions (corerev >= 1) */ +#define SDIOD_CCCR_BRCM_CARDCAP 0xf0 +#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02 +#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04 +#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08 +#define SDIOD_CCCR_BRCM_CARDCTL 0xf1 +#define SDIOD_CCCR_BRCM_SEPINT 0xf2 + +/* cccr_sdio_rev */ +#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */ +#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */ +#define SDIO_SPEC_VERSION_3_0 0x40 /* SDIO spec version 3.0 */ + +/* sd_rev */ +#define SD_REV_PHY_MASK 0x0f /* SD format version number */ + +/* io_en */ +#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */ +#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */ + +/* io_rdys */ +#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */ +#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */ + +/* intr_ctl */ +#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */ +#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */ +#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */ + +/* intr_status */ +#define INTR_STATUS_FUNC1 0x2 /* interrupt pending 
for function 1 */ +#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */ + +/* io_abort */ +#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */ +#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */ + +/* bus_inter */ +#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */ +#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */ +#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */ +#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */ +#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */ +#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */ + +/* capability */ +#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */ +#define SDIO_CAP_LSC 0x40 /* low speed card */ +#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_SBS 0x08 /* support suspend/resume */ +#define SDIO_CAP_SRW 0x04 /* support read wait */ +#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */ +#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */ + +/* power_control */ +#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */ +#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */ + +/* speed_control (control device entry into high-speed clocking mode) */ +#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */ +#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */ +#define SDIO_SPEED_UHSI_DDR50 0x08 + +/* for setting bus speed in card: 0x13h */ +#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3) +#define SDIO_BUS_SPEED_UHSISEL_S 1 + +/* for getting bus speed cap in card: 0x14h */ +#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3) +#define SDIO_BUS_SPEED_UHSICAP_S 0 + +/* for getting driver type CAP in card: 0x15h */ +#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3) +#define SDIO_BUS_DRVR_TYPE_CAP_S 0 + +/* for setting driver type selection in card: 0x15h */ +#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2) +#define SDIO_BUS_DRVR_TYPE_SEL_S 4 + +/* for getting async int support in card: 0x16h */ +#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1) +#define SDIO_BUS_ASYNCINT_CAP_S 0 + +/* for setting async int selection in card: 0x16h */ +#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1) +#define SDIO_BUS_ASYNCINT_SEL_S 1 + +/* brcm sepint */ +#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */ +#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */ +#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */ + +/* FBR structure for function 1-7, FBR addresses and register offsets */ +typedef volatile struct { + uint8 devctr; /* device interface, CSA control */ + uint8 ext_dev; /* extended standard I/O device type code */ + uint8 pwr_sel; /* power selection support */ + uint8 PAD[6]; /* reserved */ + + uint8 cis_low; /* CIS LSB */ + uint8 cis_mid; + uint8 cis_high; /* CIS MSB */ + uint8 csa_low; /* code storage area, LSB */ + uint8 csa_mid; + uint8 csa_high; /* code storage area, MSB */ + uint8 csa_dat_win; /* data access window to function */ + + uint8 fnx_blk_size[2]; /* block size, little endian */ +} sdio_fbr_t; + +/* Maximum number of I/O funcs */ +#define SDIOD_MAX_FUNCS 8 +#define SDIOD_MAX_IOFUNCS 7 + +/* SDIO Device FBR Start Address */ +#define SDIOD_FBR_STARTADDR 0x100 + +/* SDIO
Device FBR Size */ +#define SDIOD_FBR_SIZE 0x100 + +/* Macro to calculate FBR register base */ +#define SDIOD_FBR_BASE(n) ((n) * 0x100) + +/* Function register offsets */ +#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */ +#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */ +#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */ + +/* SDIO Function CIS ptr offset */ +#define SDIOD_FBR_CISPTR_0 0x09 +#define SDIOD_FBR_CISPTR_1 0x0A +#define SDIOD_FBR_CISPTR_2 0x0B + +/* Code Storage Area pointer */ +#define SDIOD_FBR_CSA_ADDR_0 0x0C +#define SDIOD_FBR_CSA_ADDR_1 0x0D +#define SDIOD_FBR_CSA_ADDR_2 0x0E +#define SDIOD_FBR_CSA_DATA 0x0F + +/* SDIO Function I/O Block Size */ +#define SDIOD_FBR_BLKSIZE_0 0x10 +#define SDIOD_FBR_BLKSIZE_1 0x11 + +/* devctr */ +#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */ +#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */ +#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */ +/* interface codes */ +#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */ +#define SDIOD_DIC_UART 1 +#define SDIOD_DIC_BLUETOOTH_A 2 +#define SDIOD_DIC_BLUETOOTH_B 3 +#define SDIOD_DIC_GPS 4 +#define SDIOD_DIC_CAMERA 5 +#define SDIOD_DIC_PHS 6 +#define SDIOD_DIC_WLAN 7 +#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */ + +/* pwr_sel */ +#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */ +#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */ + +/* misc defines */ +#define SDIO_FUNC_0 0 +#define SDIO_FUNC_1 1 +#define SDIO_FUNC_2 2 +#define SDIO_FUNC_3 3 +#define SDIO_FUNC_4 4 +#define SDIO_FUNC_5 5 +#define SDIO_FUNC_6 6 +#define SDIO_FUNC_7 7 + +#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */ +#define SD_CARD_TYPE_IO 1 /* IO only card */ +#define SD_CARD_TYPE_MEMORY 2 /* memory only card */ +#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */ + +#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */ +#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */ + +/* Card registers: status bit position */ +#define CARDREG_STATUS_BIT_OUTOFRANGE 31 +#define CARDREG_STATUS_BIT_COMCRCERROR 23 +#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22 +#define CARDREG_STATUS_BIT_ERROR 19 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9 +#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4 + + + +#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */ +#define SD_CMD_SEND_OPCOND 1 +#define SD_CMD_MMC_SET_RCA 3 +#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */ +#define SD_CMD_SELECT_DESELECT_CARD 7 +#define SD_CMD_SEND_CSD 9 +#define SD_CMD_SEND_CID 10 +#define SD_CMD_STOP_TRANSMISSION 12 +#define SD_CMD_SEND_STATUS 13 +#define SD_CMD_GO_INACTIVE_STATE 15 +#define SD_CMD_SET_BLOCKLEN 16 +#define SD_CMD_READ_SINGLE_BLOCK 17 +#define SD_CMD_READ_MULTIPLE_BLOCK 18 +#define SD_CMD_WRITE_BLOCK 24 +#define SD_CMD_WRITE_MULTIPLE_BLOCK 25 +#define SD_CMD_PROGRAM_CSD 27 +#define SD_CMD_SET_WRITE_PROT 28 +#define SD_CMD_CLR_WRITE_PROT 29 +#define SD_CMD_SEND_WRITE_PROT 30 +#define SD_CMD_ERASE_WR_BLK_START 32 +#define SD_CMD_ERASE_WR_BLK_END 33 +#define SD_CMD_ERASE 38 +#define SD_CMD_LOCK_UNLOCK 42 +#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */ +#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */ +#define SD_CMD_APP_CMD 55 +#define SD_CMD_GEN_CMD 56 +#define 
SD_CMD_READ_OCR 58 +#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */ +#define SD_ACMD_SD_STATUS 13 +#define SD_ACMD_SEND_NUM_WR_BLOCKS 22 +#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23 +#define SD_ACMD_SD_SEND_OP_COND 41 +#define SD_ACMD_SET_CLR_CARD_DETECT 42 +#define SD_ACMD_SEND_SCR 51 + +/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */ +#define SD_IO_OP_READ 0 /* Read_Write: Read */ +#define SD_IO_OP_WRITE 1 /* Read_Write: Write */ +#define SD_IO_RW_NORMAL 0 /* no RAW */ +#define SD_IO_RW_RAW 1 /* RAW */ +#define SD_IO_BYTE_MODE 0 /* Byte Mode */ +#define SD_IO_BLOCK_MODE 1 /* BlockMode */ +#define SD_IO_FIXED_ADDRESS 0 /* fix Address */ +#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */ + +/* build SD_CMD_IO_RW_DIRECT Argument */ +#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \ + (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF)) + +/* build SD_CMD_IO_RW_EXTENDED Argument */ +#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \ + (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF)) + +/* SDIO response parameters */ +#define SD_RSP_NO_NONE 0 +#define SD_RSP_NO_1 1 +#define SD_RSP_NO_2 2 +#define SD_RSP_NO_3 3 +#define SD_RSP_NO_4 4 +#define SD_RSP_NO_5 5 +#define SD_RSP_NO_6 6 + + /* Modified R6 response (to CMD3) */ +#define SD_RSP_MR6_COM_CRC_ERROR 0x8000 +#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000 +#define SD_RSP_MR6_ERROR 0x2000 + + /* Modified R1 in R4 Response (to CMD5) */ +#define SD_RSP_MR1_SBIT 0x80 +#define SD_RSP_MR1_PARAMETER_ERROR 0x40 +#define SD_RSP_MR1_RFU5 0x20 +#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10 +#define SD_RSP_MR1_COM_CRC_ERROR 0x08 +#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04 +#define SD_RSP_MR1_RFU1 0x02 +#define SD_RSP_MR1_IDLE_STATE 0x01 + + /* R5 response (to CMD52 and CMD53) */ +#define SD_RSP_R5_COM_CRC_ERROR 0x80 +#define SD_RSP_R5_ILLEGAL_COMMAND 0x40 +#define SD_RSP_R5_IO_CURRENTSTATE1 0x20 +#define SD_RSP_R5_IO_CURRENTSTATE0 0x10 +#define SD_RSP_R5_ERROR 0x08 +#define SD_RSP_R5_RFU 0x04 +#define SD_RSP_R5_FUNC_NUM_ERROR 0x02 +#define SD_RSP_R5_OUT_OF_RANGE 0x01 + +#define SD_RSP_R5_ERRBITS 0xCB + + +/* ------------------------------------------------ + * SDIO Commands and responses + * + * I/O only commands are: + * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +/* SDIO Commands */ +#define SDIOH_CMD_0 0 +#define SDIOH_CMD_3 3 +#define SDIOH_CMD_5 5 +#define SDIOH_CMD_7 7 +#define SDIOH_CMD_11 11 +#define SDIOH_CMD_14 14 +#define SDIOH_CMD_15 15 +#define SDIOH_CMD_19 19 +#define SDIOH_CMD_52 52 +#define SDIOH_CMD_53 53 +#define SDIOH_CMD_59 59 + +/* SDIO Command Responses */ +#define SDIOH_RSP_NONE 0 +#define SDIOH_RSP_R1 1 +#define SDIOH_RSP_R2 2 +#define SDIOH_RSP_R3 3 +#define SDIOH_RSP_R4 4 +#define SDIOH_RSP_R5 5 +#define SDIOH_RSP_R6 6 + +/* + * SDIO Response Error flags + */ +#define SDIOH_RSP5_ERROR_FLAGS 0xCB + +/* ------------------------------------------------ + * SDIO Command structures. 
I/O only commands are: + * + * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +#define CMD5_OCR_M BITFIELD_MASK(24) +#define CMD5_OCR_S 0 + +#define CMD5_S18R_M BITFIELD_MASK(1) +#define CMD5_S18R_S 24 + +#define CMD7_RCA_M BITFIELD_MASK(16) +#define CMD7_RCA_S 16 + +#define CMD14_RCA_M BITFIELD_MASK(16) +#define CMD14_RCA_S 16 +#define CMD14_SLEEP_M BITFIELD_MASK(1) +#define CMD14_SLEEP_S 15 + +#define CMD_15_RCA_M BITFIELD_MASK(16) +#define CMD_15_RCA_S 16 + +#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52 + */ +#define CMD52_DATA_S 0 +#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD52_REG_ADDR_S 9 +#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */ +#define CMD52_RAW_S 27 +#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD52_FUNCTION_S 28 +#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD52_RW_FLAG_S 31 + + +#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */ +#define CMD53_BYTE_BLK_CNT_S 0 +#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD53_REG_ADDR_S 9 +#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */ +#define CMD53_OP_CODE_S 26 +#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */ +#define CMD53_BLK_MODE_S 27 +#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD53_FUNCTION_S 28 +#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD53_RW_FLAG_S 31 + +/* ------------------------------------------------------ + * SDIO Command Response structures for SD1 and SD4 modes + * ----------------------------------------------------- + */ +#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_IO_OCR_S 0 + +#define RSP4_S18A_M BITFIELD_MASK(1) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_S18A_S 24 + +#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */ +#define RSP4_STUFF_S 24 +#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */ +#define RSP4_MEM_PRESENT_S 27 +#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */ +#define RSP4_NUM_FUNCS_S 28 +#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */ +#define RSP4_CARD_READY_S 31 + +#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0] + */ +#define RSP6_STATUS_S 0 +#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */ +#define RSP6_IO_RCA_S 16 + +#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */ +#define RSP1_AKE_SEQ_ERROR_S 3 +#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP1_APP_CMD_S 5 +#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */ +#define RSP1_READY_FOR_DATA_S 8 +#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card + * when Cmd was received + */ +#define RSP1_CURR_STATE_S 9 +#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */ +#define RSP1_EARSE_RESET_S 13 +#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */ +#define RSP1_CARD_ECC_DISABLE_S 14 +#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */ +#define 
RSP1_WP_ERASE_SKIP_S 15 +#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits + * of CSD + */ +#define RSP1_CID_CSD_OVERW_S 16 +#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */ +#define RSP1_ERROR_S 19 +#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */ +#define RSP1_CC_ERROR_S 20 +#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed + * to correct data + */ +#define RSP1_CARD_ECC_FAILED_S 21 +#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */ +#define RSP1_ILLEGAL_CMD_S 22 +#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed + */ +#define RSP1_COM_CRC_ERROR_S 23 +#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */ +#define RSP1_LOCK_UNLOCK_FAIL_S 24 +#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */ +#define RSP1_CARD_LOCKED_S 25 +#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program + * write-protected blocks + */ +#define RSP1_WP_VIOLATION_S 26 +#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */ +#define RSP1_ERASE_PARAM_S 27 +#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */ +#define RSP1_ERASE_SEQ_ERR_S 28 +#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */ +#define RSP1_BLK_LEN_ERR_S 29 +#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */ +#define RSP1_ADDR_ERR_S 30 +#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */ +#define RSP1_OUT_OF_RANGE_S 31 + + +#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */ +#define RSP5_DATA_S 0 +#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */ +#define RSP5_FLAGS_S 8 +#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */ +#define RSP5_STUFF_S 16 + +/* ---------------------------------------------- + * SDIO Command Response structures for SPI mode + * ---------------------------------------------- + */ +#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */ +#define SPIRSP4_IO_OCR_S 0 +#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */ +#define SPIRSP4_STUFF_S 16 +#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */ +#define SPIRSP4_MEM_PRESENT_S 19 +#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */ +#define SPIRSP4_NUM_FUNCS_S 20 +#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */ +#define SPIRSP4_CARD_READY_S 23 +#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */ +#define SPIRSP4_IDLE_STATE_S 24 +#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP4_ILLEGAL_CMD_S 26 +#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP4_COM_CRC_ERROR_S 27 +#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP4_FUNC_NUM_ERROR_S 28 +#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP4_PARAM_ERROR_S 30 +#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP4_START_BIT_S 31 + +#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */ +#define SPIRSP5_DATA_S 16 +#define SPIRSP5_IDLE_STATE_M 
BITFIELD_MASK(1) /* Bit 24 - Idle state */ +#define SPIRSP5_IDLE_STATE_S 24 +#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP5_ILLEGAL_CMD_S 26 +#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP5_COM_CRC_ERROR_S 27 +#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP5_FUNC_NUM_ERROR_S 28 +#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP5_PARAM_ERROR_S 30 +#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP5_START_BIT_S 31 + +/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */ +#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error + */ +#define RSP6STAT_AKE_SEQ_ERROR_S 3 +#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP6STAT_APP_CMD_S 5 +#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data + * (buff empty) + */ +#define RSP6STAT_READY_FOR_DATA_S 8 +#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at + * Cmd reception + */ +#define RSP6STAT_CURR_STATE_S 9 +#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19 + */ +#define RSP6STAT_ERROR_S 13 +#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for + * card state Bit 22 + */ +#define RSP6STAT_ILLEGAL_CMD_S 14 +#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command + * failed Bit 23 + */ +#define RSP6STAT_COM_CRC_ERROR_S 15 + +#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ +#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE + +/* command issue options */ +#define CMD_OPTION_DEFAULT 0 +#define CMD_OPTION_TUNING 1 + +#endif /* def BCMSDIO */ +#endif /* _SDIO_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h new file mode 100644 index 000000000000..bc1fcbc0a04b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdioh.h @@ -0,0 +1,448 @@ +/* + * SDIO Host Controller Spec header file + * Register map and definitions for the Standard Host Controller + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sdioh.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SDIOH_H +#define _SDIOH_H + +#define SD_SysAddr 0x000 +#define SD_BlockSize 0x004 +#define SD_BlockCount 0x006 +#define SD_Arg0 0x008 +#define SD_Arg1 0x00A +#define SD_TransferMode 0x00C +#define SD_Command 0x00E +#define SD_Response0 0x010 +#define SD_Response1 0x012 +#define SD_Response2 0x014 +#define SD_Response3 0x016 +#define SD_Response4 0x018 +#define SD_Response5 0x01A +#define SD_Response6 0x01C +#define SD_Response7 0x01E +#define SD_BufferDataPort0 0x020 +#define SD_BufferDataPort1 0x022 +#define SD_PresentState 0x024 +#define SD_HostCntrl 0x028 +#define SD_PwrCntrl 0x029 +#define SD_BlockGapCntrl 0x02A +#define SD_WakeupCntrl 0x02B +#define SD_ClockCntrl 0x02C +#define SD_TimeoutCntrl 0x02E +#define SD_SoftwareReset 0x02F +#define SD_IntrStatus 0x030 +#define SD_ErrorIntrStatus 0x032 +#define SD_IntrStatusEnable 0x034 +#define SD_ErrorIntrStatusEnable 0x036 +#define SD_IntrSignalEnable 0x038 +#define SD_ErrorIntrSignalEnable 0x03A +#define SD_CMD12ErrorStatus 0x03C +#define SD_Capabilities 0x040 +#define SD_Capabilities3 0x044 +#define SD_MaxCurCap 0x048 +#define SD_MaxCurCap_Reserved 0x04C +#define SD_ADMA_ErrStatus 0x054 +#define SD_ADMA_SysAddr 0x58 +#define SD_SlotInterruptStatus 0x0FC +#define SD_HostControllerVersion 0x0FE +#define SD_GPIO_Reg 0x100 +#define SD_GPIO_OE 0x104 +#define SD_GPIO_Enable 0x108 + +/* SD specific registers in PCI config space */ +#define SD_SlotInfo 0x40 + +/* HC 3.0 specific registers and offsets */ +#define SD3_HostCntrl2 0x03E +/* preset regsstart and count */ +#define SD3_PresetValStart 0x060 +#define SD3_PresetValCount 8 +/* preset-indiv regs */ +#define SD3_PresetVal_init 0x060 +#define SD3_PresetVal_default 0x062 +#define SD3_PresetVal_HS 0x064 +#define SD3_PresetVal_SDR12 0x066 +#define SD3_PresetVal_SDR25 0x068 +#define SD3_PresetVal_SDR50 0x06a +#define SD3_PresetVal_SDR104 0x06c +#define SD3_PresetVal_DDR50 0x06e +/* SDIO3.0 Revx specific Registers */ +#define SD3_Tuning_Info_Register 0x0EC +#define SD3_WL_BT_reset_register 0x0F0 + + +/* preset value indices */ +#define SD3_PRESETVAL_INITIAL_IX 0 +#define SD3_PRESETVAL_DESPEED_IX 1 +#define SD3_PRESETVAL_HISPEED_IX 2 +#define SD3_PRESETVAL_SDR12_IX 3 +#define SD3_PRESETVAL_SDR25_IX 4 +#define SD3_PRESETVAL_SDR50_IX 5 +#define SD3_PRESETVAL_SDR104_IX 6 +#define SD3_PRESETVAL_DDR50_IX 7 + +/* SD_Capabilities reg (0x040) */ +#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6) +#define CAP_TO_CLKFREQ_S 0 +#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1) +#define CAP_TO_CLKUNIT_S 7 +/* Note: for sdio-2.0 case, this mask has to be 6 bits, but msb 2 + bits are reserved. 
going ahead with 8 bits, as it is req for 3.0 +*/ +#define CAP_BASECLK_M BITFIELD_MASK(8) +#define CAP_BASECLK_S 8 +#define CAP_MAXBLOCK_M BITFIELD_MASK(2) +#define CAP_MAXBLOCK_S 16 +#define CAP_ADMA2_M BITFIELD_MASK(1) +#define CAP_ADMA2_S 19 +#define CAP_ADMA1_M BITFIELD_MASK(1) +#define CAP_ADMA1_S 20 +#define CAP_HIGHSPEED_M BITFIELD_MASK(1) +#define CAP_HIGHSPEED_S 21 +#define CAP_DMA_M BITFIELD_MASK(1) +#define CAP_DMA_S 22 +#define CAP_SUSPEND_M BITFIELD_MASK(1) +#define CAP_SUSPEND_S 23 +#define CAP_VOLT_3_3_M BITFIELD_MASK(1) +#define CAP_VOLT_3_3_S 24 +#define CAP_VOLT_3_0_M BITFIELD_MASK(1) +#define CAP_VOLT_3_0_S 25 +#define CAP_VOLT_1_8_M BITFIELD_MASK(1) +#define CAP_VOLT_1_8_S 26 +#define CAP_64BIT_HOST_M BITFIELD_MASK(1) +#define CAP_64BIT_HOST_S 28 + +#define SDIO_OCR_READ_FAIL (2) + + +#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1) +#define CAP_ASYNCINT_SUP_S 29 + +#define CAP_SLOTTYPE_M BITFIELD_MASK(2) +#define CAP_SLOTTYPE_S 30 + +#define CAP3_MSBits_OFFSET (32) +/* note: following are caps MSB32 bits. + So the bits start from 0, instead of 32. that is why + CAP3_MSBits_OFFSET is subtracted. +*/ +#define CAP3_SDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_SDR104_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET) + +#define CAP3_DDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET) + +/* for knowing the clk caps in a single read */ +#define CAP3_30CLKCAP_M BITFIELD_MASK(3) +#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_TC_M BITFIELD_MASK(4) +#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET) + +#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1) +#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2) +#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET) + +#define CAP3_CLK_MULT_M BITFIELD_MASK(8) +#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET) + +#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2) +#define PRESET_DRIVR_SELECT_S 14 + +#define PRESET_CLK_DIV_M BITFIELD_MASK(10) +#define PRESET_CLK_DIV_S 0 + +/* SD_MaxCurCap reg (0x048) */ +#define CAP_CURR_3_3_M BITFIELD_MASK(8) +#define CAP_CURR_3_3_S 0 +#define CAP_CURR_3_0_M BITFIELD_MASK(8) +#define CAP_CURR_3_0_S 8 +#define CAP_CURR_1_8_M BITFIELD_MASK(8) +#define CAP_CURR_1_8_S 16 + +/* SD_SysAddr: Offset 0x0000, Size 4 bytes */ + +/* SD_BlockSize: Offset 0x004, Size 2 bytes */ +#define BLKSZ_BLKSZ_M BITFIELD_MASK(12) +#define BLKSZ_BLKSZ_S 0 +#define BLKSZ_BNDRY_M BITFIELD_MASK(3) +#define BLKSZ_BNDRY_S 12 + +/* SD_BlockCount: Offset 0x006, size 2 bytes */ + +/* SD_Arg0: Offset 0x008, size = 4 bytes */ +/* SD_TransferMode Offset 0x00C, size = 2 bytes */ +#define XFER_DMA_ENABLE_M BITFIELD_MASK(1) +#define XFER_DMA_ENABLE_S 0 +#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1) +#define XFER_BLK_COUNT_EN_S 1 +#define XFER_CMD_12_EN_M BITFIELD_MASK(1) +#define XFER_CMD_12_EN_S 2 +#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1) +#define XFER_DATA_DIRECTION_S 4 +#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1) +#define XFER_MULTI_BLOCK_S 5 + +/* SD_Command: Offset 0x00E, size = 2 bytes */ +/* resp_type field */ +#define RESP_TYPE_NONE 0 +#define RESP_TYPE_136 1 +#define 
RESP_TYPE_48 2 +#define RESP_TYPE_48_BUSY 3 +/* type field */ +#define CMD_TYPE_NORMAL 0 +#define CMD_TYPE_SUSPEND 1 +#define CMD_TYPE_RESUME 2 +#define CMD_TYPE_ABORT 3 + +#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */ +#define CMD_RESP_TYPE_S 0 +#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */ +#define CMD_CRC_EN_S 3 +#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */ +#define CMD_INDEX_EN_S 4 +#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */ +#define CMD_DATA_EN_S 5 +#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc + */ +#define CMD_TYPE_S 6 +#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */ +#define CMD_INDEX_S 8 + +/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */ +/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */ +/* SD_PresentState : Offset 0x024, size = 4 bytes */ +#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */ +#define PRES_CMD_INHIBIT_S 0 +#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */ +#define PRES_DAT_INHIBIT_S 1 +#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */ +#define PRES_DAT_BUSY_S 2 +#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */ +#define PRES_PRESENT_RSVD_S 3 +#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */ +#define PRES_WRITE_ACTIVE_S 8 +#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */ +#define PRES_READ_ACTIVE_S 9 +#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */ +#define PRES_WRITE_DATA_RDY_S 10 +#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */ +#define PRES_READ_DATA_RDY_S 11 +#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */ +#define PRES_CARD_PRESENT_S 16 +#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */ +#define PRES_CARD_STABLE_S 17 +#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */ +#define PRES_CARD_PRESENT_RAW_S 18 +#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? 
*/ +#define PRES_WRITE_ENABLED_S 19 +#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */ +#define PRES_DAT_SIGNAL_S 20 +#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */ +#define PRES_CMD_SIGNAL_S 24 + +/* SD_HostCntrl: Offset 0x028, size = 1 bytes */ +#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */ +#define HOST_LED_S 0 +#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */ +#define HOST_DATA_WIDTH_S 1 +#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */ +#define HOST_DMA_SEL_S 3 +#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */ +#define HOST_HI_SPEED_EN_S 2 + +/* Host Control2: */ +#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */ + +#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */ + +#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */ + +#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */ + +#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */ +#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */ + +#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */ + +#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */ +#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */ + +#define HOST_CONTR_VER_2 (1) +#define HOST_CONTR_VER_3 (2) + +/* misc defines */ +#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */ +#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */ + +/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */ +#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */ +#define PWR_BUS_EN_S 0 +#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */ +#define PWR_VOLTS_S 1 + +/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */ +#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */ +#define SW_RESET_ALL_S 0 +#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */ +#define SW_RESET_CMD_S 1 +#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */ +#define SW_RESET_DAT_S 2 + +/* SD_IntrStatus: Offset 0x030, size = 2 bytes */ +/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */ +#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */ +#define INTSTAT_CMD_COMPLETE_S 0 +#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1) +#define INTSTAT_XFER_COMPLETE_S 1 +#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1) +#define INTSTAT_BLOCK_GAP_EVENT_S 2 +#define INTSTAT_DMA_INT_M BITFIELD_MASK(1) +#define INTSTAT_DMA_INT_S 3 +#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_WRITE_READY_S 4 +#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_READ_READY_S 5 +#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INSERTION_S 6 +#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1) +#define INTSTAT_CARD_REMOVAL_S 7 +#define INTSTAT_CARD_INT_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INT_S 8 +#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */ +#define INTSTAT_RETUNING_INT_S 12 +#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */ +#define INTSTAT_ERROR_INT_S 15 + +/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */ +/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */ +#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_CMD_TIMEOUT_S 0 +#define ERRINT_CMD_CRC_M BITFIELD_MASK(1) +#define 
ERRINT_CMD_CRC_S 1 +#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_CMD_ENDBIT_S 2 +#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1) +#define ERRINT_CMD_INDEX_S 3 +#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_DATA_TIMEOUT_S 4 +#define ERRINT_DATA_CRC_M BITFIELD_MASK(1) +#define ERRINT_DATA_CRC_S 5 +#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_DATA_ENDBIT_S 6 +#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1) +#define ERRINT_CURRENT_LIMIT_S 7 +#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1) +#define ERRINT_AUTO_CMD12_S 8 +#define ERRINT_VENDOR_M BITFIELD_MASK(4) +#define ERRINT_VENDOR_S 12 +#define ERRINT_ADMA_M BITFIELD_MASK(1) +#define ERRINT_ADMA_S 9 + +/* Also provide definitions in "normal" form to allow combined masks */ +#define ERRINT_CMD_TIMEOUT_BIT 0x0001 +#define ERRINT_CMD_CRC_BIT 0x0002 +#define ERRINT_CMD_ENDBIT_BIT 0x0004 +#define ERRINT_CMD_INDEX_BIT 0x0008 +#define ERRINT_DATA_TIMEOUT_BIT 0x0010 +#define ERRINT_DATA_CRC_BIT 0x0020 +#define ERRINT_DATA_ENDBIT_BIT 0x0040 +#define ERRINT_CURRENT_LIMIT_BIT 0x0080 +#define ERRINT_AUTO_CMD12_BIT 0x0100 +#define ERRINT_ADMA_BIT 0x0200 + +/* Masks to select CMD vs. DATA errors */ +#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\ + ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT) +#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\ + ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT) +#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS) + +/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */ +/* SD_ClockCntrl : Offset 0x02C , size = bytes */ +/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */ +/* SD_IntrStatus : Offset 0x030 , size = bytes */ +/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */ +/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */ +/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */ +/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */ +/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */ +/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */ +/* SD_Capabilities : Offset 0x040 , size = bytes */ +/* SD_MaxCurCap : Offset 0x048 , size = bytes */ +/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */ +/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */ +/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */ + +/* SDIO Host Control Register DMA Mode Definitions */ +#define SDIOH_SDMA_MODE 0 +#define SDIOH_ADMA1_MODE 1 +#define SDIOH_ADMA2_MODE 2 +#define SDIOH_ADMA2_64_MODE 3 + +#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */ +#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */ +#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */ +#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */ +#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */ +#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */ +#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. 
*/ +#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */ + +/* ADMA2 Descriptor Table Entry for 32-bit Address */ +typedef struct adma2_dscr_32b { + uint32 len_attr; + uint32 phys_addr; +} adma2_dscr_32b_t; + +/* ADMA1 Descriptor Table Entry */ +typedef struct adma1_dscr { + uint32 phys_addr_attr; +} adma1_dscr_t; + +#endif /* _SDIOH_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h new file mode 100644 index 000000000000..15b74abec9ea --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h @@ -0,0 +1,61 @@ +/* + * Structure used by apps whose drivers access SDIO drivers. + * Pulled out separately so dhdu and wlu can both use it. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sdiovar.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _sdiovar_h_ +#define _sdiovar_h_ + +#include + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + +typedef struct sdreg { + int func; + int offset; + int value; +} sdreg_t; + +/* Common msglevel constants */ +#define SDH_ERROR_VAL 0x0001 /* Error */ +#define SDH_TRACE_VAL 0x0002 /* Trace */ +#define SDH_INFO_VAL 0x0004 /* Info */ +#define SDH_DEBUG_VAL 0x0008 /* Debug */ +#define SDH_DATA_VAL 0x0010 /* Data */ +#define SDH_CTRL_VAL 0x0020 /* Control Regs */ +#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */ +#define SDH_DMA_VAL 0x0080 /* DMA */ + +#define NUM_PREV_TRANSACTIONS 16 + + +#include + +#endif /* _sdiovar_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h new file mode 100644 index 000000000000..27ad7c484455 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/siutils.h @@ -0,0 +1,601 @@ +/* + * Misc utility routines for accessing the SOC Interconnects + * of Broadcom HNBU chips. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: siutils.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _siutils_h_ +#define _siutils_h_ + +#ifdef SR_DEBUG +#include "wlioctl.h" +#endif /* SR_DEBUG */ + + +/** + * Data structure to export all chip specific common variables + * public (read-only) portion of siutils handle returned by si_attach()/si_kattach() + */ +struct si_pub { + uint socitype; /**< SOCI_SB, SOCI_AI */ + + uint bustype; /**< SI_BUS, PCI_BUS */ + uint buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ + uint buscorerev; /**< buscore rev */ + uint buscoreidx; /**< buscore index */ + int ccrev; /**< chip common core rev */ + uint32 cccaps; /**< chip common capabilities */ + uint32 cccaps_ext; /**< chip common capabilities extension */ + int pmurev; /**< pmu core rev */ + uint32 pmucaps; /**< pmu capabilities */ + uint boardtype; /**< board type */ + uint boardrev; /* board rev */ + uint boardvendor; /**< board vendor */ + uint boardflags; /**< board flags */ + uint boardflags2; /**< board flags2 */ + uint chip; /**< chip number */ + uint chiprev; /**< chip revision */ + uint chippkg; /**< chip package option */ + uint32 chipst; /**< chip status */ + bool issim; /**< chip is in simulation or emulation */ + uint socirev; /**< SOC interconnect rev */ + bool pci_pr32414; + +}; + +/* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver + * for monolithic driver, it is readonly to prevent accident change + */ +typedef const struct si_pub si_t; + +/* + * Many of the routines below take an 'sih' handle as their first arg. + * Allocate this by calling si_attach(). Free it by calling si_detach(). + * At any one time, the sih is logically focused on one particular si core + * (the "current core"). + * Use si_setcore() or si_setcoreidx() to change the association to another core. + */ +#define SI_OSH NULL /**< Use for si_kattach when no osh is available */ + +#define BADIDX (SI_MAXCORES + 1) + +/* clkctl xtal what flags */ +#define XTAL 0x1 /**< primary crystal oscillator (2050) */ +#define PLL 0x2 /**< main chip pll */ + +/* clkctl clk mode */ +#define CLK_FAST 0 /**< force fast (pll) clock */ +#define CLK_DYNAMIC 2 /**< enable dynamic clock control */ + +/* GPIO usage priorities */ +#define GPIO_DRV_PRIORITY 0 /**< Driver */ +#define GPIO_APP_PRIORITY 1 /**< Application */ +#define GPIO_HI_PRIORITY 2 /**< Highest priority. 
Ignore GPIO reservation */ + +/* GPIO pull up/down */ +#define GPIO_PULLUP 0 +#define GPIO_PULLDN 1 + +/* GPIO event regtype */ +#define GPIO_REGEVT 0 /**< GPIO register event */ +#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */ +#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */ + +/* device path */ +#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */ + +/* SI routine enumeration: to be used by update function with multiple hooks */ +#define SI_DOATTACH 1 +#define SI_PCIDOWN 2 /**< wireless interface is down */ +#define SI_PCIUP 3 /**< wireless interface is up */ + +#ifdef SR_DEBUG +#define PMU_RES 31 +#endif /* SR_DEBUG */ + +#define ISSIM_ENAB(sih) FALSE + +/* PMU clock/power control */ +#if defined(BCMPMUCTL) +#define PMUCTL_ENAB(sih) (BCMPMUCTL) +#else +#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) +#endif + +#define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? \ + ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0) + +/* chipcommon clock/power control (exclusive with PMU's) */ +#if defined(BCMPMUCTL) && BCMPMUCTL +#define CCCTL_ENAB(sih) (0) +#define CCPLL_ENAB(sih) (0) +#else +#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL) +#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) +#endif + +typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); + +/* External BT Coex enable mask */ +#define CC_BTCOEX_EN_MASK 0x01 +/* External PA enable mask */ +#define GPIO_CTRL_EPA_EN_MASK 0x40 +/* WL/BT control enable mask */ +#define GPIO_CTRL_5_6_EN_MASK 0x60 +#define GPIO_CTRL_7_6_EN_MASK 0xC0 +#define GPIO_OUT_7_EN_MASK 0x80 + + + +/* CR4 specific defines used by the host driver */ +#define SI_CR4_CAP (0x04) +#define SI_CR4_BANKIDX (0x40) +#define SI_CR4_BANKINFO (0x44) +#define SI_CR4_BANKPDA (0x4C) + +#define ARMCR4_TCBBNB_MASK 0xf0 +#define ARMCR4_TCBBNB_SHIFT 4 +#define ARMCR4_TCBANB_MASK 0xf +#define ARMCR4_TCBANB_SHIFT 0 + +#define SICF_CPUHALT (0x0020) +#define ARMCR4_BSZ_MASK 0x3f +#define ARMCR4_BSZ_MULT 8192 +#define SI_BPIND_1BYTE 0x1 +#define SI_BPIND_2BYTE 0x3 +#define SI_BPIND_4BYTE 0xF +#include +/* === exported functions === */ +extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *si_kattach(osl_t *osh); +extern void si_detach(si_t *sih); +extern bool si_pci_war16165(si_t *sih); +extern void * +si_d11_switch_addrbase(si_t *sih, uint coreunit); +extern uint si_corelist(si_t *sih, uint coreid[]); +extern uint si_coreid(si_t *sih); +extern uint si_flag(si_t *sih); +extern uint si_flag_alt(si_t *sih); +extern uint si_intflag(si_t *sih); +extern uint si_coreidx(si_t *sih); +extern uint si_coreunit(si_t *sih); +extern uint si_corevendor(si_t *sih); +extern uint si_corerev(si_t *sih); +extern void *si_osh(si_t *sih); +extern void si_setosh(si_t *sih, osl_t *osh); +extern uint si_backplane_access(si_t *sih, uint addr, uint size, + uint *val, bool read); +extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val); +extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern void *si_coreregs(si_t *sih); +extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val); +extern void *si_wrapperregs(si_t *sih); +extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void si_core_cflags_wo(si_t *sih, 
uint32 mask, uint32 val); +extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern bool si_iscoreup(si_t *sih); +extern uint si_numcoreunits(si_t *sih, uint coreid); +extern uint si_numd11coreunits(si_t *sih); +extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); +extern void *si_setcoreidx(si_t *sih, uint coreidx); +extern void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); +extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); +extern int si_numaddrspaces(si_t *sih); +extern uint32 si_addrspace(si_t *sih, uint asidx); +extern uint32 si_addrspacesize(si_t *sih, uint asidx); +extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern int si_corebist(si_t *sih); +extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void si_core_disable(si_t *sih, uint32 bits); +extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); +extern uint si_chip_hostif(si_t *sih); +extern bool si_read_pmu_autopll(si_t *sih); +extern uint32 si_clock(si_t *sih); +extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */ +extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */ +extern void si_pci_setup(si_t *sih, uint coremask); +extern void si_pcmcia_init(si_t *sih); +extern void si_setint(si_t *sih, int siflag); +extern bool si_backplane64(si_t *sih); +extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void si_deregister_intr_callback(si_t *sih); +extern void si_clkctl_init(si_t *sih); +extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih); +extern bool si_clkctl_cc(si_t *sih, uint mode); +extern int si_clkctl_xtal(si_t *sih, uint what, bool on); +extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); +extern void si_btcgpiowar(si_t *sih); +extern bool si_deviceremoved(si_t *sih); +extern uint32 si_sysmem_size(si_t *sih); +extern uint32 si_socram_size(si_t *sih); +extern uint32 si_socdevram_size(si_t *sih); +extern uint32 si_socram_srmem_size(si_t *sih); +extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda); +extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect, uint8 *remap); +extern bool si_socdevram_pkg(si_t *sih); +extern bool si_socdevram_remap_isenb(si_t *sih); +extern uint32 si_socdevram_remap_size(si_t *sih); + +extern void si_watchdog(si_t *sih, uint ticks); +extern void si_watchdog_ms(si_t *sih, uint32 ms); +extern uint32 si_watchdog_msticks(void); +extern void *si_gpiosetcore(si_t *sih); +extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioin(si_t *sih); +extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val); +extern uint32 
si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val); +extern uint32 si_gpio_int_enable(si_t *sih, bool enable); +extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode); +extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value); +extern uint8 si_gci_host_wake_gpio_init(si_t *sih); +extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state); + +/* GCI interrupt handlers */ +extern void si_gci_handler_process(si_t *sih); + +/* GCI GPIO event handlers */ +extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts, + gci_gpio_handler_t cb, void *arg); +extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i); +extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value); + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool si_pci_pmecap(si_t *sih); +extern bool si_pci_fastpmecap(struct osl_info *osh); +extern bool si_pci_pmestat(si_t *sih); +extern void si_pci_pmeclr(si_t *sih); +extern void si_pci_pmeen(si_t *sih); +extern void si_pci_pmestatclr(si_t *sih); +extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset); +extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val); +extern void si_deepsleep_count(si_t *sih, bool arm_wakeup); + + +#ifdef BCMSDIO +extern void si_sdio_init(si_t *sih); +#endif + +extern uint16 si_d11_devid(si_t *sih); +extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice, + uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader); + +#define si_eci(sih) 0 +static INLINE void * si_eci_init(si_t *sih) {return NULL;} +#define si_eci_notify_bt(sih, type, val) (0) +#define si_seci(sih) 0 +#define si_seci_upd(sih, a) do {} while (0) +static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;} +static INLINE void * si_gci_init(si_t *sih) {return NULL;} +#define si_seci_down(sih) do {} while (0) +#define si_gci(sih) 0 + +/* OTP status */ +extern bool si_is_otp_disabled(si_t *sih); +extern bool si_is_otp_powered(si_t *sih); +extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask); + +/* SPROM availability */ +extern bool si_is_sprom_available(si_t *sih); +extern bool si_is_sprom_enabled(si_t *sih); +extern void si_sprom_enable(si_t *sih, bool enable); + +/* OTP/SROM CIS stuff */ +extern int si_cis_source(si_t *sih); +#define CIS_DEFAULT 0 +#define CIS_SROM 1 +#define CIS_OTP 2 + +/* Fab-id information */ +#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */ +#define CSM_FAB7 0x1 /**< CSM Fab7 chip */ +#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */ +#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */ + +extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw); +extern uint16 si_fabid(si_t *sih); +extern uint16 si_chipid(si_t *sih); + +/* + * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. + * The returned path is NULL terminated and has trailing '/'. + * Return 0 on success, nonzero otherwise. 
+ */ +extern int si_devpath(si_t *sih, char *path, int size); +extern int si_devpath_pcie(si_t *sih, char *path, int size); +/* Read variable with prepending the devpath to the name */ +extern char *si_getdevpathvar(si_t *sih, const char *name); +extern int si_getdevpathintvar(si_t *sih, const char *name); +extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name); + + +extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val); +extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val); +extern void si_pcie_set_error_injection(si_t *sih, uint32 mode); +extern void si_pcie_set_L1substate(si_t *sih, uint32 substate); +extern uint32 si_pcie_get_L1substate(si_t *sih); +extern void si_war42780_clkreq(si_t *sih, bool clkreq); +extern void si_pci_down(si_t *sih); +extern void si_pci_up(si_t *sih); +extern void si_pci_sleep(si_t *sih); +extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm); +extern void si_pcie_power_save_enable(si_t *sih, bool enable); +extern void si_pcie_extendL1timer(si_t *sih, bool extend); +extern int si_pci_fixcfg(si_t *sih); +extern void si_chippkg_set(si_t *sih, uint); + +extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on); +extern void si_chipcontrl_restore(si_t *sih, uint32 val); +extern uint32 si_chipcontrl_read(si_t *sih); +extern void si_chipcontrl_epa4331(si_t *sih, bool on); +extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl); +extern void si_chipcontrl_srom4360(si_t *sih, bool on); +extern void si_clk_srom4365(si_t *sih); +/* Enable BT-COEX & Ex-PA for 4313 */ +extern void si_epa_4313war(si_t *sih); +extern void si_btc_enable_chipcontrol(si_t *sih); +/* BT/WL selection for 4313 bt combo >= P250 boards */ +extern void si_btcombo_p250_4313_war(si_t *sih); +extern void si_btcombo_43228_war(si_t *sih); +extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear); +extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag); +extern void si_pmu_synth_pwrsw_4313_war(si_t *sih); +extern uint si_pll_reset(si_t *sih); +/* === debug routines === */ + +extern bool si_taclear(si_t *sih, bool details); + +#if defined(BCMDBG_PHYDUMP) +struct bcmstrbuf; +extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b); +#endif + +#if defined(BCMDBG_PHYDUMP) +extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif + +extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type); +extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low, + int32* data, bool read); +#ifdef SR_DEBUG +extern void si_dump_pmu(si_t *sih, void *pmu_var); +extern void si_pmu_keep_on(si_t *sih, int32 int_val); +extern uint32 si_pmu_keep_on_get(si_t *sih); +extern uint32 si_power_island_set(si_t *sih, uint32 int_val); +extern uint32 si_power_island_get(si_t *sih); +#endif /* SR_DEBUG */ +extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val); +extern void si_pcie_set_request_size(si_t *sih, uint16 size); +extern uint16 si_pcie_get_request_size(si_t *sih); +extern void si_pcie_set_maxpayload_size(si_t 
*sih, uint16 size); +extern uint16 si_pcie_get_maxpayload_size(si_t *sih); +extern uint16 si_pcie_get_ssid(si_t *sih); +extern uint32 si_pcie_get_bar0(si_t *sih); +extern int si_pcie_configspace_cache(si_t *sih); +extern int si_pcie_configspace_restore(si_t *sih); +extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size); + +char *si_getnvramflvar(si_t *sih, const char *name); + + +extern uint32 si_tcm_size(si_t *sih); +extern bool si_has_flops(si_t *sih); + +extern int si_set_sromctl(si_t *sih, uint32 value); +extern uint32 si_get_sromctl(si_t *sih); + +extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_input(si_t *sih, uint reg); +extern uint32 si_gci_int_enable(si_t *sih, bool enable); +extern void si_gci_reset(si_t *sih); +#ifdef BCMLTECOEX +extern void si_gci_seci_init(si_t *sih); +extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +#endif /* BCMLTECOEX */ +extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel); +extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin); +extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel); +extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos); +extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_chipstatus(si_t *sih, uint reg); +extern uint16 si_cc_get_reg16(uint32 reg_offs); +extern uint32 si_cc_get_reg32(uint32 reg_offs); +extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val); +extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask); +extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status); +extern void si_swdenable(si_t *sih, uint32 swdflag); + +#define CHIPCTRLREG1 0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c + +void si_update_masks(si_t *sih); +void si_force_islanding(si_t *sih, bool enable); +extern uint32 si_pmu_res_req_timer_clr(si_t *sih); +extern void si_pmu_rfldo(si_t *sih, bool on); +extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val); +extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val); +extern void si_pcie_ltr_war(si_t *sih); +extern void si_pcie_hw_LTR_war(si_t *sih); +extern void si_pcie_hw_L1SS_war(si_t *sih); +extern void si_pciedev_crwlpciegen2(si_t *sih); +extern void si_pcie_prep_D3(si_t *sih, bool enter_D3); +extern void si_pciedev_reg_pm_clk_period(si_t *sih); + +#ifdef WLRSDB +extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits); +extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +#endif + + +/* Macro to enable clock gating changes in different cores */ +#define MEM_CLK_GATE_BIT 5 +#define GCI_CLK_GATE_BIT 18 + +#define USBAPP_CLK_BIT 0 +#define PCIE_CLK_BIT 3 +#define ARMCR4_DBG_CLK_BIT 4 +#define SAMPLE_SYNC_CLK_BIT 17 +#define PCIE_TL_CLK_BIT 18 +#define 
HQ_REQ_BIT 24 +#define PLL_DIV2_BIT_START 9 +#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START) +#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START) + +#define pmu_corereg(si, cc_idx, member, mask, val) \ + (AOB_ENAB(si) ? \ + si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val): \ + si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val)) + +/* GCI Macros */ +#define ALLONES_32 0xFFFFFFFF +#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */ +#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */ +#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */ +#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */ +#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */ +#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */ +#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */ +#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */ +#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */ +#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */ +#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */ +#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */ +#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */ +#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */ +#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */ +#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */ +#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */ + +#define GCI_MODE_UART 0x0 +#define GCI_MODE_SECI 0x1 +#define GCI_MODE_BTSIG 0x2 +#define GCI_MODE_GPIO 0x3 +#define GCI_MODE_MASK 0x7 + +#define GCI_CCTL_LOWTOUT_DIS 0x0 +#define GCI_CCTL_LOWTOUT_10BIT 0x1 +#define GCI_CCTL_LOWTOUT_20BIT 0x2 +#define GCI_CCTL_LOWTOUT_30BIT 0x3 +#define GCI_CCTL_LOWTOUT_MASK 0x3 + +#define GCI_CCTL_SCS_DEF 0x19 +#define GCI_CCTL_SCS_MASK 0xFF + +#define GCI_SECIIN_MODE_OFFSET 0 +#define GCI_SECIIN_GCIGPIO_OFFSET 4 +#define GCI_SECIIN_RXID2IP_OFFSET 8 + +#define GCI_SECIOUT_MODE_OFFSET 0 +#define GCI_SECIOUT_GCIGPIO_OFFSET 4 +#define GCI_SECIOUT_SECIINRELATED_OFFSET 16 + +#define GCI_SECIAUX_RXENABLE_OFFSET 0 +#define GCI_SECIFIFO_RXENABLE_OFFSET 16 + +#define GCI_SECITX_ENABLE_OFFSET 0 + +#define GCI_GPIOCTL_INEN_OFFSET 0 +#define GCI_GPIOCTL_OUTEN_OFFSET 1 +#define GCI_GPIOCTL_PDN_OFFSET 4 + +#define GCI_GPIOIDX_OFFSET 16 + +#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */ + +/* To access per GCI bit registers */ +#define GCI_REG_WIDTH 32 + +/* GCI bit positions */ +/* GCI [127:000] = WLAN [127:0] */ +#define GCI_WLAN_IP_ID 0 +#define GCI_WLAN_BEGIN 0 +#define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4) + +/* GCI [639:512] = LTE [127:0] */ +#define GCI_LTE_IP_ID 4 +#define GCI_LTE_BEGIN 512 +#define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0) +#define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1) +#define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2) +#define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56) + +/* Reg Index corresponding to ECI bit no x of ECI space */ +#define GCI_REGIDX(x) ((x)/GCI_REG_WIDTH) +/* Bit offset of ECI bit no x in 32-bit words */ +#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH) + +/* End - GCI Macros */ + +#ifdef REROUTE_OOBINT +#define CC_OOB 0x0 +#define M2MDMA_OOB 0x1 +#define PMU_OOB 0x2 +#define D11_OOB 0x3 +#define SDIOD_OOB 0x4 +#define WLAN_OOB 0x5 +#define PMU_OOB_BIT 0x12 +#endif /* REROUTE_OOBINT */ + +extern void si_pll_sr_reinit(si_t *sih); +extern void si_pll_closeloop(si_t *sih); + +#endif /* _siutils_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/spid.h b/drivers/net/wireless/bcmdhd/include/spid.h new file mode 100644 
index 000000000000..9a39aaf0dd3f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/spid.h @@ -0,0 +1,168 @@ +/* + * SPI device spec header file + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: spid.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SPI_H +#define _SPI_H + +/* + * Brcm SPI Device Register Map. + * + */ + +typedef volatile struct { + uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */ + uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */ + uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay + * function selection, command/data error check + */ + uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */ + uint16 intr_reg; /* 0x04, Intr status register */ + uint16 intr_en_reg; /* 0x06, Intr mask register */ + uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */ + uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */ + uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */ + uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */ + uint32 test_read; /* 0x14, RO 0xfeedbead signature */ + uint32 test_rw; /* 0x18, RW */ + uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */ + uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */ + uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */ + uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */ +} spi_regs_t; + +/* SPI device register offsets */ +#define SPID_CONFIG 0x00 +#define SPID_RESPONSE_DELAY 0x01 +#define SPID_STATUS_ENABLE 0x02 +#define SPID_RESET_BP 0x03 /* (corerev >= 1) */ +#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */ +#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */ +#define SPID_STATUS_REG 0x08 /* 32 bits */ +#define SPID_F1_INFO_REG 0x0C /* 16 bits */ +#define SPID_F2_INFO_REG 0x0E /* 16 bits */ +#define SPID_F3_INFO_REG 0x10 /* 16 bits */ +#define SPID_TEST_READ 0x14 /* 32 bits */ +#define SPID_TEST_RW 0x18 /* 32 bits */ +#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */ + +/* Bit masks for SPID_CONFIG device register */ +#define 
WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */ +#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */ +#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */ +#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */ +#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */ +#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */ +#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */ + +/* Bit mask for SPID_RESPONSE_DELAY device register */ +#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */ + +/* Bit mask for SPID_STATUS_ENABLE device register */ +#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */ +#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */ +#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */ +#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */ +#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */ +#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */ + +/* Bit mask for SPID_RESET_BP device register */ +#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */ +#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */ +#define RESET_SPI 0x80 /* reset the above enabled logic */ + +/* Bit mask for SPID_INTR_REG device register */ +#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */ +#define F2_F3_FIFO_RD_UNDERFLOW 0x0002 +#define F2_F3_FIFO_WR_OVERFLOW 0x0004 +#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */ +#define DATA_ERROR 0x0010 /* Cleared by writing 1 */ +#define F2_PACKET_AVAILABLE 0x0020 +#define F3_PACKET_AVAILABLE 0x0040 +#define F1_OVERFLOW 0x0080 /* Due to last write. 
Bkplane has pending write requests */ +#define MISC_INTR0 0x0100 +#define MISC_INTR1 0x0200 +#define MISC_INTR2 0x0400 +#define MISC_INTR3 0x0800 +#define MISC_INTR4 0x1000 +#define F1_INTR 0x2000 +#define F2_INTR 0x4000 +#define F3_INTR 0x8000 + +/* Bit mask for 32bit SPID_STATUS_REG device register */ +#define STATUS_DATA_NOT_AVAILABLE 0x00000001 +#define STATUS_UNDERFLOW 0x00000002 +#define STATUS_OVERFLOW 0x00000004 +#define STATUS_F2_INTR 0x00000008 +#define STATUS_F3_INTR 0x00000010 +#define STATUS_F2_RX_READY 0x00000020 +#define STATUS_F3_RX_READY 0x00000040 +#define STATUS_HOST_CMD_DATA_ERR 0x00000080 +#define STATUS_F2_PKT_AVAILABLE 0x00000100 +#define STATUS_F2_PKT_LEN_MASK 0x000FFE00 +#define STATUS_F2_PKT_LEN_SHIFT 9 +#define STATUS_F3_PKT_AVAILABLE 0x00100000 +#define STATUS_F3_PKT_LEN_MASK 0xFFE00000 +#define STATUS_F3_PKT_LEN_SHIFT 21 + +/* Bit mask for 16 bits SPID_F1_INFO_REG device register */ +#define F1_ENABLED 0x0001 +#define F1_RDY_FOR_DATA_TRANSFER 0x0002 +#define F1_MAX_PKT_SIZE 0x01FC + +/* Bit mask for 16 bits SPID_F2_INFO_REG device register */ +#define F2_ENABLED 0x0001 +#define F2_RDY_FOR_DATA_TRANSFER 0x0002 +#define F2_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 16 bits SPID_F3_INFO_REG device register */ +#define F3_ENABLED 0x0001 +#define F3_RDY_FOR_DATA_TRANSFER 0x0002 +#define F3_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */ +#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD + +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 + +#define SPI_MAX_PKT_LEN (2048*4) + +/* Misc defines */ +#define SPI_FUNC_0 0 +#define SPI_FUNC_1 1 +#define SPI_FUNC_2 2 +#define SPI_FUNC_3 3 + +#define WAIT_F2RXFIFORDY 100 +#define WAIT_F2RXFIFORDY_DELAY 20 + +#endif /* _SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h new file mode 100644 index 000000000000..f7404be99b0e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h @@ -0,0 +1,95 @@ +/* + * TRX image file header format. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: trxhdr.h 520026 2014-12-10 01:29:40Z $ + */ + +#ifndef _TRX_HDR_H +#define _TRX_HDR_H + +#include + +#define TRX_MAGIC 0x30524448 /* "HDR0" */ +#define TRX_MAX_LEN 0x3B0000 /* Max length */ +#define TRX_NO_HEADER 1 /* Do not write TRX header */ +#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */ +#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */ +#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */ +#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */ +#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */ + +#define TRX_V1 1 +#define TRX_V1_MAX_OFFSETS 3 /* V1: Max number of individual files */ + +#ifndef BCMTRXV2 +#define TRX_VERSION TRX_V1 /* Version 1 */ +#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS +#endif + +/* BMAC Host driver/application like bcmdl need to support both Ver 1 as well as + * Ver 2 of trx header. To make it generic, the trx_header structure is modified + * as below where size of "offsets" field will vary as per the TRX version. + * Currently, BMAC host driver and bcmdl are modified to support TRXV2 as well. + * To make sure other applications like "dhdl", which are yet to be enhanced to support + * TRXV2, are not broken, the new macro and structure definitions take effect only when BCMTRXV2 + * is defined. + */ +struct trx_header { + uint32 magic; /* "HDR0" */ + uint32 len; /* Length of file including header */ + uint32 crc32; /* 32-bit CRC from flag_version to end of file */ + uint32 flag_version; /* 0:15 flags, 16:31 version */ +#ifndef BCMTRXV2 + uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */ +#else + uint32 offsets[1]; /* Offsets of partitions from start of header */ +#endif +}; + +#ifdef BCMTRXV2 +#define TRX_VERSION TRX_V2 /* Version 2 */ +#define TRX_MAX_OFFSET TRX_V2_MAX_OFFSETS + +#define TRX_V2 2 +/* V2: Max number of individual files + * To support SDR signature + Config data region + */ +#define TRX_V2_MAX_OFFSETS 5 +#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32)) +#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32)) +#define TRX_VER(trx) ((trx)->flag_version>>16) +#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1) +#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2) +/* For V2, return the V2 header size; otherwise, return the V1 size */ +#define SIZEOF_TRX(trx) (ISTRX_V2(trx) ? 
SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1) +#else +#define SIZEOF_TRX(trx) (sizeof(struct trx_header)) +#endif /* BCMTRXV2 */ + +/* Compatibility */ +typedef struct trx_header TRXHDR, *PTRXHDR; + +#endif /* _TRX_HDR_H */ diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h new file mode 100644 index 000000000000..0e110a1908ed --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/typedefs.h @@ -0,0 +1,339 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: typedefs.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _TYPEDEFS_H_ +#define _TYPEDEFS_H_ + +#ifdef SITE_TYPEDEFS + +/* + * Define SITE_TYPEDEFS in the compile to include a site-specific + * typedef file "site_typedefs.h". + * + * If SITE_TYPEDEFS is not defined, then the code section below makes + * inferences about the compile environment based on defined symbols and + * possibly compiler pragmas. + * + * Following these two sections is the Default Typedefs section. + * This section is only processed if USE_TYPEDEF_DEFAULTS is + * defined. This section has a default set of typedefs and a few + * preprocessor symbols (TRUE, FALSE, NULL, ...). + */ + +#include "site_typedefs.h" + +#else + +/* + * Infer the compile environment based on preprocessor symbols and pragmas. + * Override type definitions as needed, and include configuration-dependent + * header files to define types. + */ + +#ifdef __cplusplus + +#define TYPEDEF_BOOL +#ifndef FALSE +#define FALSE false +#endif +#ifndef TRUE +#define TRUE true +#endif + +#else /* ! __cplusplus */ + + +#endif /* ! __cplusplus */ + +#if defined(__LP64__) +#define TYPEDEF_UINTPTR +typedef unsigned long long int uintptr; +#endif + + + + + +#if defined(_NEED_SIZE_T_) +typedef long unsigned int size_t; +#endif + + + + + +#if defined(__sparc__) +#define TYPEDEF_ULONG +#endif + +/* + * If this is either a Linux hybrid build or the per-port code of a hybrid build + * then use the Linux header files to get some of the typedefs. Otherwise, define + * them entirely in this file. We can't always define the types because we get + * a duplicate typedef error; there is no way to "undefine" a typedef. + * We know when it's per-port code because each file defines LINUX_PORT at the top. 
+ */ +#define TYPEDEF_UINT +#ifndef TARGETENV_android +#define TYPEDEF_USHORT +#define TYPEDEF_ULONG +#endif /* TARGETENV_android */ +#ifdef __KERNEL__ +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)) +#define TYPEDEF_BOOL +#endif /* >= 2.6.19 */ +/* special detection for 2.6.18-128.7.1.0.1.el5 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)) +#include +#ifdef noinline_for_stack +#define TYPEDEF_BOOL +#endif +#endif /* == 2.6.18 */ +#endif /* __KERNEL__ */ + + +/* Do not support the (u)int64 types with strict ansi for GNU C */ +#if defined(__GNUC__) && defined(__STRICT_ANSI__) +#define TYPEDEF_INT64 +#define TYPEDEF_UINT64 +#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */ + +/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode + * for signed or unsigned + */ +#if defined(__ICL) + +#define TYPEDEF_INT64 + +#if defined(__STDC__) +#define TYPEDEF_UINT64 +#endif + +#endif /* __ICL */ + +#if !defined(__DJGPP__) + +/* pick up ushort & uint from standard types.h */ +#if defined(__KERNEL__) + +/* See note above */ +#include /* sys/types.h and linux/types.h are oil and water */ + +#else + +#include + +#endif /* linux && __KERNEL__ */ + +#endif + + +/* use the default typedefs in the next section of this file */ +#define USE_TYPEDEF_DEFAULTS + +#endif /* SITE_TYPEDEFS */ + + +/* + * Default Typedefs + */ + +#ifdef USE_TYPEDEF_DEFAULTS +#undef USE_TYPEDEF_DEFAULTS + +#ifndef TYPEDEF_BOOL +typedef /* @abstract@ */ unsigned char bool; +#endif /* endif TYPEDEF_BOOL */ + +/* define uchar, ushort, uint, ulong */ + +#ifndef TYPEDEF_UCHAR +typedef unsigned char uchar; +#endif + +#ifndef TYPEDEF_USHORT +typedef unsigned short ushort; +#endif + +#ifndef TYPEDEF_UINT +typedef unsigned int uint; +#endif + +#ifndef TYPEDEF_ULONG +typedef unsigned long ulong; +#endif + +/* define [u]int8/16/32/64, uintptr */ + +#ifndef TYPEDEF_UINT8 +typedef unsigned char uint8; +#endif + +#ifndef TYPEDEF_UINT16 +typedef unsigned short uint16; +#endif + +#ifndef TYPEDEF_UINT32 +typedef unsigned int uint32; +#endif + +#ifndef TYPEDEF_UINT64 +typedef unsigned long long uint64; +#endif + +#ifndef TYPEDEF_UINTPTR +typedef unsigned int uintptr; +#endif + +#ifndef TYPEDEF_INT8 +typedef signed char int8; +#endif + +#ifndef TYPEDEF_INT16 +typedef signed short int16; +#endif + +#ifndef TYPEDEF_INT32 +typedef signed int int32; +#endif + +#ifndef TYPEDEF_INT64 +typedef signed long long int64; +#endif + +/* define float32/64, float_t */ + +#ifndef TYPEDEF_FLOAT32 +typedef float float32; +#endif + +#ifndef TYPEDEF_FLOAT64 +typedef double float64; +#endif + +/* + * abstracted floating point type allows for compile time selection of + * single or double precision arithmetic. Compiling with -DFLOAT32 + * selects single precision; the default is double precision. + */ + +#ifndef TYPEDEF_FLOAT_T + +#if defined(FLOAT32) +typedef float32 float_t; +#else /* default to double precision floating point */ +typedef float64 float_t; +#endif + +#endif /* TYPEDEF_FLOAT_T */ + +/* define macro values */ + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 /* TRUE */ +#endif + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef OFF +#define OFF 0 +#endif + +#ifndef ON +#define ON 1 /* ON = 1 */ +#endif + +#define AUTO (-1) /* Auto = -1 */ + +/* define PTRSZ, INLINE */ + +#ifndef PTRSZ +#define PTRSZ sizeof(char*) +#endif + + +/* Detect compiler type. 
*/ +#if defined(__GNUC__) || defined(__lint) + #define BWL_COMPILER_GNU +#elif defined(__CC_ARM) && __CC_ARM + #define BWL_COMPILER_ARMCC +#else + #error "Unknown compiler!" +#endif + + +#ifndef INLINE + #if defined(BWL_COMPILER_MICROSOFT) + #define INLINE __inline + #elif defined(BWL_COMPILER_GNU) + #define INLINE __inline__ + #elif defined(BWL_COMPILER_ARMCC) + #define INLINE __inline + #else + #define INLINE + #endif +#endif /* INLINE */ + +#undef TYPEDEF_BOOL +#undef TYPEDEF_UCHAR +#undef TYPEDEF_USHORT +#undef TYPEDEF_UINT +#undef TYPEDEF_ULONG +#undef TYPEDEF_UINT8 +#undef TYPEDEF_UINT16 +#undef TYPEDEF_UINT32 +#undef TYPEDEF_UINT64 +#undef TYPEDEF_UINTPTR +#undef TYPEDEF_INT8 +#undef TYPEDEF_INT16 +#undef TYPEDEF_INT32 +#undef TYPEDEF_INT64 +#undef TYPEDEF_FLOAT32 +#undef TYPEDEF_FLOAT64 +#undef TYPEDEF_FLOAT_T + +#endif /* USE_TYPEDEF_DEFAULTS */ + +/* Suppress unused parameter warning */ +#define UNUSED_PARAMETER(x) (void)(x) + +/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */ +#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) + +/* + * Including the bcmdefs.h here, to make sure everyone including typedefs.h + * gets this automatically +*/ +#include +#endif /* _TYPEDEFS_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h new file mode 100644 index 000000000000..0d5b434198ee --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h @@ -0,0 +1,350 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wlfc_proto.h 542895 2015-03-22 14:13:12Z $ + * + */ + +/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */ + + +#ifndef __wlfc_proto_definitions_h__ +#define __wlfc_proto_definitions_h__ + + /* Use TLV to convey WLFC information. 
+ --------------------------------------------------------------------------- + | Type | Len | value | Description + --------------------------------------------------------------------------- + | 1 | 1 | (handle) | MAC OPEN + --------------------------------------------------------------------------- + | 2 | 1 | (handle) | MAC CLOSE + --------------------------------------------------------------------------- + | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn + --------------------------------------------------------------------------- + | 4 | 4+ | see pkttag comments | TXSTATUS + | | 12 | TX status & timestamps | Present only when pkt timestamp is enabled + --------------------------------------------------------------------------- + | 5 | 4 | see pkttag comments | PKKTTAG [host->firmware] + --------------------------------------------------------------------------- + | 6 | 8 | (handle, ifid, MAC) | MAC ADD + --------------------------------------------------------------------------- + | 7 | 8 | (handle, ifid, MAC) | MAC DEL + --------------------------------------------------------------------------- + | 8 | 1 | (rssi) | RSSI - RSSI value for the packet. + --------------------------------------------------------------------------- + | 9 | 1 | (interface ID) | Interface OPEN + --------------------------------------------------------------------------- + | 10 | 1 | (interface ID) | Interface CLOSE + --------------------------------------------------------------------------- + | 11 | 8 | fifo credit returns map | FIFO credits back to the host + | | | | + | | | | -------------------------------------- + | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim | + | | | | -------------------------------------- + | | | | + --------------------------------------------------------------------------- + | 12 | 2 | MAC handle, | Host provides a bitmap of pending + | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn. + | | | | [host->firmware] + --------------------------------------------------------------------------- + | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific + | | | | MAC destination. + --------------------------------------------------------------------------- + | 15 | 12 | (pkttag, timestamps) | Send TX timestamp at reception from host + --------------------------------------------------------------------------- + | 16 | 12 | (pkttag, timestamps) | Send WLAN RX timestamp along with RX frame + --------------------------------------------------------------------------- + | 255 | N/A | N/A | FILLER - This is a special type + | | | | that has no length or value. + | | | | Typically used for padding. 
+ --------------------------------------------------------------------------- + */ + +#define WLFC_CTL_TYPE_MAC_OPEN 1 +#define WLFC_CTL_TYPE_MAC_CLOSE 2 +#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3 +#define WLFC_CTL_TYPE_TXSTATUS 4 +#define WLFC_CTL_TYPE_PKTTAG 5 /** host<->dongle */ + +#define WLFC_CTL_TYPE_MACDESC_ADD 6 +#define WLFC_CTL_TYPE_MACDESC_DEL 7 +#define WLFC_CTL_TYPE_RSSI 8 + +#define WLFC_CTL_TYPE_INTERFACE_OPEN 9 +#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10 + +#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11 + +#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 /** host->dongle */ +#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13 +#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14 + +#define WLFC_CTL_TYPE_TX_ENTRY_STAMP 15 +#define WLFC_CTL_TYPE_RX_STAMP 16 +#define WLFC_CTL_TYPE_TX_STATUS_STAMP 17 /** obsolete */ + +#define WLFC_CTL_TYPE_TRANS_ID 18 +#define WLFC_CTL_TYPE_COMP_TXSTATUS 19 + +#define WLFC_CTL_TYPE_TID_OPEN 20 +#define WLFC_CTL_TYPE_TID_CLOSE 21 + + +#define WLFC_CTL_TYPE_FILLER 255 + +#define WLFC_CTL_VALUE_LEN_MACDESC 8 /** handle, interface, MAC */ + +#define WLFC_CTL_VALUE_LEN_MAC 1 /** MAC-handle */ +#define WLFC_CTL_VALUE_LEN_RSSI 1 + +#define WLFC_CTL_VALUE_LEN_INTERFACE 1 +#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2 + +#define WLFC_CTL_VALUE_LEN_TXSTATUS 4 +#define WLFC_CTL_VALUE_LEN_PKTTAG 4 +#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12 /** 4-byte rate info + 2 TSF */ + +#define WLFC_CTL_VALUE_LEN_SEQ 2 + +/* The high bits of ratespec report in timestamp are used for various status */ +#define WLFC_TSFLAGS_RX_RETRY (1 << 31) +#define WLFC_TSFLAGS_PM_ENABLED (1 << 30) +#define WLFC_TSFLAGS_MASK (WLFC_TSFLAGS_RX_RETRY | WLFC_TSFLAGS_PM_ENABLED) + +/* enough space to host all 4 ACs, bc/mc and atim fifo credit */ +#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6 + +#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */ +#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */ + + +#define WLFC_PKTFLAG_PKTFROMHOST 0x01 +#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 + +#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */ +#define WL_TXSTATUS_STATUS_SHIFT 24 + +#define WL_TXSTATUS_SET_STATUS(x, status) ((x) = \ + ((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \ + (((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT)) +#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \ + WL_TXSTATUS_STATUS_MASK) + +/** + * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the + * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The + * firmware accepts a packet when the generation matches; on reset (startup) both "current" and + * "expected" are set to 0. 
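Editorial sketch, not part of the patch: the TLV layout documented in the table above maps onto a simple walk over one signal buffer. The function name is made up; `buf`/`buf_len` are assumed to describe a signal buffer handed up by the bus layer, and only a few of the type codes are shown in the dispatch.

static int wlfc_walk_signals(const uint8 *buf, uint buf_len)
{
	uint i = 0;

	while (i < buf_len) {
		uint8 type = buf[i];

		/* FILLER is a bare type byte: no length, no value */
		if (type == WLFC_CTL_TYPE_FILLER) {
			i++;
			continue;
		}
		/* everything else is <type><len><value...> */
		if ((i + 2 > buf_len) || (i + 2 + buf[i + 1] > buf_len))
			return -1;	/* truncated TLV */

		switch (type) {
		case WLFC_CTL_TYPE_MAC_OPEN:		/* value: 1-byte MAC handle */
		case WLFC_CTL_TYPE_TXSTATUS:		/* value: 4-byte pkttag */
		case WLFC_CTL_TYPE_FIFO_CREDITBACK:	/* value: per-FIFO credit counts */
		default:
			/* dispatch on type; value starts at &buf[i + 2], buf[i + 1] bytes long */
			break;
		}
		i += 2 + buf[i + 1];
	}
	return 0;
}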
+ */ +#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */ +#define WL_TXSTATUS_GENERATION_SHIFT 31 + +#define WL_TXSTATUS_SET_GENERATION(x, gen) ((x) = \ + ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \ + (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT)) + +#define WL_TXSTATUS_GET_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \ + WL_TXSTATUS_GENERATION_MASK) + +#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */ +#define WL_TXSTATUS_FLAGS_SHIFT 27 + +#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT)) +#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \ + WL_TXSTATUS_FLAGS_MASK) + +#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */ +#define WL_TXSTATUS_FIFO_SHIFT 24 + +#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT)) +#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK) + +#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */ +#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \ + ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num)) +#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK) + +#define WL_TXSTATUS_HSLOT_MASK 0xffff /* allow 16 bits */ +#define WL_TXSTATUS_HSLOT_SHIFT 8 + +#define WL_TXSTATUS_SET_HSLOT(x, hslot) ((x) = \ + ((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \ + (((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT)) +#define WL_TXSTATUS_GET_HSLOT(x) (((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \ + WL_TXSTATUS_HSLOT_MASK) + +#define WL_TXSTATUS_FREERUNCTR_MASK 0xff /* allow 8 bits */ + +#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr) ((x) = \ + ((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \ + ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK)) +#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK) + +/* Seq number part of AMSDU */ +#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_AMSDU_SHIFT 14 +#define WL_SEQ_SET_AMSDU(x, val) ((x) = \ + ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \ + (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) +#define WL_SEQ_GET_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ + WL_SEQ_AMSDU_MASK) + +/* Seq number is valid coming from FW */ +#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMFW_SHIFT 13 +#define WL_SEQ_SET_FROMFW(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \ + (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT)) +#define WL_SEQ_GET_FROMFW(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & \ + WL_SEQ_FROMFW_MASK) + +/** + * Proptxstatus related. 
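Editorial sketch, not part of the patch: how the accessor macros above pack and unpack the 32-bit pkttag. The helper names are made up and the field values arbitrary; the host-side composer uses the hanger slot, free-running counter, generation bit and flags, while the status read applies to a pkttag carried back in a TXSTATUS TLV.

static uint32 wlfc_build_host_pkttag(uint16 hslot, uint8 freerun, uint8 gen)
{
	uint32 pkttag = 0;

	WL_TXSTATUS_SET_HSLOT(pkttag, hslot);		/* bits 23..8 */
	WL_TXSTATUS_SET_FREERUNCTR(pkttag, freerun);	/* bits 7..0 */
	WL_TXSTATUS_SET_GENERATION(pkttag, gen);	/* bit 31, toggles on suppress */
	WL_TXSTATUS_SET_FLAGS(pkttag, WLFC_PKTFLAG_PKTFROMHOST);

	return pkttag;
}

/* matching reads on a pkttag returned in a TXSTATUS TLV */
static void wlfc_read_txstatus_tag(uint32 pkttag, uint8 *status, uint16 *hslot)
{
	*status = WL_TXSTATUS_GET_STATUS(pkttag);	/* WLFC_CTL_PKTFLAG_* value */
	*hslot = WL_TXSTATUS_GET_HSLOT(pkttag);
}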
+ * + * Pkt from bus layer (DHD for SDIO and pciedev for PCIE) + * is re-using seq number previously suppressed + * so FW should not assign new one + */ +#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMDRV_SHIFT 12 +#define WL_SEQ_SET_FROMDRV(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \ + (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT)) +#define WL_SEQ_GET_FROMDRV(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \ + WL_SEQ_FROMDRV_MASK) + +#define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */ +#define WL_SEQ_NUM_SHIFT 0 +#define WL_SEQ_SET_NUM(x, val) ((x) = \ + ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \ + (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT)) +#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \ + WL_SEQ_NUM_MASK) + +#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \ + (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \ + (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) + +/* 32 STA should be enough??, 6 bits; Must be power of 2 */ +#define WLFC_MAC_DESC_TABLE_SIZE 32 +#define WLFC_MAX_IFNUM 16 +#define WLFC_MAC_DESC_ID_INVALID 0xff + +/* b[7:5] -reuse guard, b[4:0] -value */ +#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f) + +#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \ + (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \ + ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + + +#define WLFC_MAX_PENDING_DATALEN 120 + +/* host is free to discard the packet */ +#define WLFC_CTL_PKTFLAG_DISCARD 0 +/* D11 suppressed a packet */ +#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1 +/* WL firmware suppressed a packet because MAC is + already in PSMode (short time window) +*/ +#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2 +/* Firmware tossed this packet */ +#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3 +/* Firmware tossed after retries */ +#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4 +/* Firmware wrongly reported suppressed previously,now fixing to acked */ +#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5 + +#define WLFC_D11_STATUS_INTERPRET(txs) \ + ((txs)->status.was_acked ? WLFC_CTL_PKTFLAG_DISCARD : \ + (TXS_SUPR_MAGG_DONE((txs)->status.suppr_ind) ? 
\ + WLFC_CTL_PKTFLAG_DISCARD_NOACK : WLFC_CTL_PKTFLAG_D11SUPPRESS)) + + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_DBGMESG(x) printf x +/* wlfc-breadcrumb */ +#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \ + {printf("WLFC: %s():%d:caller:%p\n", \ + __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0) +#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \ + banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0) +#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s)) +#else +#define WLFC_DBGMESG(x) +#define WLFC_BREADCRUMB(x) +#define WLFC_PRINTMAC(banner, ea) +#define WLFC_WHEREIS(s) +#endif + +/* AMPDU host reorder packet flags */ +#define WLHOST_REORDERDATA_MAXFLOWS 256 +#define WLHOST_REORDERDATA_LEN 10 +#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */ + +#define WLHOST_REORDERDATA_FLOWID_OFFSET 0 +#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2 +#define WLHOST_REORDERDATA_FLAGS_OFFSET 4 +#define WLHOST_REORDERDATA_CURIDX_OFFSET 6 +#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8 + +#define WLHOST_REORDERDATA_DEL_FLOW 0x01 +#define WLHOST_REORDERDATA_FLUSH_ALL 0x02 +#define WLHOST_REORDERDATA_CURIDX_VALID 0x04 +#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08 +#define WLHOST_REORDERDATA_NEW_HOLE 0x10 + +/* transaction id data len byte 0: rsvd, byte 1: seqnumber, byte 2-5 will be used for timestampe */ +#define WLFC_CTL_TRANS_ID_LEN 6 +#define WLFC_TYPE_TRANS_ID_LEN 6 + +#define WLFC_MODE_HANGER 1 /* use hanger */ +#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */ +#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2)) + +#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */ +#define WLFC_SET_AFQ(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_AFQ_SHIFT)) +#define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1) + +#define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */ +#define WLFC_SET_REUSESEQ(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT)) +#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1) + +#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */ +#define WLFC_SET_REORDERSUPP(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT)) +#define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1) + +#endif /* __wlfc_proto_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h new file mode 100644 index 000000000000..4447ca0107b5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h @@ -0,0 +1,8065 @@ +/* + * Custom OID/ioctl definitions for + * + * + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. 
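Before moving on to wlioctl.h, an editorial sketch (not part of the patch) of the WLFC mode word defined at the end of wlfc_proto.h above: the individual feature bits fold into a single value with the SET accessors. The helper name is made up.

static uint32 wlfc_build_mode(bool afq, bool reuse_seq, bool reorder_supp)
{
	uint32 mode = 0;

	WLFC_SET_AFQ(mode, afq ? 1 : 0);			/* at-firmware-queue mode */
	WLFC_SET_REUSESEQ(mode, reuse_seq ? 1 : 0);		/* reuse suppressed seq numbers */
	WLFC_SET_REORDERSUPP(mode, reorder_supp ? 1 : 0);	/* host reorder of suppressed pkts */

	return mode;
}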
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * <> + * + * $Id: wlioctl.h 609280 2016-01-01 06:31:38Z $ + */ + +#ifndef _wlioctl_h_ +#define _wlioctl_h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + + + + +typedef struct { + uint32 num; + chanspec_t list[1]; +} chanspec_list_t; + +#define RSN_KCK_LENGTH 16 +#define RSN_KEK_LENGTH 16 + + +#ifndef INTF_NAME_SIZ +#define INTF_NAME_SIZ 16 +#endif + +/* Used to send ioctls over the transport pipe */ +typedef struct remote_ioctl { + cdc_ioctl_t msg; + uint32 data_len; + char intf_name[INTF_NAME_SIZ]; +} rem_ioctl_t; +#define REMOTE_SIZE sizeof(rem_ioctl_t) + + +/* DFS Forced param */ +typedef struct wl_dfs_forced_params { + chanspec_t chspec; + uint16 version; + chanspec_list_t chspec_list; +} wl_dfs_forced_t; + +#define DFS_PREFCHANLIST_VER 0x01 +#define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list) +#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \ + (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list)) +#define WL_DFS_FORCED_PARAMS_MAX_SIZE \ + WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t)) + +/* association decision information */ +typedef struct { + bool assoc_approved; /**< (re)association approved */ + uint16 reject_reason; /**< reason code for rejecting association */ + struct ether_addr da; + int64 sys_time; /**< current system time */ +} assoc_decision_t; + +#define DFS_SCAN_S_IDLE -1 +#define DFS_SCAN_S_RADAR_FREE 0 +#define DFS_SCAN_S_RADAR_FOUND 1 +#define DFS_SCAN_S_INPROGESS 2 +#define DFS_SCAN_S_SCAN_ABORTED 3 +#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4 +#define DFS_SCAN_S_MAX 5 + + +#define ACTION_FRAME_SIZE 1800 + +typedef struct wl_action_frame { + struct ether_addr da; + uint16 len; + uint32 packetId; + uint8 data[ACTION_FRAME_SIZE]; +} wl_action_frame_t; + +#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame) + +typedef struct ssid_info +{ + uint8 ssid_len; /**< the length of SSID */ + uint8 ssid[32]; /**< SSID string */ +} ssid_info_t; + +typedef struct wl_af_params { + uint32 channel; + int32 dwell_time; + struct ether_addr BSSID; + wl_action_frame_t action_frame; +} wl_af_params_t; + +#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params) + +#define MFP_TEST_FLAG_NORMAL 0 +#define MFP_TEST_FLAG_ANY_KEY 1 +typedef struct wl_sa_query { + uint32 flag; + uint8 action; + uint16 id; + struct 
ether_addr da; +} wl_sa_query_t; + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + + +/* Flags for OBSS IOVAR Parameters */ +#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01) +#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD (0x02) +#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04) +#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD (0x08) +#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD (0x10) +#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD (0x20) +#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD (0x40) + +/* OBSS IOVAR Version information */ +#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1 +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 obss_bwsw_activity_cfm_count_cfg; /* configurable count in + * seconds before we confirm that OBSS is present and + * dynamically activate dynamic bwswitch. + */ + uint8 obss_bwsw_no_activity_cfm_count_cfg; /* configurable count in + * seconds before we confirm that OBSS is GONE and + * dynamically start pseudo upgrade. If in pseudo sense time, we + * will see OBSS, [means that, we false detected that OBSS-is-gone + * in watchdog] this count will be incremented in steps of + * obss_bwsw_no_activity_cfm_count_incr_cfg for confirming OBSS + * detection again. Note that, at present, max 30seconds is + * allowed like this. [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT] + */ + uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above + */ + uint16 obss_bwsw_pseudo_sense_count_cfg; /* number of msecs/cnt to be in + * pseudo state. This is used to sense/measure the stats from lq. + */ + uint8 obss_bwsw_rx_crs_threshold_cfg; /* RX CRS default threshold */ + uint8 obss_bwsw_dur_thres; /* OBSS dyn bwsw trigger/RX CRS Sec */ + uint8 obss_bwsw_txop_threshold_cfg; /* TXOP default threshold */ +} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 version; /**< version field */ + uint32 config_mask; + uint32 reset_mask; + wlc_prot_dynbwsw_config_t config_params; +} BWL_POST_PACKED_STRUCT obss_config_params_t; + + +/* bsscfg type */ +typedef enum bsscfg_type_t { + BSSCFG_TYPE_GENERIC = 0, /**< default */ + BSSCFG_TYPE_P2P = 1, /**< The BSS is for p2p link */ + BSSCFG_TYPE_BTA = 2, + BSSCFG_TYPE_TDLS = 4, + BSSCFG_TYPE_AWDL = 5, + BSSCFG_TYPE_PROXD = 6, + BSSCFG_TYPE_NAN = 7, + BSSCFG_TYPE_MAX +} bsscfg_type_t; + +/* bsscfg subtype */ +enum { + BSSCFG_GENERIC_STA = 1, /* GENERIC */ + BSSCFG_GENERIC_AP = 2, /* GENERIC */ + BSSCFG_P2P_GC = 3, /* P2P */ + BSSCFG_P2P_GO = 4, /* P2P */ + BSSCFG_P2P_DISC = 5, /* P2P */ +}; + +typedef struct wlc_bsscfg_info { + uint32 type; + uint32 subtype; +} wlc_bsscfg_info_t; + + + +/* Legacy structure to help keep backward compatible wl tool and tray app */ + +#define LEGACY_WL_BSS_INFO_VERSION 107 /**< older version of wl_bss_info struct */ + +typedef struct wl_bss_info_107 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + uint8 channel; /**< Channel no. 
*/ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint32 ie_length; /**< byte length of Information Elements */ + /* variable length Information Elements */ +} wl_bss_info_107_t; + +/* + * Per-BSS information structure. + */ + +#define LEGACY2_WL_BSS_INFO_VERSION 108 /**< old version of wl_bss_info struct */ + +/* BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_108 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint32 nbss_cap; /**< 802.11N BSS Capabilities (based on HT_CAP_*) */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint32 reserved32[1]; /**< Reserved for expansion of BSS properties */ + uint8 flags; /**< flags */ + uint8 reserved[3]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint32 ie_length; /**< byte length of Information Elements */ + /* Add new fields here */ + /* variable length Information Elements */ +} wl_bss_info_108_t; + +#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */ + +/* BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 
802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ + /* Add new fields here */ + /* variable length Information Elements */ +} wl_bss_info_t; + +#define WL_GSCAN_BSS_INFO_VERSION 1 /* current version of wl_gscan_bss_info struct */ +#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t)) + +typedef struct wl_gscan_bss_info { + uint32 timestamp[2]; + wl_bss_info_t info; + /* Do not add any more members below, fixed */ + /* and variable length Information Elements to follow */ +} wl_gscan_bss_info_t; + + +typedef struct wl_bsscfg { + uint32 bsscfg_idx; + uint32 wsec; + uint32 WPA_auth; + uint32 wsec_index; + uint32 associated; + uint32 BSS; + uint32 phytest_on; + struct ether_addr prev_BSSID; + struct ether_addr BSSID; + uint32 targetbss_wpa2_flags; + uint32 assoc_type; + uint32 assoc_state; +} wl_bsscfg_t; + +typedef struct wl_if_add { + uint32 bsscfg_flags; + uint32 if_flags; + uint32 ap; + struct ether_addr mac_addr; + uint32 wlc_index; +} wl_if_add_t; + +typedef struct wl_bss_config { + uint32 atim_window; + uint32 beacon_period; + uint32 chanspec; +} wl_bss_config_t; + +#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /**< User application will randomly select + * radar channel. + */ + +#define DLOAD_HANDLER_VER 1 /**< Downloader version */ +#define DLOAD_FLAG_VER_MASK 0xf000 /**< Downloader version mask */ +#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */ + +#define DL_CRC_NOT_INUSE 0x0001 +#define DL_BEGIN 0x0002 +#define DL_END 0x0004 + +/* generic download types & flags */ +enum { + DL_TYPE_UCODE = 1, + DL_TYPE_CLM = 2 +}; + +/* ucode type values */ +enum { + UCODE_FW, + INIT_VALS, + BS_INIT_VALS +}; + +struct wl_dload_data { + uint16 flag; + uint16 dload_type; + uint32 len; + uint32 crc; + uint8 data[1]; +}; +typedef struct wl_dload_data wl_dload_data_t; + +struct wl_ucode_info { + uint32 ucode_type; + uint32 num_chunks; + uint32 chunk_len; + uint32 chunk_num; + uint8 data_chunk[1]; +}; +typedef struct wl_ucode_info wl_ucode_info_t; + +struct wl_clm_dload_info { + uint32 ds_id; + uint32 clm_total_len; + uint32 num_chunks; + uint32 chunk_len; + uint32 chunk_offset; + uint8 data_chunk[1]; +}; +typedef struct wl_clm_dload_info wl_clm_dload_info_t; + +typedef struct wlc_ssid { + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; +} wlc_ssid_t; + +typedef struct wlc_ssid_ext { + bool hidden; + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; +} wlc_ssid_ext_t; + + +#define MAX_PREFERRED_AP_NUM 5 +typedef struct wlc_fastssidinfo { + uint32 SSID_channel[MAX_PREFERRED_AP_NUM]; + wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM]; +} wlc_fastssidinfo_t; + +#ifdef CUSTOMER_HW_31_1 + +#define AP_NORM 0 +#define AP_STEALTH 1 +#define STREET_PASS_AP 2 + +#define NSC_MAX_TGT_SSID 20 +typedef struct nsc_ssid_entry_list { + wlc_ssid_t ssid_info; + int ssid_type; +} nsc_ssid_entry_list_t; + +typedef struct nsc_ssid_list { + uint32 num_entries; /* N wants 150 */ + nsc_ssid_entry_list_t ssid_entry[1]; +} nsc_ssid_list_t; + +#define NSC_TGT_SSID_BUFSZ (sizeof(nsc_ssid_entry_list_t) * \ + (NSC_MAX_TGT_SSID - 1) + sizeof(nsc_ssid_list_t)) + +/* Default values from N */ +#define NSC_SCPATT_ARRSZ 32 + +/* scan types */ 
+#define UNI_SCAN 0 +#define SP_SCAN_ACTIVE 1 +#define SP_SCAN_PASSIVE 2 +#define DOZE 3 + +/* what we found */ +typedef struct nsc_scan_results { + wlc_ssid_t ssid; + struct ether_addr mac; + int scantype; + uint16 channel; +} nsc_scan_results_t; + +typedef BWL_PRE_PACKED_STRUCT struct nsc_af_body { + uint8 type; /* should be 0x7f */ + uint8 oui[DOT11_OUI_LEN]; /* just like it says */ + uint8 subtype; + uint8 ielen; /* */ + uint8 data[1]; /* variable */ +} BWL_POST_PACKED_STRUCT nsc_af_body_t; + +typedef BWL_PRE_PACKED_STRUCT struct nsc_sdlist { + uint8 scantype; + uint16 duration; + uint16 channel; /* SP only */ + uint8 ssid_index; /* SP only */ + uint16 rate; /* SP only */ +} BWL_POST_PACKED_STRUCT nsc_sdlist_t; + +typedef struct nsc_scandes { + uint32 num_entries; /* number of list entries */ + nsc_sdlist_t sdlist[1]; /* variable */ +} nsc_scandes_t; + +#define NSC_MAX_SDLIST_ENTRIES 8 +#define NSC_SDDESC_BUFSZ (sizeof(nsc_sdlist_t) * \ + (NSC_MAX_SDLIST_ENTRIES - 1) + sizeof(nsc_scandes_t)) + +#define SCAN_ARR_END (NSC_MAX_SDLIST_ENTRIES) +#endif /* CUSTOMER_HW_31_1 */ + +typedef BWL_PRE_PACKED_STRUCT struct wnm_url { + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT wnm_url_t; + +#define WNM_BSS_SELECT_TYPE_RSSI 0 +#define WNM_BSS_SELECT_TYPE_CU 1 + +#define WNM_BSSLOAD_MONITOR_VERSION 1 +typedef struct wnm_bssload_monitor_cfg { + uint8 version; + uint8 band; + uint8 duration; /* duration between 1 to 20sec */ +} wnm_bssload_monitor_cfg_t; + +#define BSS_MAXTABLE_SIZE 10 +#define WNM_BSS_SELECT_FACTOR_VERSION 1 +typedef struct wnm_bss_select_factor_params { + uint8 low; + uint8 high; + uint8 factor; + uint8 pad; +} wnm_bss_select_factor_params_t; + +typedef struct wnm_bss_select_factor_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 pad; + uint16 count; + wnm_bss_select_factor_params_t params[1]; +} wnm_bss_select_factor_cfg_t; + +#define WNM_BSS_SELECT_WEIGHT_VERSION 1 +typedef struct wnm_bss_select_weight_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 weight; /* weightage for each type between 0 to 100 */ +} wnm_bss_select_weight_cfg_t; + +#define WNM_ROAM_TRIGGER_VERSION 1 +typedef struct wnm_roam_trigger_cfg { + uint8 version; + uint8 band; + uint16 type; + int16 trigger; /* trigger for each type in new roam algorithm */ +} wnm_roam_trigger_cfg_t; + +typedef struct chan_scandata { + uint8 txpower; + uint8 pad; + chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */ + uint32 channel_mintime; + uint32 channel_maxtime; +} chan_scandata_t; + +typedef enum wl_scan_type { + EXTDSCAN_FOREGROUND_SCAN, + EXTDSCAN_BACKGROUND_SCAN, + EXTDSCAN_FORCEDBACKGROUND_SCAN +} wl_scan_type_t; + +#define WLC_EXTDSCAN_MAX_SSID 5 + +typedef struct wl_extdscan_params { + int8 nprobes; /**< 0, passive, otherwise active */ + int8 split_scan; /**< split scan */ + int8 band; /**< band */ + int8 pad; + wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */ + uint32 tx_rate; /**< in 500ksec units */ + wl_scan_type_t scan_type; /**< enum */ + int32 channel_num; + chan_scandata_t channel_list[1]; /**< list of chandata structs */ +} wl_extdscan_params_t; + +#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t)) + +#define WL_SCAN_PARAMS_SSID_MAX 10 + +typedef struct wl_scan_params { + wlc_ssid_t ssid; /**< default: {0, ""} */ + struct ether_addr bssid; /**< default: bcast */ + int8 bss_type; /**< default: any, + * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT + */ + uint8 scan_type; /**< flags, 0 use default */ + int32 nprobes; /**< 
-1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for + * active scanning + */ + int32 passive_time; /**< -1 use default, dwell time per channel + * for passive scanning + */ + int32 home_time; /**< -1 use default, dwell time for the home channel + * between channel scans + */ + int32 channel_num; /**< count of channels and ssids that follow + * + * low half is count of channels in channel_list, 0 + * means default (use all available channels) + * + * high half is entries in wlc_ssid_t array that + * follows channel_list, aligned for int32 (4 bytes) + * meaning an odd channel count implies a 2-byte pad + * between end of channel_list and first ssid + * + * if ssid count is zero, single ssid in the fixed + * parameter portion is assumed, otherwise ssid in + * the fixed portion is ignored + */ + uint16 channel_list[1]; /**< list of chanspecs */ +} wl_scan_params_t; + +/* size of wl_scan_params not including variable length array */ +#define WL_SCAN_PARAMS_FIXED_SIZE 64 +#define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16))) + +#define ISCAN_REQ_VERSION 1 + +/* incremental scan struct */ +typedef struct wl_iscan_params { + uint32 version; + uint16 action; + uint16 scan_duration; + wl_scan_params_t params; +} wl_iscan_params_t; + +/* 3 fields + size of wl_scan_params, not including variable length array */ +#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) + +typedef struct wl_scan_results { + uint32 buflen; + uint32 version; + uint32 count; + wl_bss_info_t bss_info[1]; +} wl_scan_results_t; + +/* size of wl_scan_results not including variable length array */ +#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t)) + + +#define ESCAN_REQ_VERSION 1 + +/** event scan reduces amount of SOC memory needed to store scan results */ +typedef struct wl_escan_params { + uint32 version; + uint16 action; + uint16 sync_id; + wl_scan_params_t params; +} wl_escan_params_t; + +#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t)) + +/** event scan reduces amount of SOC memory needed to store scan results */ +typedef struct wl_escan_result { + uint32 buflen; + uint32 version; + uint16 sync_id; + uint16 bss_count; + wl_bss_info_t bss_info[1]; +} wl_escan_result_t; + +#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) + +typedef struct wl_gscan_result { + uint32 buflen; + uint32 version; + wl_gscan_bss_info_t bss_info[1]; +} wl_gscan_result_t; + +#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t)) + +/* incremental scan results struct */ +typedef struct wl_iscan_results { + uint32 status; + wl_scan_results_t results; +} wl_iscan_results_t; + +/* size of wl_iscan_results not including variable length array */ +#define WL_ISCAN_RESULTS_FIXED_SIZE \ + (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results)) + +#define SCANOL_PARAMS_VERSION 1 + +typedef struct scanol_params { + uint32 version; + uint32 flags; /**< offload scanning flags */ + int32 active_time; /**< -1 use default, dwell time per channel for active scanning */ + int32 passive_time; /**< -1 use default, dwell time per channel for passive scanning */ + int32 idle_rest_time; /**< -1 use default, time idle between scan cycle */ + int32 idle_rest_time_multiplier; + int32 active_rest_time; + int32 active_rest_time_multiplier; + int32 
scan_cycle_idle_rest_time; + int32 scan_cycle_idle_rest_multiplier; + int32 scan_cycle_active_rest_time; + int32 scan_cycle_active_rest_multiplier; + int32 max_rest_time; + int32 max_scan_cycles; + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 scan_start_delay; + uint32 nchannels; + uint32 ssid_count; + wlc_ssid_t ssidlist[1]; +} scanol_params_t; + +typedef struct wl_probe_params { + wlc_ssid_t ssid; + struct ether_addr bssid; + struct ether_addr mac; +} wl_probe_params_t; + +#define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */ +typedef struct wl_rateset { + uint32 count; /**< # rates in this set */ + uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ +} wl_rateset_t; + +typedef struct wl_rateset_args { + uint32 count; /**< # rates in this set */ + uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ + uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */ + uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ +} wl_rateset_args_t; + +#define TXBF_RATE_MCS_ALL 4 +#define TXBF_RATE_VHT_ALL 4 +#define TXBF_RATE_OFDM_ALL 8 + +typedef struct wl_txbf_rateset { + uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /**< one for each stream */ + uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /**< one for each stream */ + uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /**< one for each stream */ + uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /**< one for each stream */ + uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /**< bitmap of ofdm rates that enables txbf */ + uint8 txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */ + uint8 txbf_rate_ofdm_cnt; + uint8 txbf_rate_ofdm_cnt_bcm; +} wl_txbf_rateset_t; + +#define OFDM_RATE_MASK 0x0000007f +typedef uint8 ofdm_rates_t; + +typedef struct wl_rates_info { + wl_rateset_t rs_tgt; + uint32 phy_type; + int32 bandtype; + uint8 cck_only; + uint8 rate_mask; + uint8 mcsallow; + uint8 bw; + uint8 txstreams; +} wl_rates_info_t; + +/* uint32 list */ +typedef struct wl_uint32_list { + /* in - # of elements, out - # of entries */ + uint32 count; + /* variable length uint32 list */ + uint32 element[1]; +} wl_uint32_list_t; + +/* used for association with a specific BSSID and chanspec list */ +typedef struct wl_assoc_params { + struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */ + uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid, + * otherwise count of chanspecs in chanspec_list + * AND paired bssids following chanspec_list + * also, chanspec_num has to be set to zero + * for bssid list to be used + */ + int32 chanspec_num; /**< 0: all available channels, + * otherwise count of chanspecs in chanspec_list + */ + chanspec_t chanspec_list[1]; /**< list of chanspecs */ +} wl_assoc_params_t; + +#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list) + +/* used for reassociation/roam to a specific BSSID and channel */ +typedef wl_assoc_params_t wl_reassoc_params_t; +#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + +/* used for association to a specific BSSID and channel */ +typedef wl_assoc_params_t wl_join_assoc_params_t; +#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + +/* used for join with or without a specific bssid and channel list */ +typedef struct wl_join_params { + wlc_ssid_t ssid; + wl_assoc_params_t params; /**< optional field, but it must include the fixed portion + * of the wl_assoc_params_t struct when it does present. 
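Editorial sketch, not part of the patch: sizing and packing a wl_scan_params_t according to the channel_num encoding documented above (low half carries the channel count, high half the SSID count, with the SSID array 4-byte aligned after channel_list). The function name is made up, plain malloc/memset/memcpy stand in for the caller's allocator, and DOT11_BSSTYPE_ANY is the protocol constant referenced in the struct comment.

static wl_scan_params_t *
build_scan_params(const uint16 *chans, uint nchan, const wlc_ssid_t *ssids, uint nssid)
{
	/* channel list padded to a 4-byte boundary, then the extra SSID array */
	uint chan_bytes = (nchan * sizeof(uint16) + 3) & ~3u;
	uint size = WL_SCAN_PARAMS_FIXED_SIZE + chan_bytes + nssid * sizeof(wlc_ssid_t);
	wl_scan_params_t *params = (wl_scan_params_t *)malloc(size);
	uint i;

	if (params == NULL)
		return NULL;
	memset(params, 0, size);

	memset(&params->bssid, 0xff, sizeof(params->bssid));	/* broadcast BSSID */
	params->bss_type = DOT11_BSSTYPE_ANY;
	params->scan_type = 0;					/* 0: driver default */
	params->nprobes = -1;					/* -1: firmware defaults */
	params->active_time = params->passive_time = params->home_time = -1;

	for (i = 0; i < nchan; i++)
		params->channel_list[i] = chans[i];
	memcpy((uint8 *)params + WL_SCAN_PARAMS_FIXED_SIZE + chan_bytes,
	       ssids, nssid * sizeof(wlc_ssid_t));

	/* low half of channel_num: channel count; high half: SSID count */
	params->channel_num = (int32)((nssid << 16) | (nchan & 0xffff));
	return params;
}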
+ */ +} wl_join_params_t; + +#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \ + WL_ASSOC_PARAMS_FIXED_SIZE) +/* scan params for extended join */ +typedef struct wl_join_scan_params { + uint8 scan_type; /**< 0 use default, active or passive scan */ + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for + * active scanning + */ + int32 passive_time; /**< -1 use default, dwell time per channel + * for passive scanning + */ + int32 home_time; /**< -1 use default, dwell time for the home channel + * between channel scans + */ +} wl_join_scan_params_t; + +/* extended join params */ +typedef struct wl_extjoin_params { + wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */ + wl_join_scan_params_t scan; + wl_join_assoc_params_t assoc; /**< optional field, but it must include the fixed portion + * of the wl_join_assoc_params_t struct when it does + * present. + */ +} wl_extjoin_params_t; +#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \ + WL_JOIN_ASSOC_PARAMS_FIXED_SIZE) + +#define ANT_SELCFG_MAX 4 /**< max number of antenna configurations */ +#define MAX_STREAMS_SUPPORTED 4 /**< max number of streams supported */ +typedef struct { + uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */ + uint8 num_antcfg; /**< number of available antenna configurations */ +} wlc_antselcfg_t; + +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_t; + +typedef struct { + chanspec_t chanspec; /**< Which channel? */ + uint16 num_secs; /**< How many secs worth of data */ + cca_congest_t secs[1]; /**< Data */ +} cca_congest_channel_req_t; + +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest; /**< millisecs detecting busy CCA */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_simple_t; + +typedef struct { + uint16 status; + uint16 id; + chanspec_t chanspec; /**< Which channel? */ + uint16 len; + union { + cca_congest_simple_t cca_busy; /**< CCA busy */ + int noise; /**< noise floor */ + }; +} cca_chan_qual_event_t; + + +/* interference sources */ +enum interference_source { + ITFR_NONE = 0, /**< interference */ + ITFR_PHONE, /**< wireless phone */ + ITFR_VIDEO_CAMERA, /**< wireless video camera */ + ITFR_MICROWAVE_OVEN, /**< microwave oven */ + ITFR_BABY_MONITOR, /**< wireless baby monitor */ + ITFR_BLUETOOTH, /**< bluetooth */ + ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */ + ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */ + ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */ + ITFR_UNIDENTIFIED /**< interference from unidentified source */ +}; + +/* structure for interference source report */ +typedef struct { + uint32 flags; /**< flags. 
bit definitions below */ + uint32 source; /**< last detected interference source */ + uint32 timestamp; /**< second timestamp on interferenced flag change */ +} interference_source_rep_t; + +#define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */ + + +typedef struct wl_country { + char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in + * the Country IE + */ + int32 rev; /**< revision specifier for ccode + * on set, -1 indicates unspecified. + * on get, rev >= 0 + */ + char ccode[WLC_CNTRY_BUF_SZ]; /**< nul-terminated built-in country code. + * variable length, but fixed size in + * struct allows simple allocation for + * expected country strings <= 3 chars. + */ +} wl_country_t; + +#define CCODE_INFO_VERSION 1 + +typedef enum wl_ccode_role { + WLC_CCODE_ROLE_ACTIVE = 0, + WLC_CCODE_ROLE_HOST, + WLC_CCODE_ROLE_80211D_ASSOC, + WLC_CCODE_ROLE_80211D_SCAN, + WLC_CCODE_ROLE_DEFAULT, + WLC_CCODE_LAST +} wl_ccode_role_t; +#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST + +typedef struct wl_ccode_entry { + uint16 reserved; + uint8 band; + uint8 role; + char ccode[WLC_CNTRY_BUF_SZ]; +} wl_ccode_entry_t; + +typedef struct wl_ccode_info { + uint16 version; + uint16 count; /* Number of ccodes entries in the set */ + wl_ccode_entry_t ccodelist[1]; +} wl_ccode_info_t; +#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist) + +typedef struct wl_channels_in_country { + uint32 buflen; + uint32 band; + char country_abbrev[WLC_CNTRY_BUF_SZ]; + uint32 count; + uint32 channel[1]; +} wl_channels_in_country_t; + +typedef struct wl_country_list { + uint32 buflen; + uint32 band_set; + uint32 band; + uint32 count; + char country_abbrev[1]; +} wl_country_list_t; + +typedef struct wl_rm_req_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ +} wl_rm_req_elt_t; + +typedef struct wl_rm_req { + uint32 token; /**< overall measurement set token */ + uint32 count; /**< number of measurement requests */ + void *cb; /**< completion callback function: may be NULL */ + void *cb_arg; /**< arg to completion callback function */ + wl_rm_req_elt_t req[1]; /**< variable length block of requests */ +} wl_rm_req_t; +#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) + +typedef struct wl_rm_rep_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ + uint32 len; /**< byte length of data block */ + uint8 data[1]; /**< variable length data block */ +} wl_rm_rep_elt_t; +#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */ + +#define WL_RPI_REP_BIN_NUM 8 +typedef struct wl_rm_rpi_rep { + uint8 rpi[WL_RPI_REP_BIN_NUM]; + int8 rpi_max[WL_RPI_REP_BIN_NUM]; +} wl_rm_rpi_rep_t; + +typedef struct wl_rm_rep { + uint32 token; /**< overall measurement set token */ + uint32 len; /**< length of measurement report block */ + wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */ +} wl_rm_rep_t; +#define WL_RM_REP_FIXED_LEN 8 + + +typedef enum sup_auth_status { + /* Basic supplicant authentication states */ + WLC_SUP_DISCONNECTED = 0, + WLC_SUP_CONNECTING, + WLC_SUP_IDREQUIRED, + WLC_SUP_AUTHENTICATING, + WLC_SUP_AUTHENTICATED, + WLC_SUP_KEYXCHANGE, + WLC_SUP_KEYED, + WLC_SUP_TIMEOUT, + 
WLC_SUP_LAST_BASIC_STATE, + + /* Extended supplicant authentication states */ + /* Waiting to receive handshake msg M1 */ + WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, + /* Preparing to send handshake msg M2 */ + WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, + /* Waiting to receive handshake msg M3 */ + WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, + WLC_SUP_KEYXCHANGE_PREP_M4, /**< Preparing to send handshake msg M4 */ + WLC_SUP_KEYXCHANGE_WAIT_G1, /**< Waiting to receive handshake msg G1 */ + WLC_SUP_KEYXCHANGE_PREP_G2 /**< Preparing to send handshake msg G2 */ +} sup_auth_status_t; + +typedef struct wl_wsec_key { + uint32 index; /**< key index */ + uint32 len; /**< key length */ + uint8 data[DOT11_MAX_KEY_SIZE]; /**< key data */ + uint32 pad_1[18]; + uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ + uint32 flags; /**< misc flags */ + uint32 pad_2[2]; + int pad_3; + int iv_initialized; /**< has IV been initialized already? */ + int pad_4; + /* Rx IV */ + struct { + uint32 hi; /**< upper 32 bits of IV */ + uint16 lo; /**< lower 16 bits of IV */ + } rxiv; + uint32 pad_5[2]; + struct ether_addr ea; /**< per station */ +} wl_wsec_key_t; + +#define WSEC_MIN_PSK_LEN 8 +#define WSEC_MAX_PSK_LEN 64 + +/* Flag for key material needing passhash'ing */ +#define WSEC_PASSPHRASE (1<<0) + +/* receptacle for WLC_SET_WSEC_PMK parameter */ +typedef struct { + ushort key_len; /**< octets in key material */ + ushort flags; /**< key handling qualification */ + uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */ +} wsec_pmk_t; + +typedef struct _pmkid { + struct ether_addr BSSID; + uint8 PMKID[WPA2_PMKID_LEN]; +} pmkid_t; + +typedef struct _pmkid_list { + uint32 npmkid; + pmkid_t pmkid[1]; +} pmkid_list_t; + +typedef struct _pmkid_cand { + struct ether_addr BSSID; + uint8 preauth; +} pmkid_cand_t; + +typedef struct _pmkid_cand_list { + uint32 npmkid_cand; + pmkid_cand_t pmkid_cand[1]; +} pmkid_cand_list_t; + +#define WL_STA_ANT_MAX 4 /**< max possible rx antennas */ + +typedef struct wl_assoc_info { + uint32 req_len; + uint32 resp_len; + uint32 flags; + struct dot11_assoc_req req; + struct ether_addr reassoc_bssid; /* used in reassoc's */ + struct dot11_assoc_resp resp; +} wl_assoc_info_t; + +typedef struct wl_led_info { + uint32 index; /* led index */ + uint32 behavior; + uint8 activehi; +} wl_led_info_t; + + +/* srom read/write struct passed through ioctl */ +typedef struct { + uint byteoff; /**< byte offset */ + uint nbytes; /**< number of bytes */ + uint16 buf[1]; +} srom_rw_t; + +#define CISH_FLAG_PCIECIS (1 << 15) /* write CIS format bit for PCIe CIS */ +/* similar cis (srom or otp) struct [iovar: may not be aligned] */ +typedef struct { + uint16 source; /**< cis source */ + uint16 flags; /**< flags */ + uint32 byteoff; /**< byte offset */ + uint32 nbytes; /**< number of bytes */ + /* data follows here */ +} cis_rw_t; + +/* R_REG and W_REG struct passed through ioctl */ +typedef struct { + uint32 byteoff; /**< byte offset of the field in d11regs_t */ + uint32 val; /**< read/write value of the field */ + uint32 size; /**< sizeof the field */ + uint band; /**< band (optional) */ +} rw_reg_t; + +/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */ +/* PCL - Power Control Loop */ +typedef struct { + uint16 auto_ctrl; /**< WL_ATTEN_XX */ + uint16 bb; /**< Baseband attenuation */ + uint16 radio; /**< Radio attenuation */ + uint16 txctl1; /**< Radio TX_CTL1 value */ +} atten_t; + +/* Per-AC retry parameters */ +struct wme_tx_params_s { + uint8 
short_retry; + uint8 short_fallback; + uint8 long_retry; + uint8 long_fallback; + uint16 max_rate; /* In units of 512 Kbps */ +}; + +typedef struct wme_tx_params_s wme_tx_params_t; + +#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT) + +/* Used to get specific link/ac parameters */ +typedef struct { + int32 ac; + uint8 val; + struct ether_addr ea; +} link_val_t; + + +#define WL_PM_MUTE_TX_VER 1 + +typedef struct wl_pm_mute_tx { + uint16 version; /**< version */ + uint16 len; /**< length */ + uint16 deadline; /**< deadline timer (in milliseconds) */ + uint8 enable; /**< set to 1 to enable mode; set to 0 to disable it */ +} wl_pm_mute_tx_t; + + +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /* # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
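Editorial sketch, not part of the patch: a defensive read of the sta_info structure being defined here (the typedef closes just below). The version and length fields lead the struct so callers can detect older firmware responses before touching newer members; the iovar query itself is elided and `buf`/`buf_len` are assumed to hold the response.

static int read_sta_rates(const void *buf, uint buf_len, uint32 *tx_rate, uint32 *rx_rate)
{
	const sta_info_t *sta = (const sta_info_t *)buf;

	/* ver/len lead the struct so old and new layouts can be told apart */
	if (buf_len < OFFSETOF(sta_info_t, cap) || sta->len > buf_len)
		return -1;
	if (sta->len < OFFSETOF(sta_info_t, rx_decrypt_succeeds))
		return -1;	/* response is too old to carry the rate fields used below */

	*tx_rate = sta->tx_rate;	/* rate used by the last tx frame */
	*rx_rate = sta->rx_rate;	/* rate of the last successful rx frame */
	return 0;
}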
+ */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ +} sta_info_t; + +#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts) + +#define WL_STA_VER 4 + +typedef struct { + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; +} wlc_swdiv_stats_t; + +#define WLC_NUMRATES 16 /**< max # of rates in a rateset */ + +typedef struct wlc_rateset { + uint32 count; /**< number of rates in rates[] */ + uint8 rates[WLC_NUMRATES]; /**< rates in 500kbps units w/hi bit set if basic */ + uint8 htphy_membership; /**< HT PHY Membership */ + uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */ + uint16 vht_mcsmap; /**< supported vht mcs nss bit map */ + uint16 vht_mcsmap_prop; /**< supported prop vht mcs nss bit map */ +} wlc_rateset_t; + +/* Used to get specific STA parameters */ +typedef struct { + uint32 val; + struct ether_addr ea; +} scb_val_t; + +/* Used by iovar versions of some ioctls, i.e. 
WLC_SCB_AUTHORIZE et al */ +typedef struct { + uint32 code; + scb_val_t ioctl_args; +} authops_t; + +/* channel encoding */ +typedef struct channel_info { + int hw_channel; + int target_channel; + int scan_channel; +} channel_info_t; + +/* For ioctls that take a list of MAC addresses */ +typedef struct maclist { + uint count; /**< number of MAC addresses */ + struct ether_addr ea[1]; /**< variable length array of MAC addresses */ +} maclist_t; + +/* get pkt count struct passed through ioctl */ +typedef struct get_pktcnt { + uint rx_good_pkt; + uint rx_bad_pkt; + uint tx_good_pkt; + uint tx_bad_pkt; + uint rx_ocast_good_pkt; /* unicast packets destined for others */ +} get_pktcnt_t; + +/* NINTENDO2 */ +#define LQ_IDX_MIN 0 +#define LQ_IDX_MAX 1 +#define LQ_IDX_AVG 2 +#define LQ_IDX_SUM 2 +#define LQ_IDX_LAST 3 +#define LQ_STOP_MONITOR 0 +#define LQ_START_MONITOR 1 + +/* Get averages RSSI, Rx PHY rate and SNR values */ +typedef struct { + int rssi[LQ_IDX_LAST]; /* Array to keep min, max, avg rssi */ + int snr[LQ_IDX_LAST]; /* Array to keep min, max, avg snr */ + int isvalid; /* Flag indicating whether above data is valid */ +} wl_lq_t; /* Link Quality */ + +typedef enum wl_wakeup_reason_type { + LCD_ON = 1, + LCD_OFF, + DRC1_WAKE, + DRC2_WAKE, + REASON_LAST +} wl_wr_type_t; + +typedef struct { +/* Unique filter id */ + uint32 id; + +/* stores the reason for the last wake up */ + uint8 reason; +} wl_wr_t; + +/* Get MAC specific rate histogram command */ +typedef struct { + struct ether_addr ea; /**< MAC Address */ + uint8 ac_cat; /**< Access Category */ + uint8 num_pkts; /**< Number of packet entries to be averaged */ +} wl_mac_ratehisto_cmd_t; /**< MAC Specific Rate Histogram command */ + +/* Get MAC rate histogram response */ +typedef struct { + uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */ + uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */ + uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */ + uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */ + uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /* MCS counts */ +} wl_mac_ratehisto_res_t; /**< MAC Specific Rate Histogram Response */ + +/* Linux network driver ioctl encoding */ +typedef struct wl_ioctl { + uint cmd; /**< common ioctl definition */ + void *buf; /**< pointer to user buffer */ + uint len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint used; /**< bytes read or written (optional) */ + uint needed; /**< bytes needed (optional) */ +} wl_ioctl_t; + +#ifdef CONFIG_COMPAT +typedef struct compat_wl_ioctl { + uint cmd; /**< common ioctl definition */ + uint32 buf; /**< pointer to user buffer */ + uint len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint used; /**< bytes read or written (optional) */ + uint needed; /**< bytes needed (optional) */ +} compat_wl_ioctl_t; +#endif /* CONFIG_COMPAT */ + +#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */ +#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */ +#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */ +#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */ +#define WL_NUM_RATES_VHT 10 +#define WL_NUM_RATES_MCS32 1 + + +/* + * Structure for passing hardware and software + * revision info up from the driver. 
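Editorial sketch, not part of the patch: a typical wrapper that fills wl_ioctl_t (defined above) for a query. The function name is made up and wl_do_ioctl is a placeholder for whatever ioctl transport the caller uses.

static int wl_query(void *wl_handle, uint cmd, void *resp_buf, uint resp_len)
{
	wl_ioctl_t ioc;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = cmd;		/* one of the WLC_GET_* ioctl numbers */
	ioc.buf = resp_buf;	/* user buffer the driver fills in */
	ioc.len = resp_len;
	ioc.set = 0;		/* 0 = query, 1 = set */

	return wl_do_ioctl(wl_handle, &ioc);	/* hypothetical transport hook */
}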
+ */ +typedef struct wlc_rev_info { + uint vendorid; /**< PCI vendor id */ + uint deviceid; /**< device id of chip */ + uint radiorev; /**< radio revision */ + uint chiprev; /**< chip revision */ + uint corerev; /**< core revision */ + uint boardid; /**< board identifier (usu. PCI sub-device id) */ + uint boardvendor; /**< board vendor (usu. PCI sub-vendor id) */ + uint boardrev; /**< board revision */ + uint driverrev; /**< driver version */ + uint ucoderev; /**< microcode version */ + uint bus; /**< bus type */ + uint chipnum; /**< chip number */ + uint phytype; /**< phy type */ + uint phyrev; /**< phy revision */ + uint anarev; /**< anacore rev */ + uint chippkg; /**< chip package info */ + uint nvramrev; /**< nvram revision number */ +} wlc_rev_info_t; + +#define WL_REV_INFO_LEGACY_LENGTH 48 + +#define WL_BRAND_MAX 10 +typedef struct wl_instance_info { + uint instance; + char brand[WL_BRAND_MAX]; +} wl_instance_info_t; + +/* structure to change size of tx fifo */ +typedef struct wl_txfifo_sz { + uint16 magic; + uint16 fifo; + uint16 size; +} wl_txfifo_sz_t; + +/* Transfer info about an IOVar from the driver */ +/* Max supported IOV name size in bytes, + 1 for nul termination */ +#define WLC_IOV_NAME_LEN 30 +typedef struct wlc_iov_trx_s { + uint8 module; + uint8 type; + char name[WLC_IOV_NAME_LEN]; +} wlc_iov_trx_t; + +/* bump this number if you change the ioctl interface */ +#define WLC_IOCTL_VERSION 2 +#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1 + +#ifdef CONFIG_USBRNDIS_RETAIL +/* struct passed in for WLC_NDCONFIG_ITEM */ +typedef struct { + char *name; + void *param; +} ndconfig_item_t; +#endif + + + +#define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */ + + +#define WL_PHY_PAVAR_VER 1 /**< pavars version */ +#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */ +typedef struct wl_pavars2 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + uint16 inuse; /**< driver return 1 for a1,b0,b1 in current band range */ + uint16 phy_type; /**< phy type */ + uint16 bandrange; + uint16 chain; + uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */ +} wl_pavars2_t; + +typedef struct wl_po { + uint16 phy_type; /**< Phy type */ + uint16 band; + uint16 cckpo; + uint32 ofdmpo; + uint16 mcspo[8]; +} wl_po_t; + +#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */ + +typedef struct wl_rpcal { + uint16 value; + uint16 update; +} wl_rpcal_t; + +typedef struct wl_aci_args { + int enter_aci_thresh; /* Trigger level to start detecting ACI */ + int exit_aci_thresh; /* Trigger level to exit ACI mode */ + int usec_spin; /* microsecs to delay between rssi samples */ + int glitch_delay; /* interval between ACI scans when glitch count is consistently high */ + uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI mitigation mode */ + uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */ + uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */ + uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */ + uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */ + uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */ + uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */ + uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */ + uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */ + uint16 nphy_noise_noassoc_glitch_th_dn; + uint16 
nphy_noise_assoc_glitch_th_up;
+ uint16 nphy_noise_assoc_glitch_th_dn;
+ uint16 nphy_noise_assoc_aci_glitch_th_up;
+ uint16 nphy_noise_assoc_aci_glitch_th_dn;
+ uint16 nphy_noise_assoc_enter_th;
+ uint16 nphy_noise_noassoc_enter_th;
+ uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+ uint16 nphy_noise_noassoc_crsidx_incr;
+ uint16 nphy_noise_assoc_crsidx_incr;
+ uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */
+#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+ /* version 0 fields */
+ uint8 coll_us;
+ int cores;
+ /* add'l version 1 fields */
+ uint16 version; /* see definition of WL_SAMPLECOLLECT_T_VERSION */
+ uint16 length; /* length of entire structure */
+ int8 trigger;
+ uint16 timeout;
+ uint16 mode;
+ uint32 pre_dur;
+ uint32 post_dur;
+ uint8 gpio_sel;
+ uint8 downsamp;
+ uint8 be_deaf;
+ uint8 agc; /**< loop from init gain and going down */
+ uint8 filter; /**< override high pass corners to lowest */
+ /* add'l version 2 fields */
+ uint8 trigger_state;
+ uint8 module_sel1;
+ uint8 module_sel2;
+ uint16 nsamps;
+ int bitStart;
+ uint32 gpioCapMask;
+} wl_samplecollect_args_t;
+
+#define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_sampledata_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+ uint16 version; /**< structure version */
+ uint16 size; /**< size of structure */
+ uint16 tag; /**< Header/Data */
+ uint16 length; /**< data length */
+ uint32 flag; /**< bit def */
+} wl_sampledata_t;
+
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+ WL_OTA_TEST_IDLE = 0, /**< Default Idle state */
+ WL_OTA_TEST_ACTIVE = 1, /**< Test Running */
+ WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */
+ WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */
+};
+/* OTA SYNC Status */
+enum {
+ WL_OTA_SYNC_IDLE = 0, /**< Idle state */
+ WL_OTA_SYNC_ACTIVE = 1, /**< Waiting for Sync */
+ WL_OTA_SYNC_FAIL = 2 /**< Sync pkt not received */
+};
+
+/* Various error states the DUT can get stuck in during the test */
+enum {
+ WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */
+ WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not received */
+ WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */
+ WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */
+ WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */
+ WL_OTA_SKIP_TEST_UNKNOWN_CALL /**< Unintentional scheduling on ota test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+ WL_OTA_TEST_TX = 0, /**< ota_tx */
+ WL_OTA_TEST_RX = 1, /**< ota_rx */
+};
+
+/* Bandwidth modes of operation: 20 in 40 MHz, 20 MHz, 40 MHz, 80 MHz */
+enum {
+ WL_OTA_TEST_BW_20_IN_40MHZ = 0, /**< 20 in 40 operation */
+ WL_OTA_TEST_BW_20MHZ = 1, /**< 20 Mhz operation */
+ WL_OTA_TEST_BW_40MHZ = 2, /**< full 40Mhz operation */
+ WL_OTA_TEST_BW_80MHZ = 3 /* full 80Mhz operation */
+};
+
+#define HT_MCS_INUSE 0x00000080 /* HT MCS in use, indicates b0-6 holds an mcs */
+#define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use, indicates b0-6 holds an mcs */
+#define OTA_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OTA_STF_SISO 0
+#define OTA_STF_CDD 1
+#define OTA_STF_STBC 2
+#define OTA_STF_SDM 3
+
+typedef struct ota_rate_info {
+ uint8 rate_cnt; /**< Total number of rates */
+ uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1mbps to 130mbps */
+ /**< for legacy rates : rate in mbps * 2 */
+ /**< for HT rates : mcs index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+ int8 pwr_ctrl_on; /**< power control on/off */
+ int8 start_pwr; /**< starting power/index */
+ int8 delta_pwr; /**< delta power/index */
+ int8 end_pwr; /**< end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+ uint16 delay; /* Inter-packet delay */
+ /**< for ota_tx, delay is tx ifs in microseconds */
+ /* for ota_rx, delay is wait time in milliseconds */
+ uint16 nframes; /* Number of frames */
+ uint16 length; /* Packet length */
+} ota_packetengine_t;
+
+/* Test info vector */
+typedef struct wl_ota_test_args {
+ uint8 cur_test; /**< test phase */
+ uint8 chan; /**< channel */
+ uint8 bw; /**< bandwidth */
+ uint8 control_band; /**< control band */
+ uint8 stf_mode; /**< stf mode */
+ ota_rate_info_t rt_info; /**< Rate info */
+ ota_packetengine_t pkteng; /**< packeteng info */
+ uint8 txant; /**< tx antenna */
+ uint8 rxant; /**< rx antenna */
+ ota_power_info_t pwr_info; /**< power sweep info */
+ uint8 wait_for_sync; /**< wait for sync or not */
+ uint8 ldpc;
+ uint8 sgi;
+ /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_args_t;
+
+#define WL_OTA_TESTVEC_T_VERSION 1 /* version of wl_ota_test_vector_t struct */
+typedef struct wl_ota_test_vector {
+ uint16 version;
+ wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /**< Test argument struct */
+ uint16 test_cnt; /**< Total no of tests */
+ uint8 file_dwnld_valid; /**< File successfully downloaded */
+ uint8 sync_timeout; /**< sync packet timeout */
+ int8 sync_fail_action; /**< sync fail action */
+ struct ether_addr sync_mac; /**< macaddress for sync pkt */
+ struct ether_addr tx_mac; /**< macaddress for tx */
+ struct ether_addr rx_mac; /**< macaddress for rx */
+ int8 loop_test; /**< dbg feature to loop the test */
+ uint16 test_rxcnt;
+ /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_vector_t;
+
+
+/* struct copied back from dongle to host to query the status */
+typedef struct wl_ota_test_status {
+ int16 cur_test_cnt; /**< test phase */
+ int8 skip_test_reason; /**< skip test reason */
+ wl_ota_test_args_t test_arg; /**< cur test arg details */
+ uint16 test_cnt; /**< total no of tests downloaded */
+ uint8 file_dwnld_valid; /**< file successfully downloaded ? */
+ uint8 sync_timeout; /**< sync timeout */
+ int8 sync_fail_action; /**< sync fail action */
+ struct ether_addr sync_mac; /**< macaddress for sync pkt */
+ struct ether_addr tx_mac; /**< tx mac address */
+ struct ether_addr rx_mac; /**< rx mac address */
+ uint8 test_stage; /**< check the test status */
+ int8 loop_test; /**< Debug feature to put the test engine in a loop */
+ uint8 sync_status; /**< sync status */
+} wl_ota_test_status_t;
+typedef struct wl_ota_rx_rssi {
+ uint16 pktcnt; /* Pkt count used for this rx test */
+ chanspec_t chanspec; /* Channel info on which the packets are received */
+ int16 rssi; /* Average RSSI of the first 50% packets received */
+} wl_ota_rx_rssi_t;
+
+#define WL_OTARSSI_T_VERSION 1 /* version of wl_ota_test_rssi_t struct */
+#define WL_OTA_TEST_RSSI_FIXED_SIZE OFFSETOF(wl_ota_test_rssi_t, rx_rssi)
+
+typedef struct wl_ota_test_rssi {
+ uint8 version;
+ uint8 testcnt; /* total measured RSSI values, valid on output only */
+ wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */
+} wl_ota_test_rssi_t;
+
+/* WL_OTA END */
+
+/* wl_radar_args_t */
+typedef struct {
+ int npulses; /**< required number of pulses at n * t_int */
+ int ncontig; /**< required number of pulses at t_int */
+ int min_pw; /**< minimum pulse width (20 MHz clocks) */
+ int max_pw; /**< maximum pulse width (20 MHz clocks) */
+ uint16 thresh0; /**< Radar detection, thresh 0 */
+ uint16 thresh1; /**< Radar detection, thresh 1 */
+ uint16 blank; /**< Radar detection, blank control */
+ uint16 fmdemodcfg; /**< Radar detection, fmdemod config */
+ int npulses_lp; /* Radar detection, minimum long pulses */
+ int min_pw_lp; /* Minimum pulsewidth for long pulses */
+ int max_pw_lp; /* Maximum pulsewidth for long pulses */
+ int min_fm_lp; /* Minimum fm for long pulses */
+ int max_span_lp; /* Maximum deltat for long pulses */
+ int min_deltat; /* Minimum spacing between pulses */
+ int max_deltat; /* Maximum spacing between pulses */
+ uint16 autocorr; /**< Radar detection, autocorr on or off */
+ uint16 st_level_time; /**< Radar detection, start_timing level */
+ uint16 t2_min; /* minimum clocks needed to remain in state 2 */
+ uint32 version; /* version */
+ uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulses */
+ int npulses_fra; /* Radar detection, minimum French pulses set */
+ int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */
+ int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */
+ uint16 percal_mask; /**< defines which period cal is masked from radar detection */
+ int quant; /**< quantization resolution to pulse positions */
+ uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */
+ uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */
+ int nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */
+ int max_pw_tol; /**< maximum tolerance allowed in detected pulse width for radar detection */
+ uint16 feature_mask; /* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+ uint32 version; /* version */
+ uint16 thresh0_20_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+ uint16 thresh1_20_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+ uint16 thresh0_40_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+ uint16 thresh1_40_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+ uint16 thresh0_80_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+ uint16 thresh1_80_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+ uint16 thresh0_20_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+ uint16 thresh1_20_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+ uint16 thresh0_40_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+ uint16 thresh1_40_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+ uint16 thresh0_80_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+ uint16 thresh1_80_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+#ifdef WL11AC160
+ uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh0_160_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+ uint16 thresh1_160_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+#endif /* WL11AC160 */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION 2
+
+/* RSSI per antenna */
+typedef struct {
+ uint32 version; /**< version field */
+ uint32 count; /**< number of valid antenna rssi */
+ int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */
+} wl_rssi_ant_t;
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+ uint state; /**< noted by WL_DFS_CACSTATE_XX. */
+ uint duration; /**< time spent in ms in state. */
+ /* as dfs enters ISM state, it removes the operational channel from quiet channel
+ * list and notes the channel in channel_cleared. set to 0 if no channel is cleared
+ */
+ chanspec_t chanspec_cleared;
+ /* chanspec cleared used to be a uint, add another to uint16 to maintain size */
+ uint16 pad;
+} wl_dfs_status_t;
+
+typedef struct {
+ uint state; /* noted by WL_DFS_CACSTATE_XX */
+ uint duration; /* time spent in ms in state */
+ chanspec_t chanspec; /* chanspec of this core */
+ chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */
+ uint16 sub_type; /* currently just the index of the core or the respective PLL */
+ uint16 pad;
+} wl_dfs_sub_status_t;
+
+#define WL_DFS_STATUS_ALL_VERSION (1)
+typedef struct {
+ uint16 version; /* version field; current max version 1 */
+ uint16 num_sub_status;
+ wl_dfs_sub_status_t dfs_sub_status[1]; /* struct array of length num_sub_status */
+} wl_dfs_status_all_t;
+
+#define WL_DFS_AP_MOVE_VERSION (1)
+typedef struct wl_dfs_ap_move_status {
+ int8 version; /* version field; current max version 1 */
+ int8 move_status; /* DFS move status */
+ chanspec_t chanspec; /* New AP Chanspec */
+ wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */
+} wl_dfs_ap_move_status_t;
+
+
+/* data structure used in 'radar_status' wl interface, which is used to query radar detection status */
+typedef struct {
+ bool detected;
+ int count;
+ bool pretended;
+ uint32 radartype;
+ uint32 timenow;
+ uint32 timefromL;
+ int lp_csect_single;
+ int detected_pulse_index;
+ int nconsecq_pulses;
+ chanspec_t ch;
+ int pw[10];
+ int intv[10];
+ int fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+ uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /**< User set target */
+ uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /**< reg and local power limit */
+ uint8 txpwr_local_max; /**< local max according to the AP */
+
uint8 txpwr_local_constraint; /**< local constraint according to the AP */ + uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */ + uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /**< Latest target for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */ + uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */ + uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */ + uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */ + int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_power_legacy_t; + +#define WL_TX_POWER_RATES_LEGACY 45 +#define WL_TX_POWER_MCS20_FIRST 12 +#define WL_TX_POWER_MCS20_NUM 16 +#define WL_TX_POWER_MCS40_FIRST 28 +#define WL_TX_POWER_MCS40_NUM 17 + +typedef struct { + uint32 flags; + chanspec_t chanspec; /* txpwr report for this channel */ + chanspec_t local_chanspec; /* channel on which we are associated */ + uint8 local_max; /* local max according to the AP */ + uint8 local_constraint; /* local constraint according to the AP */ + int8 antgain[2]; /* Ant gain for each band - from SROM */ + uint8 rf_cores; /* count of RF Cores being reported */ + uint8 est_Pout[4]; /* Latest tx power out estimate per RF + * chain without adjustment + */ + uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ + uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */ + uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */ + uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */ + uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */ +} tx_power_legacy2_t; + +#define WL_NUM_2x2_ELEMENTS 4 +#define WL_NUM_3x3_ELEMENTS 6 +#define WL_NUM_4x4_ELEMENTS 10 + +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint32 buflen; /**< ppr buffer length */ + uint8 pprbuf[1]; /**< Latest target power buffer */ +} wl_txppr_t; + +#define WL_TXPPR_VERSION 1 +#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) +#define TX_POWER_T_VERSION 45 +/* number of ppr serialization buffers, it should be reg, board and target */ +#define WL_TXPPR_SER_BUF_NUM (3) + +typedef struct chanspec_txpwr_max { + chanspec_t chanspec; /* chanspec */ + uint8 txpwr_max; /* max txpwr in all the rates */ + uint8 padding; +} chanspec_txpwr_max_t; + +typedef struct wl_chanspec_txpwr_max { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */ + chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */ +} wl_chanspec_txpwr_max_t; + +#define WL_CHANSPEC_TXPWR_MAX_VER 1 +#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t)) + +typedef struct tx_inst_power { + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_inst_power_t; + +#define WL_NUM_TXCHAIN_MAX 4 +typedef struct wl_txchain_pwr_offsets { + int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */ +} wl_txchain_pwr_offsets_t; +/* maximum channels returned by the get valid channels iovar 
*/ +#define WL_NUMCHANNELS 64 + +/* + * Join preference iovar value is an array of tuples. Each tuple has a one-byte type, + * a one-byte length, and a variable length value. RSSI type tuple must be present + * in the array. + * + * Types are defined in "join preference types" section. + * + * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple + * and must be set to zero. + * + * Values are defined below. + * + * 1. RSSI - 2 octets + * offset 0: reserved + * offset 1: reserved + * + * 2. WPA - 2 + 12 * n octets (n is # tuples defined below) + * offset 0: reserved + * offset 1: # of tuples + * offset 2: tuple 1 + * offset 14: tuple 2 + * ... + * offset 2 + 12 * (n - 1) octets: tuple n + * + * struct wpa_cfg_tuple { + * uint8 akm[DOT11_OUI_LEN+1]; akm suite + * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite + * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite + * }; + * + * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY. + * + * 3. BAND - 2 octets + * offset 0: reserved + * offset 1: see "band preference" and "band types" + * + * 4. BAND RSSI - 2 octets + * offset 0: band types + * offset 1: +ve RSSI boost value in dB + */ + +struct tsinfo_arg { + uint8 octets[3]; +}; + +#define RATE_CCK_1MBPS 0 +#define RATE_CCK_2MBPS 1 +#define RATE_CCK_5_5MBPS 2 +#define RATE_CCK_11MBPS 3 + +#define RATE_LEGACY_OFDM_6MBPS 0 +#define RATE_LEGACY_OFDM_9MBPS 1 +#define RATE_LEGACY_OFDM_12MBPS 2 +#define RATE_LEGACY_OFDM_18MBPS 3 +#define RATE_LEGACY_OFDM_24MBPS 4 +#define RATE_LEGACY_OFDM_36MBPS 5 +#define RATE_LEGACY_OFDM_48MBPS 6 +#define RATE_LEGACY_OFDM_54MBPS 7 + +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1 + +typedef struct wl_bsstrans_rssi { + int8 rssi_2g; /**< RSSI in dbm for 2.4 G */ + int8 rssi_5g; /**< RSSI in dbm for 5G, unused for cck */ +} wl_bsstrans_rssi_t; + +#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */ + +/* RSSI to rate mapping, all 20Mhz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map { + uint16 ver; + uint16 len; /* length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /* 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /* 6 to 54mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /* MCS0-9 */ +} wl_bsstrans_rssi_rate_map_t; + +#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1 + +/* Configure number of scans allowed per throttle period */ +typedef struct wl_bsstrans_roamthrottle { + uint16 ver; + uint16 period; + uint16 scans_allowed; +} wl_bsstrans_roamthrottle_t; + +#define NFIFO 6 /**< # tx/rx fifopairs */ +#define NREINITREASONCOUNT 8 +#define REINITREASONIDX(_x) (((_x) < NREINITREASONCOUNT) ? (_x) : 0) + +#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */ +#define WL_CNT_VERSION_6 6 +#define WL_CNT_VERSION_11 11 + +#define WLC_WITH_XTLV_CNT + +/* + * tlv IDs uniquely identifies counter component + * packed into wl_cmd_t container + */ +enum wl_cnt_xtlv_id { + WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */ + WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ + WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800 /* corerev >= 64 UCODEX MACSTAT */ +}; + +/* The number of variables in wl macstat cnt struct. 
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t) + */ +#define WL_CNT_MCST_VAR_NUM 64 +/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */ +#define WL_CNT_MCST_STRUCT_SZ ((uint)sizeof(uint32) * WL_CNT_MCST_VAR_NUM) + +#define INVALID_CNT_VAL (uint32)(-1) +#define WL_CNT_MCXST_STRUCT_SZ ((uint)sizeof(wl_cnt_ge64mcxst_v1_t)) + +#define WL_XTLV_CNTBUF_MAX_SIZE ((uint)(OFFSETOF(wl_cnt_info_t, data)) + \ + (uint)BCM_XTLV_HDR_SIZE + (uint)sizeof(wl_cnt_wlc_t) + \ + (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \ + (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ) + +#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint)sizeof(wl_cnt_ver_11_t)) + +/* Top structure of counters IOVar buffer */ +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 datalen; /**< length of data including all paddings. */ + uint8 data [1]; /**< variable length payload: + * 1 or more bcm_xtlv_t type of tuples. + * each tuple is padded to multiple of 4 bytes. + * 'datalen' field of this structure includes all paddings. + */ +} wl_cnt_info_t; + +/* wlc layer counters */ +typedef struct { + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register 
failure */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + uint32 rfdisable; /**< count of radio disables */ + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; + + uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number 
of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ +} wl_cnt_wlc_t; + +/* MACXSTAT counters for ucodex (corerev >= 64) */ +typedef struct { + uint32 macxsusp; + uint32 m2vmsg; + uint32 v2mmsg; + uint32 mboxout; + uint32 musnd; + uint32 sfb2v; +} wl_cnt_ge64mcxst_v1_t; + +/* MACSTAT counters for ucode (corerev >= 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 missbcn_dbg; /**< number of beacon missed to receive */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_ge40mcst_v1_t; + +/* MACSTAT counters for ucode (corerev < 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 dbgoff46; + uint32 dbgoff47; + uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that 
made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 phywatch; + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_lt40mcst_v1_t; + +/* MACSTAT counters for "wl counter" version <= 10 */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 PAD0; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 PAD1; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. 
AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_v_le10_mcst_t; + +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; 
/**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< 
configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; /**< num of received packets with retry bit on */ + uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */ + uint32 rxnodelim; /**< macstat cnt only valid in ver 11. + * number of occasions that no valid delimiter is detected + * by ampdu parser. + */ + uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + +} wl_cnt_ver_11_t; + +typedef struct { + uint16 version; /* see definition of WL_CNT_T_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /* tx data frames */ + uint32 txbyte; /* tx data bytes */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txerror; /* tx data errors (derived: sum of others) */ + uint32 txctl; /* tx management frames */ + uint32 txprshort; /* tx short preamble frames */ + uint32 txserr; /* tx status errors */ + uint32 txnobuf; /* tx out of buffers errors */ + uint32 txnoassoc; /* tx discard because we're not associated */ + uint32 txrunt; /* tx runt frames */ + uint32 txchit; /* tx header cache hit (fastpath) */ + uint32 txcmiss; /* tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /* tx fifo underflows */ + uint32 txphyerr; /* tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /* rx data frames */ + uint32 rxbyte; /* rx data bytes */ + uint32 rxerror; /* rx data errors (derived: sum of others) */ + uint32 rxctl; /* rx management frames */ + uint32 rxnobuf; /* rx out of buffers errors */ + uint32 rxnondata; /* rx non data frames in the data channel errors */ + uint32 rxbadds; /* rx bad DS errors */ + uint32 rxbadcm; /* rx bad control or management frames */ + uint32 rxfragerr; /* rx fragmentation errors */ + uint32 rxrunt; /* rx runt frames */ + uint32 rxgiant; /* rx giant frames */ + uint32 rxnoscb; /* rx no scb error */ + uint32 rxbadproto; /* rx invalid 
frames */ + uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ + uint32 rxbadda; /* rx frames tossed for invalid da */ + uint32 rxfilter; /* rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /* rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /* tx/rx dma descriptor errors */ + uint32 dmada; /* tx/rx dma data errors */ + uint32 dmape; /* tx/rx dma descriptor protocol errors */ + uint32 reset; /* reset count */ + uint32 tbtt; /* cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /* callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /* number of RTS sent out by the MAC */ + uint32 txctsfrm; /* number of CTS sent out by the MAC */ + uint32 txackfrm; /* number of ACK frames sent out */ + uint32 txdnlfrm; /* Not used */ + uint32 txbcnfrm; /* beacons transmitted */ + uint32 txfunfl[6]; /* per-fifo tx underflows */ + uint32 rxtoolate; /* receive too late */ + uint32 txfbw; /* transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /* parity check of the PLCP header failed */ + uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /* Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /* beacons received from member of BSS */ + uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /* beacons received from other BSS */ + uint32 rxrsptmout; /* Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /* Number of PMQ overflows */ + uint32 rxcgprqfrm; /* Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; + uint32 frmscons; + uint32 txnack; /* obsolete */ + uint32 rxback; /* blockack rxcnt */ + uint32 txback; /* blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /* TKIPReplays */ + uint32 ccmpfmterr; /* CCMPFormatErrors */ + uint32 ccmpreplay; /* CCMPReplays */ + uint32 ccmpundec; /* CCMPDecryptErrors */ + uint32 fourwayfail; /* FourWayHandshakeFailures */ + uint32 wepundec; /* dot11WEPUndecryptableCount */ + uint32 wepicverr; /* dot11WEPICVErrorCount */ + uint32 decsuccess; /* DecryptSuccessCount */ + uint32 tkipicverr; /* TKIPICVErrorCount */ + uint32 wepexcluded; /* dot11WEPExcludedCount */ + + uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /* TKIPReplays */ + uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /* CCMPReplays */ + uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ + uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /* DecryptSuccessCount */ + uint32 tkipicverr_mcst; /* TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ + + uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ + uint32 txexptime; /* Tx frames suppressed due to timer expiration */ + uint32 psmwds; /* Count PSM watchdogs */ + uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /* PRQ entries read in */ + uint32 prq_undirected_entries; /* which were bcast bss & ssid */ + uint32 prq_bad_entries; /* which could not be translated to info */ + uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /* count of radio disables */ + uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txmpdu_sgi; /* count for sgi transmit */ + uint32 rxmpdu_sgi; /* count for sgi received */ + uint32 txmpdu_stbc; /* count for stbc transmit */ + uint32 rxmpdu_stbc; /* count for stbc received */ + + uint32 rxdrop20s; /* drop secondary cnt */ +} wl_cnt_ver_6_t; + +#define WL_DELTA_STATS_T_VERSION 2 /* current version of wl_delta_stats_t struct */ + +typedef struct { + uint16 version; /* see definition of WL_DELTA_STATS_T_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /* tx data frames */ + uint32 txbyte; /* tx data bytes */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txfail; /* tx failures */ + + /* receive stat counters */ + uint32 rxframe; /* rx data frames */ + uint32 rxbyte; /* rx data bytes */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* phy stats */ + uint32 rxbadplcp; + uint32 rxcrsglitch; + uint32 bphy_rxcrsglitch; + uint32 bphy_badplcp; + +} wl_delta_stats_t; + +typedef struct { + uint32 packets; + uint32 bytes; +} wl_traffic_stats_t; + +typedef struct { + uint16 version; /* see 
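Illustrative sketch (not part of the patch): both counter layouts above begin with a uint16 version / uint16 length pair, so host code would normally validate those fields against the buffer it received before reading any counter. The sketch only touches wl_cnt_ver_6_t fields visible above; the literal 6 mirrors the struct name, since the header's WL_CNT_T_VERSION define is not shown in this excerpt, and it assumes the driver header and its integer typedefs are included.

#include <stdio.h>

/* Minimal sanity check before interpreting a version-6 counter buffer. */
static int check_cnt_ver_6(const void *buf, unsigned int buflen)
{
	const wl_cnt_ver_6_t *cnt = (const wl_cnt_ver_6_t *)buf;

	if (buflen < 2 * sizeof(uint16) || cnt->length > buflen)
		return -1;		/* truncated buffer */
	if (cnt->version != 6)
		return -1;		/* some other layout, e.g. wl_cnt_ver_11_t */

	printf("txframe=%u rxframe=%u rxcrc=%u\n",
	       cnt->txframe, cnt->rxframe, cnt->rxcrc);
	return 0;
}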
definition of WL_WME_CNT_VERSION */ + uint16 length; /* length of entire structure */ + + wl_traffic_stats_t tx[AC_COUNT]; /* Packets transmitted */ + wl_traffic_stats_t tx_failed[AC_COUNT]; /* Packets dropped or failed to transmit */ + wl_traffic_stats_t rx[AC_COUNT]; /* Packets received */ + wl_traffic_stats_t rx_failed[AC_COUNT]; /* Packets failed to receive */ + + wl_traffic_stats_t forward[AC_COUNT]; /* Packets forwarded by AP */ + + wl_traffic_stats_t tx_expired[AC_COUNT]; /* packets dropped due to lifetime expiry */ + +} wl_wme_cnt_t; + +struct wl_msglevel2 { + uint32 low; + uint32 high; +}; + +typedef struct wl_mkeep_alive_pkt { + uint16 version; /* Version for mkeep_alive */ + uint16 length; /* length of fixed parameters in the structure */ + uint32 period_msec; + uint16 len_bytes; + uint8 keep_alive_id; /* 0 - 3 for N = 4 */ + uint8 data[1]; +} wl_mkeep_alive_pkt_t; + +#define WL_MKEEP_ALIVE_VERSION 1 +#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) +#define WL_MKEEP_ALIVE_PRECISION 500 + +/* TCP Keep-Alive conn struct */ +typedef struct wl_mtcpkeep_alive_conn_pkt { + struct ether_addr saddr; /* src mac address */ + struct ether_addr daddr; /* dst mac address */ + struct ipv4_addr sipaddr; /* source IP addr */ + struct ipv4_addr dipaddr; /* dest IP addr */ + uint16 sport; /* src port */ + uint16 dport; /* dest port */ + uint32 seq; /* seq number */ + uint32 ack; /* ACK number */ + uint16 tcpwin; /* TCP window */ +} wl_mtcpkeep_alive_conn_pkt_t; + +/* TCP Keep-Alive interval struct */ +typedef struct wl_mtcpkeep_alive_timers_pkt { + uint16 interval; /* interval timer */ + uint16 retry_interval; /* retry_interval timer */ + uint16 retry_count; /* retry_count */ +} wl_mtcpkeep_alive_timers_pkt_t; + +typedef struct wake_info { + uint32 wake_reason; + uint32 wake_info_len; /* size of packet */ + uchar packet[1]; +} wake_info_t; + +typedef struct wake_pkt { + uint32 wake_pkt_len; /* size of packet */ + uchar packet[1]; +} wake_pkt_t; + + +#define WL_MTCPKEEP_ALIVE_VERSION 1 + +#ifdef WLBA + +#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */ + +/* block ack related stats */ +typedef struct wlc_ba_cnt { + uint16 version; /* WLC_BA_CNT_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txpdu; /* pdus sent */ + uint32 txsdu; /* sdus sent */ + uint32 txfc; /* tx side flow controlled packets */ + uint32 txfci; /* tx side flow control initiated */ + uint32 txretrans; /* retransmitted pdus */ + uint32 txbatimer; /* ba resend due to timer */ + uint32 txdrop; /* dropped packets */ + uint32 txaddbareq; /* addba req sent */ + uint32 txaddbaresp; /* addba resp sent */ + uint32 txdelba; /* delba sent */ + uint32 txba; /* ba sent */ + uint32 txbar; /* bar sent */ + uint32 txpad[4]; /* future */ + + /* receive side counters */ + uint32 rxpdu; /* pdus recd */ + uint32 rxqed; /* pdus buffered before sending up */ + uint32 rxdup; /* duplicate pdus */ + uint32 rxnobuf; /* pdus discarded due to no buf */ + uint32 rxaddbareq; /* addba req recd */ + uint32 rxaddbaresp; /* addba resp recd */ + uint32 rxdelba; /* delba recd */ + uint32 rxba; /* ba recd */ + uint32 rxbar; /* bar recd */ + uint32 rxinvba; /* invalid ba recd */ + uint32 rxbaholes; /* ba recd with holes */ + uint32 rxunexp; /* unexpected packets */ + uint32 rxpad[4]; /* future */ +} wlc_ba_cnt_t; +#endif /* WLBA */ + +/* structure for per-tid ampdu control */ +struct ampdu_tid_control { + uint8 tid; /* tid */ + uint8 enable; /* enable/disable */ +}; + +/* 
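Illustrative sketch (not part of the patch): wl_mkeep_alive_pkt_t ends in a one-byte flexible array, so the buffer handed to the driver is sized as WL_MKEEP_ALIVE_FIXED_LEN plus the raw packet bytes; per the field comments, the length member covers only the fixed part. How the buffer is submitted (the mkeep_alive iovar) is not shown, and the header's typedefs are assumed to be in scope.

#include <stdlib.h>
#include <string.h>

static wl_mkeep_alive_pkt_t *build_mkeep_alive(uint8 id, uint32 period_msec,
					       const uint8 *pkt, uint16 pkt_len)
{
	size_t total = WL_MKEEP_ALIVE_FIXED_LEN + pkt_len;
	wl_mkeep_alive_pkt_t *ka = (wl_mkeep_alive_pkt_t *)calloc(1, total);

	if (ka == NULL)
		return NULL;

	ka->version = WL_MKEEP_ALIVE_VERSION;
	ka->length = WL_MKEEP_ALIVE_FIXED_LEN;	/* fixed parameters only */
	ka->keep_alive_id = id;			/* 0..3 */
	ka->period_msec = period_msec;		/* retransmission period in ms */
	ka->len_bytes = pkt_len;
	memcpy(ka->data, pkt, pkt_len);		/* packet contents follow the header */
	return ka;
}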
struct for ampdu tx/rx aggregation control */ +struct ampdu_aggr { + int8 aggr_override; /* aggr overrided by dongle. Not to be set by host. */ + uint16 conf_TID_bmap; /* bitmap of TIDs to configure */ + uint16 enab_TID_bmap; /* enable/disable per TID */ +}; + +/* structure for identifying ea/tid for sending addba/delba */ +struct ampdu_ea_tid { + struct ether_addr ea; /* Station address */ + uint8 tid; /* tid */ + uint8 initiator; /* 0 is recipient, 1 is originator */ +}; +/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */ +struct ampdu_retry_tid { + uint8 tid; /* tid */ + uint8 retry; /* retry value */ +}; + +#define BDD_FNAME_LEN 32 /* Max length of friendly name */ +typedef struct bdd_fname { + uint8 len; /* length of friendly name */ + uchar name[BDD_FNAME_LEN]; /* friendly name */ +} bdd_fname_t; + +/* structure for addts arguments */ +/* For ioctls that take a list of TSPEC */ +struct tslist { + int count; /* number of tspecs */ + struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo */ +}; + +#ifdef WLTDLS +/* structure for tdls iovars */ +typedef struct tdls_iovar { + struct ether_addr ea; /* Station address */ + uint8 mode; /* mode: depends on iovar */ + chanspec_t chanspec; + uint32 pad; /* future */ +} tdls_iovar_t; + +#define TDLS_WFD_IE_SIZE 512 +/* structure for tdls wfd ie */ +typedef struct tdls_wfd_ie_iovar { + struct ether_addr ea; /* Station address */ + uint8 mode; + uint16 length; + uint8 data[TDLS_WFD_IE_SIZE]; +} tdls_wfd_ie_iovar_t; +#endif /* WLTDLS */ + +/* structure for addts/delts arguments */ +typedef struct tspec_arg { + uint16 version; /* see definition of TSPEC_ARG_VERSION */ + uint16 length; /* length of entire structure */ + uint flag; /* bit field */ + /* TSPEC Arguments */ + struct tsinfo_arg tsinfo; /* TS Info bit field */ + uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ + uint min_srv_interval; /* Minimum Service Interval (us) */ + uint max_srv_interval; /* Maximum Service Interval (us) */ + uint inactivity_interval; /* Inactivity Interval (us) */ + uint suspension_interval; /* Suspension Interval (us) */ + uint srv_start_time; /* Service Start Time (us) */ + uint min_data_rate; /* Minimum Data Rate (bps) */ + uint mean_data_rate; /* Mean Data Rate (bps) */ + uint peak_data_rate; /* Peak Data Rate (bps) */ + uint max_burst_size; /* Maximum Burst Size (bytes) */ + uint delay_bound; /* Delay Bound (us) */ + uint min_phy_rate; /* Minimum PHY Rate (bps) */ + uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */ + uint16 medium_time; /* Medium Time (32 us/s periods) */ + uint8 dialog_token; /* dialog token */ +} tspec_arg_t; + +/* tspec arg for desired station */ +typedef struct tspec_per_sta_arg { + struct ether_addr ea; + struct tspec_arg ts; +} tspec_per_sta_arg_t; + +/* structure for max bandwidth for each access category */ +typedef struct wme_max_bandwidth { + uint32 ac[AC_COUNT]; /* max bandwidth for each access category */ +} wme_max_bandwidth_t; + +#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t)) + +/* current version of wl_tspec_arg_t struct */ +#define TSPEC_ARG_VERSION 2 /* current version of wl_tspec_arg_t struct */ +#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */ +#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */ +#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */ + + +#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80 +#define 
WLC_WOWL_MAX_KEEPALIVE 2 + +/* Packet lifetime configuration per ac */ +typedef struct wl_lifetime { + uint32 ac; /* access class */ + uint32 lifetime; /* Packet lifetime value in ms */ +} wl_lifetime_t; + + +/* Channel Switch Announcement param */ +typedef struct wl_chan_switch { + uint8 mode; /* value 0 or 1 */ + uint8 count; /* count # of beacons before switching */ + chanspec_t chspec; /* chanspec */ + uint8 reg; /* regulatory class */ + uint8 frame_type; /* csa frame type, unicast or broadcast */ +} wl_chan_switch_t; + +enum { + PFN_LIST_ORDER, + PFN_RSSI +}; + +enum { + DISABLE, + ENABLE +}; + +enum { + OFF_ADAPT, + SMART_ADAPT, + STRICT_ADAPT, + SLOW_ADAPT +}; + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 +#define SUPPRESS_SSID_BIT 9 +#define ENABLE_NET_OFFLOAD_BIT 10 +/* report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_BIT 11 + +#define SORT_CRITERIA_MASK 0x0001 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 + +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 +#define SUPPRESS_SSID_MASK 0x0200 +#define ENABLE_NET_OFFLOAD_MASK 0x0400 +/* report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_MASK 0x0800 + +#define PFN_VERSION 2 +#define PFN_SCANRESULT_VERSION 1 +#define MAX_PFN_LIST_COUNT 16 + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + +#define PFN_PARTIAL_SCAN_BIT 0 +#define PFN_PARTIAL_SCAN_MASK 1 +#define PFN_SWC_RSSI_WINDOW_MAX 8 +#define PFN_SWC_MAX_NUM_APS 16 +#define PFN_HOTLIST_MAX_NUM_APS 64 + +/* PFN network info structure */ +typedef struct wl_pfn_subnet_info { + struct ether_addr BSSID; + uint8 channel; /* channel number only */ + uint8 SSID_len; + uint8 SSID[32]; +} wl_pfn_subnet_info_t; + +typedef struct wl_pfn_net_info { + wl_pfn_subnet_info_t pfnsubnet; + int16 RSSI; /* receive signal strength (in dBm) */ + uint16 timestamp; /* age in seconds */ +} wl_pfn_net_info_t; + +typedef struct wl_pfn_lnet_info { + wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */ + uint16 flags; /* partial scan, etc */ + int16 RSSI; /* receive signal strength (in dBm) */ + uint32 timestamp; /* age in miliseconds */ + uint16 rtt0; /* estimated distance to this AP in centimeters */ + uint16 rtt1; /* standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_t; + +typedef struct wl_pfn_lscanresults { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_lnet_info_t netinfo[1]; +} wl_pfn_lscanresults_t; + +/* this is used to report on 1-* pfn scan results */ +typedef struct wl_pfn_scanresults { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_t netinfo[1]; +} wl_pfn_scanresults_t; + +typedef struct wl_pfn_significant_net { + uint16 flags; + uint16 channel; + struct ether_addr BSSID; + int8 rssi[PFN_SWC_RSSI_WINDOW_MAX]; +} wl_pfn_significant_net_t; + + +typedef struct wl_pfn_swc_results { + uint32 version; + uint32 pkt_count; + uint32 total_count; + wl_pfn_significant_net_t list[1]; +} wl_pfn_swc_results_t; + +/* used to report exactly one scan result */ +/* plus reports detailed 
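Illustrative sketch (not part of the patch): wl_pfn_scanresults_t carries a variable number of wl_pfn_net_info_t records, so a consumer checks the version and completion status and then walks count entries. Assumes the header above is included.

#include <stdio.h>

static void print_pfn_results(const wl_pfn_scanresults_t *res)
{
	uint32 i;

	if (res->version != PFN_SCANRESULT_VERSION)
		return;

	for (i = 0; i < res->count; i++) {
		const wl_pfn_net_info_t *ni = &res->netinfo[i];

		printf("%.*s rssi=%d age=%us%s\n",
		       (int)ni->pfnsubnet.SSID_len,
		       (const char *)ni->pfnsubnet.SSID,
		       (int)ni->RSSI,
		       (unsigned)ni->timestamp,
		       (res->status == PFN_INCOMPLETE) ? " (partial)" : "");
	}
}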
scan info in bss_info */ +typedef struct wl_pfn_scanresult { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_t netinfo; + wl_bss_info_t bss_info; +} wl_pfn_scanresult_t; + +/* PFN data structure */ +typedef struct wl_pfn_param { + int32 version; /* PNO parameters version */ + int32 scan_freq; /* Scan frequency */ + int32 lost_network_timeout; /* Timeout in sec. to declare + * discovered network as lost + */ + int16 flags; /* Bit field to control features + * of PFN such as sort criteria auto + * enable switch and background scan + */ + int16 rssi_margin; /* Margin to avoid jitter for choosing a + * PFN based on RSSI sort criteria + */ + uint8 bestn; /* number of best networks in each scan */ + uint8 mscan; /* number of scans recorded */ + uint8 repeat; /* Minimum number of scan intervals + *before scan frequency changes in adaptive scan + */ + uint8 exp; /* Exponent of 2 for maximum scan interval */ + int32 slow_freq; /* slow scan period */ +} wl_pfn_param_t; + +typedef struct wl_pfn_bssid { + struct ether_addr macaddr; + /* Bit4: suppress_lost, Bit3: suppress_found */ + uint16 flags; +} wl_pfn_bssid_t; + +typedef struct wl_pfn_significant_bssid { + struct ether_addr macaddr; + int8 rssi_low_threshold; + int8 rssi_high_threshold; +} wl_pfn_significant_bssid_t; +#define WL_PFN_SUPPRESSFOUND_MASK 0x08 +#define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_RSSI_MASK 0xff00 +#define WL_PFN_RSSI_SHIFT 8 + +typedef struct wl_pfn_cfg { + uint32 reporttype; + int32 channel_num; + uint16 channel_list[WL_NUMCHANNELS]; + uint32 flags; +} wl_pfn_cfg_t; + +#define CH_BUCKET_REPORT_REGULAR 0 +#define CH_BUCKET_REPORT_FULL_RESULT 2 +#define CH_BUCKET_GSCAN 4 + + +typedef struct wl_pfn_gscan_channel_bucket { + uint16 bucket_end_index; + uint8 bucket_freq_multiple; + uint8 report_flag; +} wl_pfn_gscan_channel_bucket_t; + +#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0) +#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7) + +typedef struct wl_pfn_gscan_cfg { + /* BIT0 1 = send probes/beacons to HOST + * BIT2 Reserved + * Add any future flags here + * BIT7 1 = no other useful cfg sent + */ + uint8 flags; + /* Buffer filled threshold in % to generate an event */ + uint8 buffer_threshold; + /* No. 
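Illustrative sketch (not part of the patch): a host configuring PNO would typically fill wl_pfn_param_t with the DEFAULT_* values defined earlier and compose the flags bit field from the *_BIT positions; the scan period and lost-network timeout below are arbitrary example numbers, not recommended settings.

#include <string.h>

static void init_pfn_param(wl_pfn_param_t *p)
{
	memset(p, 0, sizeof(*p));
	p->version = PFN_VERSION;
	p->scan_freq = 60;			/* example value */
	p->lost_network_timeout = 180;		/* seconds, example value */
	p->flags = (1 << ENABLE_NET_OFFLOAD_BIT) |
		   (1 << IMMEDIATE_SCAN_BIT) |
		   ((SMART_ADAPT << ENABLE_ADAPTSCAN_BIT) & ENABLE_ADAPTSCAN_MASK);
	p->bestn = DEFAULT_BESTN;
	p->mscan = DEFAULT_MSCAN;
	p->repeat = DEFAULT_REPEAT;
	p->exp = DEFAULT_EXP;
}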
of BSSIDs with "change" to generate an evt + * change - crosses rssi threshold/lost + */ + uint8 swc_nbssid_threshold; + /* Max=8 (for now) Size of rssi cache buffer */ + uint8 swc_rssi_window_size; + uint16 count_of_channel_buckets; + uint16 lost_ap_window; + wl_pfn_gscan_channel_bucket_t channel_bucket[1]; +} wl_pfn_gscan_cfg_t; + + +#define WL_PFN_REPORT_ALLNET 0 +#define WL_PFN_REPORT_SSIDNET 1 +#define WL_PFN_REPORT_BSSIDNET 2 +#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ +#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /* Remaining reserved for future use */ + +typedef struct wl_pfn { + wlc_ssid_t ssid; /* ssid name and its length */ + int32 flags; /* bit2: hidden */ + int32 infra; /* BSS Vs IBSS */ + int32 auth; /* Open Vs Closed */ + int32 wpa_auth; /* WPA type */ + int32 wsec; /* wsec value */ +} wl_pfn_t; + +typedef struct wl_pfn_list { + uint32 version; + uint32 enabled; + uint32 count; + wl_pfn_t pfn[1]; +} wl_pfn_list_t; + +#define WL_PFN_MAC_OUI_ONLY_MASK 1 +#define WL_PFN_SET_MAC_UNASSOC_MASK 2 +/* To configure pfn_macaddr */ +typedef struct wl_pfn_macaddr_cfg { + uint8 version; + uint8 flags; + struct ether_addr macaddr; +} wl_pfn_macaddr_cfg_t; +#define WL_PFN_MACADDR_CFG_VER 1 +typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t { + wlc_ssid_t ssid; + uint32 cipher_type; + uint32 auth_type; + uint8 channels[4]; +} BWL_POST_PACKED_STRUCT pfn_olmsg_params; + +#define WL_PFN_HIDDEN_BIT 2 +#define WL_PFN_HIDDEN_MASK 0x4 + +#ifndef BESTN_MAX +#define BESTN_MAX 3 +#endif + +#ifndef MSCAN_MAX +#define MSCAN_MAX 90 +#endif + +/* + * WLFCTS definition + */ +typedef struct wl_txstatus_additional_info { + uint32 rspec; + uint32 enq_ts; + uint32 last_ts; + uint32 entry_ts; + uint16 seq; + uint8 rts_cnt; + uint8 tx_cnt; +} wl_txstatus_additional_info_t; + +/* Service discovery */ +typedef struct { + uint8 transaction_id; /* Transaction id */ + uint8 protocol; /* Service protocol type */ + uint16 query_len; /* Length of query */ + uint16 response_len; /* Length of response */ + uint8 qrbuf[1]; +} wl_p2po_qr_t; + +typedef struct { + uint16 period; /* extended listen period */ + uint16 interval; /* extended listen interval */ + uint16 count; /* count to repeat */ + uint16 pad; /* pad for 32bit align */ +} wl_p2po_listen_t; + +/* GAS state machine tunable parameters. Structure field values of 0 means use the default. */ +typedef struct wl_gas_config { + uint16 max_retransmit; /* Max # of firmware/driver retransmits on no Ack + * from peer (on top of the ucode retries). + */ + uint16 response_timeout; /* Max time to wait for a GAS-level response + * after sending a packet. + */ + uint16 max_comeback_delay; /* Max GAS response comeback delay. + * Exceeding this fails the GAS exchange. + */ + uint16 max_retries; /* Max # of GAS state machine retries on failure + * of a GAS frame exchange. + */ +} wl_gas_config_t; + +/* P2P Find Offload parameters */ +typedef BWL_PRE_PACKED_STRUCT struct wl_p2po_find_config { + uint16 version; /* Version of this struct */ + uint16 length; /* sizeof(wl_p2po_find_config_t) */ + int32 search_home_time; /* P2P search state home time when concurrent + * connection exists. -1 for default. + */ + uint8 num_social_channels; + /* Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX. + * 0 means use default social channels. 
+ */ + uint8 flags; + uint16 social_channels[1]; /* Variable length array of social channels */ +} BWL_POST_PACKED_STRUCT wl_p2po_find_config_t; +#define WL_P2PO_FIND_CONFIG_VERSION 2 /* value for version field */ + +/* wl_p2po_find_config_t flags */ +#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /* Whether to scan for all APs in the p2po_find + * periodic scans of all channels. + * 0 means scan for only P2P devices. + * 1 means scan for P2P devices plus non-P2P APs. + */ + + +/* For adding a WFDS service to seek */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 seek_hdl; /* unique id chosen by host */ + uint8 addr[6]; /* Seek service from a specific device with this + * MAC address, all 1's for any device. + */ + uint8 service_hash[P2P_WFDS_HASH_LEN]; + uint8 service_name_len; + uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN]; + /* Service name to seek, not null terminated */ + uint8 service_info_req_len; + uint8 service_info_req[1]; /* Service info request, not null terminated. + * Variable length specified by service_info_req_len. + * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN. + */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_add_t; + +/* For deleting a WFDS service to seek */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 seek_hdl; /* delete service specified by id */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_del_t; + + +/* For adding a WFDS service to advertise */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 advertise_hdl; /* unique id chosen by host */ + uint8 service_hash[P2P_WFDS_HASH_LEN]; + uint32 advertisement_id; + uint16 service_config_method; + uint8 service_name_len; + uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; + /* Service name , not null terminated */ + uint8 service_status; + uint16 service_info_len; + uint8 service_info[1]; /* Service info, not null terminated. + * Variable length specified by service_info_len. + * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN. 
+ */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t; + +/* For deleting a WFDS service to advertise */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 advertise_hdl; /* delete service specified by hdl */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_del_t; + +/* P2P Offload discovery mode for the p2po_state iovar */ +typedef enum { + WL_P2PO_DISC_STOP, + WL_P2PO_DISC_LISTEN, + WL_P2PO_DISC_DISCOVERY +} disc_mode_t; + +/* ANQP offload */ + +#define ANQPO_MAX_QUERY_SIZE 256 +typedef struct { + uint16 max_retransmit; /* ~0 use default, max retransmit on no ACK from peer */ + uint16 response_timeout; /* ~0 use default, msec to wait for resp after tx packet */ + uint16 max_comeback_delay; /* ~0 use default, max comeback delay in resp else fail */ + uint16 max_retries; /* ~0 use default, max retries on failure */ + uint16 query_len; /* length of ANQP query */ + uint8 query_data[1]; /* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */ +} wl_anqpo_set_t; + +typedef struct { + uint16 channel; /* channel of the peer */ + struct ether_addr addr; /* addr of the peer */ +} wl_anqpo_peer_t; + +#define ANQPO_MAX_PEER_LIST 64 +typedef struct { + uint16 count; /* number of peers in list */ + wl_anqpo_peer_t peer[1]; /* max ANQPO_MAX_PEER_LIST */ +} wl_anqpo_peer_list_t; + +#define ANQPO_MAX_IGNORE_SSID 64 +typedef struct { + bool is_clear; /* set to clear list (not used on GET) */ + uint16 count; /* number of SSID in list */ + wlc_ssid_t ssid[1]; /* max ANQPO_MAX_IGNORE_SSID */ +} wl_anqpo_ignore_ssid_list_t; + +#define ANQPO_MAX_IGNORE_BSSID 64 +typedef struct { + bool is_clear; /* set to clear list (not used on GET) */ + uint16 count; /* number of addr in list */ + struct ether_addr bssid[1]; /* max ANQPO_MAX_IGNORE_BSSID */ +} wl_anqpo_ignore_bssid_list_t; + + +struct toe_ol_stats_t { + /* Num of tx packets that don't need to be checksummed */ + uint32 tx_summed; + + /* Num of tx packets where checksum is filled by offload engine */ + uint32 tx_iph_fill; + uint32 tx_tcp_fill; + uint32 tx_udp_fill; + uint32 tx_icmp_fill; + + /* Num of rx packets where toe finds out if checksum is good or bad */ + uint32 rx_iph_good; + uint32 rx_iph_bad; + uint32 rx_tcp_good; + uint32 rx_tcp_bad; + uint32 rx_udp_good; + uint32 rx_udp_bad; + uint32 rx_icmp_good; + uint32 rx_icmp_bad; + + /* Num of tx packets in which csum error is injected */ + uint32 tx_tcp_errinj; + uint32 tx_udp_errinj; + uint32 tx_icmp_errinj; + + /* Num of rx packets in which csum error is injected */ + uint32 rx_tcp_errinj; + uint32 rx_udp_errinj; + uint32 rx_icmp_errinj; +}; + +/* Arp offload statistic counts */ +struct arp_ol_stats_t { + uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ + uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */ + + uint32 arp_table_entries; /* ARP table entries */ + uint32 arp_table_overflow; /* ARP table additions skipped due to overflow */ + + uint32 host_request; /* ARP requests from host */ + uint32 host_reply; /* ARP replies from host */ + uint32 host_service; /* ARP requests from host serviced by ARP Agent */ + + uint32 peer_request; /* ARP requests received from network */ + uint32 peer_request_drop; /* ARP requests from network that were dropped */ + uint32 peer_reply; /* ARP replies received from network */ + uint32 peer_reply_drop; /* ARP replies from network that were dropped */ + uint32 peer_service; /* ARP request from host serviced by ARP Agent */ +}; + +/* NS offload statistic counts */ +struct nd_ol_stats_t { + uint32 
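Illustrative sketch (not part of the patch): wl_anqpo_set_t is another length-prefixed container; the "~0 use default" convention from its field comments is applied to the tunables and the encoded ANQP query is copied into the trailing flexible array. OFFSETOF is the header's own offset macro.

#include <stdlib.h>
#include <string.h>

static wl_anqpo_set_t *build_anqpo_set(const uint8 *query, uint16 query_len)
{
	wl_anqpo_set_t *set;
	size_t total;

	if (query_len > ANQPO_MAX_QUERY_SIZE)
		return NULL;

	total = OFFSETOF(wl_anqpo_set_t, query_data) + query_len;
	set = (wl_anqpo_set_t *)calloc(1, total);
	if (set == NULL)
		return NULL;

	set->max_retransmit = (uint16)~0;	/* ~0 means use the firmware default */
	set->response_timeout = (uint16)~0;
	set->max_comeback_delay = (uint16)~0;
	set->max_retries = (uint16)~0;
	set->query_len = query_len;
	memcpy(set->query_data, query, query_len);
	return set;
}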
host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ + uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */ + uint32 peer_request; /* NS requests received from network */ + uint32 peer_request_drop; /* NS requests from network that were dropped */ + uint32 peer_reply_drop; /* NA replies from network that were dropped */ + uint32 peer_service; /* NS request from host serviced by firmware */ +}; + +/* + * Keep-alive packet offloading. + */ + +/* NAT keep-alive packets format: specifies the re-transmission period, the packet + * length, and packet contents. + */ +typedef struct wl_keep_alive_pkt { + uint32 period_msec; /* Retransmission period (0 to disable packet re-transmits) */ + uint16 len_bytes; /* Size of packet to transmit (0 to disable packet re-transmits) */ + uint8 data[1]; /* Variable length packet to transmit. Contents should include + * entire ethernet packet (enet header, IP header, UDP header, + * and UDP payload) in network byte order. + */ +} wl_keep_alive_pkt_t; + +#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data) + + +/* + * Dongle pattern matching filter. + */ + +#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */ + +#define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \ + DOT11_QOS_LEN + \ + sizeof(struct dot11_llc_snap_header) + \ + ETHER_MAX_DATA) + +typedef struct pm_wake_packet { + uint32 status; /* Is the wake reason a packet (if all the other field's valid) */ + uint32 pattern_id; /* Pattern ID that matched */ + uint32 original_packet_size; + uint32 saved_packet_size; + uchar packet[MAX_WAKE_PACKET_CACHE_BYTES]; +} pm_wake_packet_t; + +/* Packet filter types. Currently, only pattern matching is supported. */ +typedef enum wl_pkt_filter_type { + WL_PKT_FILTER_TYPE_PATTERN_MATCH=0, /* Pattern matching filter */ + WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */ + WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /* A pattern list (match all to match filter) */ + WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /* SECURE WOWL magic / net pattern match */ +} wl_pkt_filter_type_t; + +#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t + +/* String mapping for types that may be used by applications or debug */ +#define WL_PKT_FILTER_TYPE_NAMES \ + { "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH }, \ + { "MAGIC", WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \ + { "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH } + +/* Secured WOWL packet was encrypted, need decrypted before check filter match */ +typedef struct wl_pkt_decrypter { + uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending); + void* dec_ctx; +} wl_pkt_decrypter_t; + +/* Pattern matching filter. Specifies an offset within received packets to + * start matching, the pattern to match, the size of the pattern, and a bitmask + * that indicates which bits within the pattern should be matched. + */ +typedef struct wl_pkt_filter_pattern { + uint32 offset; /* Offset within received packet to start pattern matching. + * Offset '0' is the first byte of the ethernet header. + */ + uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */ + uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts + * at offset 0. Pattern immediately follows mask. for + * secured pattern, put the descrypter pointer to the + * beginning, mask and pattern postponed correspondingly + */ +} wl_pkt_filter_pattern_t; + +/* A pattern list is a numerically specified list of modified pattern structures. 
*/ +typedef struct wl_pkt_filter_pattern_listel { + uint16 rel_offs; /* Offset to begin match (relative to 'base' below) */ + uint16 base_offs; /* Base for offset (defined below) */ + uint16 size_bytes; /* Size of mask/pattern */ + uint16 match_flags; /* Addition flags controlling the match */ + uint8 mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */ +} wl_pkt_filter_pattern_listel_t; + +typedef struct wl_pkt_filter_pattern_list { + uint8 list_cnt; /* Number of elements in the list */ + uint8 PAD1[1]; /* Reserved (possible version: reserved) */ + uint16 totsize; /* Total size of this pattern list (includes this struct) */ + wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */ +} wl_pkt_filter_pattern_list_t; + +/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */ +typedef struct wl_pkt_filter { + uint32 id; /* Unique filter id, specified by app. */ + uint32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */ + uint32 negate_match; /* Negate the result of filter matches */ + union { /* Filter definitions */ + wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */ + wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */ + } u; +} wl_pkt_filter_t; + +/* IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */ +typedef struct wl_tcp_keep_set { + uint32 val1; + uint32 val2; +} wl_tcp_keep_set_t; + +#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u) +#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern) +#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns) +#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN \ + OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data) + +/* IOVAR "pkt_filter_enable" parameter. */ +typedef struct wl_pkt_filter_enable { + uint32 id; /* Unique filter id */ + uint32 enable; /* Enable/disable bool */ +} wl_pkt_filter_enable_t; + +/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */ +typedef struct wl_pkt_filter_list { + uint32 num; /* Number of installed packet filters */ + wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */ +} wl_pkt_filter_list_t; + +#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter) + +/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */ +typedef struct wl_pkt_filter_stats { + uint32 num_pkts_matched; /* # filter matches for specified filter id */ + uint32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */ + uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */ +} wl_pkt_filter_stats_t; + +/* IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters. 
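Illustrative sketch (not part of the patch): for a plain pattern-match filter the mask and the pattern are packed back to back in mask_and_pattern[], so the total buffer is the two fixed-length prefixes plus twice size_bytes. Submission via the pkt_filter_add iovar is not shown.

#include <stdlib.h>
#include <string.h>

static wl_pkt_filter_t *build_pattern_filter(uint32 id, uint32 pkt_offset,
					     const uint8 *mask,
					     const uint8 *pattern,
					     uint32 size_bytes)
{
	size_t total = WL_PKT_FILTER_FIXED_LEN +
		       WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * size_bytes;
	wl_pkt_filter_t *f = (wl_pkt_filter_t *)calloc(1, total);

	if (f == NULL)
		return NULL;

	f->id = id;
	f->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
	f->negate_match = 0;
	f->u.pattern.offset = pkt_offset;	/* 0 = first byte of the ethernet header */
	f->u.pattern.size_bytes = size_bytes;	/* mask and pattern are the same size */
	memcpy(f->u.pattern.mask_and_pattern, mask, size_bytes);
	memcpy(f->u.pattern.mask_and_pattern + size_bytes, pattern, size_bytes);
	return f;
}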
*/ +typedef struct wl_pkt_filter_ports { + uint8 version; /* Be proper */ + uint8 reserved; /* Be really proper */ + uint16 count; /* Number of ports following */ + /* End of fixed data */ + uint16 ports[1]; /* Placeholder for ports[] */ +} wl_pkt_filter_ports_t; + +#define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports) + +#define WL_PKT_FILTER_PORTS_VERSION 0 +#define WL_PKT_FILTER_PORTS_MAX 128 + +#define RSN_REPLAY_LEN 8 +typedef struct _gtkrefresh { + uchar KCK[RSN_KCK_LENGTH]; + uchar KEK[RSN_KEK_LENGTH]; + uchar ReplayCounter[RSN_REPLAY_LEN]; +} gtk_keyinfo_t, *pgtk_keyinfo_t; + +/* Sequential Commands ioctl */ +typedef struct wl_seq_cmd_ioctl { + uint32 cmd; /* common ioctl definition */ + uint32 len; /* length of user buffer */ +} wl_seq_cmd_ioctl_t; + +#define WL_SEQ_CMD_ALIGN_BYTES 4 + +/* These are the set of get IOCTLs that should be allowed when using + * IOCTL sequence commands. These are issued implicitly by wl.exe each time + * it is invoked. We never want to buffer these, or else wl.exe will stop working. + */ +#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \ + (((cmd) == WLC_GET_MAGIC) || \ + ((cmd) == WLC_GET_VERSION) || \ + ((cmd) == WLC_GET_AP) || \ + ((cmd) == WLC_GET_INSTANCE)) + +typedef struct wl_pkteng { + uint32 flags; + uint32 delay; /* Inter-packet delay */ + uint32 nframes; /* Number of frames */ + uint32 length; /* Packet length */ + uint8 seqno; /* Enable/disable sequence no. */ + struct ether_addr dest; /* Destination address */ + struct ether_addr src; /* Source address */ +} wl_pkteng_t; + +typedef struct wl_pkteng_stats { + uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */ + int32 rssi; /* RSSI */ + int32 snr; /* signal to noise ratio */ + uint16 rxpktcnt[NUM_80211_RATES+1]; + uint8 rssi_qdb; /* qdB portion of the computed rssi */ +} wl_pkteng_stats_t; + +typedef struct wl_txcal_params { + wl_pkteng_t pkteng; + uint8 gidx_start; + int8 gidx_step; + uint8 gidx_stop; +} wl_txcal_params_t; + + +typedef enum { + wowl_pattern_type_bitmap = 0, + wowl_pattern_type_arp, + wowl_pattern_type_na +} wowl_pattern_type_t; + +typedef struct wl_wowl_pattern { + uint32 masksize; /* Size of the mask in #of bytes */ + uint32 offset; /* Pattern byte offset in packet */ + uint32 patternoffset; /* Offset of start of pattern in the structure */ + uint32 patternsize; /* Size of the pattern itself in #of bytes */ + uint32 id; /* id */ + uint32 reasonsize; /* Size of the wakeup reason code */ + wowl_pattern_type_t type; /* Type of pattern */ + /* Mask follows the structure above */ + /* Pattern follows the mask is at 'patternoffset' from the start */ +} wl_wowl_pattern_t; + +typedef struct wl_wowl_pattern_list { + uint count; + wl_wowl_pattern_t pattern[1]; +} wl_wowl_pattern_list_t; + +typedef struct wl_wowl_wakeind { + uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */ + uint32 ucode_wakeind; /* What wakeup-event indication was set by ucode */ +} wl_wowl_wakeind_t; + +typedef struct { + uint32 pktlen; /* size of packet */ + void *sdu; +} tcp_keepalive_wake_pkt_infop_t; + +/* per AC rate control related data structure */ +typedef struct wl_txrate_class { + uint8 init_rate; + uint8 min_rate; + uint8 max_rate; +} wl_txrate_class_t; + +/* structure for Overlap BSS scan arguments */ +typedef struct wl_obss_scan_arg { + int16 passive_dwell; + int16 active_dwell; + int16 bss_widthscan_interval; + int16 passive_total; + int16 active_total; + int16 chanwidth_transition_delay; + int16 activity_threshold; +} wl_obss_scan_arg_t; + +#define 
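Illustrative sketch (not part of the patch): wl_wowl_pattern_t is only a header; per its comments the mask bytes follow the struct and the pattern begins at patternoffset from the start of the buffer. Fields not set below are left zeroed.

#include <stdlib.h>
#include <string.h>

static wl_wowl_pattern_t *build_wowl_pattern(uint32 pkt_offset,
					     const uint8 *mask, uint32 masksize,
					     const uint8 *pattern, uint32 patternsize)
{
	size_t hdr = sizeof(wl_wowl_pattern_t);
	wl_wowl_pattern_t *p =
		(wl_wowl_pattern_t *)calloc(1, hdr + masksize + patternsize);

	if (p == NULL)
		return NULL;

	p->type = wowl_pattern_type_bitmap;
	p->offset = pkt_offset;			/* byte offset within the packet */
	p->masksize = masksize;
	p->patternoffset = (uint32)(hdr + masksize);	/* pattern follows the mask */
	p->patternsize = patternsize;
	memcpy((uint8 *)p + hdr, mask, masksize);
	memcpy((uint8 *)p + p->patternoffset, pattern, patternsize);
	return p;
}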
WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t) + +/* RSSI event notification configuration. */ +typedef struct wl_rssi_event { + uint32 rate_limit_msec; /* # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). + */ + uint8 num_rssi_levels; /* Number of entries in rssi_levels[] below */ + int8 rssi_levels[MAX_RSSI_LEVELS]; /* Variable number of RSSI levels. An event + * will be posted each time the RSSI of received + * beacons/packets crosses a level. + */ +} wl_rssi_event_t; + +/* CCA based channel quality event configuration */ +#define WL_CHAN_QUAL_CCA 0 +#define WL_CHAN_QUAL_NF 1 +#define WL_CHAN_QUAL_NF_LTE 2 +#define WL_CHAN_QUAL_TOTAL 3 + +#define MAX_CHAN_QUAL_LEVELS 8 + +typedef struct wl_chan_qual_metric { + uint8 id; /* metric ID */ + uint8 num_levels; /* Number of entries in rssi_levels[] below */ + uint16 flags; + int16 htol[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: hi-to-lo */ + int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: lo-to-hi */ +} wl_chan_qual_metric_t; + +typedef struct wl_chan_qual_event { + uint32 rate_limit_msec; /* # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). + */ + uint16 flags; + uint16 num_metrics; + wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /* metric array */ +} wl_chan_qual_event_t; + +typedef struct wl_action_obss_coex_req { + uint8 info; + uint8 num; + uint8 ch_list[1]; +} wl_action_obss_coex_req_t; + + +/* IOVar parameter block for small MAC address array with type indicator */ +#define WL_IOV_MAC_PARAM_LEN 4 + +#define WL_IOV_PKTQ_LOG_PRECS 16 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 num_addrs; + char addr_type[WL_IOV_MAC_PARAM_LEN]; + struct ether_addr ea[WL_IOV_MAC_PARAM_LEN]; +} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t; + +/* This is extra info that follows wl_iov_mac_params_t */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 addr_info[WL_IOV_MAC_PARAM_LEN]; +} BWL_POST_PACKED_STRUCT wl_iov_mac_extra_params_t; + +/* Combined structure */ +typedef struct { + wl_iov_mac_params_t params; + wl_iov_mac_extra_params_t extra_params; +} wl_iov_mac_full_params_t; + +/* Parameter block for PKTQ_LOG statistics */ +#define PKTQ_LOG_COUNTERS_V4 \ + /* packets requested to be stored */ \ + uint32 requested; \ + /* packets stored */ \ + uint32 stored; \ + /* packets saved, because a lowest priority queue has given away one packet */ \ + uint32 saved; \ + /* packets saved, because an older packet from the same queue has been dropped */ \ + uint32 selfsaved; \ + /* packets dropped, because pktq is full with higher precedence packets */ \ + uint32 full_dropped; \ + /* packets dropped because pktq per that precedence is full */ \ + uint32 dropped; \ + /* packets dropped, in order to save one from a queue of a highest priority */ \ + uint32 sacrificed; \ + /* packets droped because of hardware/transmission error */ \ + uint32 busy; \ + /* packets re-sent because they were not received */ \ + uint32 retry; \ + /* packets retried again (ps pretend) prior to moving power save mode */ \ + uint32 ps_retry; \ + /* suppressed packet count */ \ + uint32 suppress; \ + /* packets finally dropped after retry limit */ \ + uint32 retry_drop; \ + /* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \ + uint32 max_avail; \ + /* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \ + uint32 max_used; \ + /* the maximum capacity of the queue */ \ + uint32 
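Illustrative sketch (not part of the patch): wl_rssi_event_t takes a small set of RSSI thresholds; an event is posted whenever beacon/packet RSSI crosses one of them, rate-limited by rate_limit_msec. The threshold values below are arbitrary examples, and MAX_RSSI_LEVELS is assumed to be defined elsewhere in the header.

static void init_rssi_event(wl_rssi_event_t *ev)
{
	ev->rate_limit_msec = 500;	/* at most one event per 500 ms; 0 disables limiting */
	ev->num_rssi_levels = 3;	/* must not exceed MAX_RSSI_LEVELS */
	ev->rssi_levels[0] = -80;
	ev->rssi_levels[1] = -70;
	ev->rssi_levels[2] = -60;
}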
queue_capacity; \ + /* count of rts attempts that failed to receive cts */ \ + uint32 rtsfail; \ + /* count of packets sent (acked) successfully */ \ + uint32 acked; \ + /* running total of phy rate of packets sent successfully */ \ + uint32 txrate_succ; \ + /* running total of phy 'main' rate */ \ + uint32 txrate_main; \ + /* actual data transferred successfully */ \ + uint32 throughput; \ + /* time difference since last pktq_stats */ \ + uint32 time_delta; + +typedef struct { + PKTQ_LOG_COUNTERS_V4 +} pktq_log_counters_v04_t; + +/* v5 is the same as V4 with extra parameter */ +typedef struct { + PKTQ_LOG_COUNTERS_V4 + /* cumulative time to transmit */ + uint32 airtime; +} pktq_log_counters_v05_t; + +typedef struct { + uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; + pktq_log_counters_v04_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; + uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; + uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; + char headings[1]; +} pktq_log_format_v04_t; + +typedef struct { + uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; + pktq_log_counters_v05_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; + uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; + uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; + char headings[1]; +} pktq_log_format_v05_t; + + +typedef struct { + uint32 version; + wl_iov_mac_params_t params; + union { + pktq_log_format_v04_t v04; + pktq_log_format_v05_t v05; + } pktq_log; +} wl_iov_pktq_log_t; + +/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */ +#define PKTQ_LOG_AUTO (1 << 31) +#define PKTQ_LOG_DEF_PREC (1 << 30) + + +#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0 + +#define WL_PFN_MAC_OUI_ONLY_MASK 1 +#define WL_PFN_SET_MAC_UNASSOC_MASK 2 +#define WL_PFN_RESTRICT_LA_MAC_MASK 4 +#define WL_PFN_MACADDR_FLAG_MASK 0x7 + + +/* + * SCB_BS_DATA iovar definitions start. + */ +#define SCB_BS_DATA_STRUCT_VERSION 1 + +/* The actual counters maintained for each station */ +typedef BWL_PRE_PACKED_STRUCT struct { + /* The following counters are a subset of what pktq_stats provides per precedence. */ + uint32 retry; /* packets re-sent because they were not received */ + uint32 retry_drop; /* packets finally dropped after retry limit */ + uint32 rtsfail; /* count of rts attempts that failed to receive cts */ + uint32 acked; /* count of packets sent (acked) successfully */ + uint32 txrate_succ; /* running total of phy rate of packets sent successfully */ + uint32 txrate_main; /* running total of phy 'main' rate */ + uint32 throughput; /* actual data transferred successfully */ + uint32 time_delta; /* time difference since last pktq_stats */ + uint32 airtime; /* cumulative total medium access delay in useconds */ +} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t; + +/* The structure for individual station information. */ +typedef BWL_PRE_PACKED_STRUCT struct { + struct ether_addr station_address; /* The station MAC address */ + uint16 station_flags; /* Bit mask of flags, for future use. */ + iov_bs_data_counters_t station_counters; /* The actual counter values */ +} BWL_POST_PACKED_STRUCT iov_bs_data_record_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 structure_version; /* Structure version number (for wl/wlu matching) */ + uint16 structure_count; /* Number of iov_bs_data_record_t records following */ + iov_bs_data_record_t structure_record[1]; /* 0 - structure_count records */ +} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t; + +/* Bitmask of options that can be passed in to the iovar. 
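Illustrative sketch (not part of the patch): the SCB_BS_DATA reply is a version/count header followed by structure_count packed per-station records, so a consumer checks the version and walks the array. It assumes the header's struct ether_addr exposes its bytes as octet[], which is not shown in this excerpt.

#include <stdio.h>

static void print_bs_data(const iov_bs_data_struct_t *bs)
{
	uint16 i;

	if (bs->structure_version != SCB_BS_DATA_STRUCT_VERSION)
		return;

	for (i = 0; i < bs->structure_count; i++) {
		const iov_bs_data_record_t *r = &bs->structure_record[i];
		const iov_bs_data_counters_t *c = &r->station_counters;

		printf("%02x:%02x:%02x:%02x:%02x:%02x acked=%u retry=%u airtime=%uus\n",
		       r->station_address.octet[0], r->station_address.octet[1],
		       r->station_address.octet[2], r->station_address.octet[3],
		       r->station_address.octet[4], r->station_address.octet[5],
		       c->acked, c->retry, c->airtime);
	}
}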
*/ +enum { + SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /* Do not clear the counters after reading */ +}; +/* + * SCB_BS_DATA iovar definitions end. + */ + +typedef struct wlc_extlog_cfg { + int max_number; + uint16 module; /* bitmap */ + uint8 level; + uint8 flag; + uint16 version; +} wlc_extlog_cfg_t; + +typedef struct log_record { + uint32 time; + uint16 module; + uint16 id; + uint8 level; + uint8 sub_unit; + uint8 seq_num; + int32 arg; + char str[MAX_ARGSTR_LEN]; +} log_record_t; + +typedef struct wlc_extlog_req { + uint32 from_last; + uint32 num; +} wlc_extlog_req_t; + +typedef struct wlc_extlog_results { + uint16 version; + uint16 record_len; + uint32 num; + log_record_t logs[1]; +} wlc_extlog_results_t; + +typedef struct log_idstr { + uint16 id; + uint16 flag; + uint8 arg_type; + const char *fmt_str; +} log_idstr_t; + +#define FMTSTRF_USER 1 + +/* flat ID definitions + * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will + * affect backward compatibility with pre-existing apps + */ +typedef enum { + FMTSTR_DRIVER_UP_ID = 0, + FMTSTR_DRIVER_DOWN_ID = 1, + FMTSTR_SUSPEND_MAC_FAIL_ID = 2, + FMTSTR_NO_PROGRESS_ID = 3, + FMTSTR_RFDISABLE_ID = 4, + FMTSTR_REG_PRINT_ID = 5, + FMTSTR_EXPTIME_ID = 6, + FMTSTR_JOIN_START_ID = 7, + FMTSTR_JOIN_COMPLETE_ID = 8, + FMTSTR_NO_NETWORKS_ID = 9, + FMTSTR_SECURITY_MISMATCH_ID = 10, + FMTSTR_RATE_MISMATCH_ID = 11, + FMTSTR_AP_PRUNED_ID = 12, + FMTSTR_KEY_INSERTED_ID = 13, + FMTSTR_DEAUTH_ID = 14, + FMTSTR_DISASSOC_ID = 15, + FMTSTR_LINK_UP_ID = 16, + FMTSTR_LINK_DOWN_ID = 17, + FMTSTR_RADIO_HW_OFF_ID = 18, + FMTSTR_RADIO_HW_ON_ID = 19, + FMTSTR_EVENT_DESC_ID = 20, + FMTSTR_PNP_SET_POWER_ID = 21, + FMTSTR_RADIO_SW_OFF_ID = 22, + FMTSTR_RADIO_SW_ON_ID = 23, + FMTSTR_PWD_MISMATCH_ID = 24, + FMTSTR_FATAL_ERROR_ID = 25, + FMTSTR_AUTH_FAIL_ID = 26, + FMTSTR_ASSOC_FAIL_ID = 27, + FMTSTR_IBSS_FAIL_ID = 28, + FMTSTR_EXTAP_FAIL_ID = 29, + FMTSTR_MAX_ID +} log_fmtstr_id_t; + +#ifdef DONGLEOVERLAYS +typedef struct { + uint32 flags_idx; /* lower 8 bits: overlay index; upper 24 bits: flags */ + uint32 offset; /* offset into overlay region to write code */ + uint32 len; /* overlay code len */ + /* overlay code follows this struct */ +} wl_ioctl_overlay_t; +#endif /* DONGLEOVERLAYS */ + +/* 11k Neighbor Report element (unversioned, deprecated) */ +typedef struct nbr_element { + uint8 id; + uint8 len; + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 pad; +} nbr_element_t; + +#define WL_RRM_NBR_RPT_VER 1 +/* 11k Neighbor Report element */ +typedef struct nbr_rpt_elem { + uint8 version; + uint8 id; + uint8 len; + uint8 pad; + struct ether_addr bssid; + uint8 pad_1[2]; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 pad_2; + wlc_ssid_t ssid; + uint8 bss_trans_preference; + uint8 pad_3[3]; +} nbr_rpt_elem_t; + +typedef enum event_msgs_ext_command { + EVENTMSGS_NONE = 0, + EVENTMSGS_SET_BIT = 1, + EVENTMSGS_RESET_BIT = 2, + EVENTMSGS_SET_MASK = 3 +} event_msgs_ext_command_t; + +#define EVENTMSGS_VER 1 +#define EVENTMSGS_EXT_STRUCT_SIZE OFFSETOF(eventmsgs_ext_t, mask[0]) + +/* len- for SET it would be mask size from the application to the firmware */ +/* for GET it would be actual firmware mask size */ +/* maxgetsize - is only used for GET. 
indicate max mask size that the */ +/* application can read from the firmware */ +typedef struct eventmsgs_ext +{ + uint8 ver; + uint8 command; + uint8 len; + uint8 maxgetsize; + uint8 mask[1]; +} eventmsgs_ext_t; + +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params { + /* no of host dma descriptors programmed by the firmware before a commit */ + uint16 max_dma_descriptors; + + uint16 host_buf_len; /* length of host buffer */ + dmaaddr_t host_buf_addr; /* physical address for bus_throughput_buf */ +} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t; +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats { + uint16 time_taken; /* no of secs the test is run */ + uint16 nbytes_per_descriptor; /* no of bytes of data dma ed per descriptor */ + + /* no of desciptors fo which dma is sucessfully completed within the test time */ + uint32 count; +} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t; + +#define MAX_ROAMOFFL_BSSID_NUM 100 + +typedef BWL_PRE_PACKED_STRUCT struct roamoffl_bssid_list { + int32 cnt; + struct ether_addr bssid[1]; +} BWL_POST_PACKED_STRUCT roamoffl_bssid_list_t; + +/* no default structure packing */ +#include + +typedef struct keepalives_max_idle { + uint16 keepalive_count; /* nmbr of keepalives per bss_max_idle period */ + uint8 mkeepalive_index; /* mkeepalive_index for keepalive frame to be used */ + uint8 PAD; /* to align next field */ + uint16 max_interval; /* seconds */ +} keepalives_max_idle_t; + +#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0) +#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1) + +/* require strict packing */ +#include + +/* ##### Power Stats section ##### */ + +#define WL_PWRSTATS_VERSION 2 + +/* Input structure for pwrstats IOVAR */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats_query { + uint16 length; /* Number of entries in type array. */ + uint16 type[1]; /* Types (tags) to retrieve. + * Length 0 (no types) means get all. + */ +} BWL_POST_PACKED_STRUCT wl_pwrstats_query_t; + +/* This structure is for version 2; version 1 will be deprecated in by FW */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { + uint16 version; /* Version = 2 is TLV format */ + uint16 length; /* Length of entire structure */ + uint8 data[1]; /* TLV data, a series of structures, + * each starting with type and length. + * + * Padded as necessary so each section + * starts on a 4-byte boundary. + * + * Both type and len are uint16, but the + * upper nibble of length is reserved so + * valid len values are 0-4095. 
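Illustrative sketch (not part of the patch): an event_msgs_ext SET buffer is EVENTMSGS_EXT_STRUCT_SIZE plus the mask itself, with len carrying the mask size supplied by the application, as described in the comments above.

#include <stdlib.h>
#include <string.h>

static eventmsgs_ext_t *build_eventmsgs_set(const uint8 *mask, uint8 mask_len)
{
	eventmsgs_ext_t *em =
		(eventmsgs_ext_t *)calloc(1, EVENTMSGS_EXT_STRUCT_SIZE + mask_len);

	if (em == NULL)
		return NULL;

	em->ver = EVENTMSGS_VER;
	em->command = EVENTMSGS_SET_MASK;	/* or EVENTMSGS_SET_BIT / EVENTMSGS_RESET_BIT */
	em->len = mask_len;			/* mask size from the application */
	memcpy(em->mask, mask, mask_len);
	return em;
}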
+ */ +} BWL_POST_PACKED_STRUCT wl_pwrstats_t; +#define WL_PWR_STATS_HDRLEN OFFSETOF(wl_pwrstats_t, data) + +/* Type values for the data section */ +#define WL_PWRSTATS_TYPE_PHY 0 /* struct wl_pwr_phy_stats */ +#define WL_PWRSTATS_TYPE_SCAN 1 /* struct wl_pwr_scan_stats */ +#define WL_PWRSTATS_TYPE_USB_HSIC 2 /* struct wl_pwr_usb_hsic_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /* struct wl_pwr_pm_awake_stats_v1 */ +#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */ +#define WL_PWRSTATS_TYPE_PCIE 6 /* struct wl_pwr_pcie_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /* struct wl_pwr_pm_awake_stats_v2 */ + +/* Bits for wake reasons */ +#define WLC_PMD_WAKE_SET 0x1 +#define WLC_PMD_PM_AWAKE_BCN 0x2 +#define WLC_PMD_BTA_ACTIVE 0x4 +#define WLC_PMD_SCAN_IN_PROGRESS 0x8 +#define WLC_PMD_RM_IN_PROGRESS 0x10 +#define WLC_PMD_AS_IN_PROGRESS 0x20 +#define WLC_PMD_PM_PEND 0x40 +#define WLC_PMD_PS_POLL 0x80 +#define WLC_PMD_CHK_UNALIGN_TBTT 0x100 +#define WLC_PMD_APSD_STA_UP 0x200 +#define WLC_PMD_TX_PEND_WAR 0x400 +#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800 +#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000 +#define WLC_PMD_NON_PRIM_STA_UP 0x4000 +#define WLC_PMD_AP_UP 0x8000 + +typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug { + uint32 timestamp; /* timestamp in millisecond */ + uint32 reason; /* reason(s) for staying awake */ +} BWL_POST_PACKED_STRUCT wlc_pm_debug_t; + +/* WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */ +#define WLC_STA_AWAKE_STATES_MAX_V1 30 +#define WLC_PMD_EVENT_MAX_V1 32 +/* Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */ +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 { + uint32 curr_time; /* ms */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + + /* Wake history tracking */ + uint8 pmwake_idx; /* for stepping through pm_state */ + wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /* timestamped wake bits */ + uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /* cumulative usecs per wake reason */ + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 { + uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v1_t awake_data; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t; + +/* WL_PWRSTATS_TYPE_PM_AWAKE2 structures */ +/* Data sent as part of pwrstats IOVAR */ +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v2 { + uint32 curr_time; /* ms */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in 
magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + + /* Wake history tracking */ + + /* pmstate array (type wlc_pm_debug_t) start offset */ + uint16 pm_state_offset; + /* pmstate number of array entries */ + uint16 pm_state_len; + + /* array (type uint32) start offset */ + uint16 pmd_event_wake_dur_offset; + /* pmd_event_wake_dur number of array entries */ + uint16 pmd_event_wake_dur_len; + + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ + uint8 pmwake_idx; /* for stepping through pm_state */ + uint8 pad[3]; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v2_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v2 { + uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v2_t awake_data; +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v2_t; + +/* Original bus structure is for HSIC */ +typedef BWL_PRE_PACKED_STRUCT struct bus_metrics { + uint32 suspend_ct; /* suspend count */ + uint32 resume_ct; /* resume count */ + uint32 disconnect_ct; /* disconnect count */ + uint32 reconnect_ct; /* reconnect count */ + uint32 active_dur; /* msecs in bus, usecs for user */ + uint32 suspend_dur; /* msecs in bus, usecs for user */ + uint32 disconnect_dur; /* msecs in bus, usecs for user */ +} BWL_POST_PACKED_STRUCT bus_metrics_t; + +/* Bus interface info for USB/HSIC */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats { + uint16 type; /* WL_PWRSTATS_TYPE_USB_HSIC */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + bus_metrics_t hsic; /* stats from hsic bus driver */ +} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t; + +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics { + uint32 d3_suspend_ct; /* suspend count */ + uint32 d0_resume_ct; /* resume count */ + uint32 perst_assrt_ct; /* PERST# assert count */ + uint32 perst_deassrt_ct; /* PERST# de-assert count */ + uint32 active_dur; /* msecs */ + uint32 d3_suspend_dur; /* msecs */ + uint32 perst_dur; /* msecs */ + uint32 l0_cnt; /* L0 entry count */ + uint32 l0_usecs; /* L0 duration in usecs */ + uint32 l1_cnt; /* L1 entry count */ + uint32 l1_usecs; /* L1 duration in usecs */ + uint32 l1_1_cnt; /* L1_1ss entry count */ + uint32 l1_1_usecs; /* L1_1ss duration in usecs */ + uint32 l1_2_cnt; /* L1_2ss entry count */ + uint32 l1_2_usecs; /* L1_2ss duration in usecs */ + uint32 l2_cnt; /* L2 entry count */ + uint32 l2_usecs; /* L2 duration in usecs */ + uint32 timestamp; /* Timestamp on when stats are collected */ + uint32 num_h2d_doorbell; /* # of doorbell interrupts - h2d */ + uint32 num_d2h_doorbell; /* # of doorbell interrupts - d2h */ + uint32 num_submissions; /* # of submissions */ + uint32 num_completions; /* # of completions */ + uint32 num_rxcmplt; /* # of rx completions */ + uint32 num_rxcmplt_drbl; /* of drbl interrupts for rx complt. */ + uint32 num_txstatus; /* # of tx completions */ + uint32 num_txstatus_drbl; /* of drbl interrupts for tx complt. 
*/ + uint32 ltr_active_ct; /* # of times chip went to LTR ACTIVE */ + uint32 ltr_active_dur; /* # of msecs chip was in LTR ACTIVE */ + uint32 ltr_sleep_ct; /* # of times chip went to LTR SLEEP */ + uint32 ltr_sleep_dur; /* # of msecs chip was in LTR SLEEP */ + uint32 deepsleep_count; /* # of times chip went to deepsleep */ + uint32 deepsleep_dur; /* # of msecs chip was in deepsleep */ +} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t; + +/* Bus interface info for PCIE */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pcie_stats { + uint16 type; /* WL_PWRSTATS_TYPE_PCIE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + pcie_bus_metrics_t pcie; /* stats from pcie bus driver */ +} BWL_POST_PACKED_STRUCT wl_pwr_pcie_stats_t; + +/* Scan information history per category */ +typedef BWL_PRE_PACKED_STRUCT struct scan_data { + uint32 count; /* Number of scans performed */ + uint32 dur; /* Total time (in us) used */ +} BWL_POST_PACKED_STRUCT scan_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_scan_stats { + uint16 type; /* WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + /* Scan history */ + scan_data_t user_scans; /* User-requested scans: (i/e/p)scan */ + scan_data_t assoc_scans; /* Scans initiated by association requests */ + scan_data_t roam_scans; /* Scans initiated by the roam engine */ + scan_data_t pno_scans[8]; /* For future PNO bucketing (BSSID, SSID, etc) */ + scan_data_t other_scans; /* Scan engine usage not assigned to the above */ +} BWL_POST_PACKED_STRUCT wl_pwr_scan_stats_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_connect_stats { + uint16 type; /* WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + /* Connection (Association + Key exchange) data */ + uint32 count; /* Number of connections performed */ + uint32 dur; /* Total time (in ms) used */ +} BWL_POST_PACKED_STRUCT wl_pwr_connect_stats_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_phy_stats { + uint16 type; /* WL_PWRSTATS_TYPE_PHY */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 tx_dur; /* TX Active duration in us */ + uint32 rx_dur; /* RX Active duration in us */ +} BWL_POST_PACKED_STRUCT wl_pwr_phy_stats_t; + + +/* ##### End of Power Stats section ##### */ + +/* IPV4 Arp offloads for ndis context */ +BWL_PRE_PACKED_STRUCT struct hostip_id { + struct ipv4_addr ipa; + uint8 id; +} BWL_POST_PACKED_STRUCT; + +/* Return values */ +#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */ +#define ND_REQ_SINK 0x2 /* Input packet should be discarded */ +#define ND_FORCE_FORWARD 0X3 /* For the dongle to forward req to HOST */ + +/* Neighbor Solicitation Response Offload IOVAR param */ +typedef BWL_PRE_PACKED_STRUCT struct nd_param { + struct ipv6_addr host_ip[2]; + struct ipv6_addr solicit_ip; + struct ipv6_addr remote_ip; + uint8 host_mac[ETHER_ADDR_LEN]; + uint32 offload_id; +} BWL_POST_PACKED_STRUCT nd_param_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh { + uint32 pfn_alert_thresh; /* time in ms */ + uint32 roam_alert_thresh; /* time in ms */ +} BWL_POST_PACKED_STRUCT wl_pfn_roam_thresh_t; + + +/* Reasons for wl_pmalert_t */ +#define PM_DUR_EXCEEDED (1<<0) +#define MPC_DUR_EXCEEDED (1<<1) +#define ROAM_ALERT_THRESH_EXCEEDED (1<<2) +#define PFN_ALERT_THRESH_EXCEEDED (1<<3) +#define CONST_AWAKE_DUR_ALERT (1<<4) +#define CONST_AWAKE_DUR_RECOVERY (1<<5) + +#define MIN_PM_ALERT_LEN 9 + +/* Data sent in EXCESS_PM_WAKE event */ +#define WL_PM_ALERT_VERSION 3 + +#define MAX_P2P_BSS_DTIM_PRD 4 + 
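The wl_pwrstats_t reply above (and the wl_pmalert_t structure that follows) carries a series of type/length-prefixed records in its data[] array. Below is a minimal host-side sketch of walking those records; it assumes the layout described in the comments above (type and len are uint16, only the low 12 bits of len are valid, len is taken to cover the whole record including its header, and records are padded to 4-byte boundaries), uses the typedefs and macros defined in this header plus the standard memcpy, and the helper name walk_pwrstats_tlvs is purely illustrative.

/* Minimal TLV walk over the data[] area of a wl_pwrstats_t reply.
 * Assumes len covers the whole record (type/len header included) and
 * that each record is padded up to a 4-byte boundary.
 */
static void walk_pwrstats_tlvs(const uint8 *buf, uint16 buflen)
{
	uint16 off = WL_PWR_STATS_HDRLEN;	/* skip version/length header */

	while (off + 2 * sizeof(uint16) <= buflen) {
		uint16 type, len;

		memcpy(&type, buf + off, sizeof(type));
		memcpy(&len, buf + off + sizeof(type), sizeof(len));
		len &= 0xfff;			/* upper nibble of len is reserved */

		if (len < 2 * sizeof(uint16) || off + len > buflen)
			break;			/* malformed or truncated record */

		switch (type) {
		case WL_PWRSTATS_TYPE_PHY:	 /* wl_pwr_phy_stats_t */
		case WL_PWRSTATS_TYPE_SCAN:	 /* wl_pwr_scan_stats_t */
		case WL_PWRSTATS_TYPE_PM_AWAKE2: /* wl_pwr_pm_awake_stats_v2_t */
		default:
			break;			/* dispatch per type as needed */
		}

		off += (len + 3) & ~3u;		/* next record, 4-byte aligned */
	}
}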
+/* This structure is for version 3; version 2 will be deprecated in by FW */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert { + uint16 version; /* Version = 3 is TLV format */ + uint16 length; /* Length of entire structure */ + uint32 reasons; /* reason(s) for pm_alert */ + uint8 data[1]; /* TLV data, a series of structures, + * each starting with type and length. + * + * Padded as necessary so each section + * starts on a 4-byte boundary. + * + * Both type and len are uint16, but the + * upper nibble of length is reserved so + * valid len values are 0-4095. + */ +} BWL_POST_PACKED_STRUCT wl_pmalert_t; + +/* Type values for the data section */ +#define WL_PMALERT_FIXED 0 /* struct wl_pmalert_fixed_t, fixed fields */ +#define WL_PMALERT_PMSTATE 1 /* struct wl_pmalert_pmstate_t, variable */ +#define WL_PMALERT_EVENT_DUR 2 /* struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_UCODE_DBG 3 /* struct wl_pmalert_ucode_dbg_t, variable */ +#define WL_PMALERT_PS_ALLOWED_HIST 4 /* struct wl_pmalert_ps_allowed_history, variable */ +#define WL_PMALERT_EXT_UCODE_DBG 5 /* struct wl_pmalert_ext_ucode_dbg_t, variable */ +#define WL_PMALERT_EPM_START_EVENT_DUR 6 /* struct wl_pmalert_event_dur_t, variable */ + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed { + uint16 type; /* WL_PMALERT_FIXED */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 prev_stats_time; /* msecs */ + uint32 curr_time; /* ms */ + uint32 prev_pm_dur; /* msecs */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 prev_mpc_dur; /* msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + + /* int32 drifts = remote - local; +ve drift -> local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ + uint32 frts_time; /* Cumulative ms spent in data frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ + uint32 prev_frts_dur; /* Data frts duration at start of pm-period */ + uint32 cal_dur; /* Cumulative ms spent in calibration */ + uint32 prev_cal_dur; /* cal duration at start of pm-period */ +} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate { + uint16 type; /* WL_PMALERT_PMSTATE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + uint8 pmwake_idx; /* for stepping through pm_state */ + uint8 pad[3]; + /* Array of pmstate; len of array is based on tlv len */ + wlc_pm_debug_t pmstate[1]; +} BWL_POST_PACKED_STRUCT wl_pmalert_pmstate_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_event_dur { + uint16 type; /* WL_PMALERT_EVENT_DUR */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + /* Array of event_dur, len of array is based on tlv len */ + uint32 event_dur[1]; +} BWL_POST_PACKED_STRUCT wl_pmalert_event_dur_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg { + uint16 type; /* WL_PMALERT_UCODE_DBG */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 macctrl; + uint16 m_p2p_hps; + uint32 psm_brc; + uint32 ifsstat; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; +} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t; + + +/* Structures and 
constants used for "vndr_ie" IOVar interface */ +#define VNDR_IE_CMD_LEN 4 /* length of the set command string: + * "add", "del" (+ NUL) + */ + +#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + vndr_ie_t vndr_ie_data; /* vendor IE data */ +} BWL_POST_PACKED_STRUCT vndr_ie_info_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + int iecount; /* number of entries in the vndr_ie_list[] array */ + vndr_ie_info_t vndr_ie_list[1]; /* variable size list of vndr_ie_info_t structs */ +} BWL_POST_PACKED_STRUCT vndr_ie_buf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + char cmd[VNDR_IE_CMD_LEN]; /* vndr_ie IOVar set command : "add", "del" + NUL */ + vndr_ie_buf_t vndr_ie_buffer; /* buffer containing Vendor IE list information */ +} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t; + +/* tag_ID/length/value_buffer tuple */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 id; + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT tlv_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + tlv_t ie_data; /* IE data */ +} BWL_POST_PACKED_STRUCT ie_info_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + int iecount; /* number of entries in the ie_list[] array */ + ie_info_t ie_list[1]; /* variable size list of ie_info_t structs */ +} BWL_POST_PACKED_STRUCT ie_buf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */ + ie_buf_t ie_buffer; /* buffer containing IE list information */ +} BWL_POST_PACKED_STRUCT ie_setbuf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + uint8 id; /* IE type */ +} BWL_POST_PACKED_STRUCT ie_getbuf_t; + +/* structures used to define format of wps ie data from probe requests */ +/* passed up to applications via iovar "prbreq_wpsie" */ +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr { + struct ether_addr staAddr; + uint16 ieLen; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { + sta_prbreq_wps_ie_hdr_t hdr; + uint8 ieData[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { + uint32 totLen; + uint8 ieDataList[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; + + +#ifdef WLMEDIA_TXFAILEVENT +typedef BWL_PRE_PACKED_STRUCT struct { + char dest[ETHER_ADDR_LEN]; /* destination MAC */ + uint8 prio; /* Packet Priority */ + uint8 flags; /* Flags */ + uint32 tsf_l; /* TSF timer low */ + uint32 tsf_h; /* TSF timer high */ + uint16 rates; /* Main Rates */ + uint16 txstatus; /* TX Status */ +} BWL_POST_PACKED_STRUCT txfailinfo_t; +#endif /* WLMEDIA_TXFAILEVENT */ + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 flags; + chanspec_t chanspec; /* txpwr report for this channel */ + chanspec_t local_chanspec; /* channel on which we are associated */ + uint8 local_max; /* local max according to the AP */ + uint8 local_constraint; /* local constraint according to the AP */ + int8 antgain[2]; /* Ant gain for each band - from SROM */ + uint8 rf_cores; /* count of RF Cores being reported */ + uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */ + uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */ + uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ + uint8 tx_power_max[4]; /* Maximum target power among 
all rates */ + uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */ + int8 sar; /* SAR limit for display by wl executable */ + int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? */ + uint8 version; /* Version of the data format wlu <--> driver */ + uint8 display_core; /* Displayed curpower core */ + int8 target_offsets[4]; /* Target power offsets for current rate per core */ + uint32 last_tx_ratespec; /* Ratespec for last transmition */ + uint user_target; /* user limit */ + uint32 ppr_len; /* length of each ppr serialization buffer */ + int8 SARLIMIT[MAX_STREAMS_SUPPORTED]; + uint8 pprdata[1]; /* ppr serialization buffer */ +} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + struct ipv4_addr ipv4_addr; + struct ether_addr nexthop; +} BWL_POST_PACKED_STRUCT ibss_route_entry_t; +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 num_entry; + ibss_route_entry_t route_entry[1]; +} BWL_POST_PACKED_STRUCT ibss_route_tbl_t; + +#define MAX_IBSS_ROUTE_TBL_ENTRY 64 + +#define TXPWR_TARGET_VERSION 0 +typedef BWL_PRE_PACKED_STRUCT struct { + int32 version; /* version number */ + chanspec_t chanspec; /* txpwr report for this channel */ + int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */ + uint8 rf_cores; /* count of RF Cores being reported */ +} BWL_POST_PACKED_STRUCT txpwr_target_max_t; + +#define BSS_PEER_INFO_PARAM_CUR_VER 0 +/* Input structure for IOV_BSS_PEER_INFO */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + struct ether_addr ea; /* peer MAC address */ +} BWL_POST_PACKED_STRUCT bss_peer_info_param_t; + +#define BSS_PEER_INFO_CUR_VER 0 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + struct ether_addr ea; + int32 rssi; + uint32 tx_rate; /* current tx rate */ + uint32 rx_rate; /* current rx rate */ + wl_rateset_t rateset; /* rateset in use */ + uint32 age; /* age in seconds */ +} BWL_POST_PACKED_STRUCT bss_peer_info_t; + +#define BSS_PEER_LIST_INFO_CUR_VER 0 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 bss_peer_info_len; /* length of bss_peer_info_t */ + uint32 count; /* number of peer info */ + bss_peer_info_t peer_info[1]; /* peer info */ +} BWL_POST_PACKED_STRUCT bss_peer_list_info_t; + +#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info) + +#define AIBSS_BCN_FORCE_CONFIG_VER_0 0 + +/* structure used to configure AIBSS beacon force xmit */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 len; + uint32 initial_min_bcn_dur; /* dur in ms to check a bcn in bcn_flood period */ + uint32 min_bcn_dur; /* dur in ms to check a bcn after bcn_flood period */ + uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */ +} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t; + +#define AIBSS_TXFAIL_CONFIG_VER_0 0 +#define AIBSS_TXFAIL_CONFIG_VER_1 1 +#define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1 + +/* structure used to configure aibss tx fail event */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 len; + uint32 bcn_timeout; /* dur in seconds to receive 1 bcn */ + uint32 max_tx_retry; /* no of consecutive no acks to send txfail event */ + uint32 max_atim_failure; /* no of consecutive atim failure */ +} BWL_POST_PACKED_STRUCT aibss_txfail_config_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if { + uint16 version; + uint16 len; + uint32 flags; + struct ether_addr addr; + chanspec_t chspec; +} BWL_POST_PACKED_STRUCT wl_aibss_if_t; + +typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry { + struct 
ipv4_addr ip_addr; + struct ether_addr nexthop; +} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t; + +typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { + uint32 num_entry; + wlc_ipfo_route_entry_t route_entry[1]; +} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t; + +#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4 +#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64 + +/* no strict structure packing */ +#include + + /* Global ASSERT Logging */ +#define ASSERTLOG_CUR_VER 0x0100 +#define MAX_ASSRTSTR_LEN 64 + + typedef struct assert_record { + uint32 time; + uint8 seq_num; + char str[MAX_ASSRTSTR_LEN]; + } assert_record_t; + + typedef struct assertlog_results { + uint16 version; + uint16 record_len; + uint32 num; + assert_record_t logs[1]; + } assertlog_results_t; + +#define LOGRRC_FIX_LEN 8 +#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) + + + /* chanim acs record */ + typedef struct { + bool valid; + uint8 trigger; + chanspec_t selected_chspc; + int8 bgnoise; + uint32 glitch_cnt; + uint8 ccastats; + uint8 chan_idle; + uint timestamp; + } chanim_acs_record_t; + + typedef struct { + chanim_acs_record_t acs_record[CHANIM_ACS_RECORD]; + uint8 count; + uint timestamp; + } wl_acs_record_t; + + typedef struct chanim_stats { + uint32 glitchcnt; /* normalized as per second count */ + uint32 badplcp; /* normalized as per second count */ + uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */ + int8 bgnoise; /* background noise level (in dBm) */ + chanspec_t chanspec; /* ctrl chanspec of the interface */ + uint32 timestamp; /* time stamp at which the stats are collected */ + uint32 bphy_glitchcnt; /* normalized as per second count */ + uint32 bphy_badplcp; /* normalized as per second count */ + uint8 chan_idle; /* normalized as 0~255 */ + } chanim_stats_t; + +#define WL_CHANIM_STATS_VERSION 2 + +typedef struct { + uint32 buflen; + uint32 version; + uint32 count; + chanim_stats_t stats[1]; +} wl_chanim_stats_t; + +#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats) + +/* Noise measurement metrics. 
*/ +#define NOISE_MEASURE_KNOISE 0x1 + +/* scb probe parameter */ +typedef struct { + uint32 scb_timeout; + uint32 scb_activity_time; + uint32 scb_max_probe; +} wl_scb_probe_t; + +/* structure/defines for selective mgmt frame (smf) stats support */ + +#define SMFS_VERSION 1 +/* selected mgmt frame (smf) stats element */ +typedef struct wl_smfs_elem { + uint32 count; + uint16 code; /* SC or RC code */ +} wl_smfs_elem_t; + +typedef struct wl_smf_stats { + uint32 version; + uint16 length; /* reserved for future usage */ + uint8 type; + uint8 codetype; + uint32 ignored_cnt; + uint32 malformed_cnt; + uint32 count_total; /* count included the interested group */ + wl_smfs_elem_t elem[1]; +} wl_smf_stats_t; + +#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem); + +enum { + SMFS_CODETYPE_SC, + SMFS_CODETYPE_RC +}; + +typedef enum smfs_type { + SMFS_TYPE_AUTH, + SMFS_TYPE_ASSOC, + SMFS_TYPE_REASSOC, + SMFS_TYPE_DISASSOC_TX, + SMFS_TYPE_DISASSOC_RX, + SMFS_TYPE_DEAUTH_TX, + SMFS_TYPE_DEAUTH_RX, + SMFS_TYPE_MAX +} smfs_type_t; + +#ifdef PHYMON + +#define PHYMON_VERSION 1 + +typedef struct wl_phycal_core_state { + /* Tx IQ/LO calibration coeffs */ + int16 tx_iqlocal_a; + int16 tx_iqlocal_b; + int8 tx_iqlocal_ci; + int8 tx_iqlocal_cq; + int8 tx_iqlocal_di; + int8 tx_iqlocal_dq; + int8 tx_iqlocal_ei; + int8 tx_iqlocal_eq; + int8 tx_iqlocal_fi; + int8 tx_iqlocal_fq; + + /* Rx IQ calibration coeffs */ + int16 rx_iqcal_a; + int16 rx_iqcal_b; + + uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */ + uint32 papd_epsilon_table[64]; /* PAPD epsilon table */ + int16 papd_epsilon_offset; /* PAPD epsilon offset */ + uint8 curr_tx_pwrindex; /* Tx power index */ + int8 idle_tssi; /* Idle TSSI */ + int8 est_tx_pwr; /* Estimated Tx Power (dB) */ + int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */ + uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */ + uint16 init_gaincode; /* initgain required for ACI */ + int8 estirr_tx; + int8 estirr_rx; + +} wl_phycal_core_state_t; + +typedef struct wl_phycal_state { + int version; + int8 num_phy_cores; /* number of cores */ + int8 curr_temperature; /* on-chip temperature sensor reading */ + chanspec_t chspec; /* channspec for this state */ + bool aci_state; /* ACI state: ON/OFF */ + uint16 crsminpower; /* crsminpower required for ACI */ + uint16 crsminpowerl; /* crsminpowerl required for ACI */ + uint16 crsminpoweru; /* crsminpoweru required for ACI */ + wl_phycal_core_state_t phycal_core[1]; +} wl_phycal_state_t; + +#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core) +#endif /* PHYMON */ + +/* discovery state */ +typedef struct wl_p2p_disc_st { + uint8 state; /* see state */ + chanspec_t chspec; /* valid in listen state */ + uint16 dwell; /* valid in listen state, in ms */ +} wl_p2p_disc_st_t; + +/* scan request */ +typedef struct wl_p2p_scan { + uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */ + uint8 reserved[3]; + /* scan or escan parms... 
*/ +} wl_p2p_scan_t; + +/* i/f request */ +typedef struct wl_p2p_if { + struct ether_addr addr; + uint8 type; /* see i/f type */ + chanspec_t chspec; /* for p2p_ifadd GO */ +} wl_p2p_if_t; + +/* i/f query */ +typedef struct wl_p2p_ifq { + uint bsscfgidx; + char ifname[BCM_MSG_IFNAME_MAX]; +} wl_p2p_ifq_t; + +/* OppPS & CTWindow */ +typedef struct wl_p2p_ops { + uint8 ops; /* 0: disable 1: enable */ + uint8 ctw; /* >= 10 */ +} wl_p2p_ops_t; + +/* absence and presence request */ +typedef struct wl_p2p_sched_desc { + uint32 start; + uint32 interval; + uint32 duration; + uint32 count; /* see count */ +} wl_p2p_sched_desc_t; + +typedef struct wl_p2p_sched { + uint8 type; /* see schedule type */ + uint8 action; /* see schedule action */ + uint8 option; /* see schedule option */ + wl_p2p_sched_desc_t desc[1]; +} wl_p2p_sched_t; + +typedef struct wl_p2p_wfds_hash { + uint32 advt_id; + uint16 nw_cfg_method; + uint8 wfds_hash[6]; + uint8 name_len; + uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; +} wl_p2p_wfds_hash_t; + +typedef struct wl_bcmdcs_data { + uint reason; + chanspec_t chspec; +} wl_bcmdcs_data_t; + + +/* NAT configuration */ +typedef struct { + uint32 ipaddr; /* interface ip address */ + uint32 ipaddr_mask; /* interface ip address mask */ + uint32 ipaddr_gateway; /* gateway ip address */ + uint8 mac_gateway[6]; /* gateway mac address */ + uint32 ipaddr_dns; /* DNS server ip address, valid only for public if */ + uint8 mac_dns[6]; /* DNS server mac address, valid only for public if */ + uint8 GUID[38]; /* interface GUID */ +} nat_if_info_t; + +typedef struct { + uint op; /* operation code */ + bool pub_if; /* set for public if, clear for private if */ + nat_if_info_t if_info; /* interface info */ +} nat_cfg_t; + +typedef struct { + int state; /* NAT state returned */ +} nat_state_t; + + +#define BTA_STATE_LOG_SZ 64 + +/* BTAMP Statemachine states */ +enum { + HCIReset = 1, + HCIReadLocalAMPInfo, + HCIReadLocalAMPASSOC, + HCIWriteRemoteAMPASSOC, + HCICreatePhysicalLink, + HCIAcceptPhysicalLinkRequest, + HCIDisconnectPhysicalLink, + HCICreateLogicalLink, + HCIAcceptLogicalLink, + HCIDisconnectLogicalLink, + HCILogicalLinkCancel, + HCIAmpStateChange, + HCIWriteLogicalLinkAcceptTimeout +}; + +typedef struct flush_txfifo { + uint32 txfifobmp; + uint32 hwtxfifoflush; + struct ether_addr ea; +} flush_txfifo_t; + +enum { + SPATIAL_MODE_2G_IDX = 0, + SPATIAL_MODE_5G_LOW_IDX, + SPATIAL_MODE_5G_MID_IDX, + SPATIAL_MODE_5G_HIGH_IDX, + SPATIAL_MODE_5G_UPPER_IDX, + SPATIAL_MODE_MAX_IDX +}; + +#define WLC_TXCORE_MAX 4 /* max number of txcore supports */ +#define WLC_SUBBAND_MAX 4 /* max number of sub-band supports */ +typedef struct { + uint8 band2g[WLC_TXCORE_MAX]; + uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX]; +} sar_limit_t; + +#define WLC_TXCAL_CORE_MAX 2 /* max number of txcore supports for txcal */ +#define MAX_NUM_TXCAL_MEAS 128 +#define MAX_NUM_PWR_STEP 40 +#define TXCAL_ROUNDING_FIX 1 +typedef struct wl_txcal_meas { +#ifdef TXCAL_ROUNDING_FIX + uint16 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#else + uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#endif /* TXCAL_ROUNDING_FIX */ + int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; + uint8 valid_cnt; +} wl_txcal_meas_t; + +typedef struct wl_txcal_power_tssi { + uint8 set_core; + uint8 channel; + int16 tempsense[WLC_TXCAL_CORE_MAX]; + int16 pwr_start[WLC_TXCAL_CORE_MAX]; + uint8 pwr_start_idx[WLC_TXCAL_CORE_MAX]; + uint8 num_entries[WLC_TXCAL_CORE_MAX]; + uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_PWR_STEP]; + bool gen_tbl; +} 
wl_txcal_power_tssi_t; + +/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */ +typedef struct wl_mempool_stats { + int num; /* Number of memory pools */ + bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */ +} wl_mempool_stats_t; + +typedef struct { + uint32 ipaddr; + uint32 ipaddr_netmask; + uint32 ipaddr_gateway; +} nwoe_ifconfig_t; + +/* Traffic management priority classes */ +typedef enum trf_mgmt_priority_class { + trf_mgmt_priority_low = 0, /* Maps to 802.1p BK */ + trf_mgmt_priority_medium = 1, /* Maps to 802.1p BE */ + trf_mgmt_priority_high = 2, /* Maps to 802.1p VI */ + trf_mgmt_priority_nochange = 3, /* do not update the priority */ + trf_mgmt_priority_invalid = (trf_mgmt_priority_nochange + 1) +} trf_mgmt_priority_class_t; + +/* Traffic management configuration parameters */ +typedef struct trf_mgmt_config { + uint32 trf_mgmt_enabled; /* 0 - disabled, 1 - enabled */ + uint32 flags; /* See TRF_MGMT_FLAG_xxx defines */ + uint32 host_ip_addr; /* My IP address to determine subnet */ + uint32 host_subnet_mask; /* My subnet mask */ + uint32 downlink_bandwidth; /* In units of kbps */ + uint32 uplink_bandwidth; /* In units of kbps */ + uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed tx bandwidth */ + uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed rx bandwidth */ +} trf_mgmt_config_t; + +/* Traffic management filter */ +typedef struct trf_mgmt_filter { + struct ether_addr dst_ether_addr; /* His L2 address */ + uint32 dst_ip_addr; /* His IP address */ + uint16 dst_port; /* His L4 port */ + uint16 src_port; /* My L4 port */ + uint16 prot; /* L4 protocol (only TCP or UDP) */ + uint16 flags; /* TBD. For now, this must be zero. */ + trf_mgmt_priority_class_t priority; /* Priority for filtered packets */ + uint32 dscp; /* DSCP */ +} trf_mgmt_filter_t; + +/* Traffic management filter list (variable length) */ +typedef struct trf_mgmt_filter_list { + uint32 num_filters; + trf_mgmt_filter_t filter[1]; +} trf_mgmt_filter_list_t; + +/* Traffic management global info used for all queues */ +typedef struct trf_mgmt_global_info { + uint32 maximum_bytes_per_second; + uint32 maximum_bytes_per_sampling_period; + uint32 total_bytes_consumed_per_second; + uint32 total_bytes_consumed_per_sampling_period; + uint32 total_unused_bytes_per_sampling_period; +} trf_mgmt_global_info_t; + +/* Traffic management shaping info per priority queue */ +typedef struct trf_mgmt_shaping_info { + uint32 gauranteed_bandwidth_percentage; + uint32 guaranteed_bytes_per_second; + uint32 guaranteed_bytes_per_sampling_period; + uint32 num_bytes_produced_per_second; + uint32 num_bytes_consumed_per_second; + uint32 num_queued_packets; /* Number of packets in queue */ + uint32 num_queued_bytes; /* Number of bytes in queue */ +} trf_mgmt_shaping_info_t; + +/* Traffic management shaping info array */ +typedef struct trf_mgmt_shaping_info_array { + trf_mgmt_global_info_t tx_global_shaping_info; + trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; + trf_mgmt_global_info_t rx_global_shaping_info; + trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; +} trf_mgmt_shaping_info_array_t; + + +/* Traffic management statistical counters */ +typedef struct trf_mgmt_stats { + uint32 num_processed_packets; /* Number of packets processed */ + uint32 num_processed_bytes; /* Number of bytes processed */ + uint32 num_discarded_packets; /* Number of packets discarded from queue */ +} trf_mgmt_stats_t; + +/* Traffic 
management statisics array */ +typedef struct trf_mgmt_stats_array { + trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; + trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; +} trf_mgmt_stats_array_t; + +typedef struct powersel_params { + /* LPC Params exposed via IOVAR */ + int32 tp_ratio_thresh; /* Throughput ratio threshold */ + uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /* Number of successes before power step down */ + uint8 pwr_sel_exp_time; /* Time lapse for expiry of database */ +} powersel_params_t; + +typedef struct lpc_params { + /* LPC Params exposed via IOVAR */ + uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /* Number of successes before power step down */ + uint8 lpc_exp_time; /* Time lapse for expiry of database */ + uint8 pwrup_slow_step; /* Step size for slow step up */ + uint8 pwrup_fast_step; /* Step size for fast step up */ + uint8 pwrdn_slow_step; /* Step size for slow step down */ +} lpc_params_t; + +/* tx pkt delay statistics */ +#define SCB_RETRY_SHORT_DEF 7 /* Default Short retry Limit */ +#define WLPKTDLY_HIST_NBINS 16 /* number of bins used in the Delay histogram */ + +/* structure to store per-AC delay statistics */ +typedef struct scb_delay_stats { + uint32 txmpdu_lost; /* number of MPDUs lost */ + uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /* retry times histogram */ + uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /* cumulative packet latency */ + uint32 delay_min; /* minimum packet latency observed */ + uint32 delay_max; /* maximum packet latency observed */ + uint32 delay_avg; /* packet latency average */ + uint32 delay_hist[WLPKTDLY_HIST_NBINS]; /* delay histogram */ +} scb_delay_stats_t; + +/* structure for txdelay event */ +typedef struct txdelay_event { + uint8 status; + int rssi; + chanim_stats_t chanim_stats; + scb_delay_stats_t delay_stats[AC_COUNT]; +} txdelay_event_t; + +/* structure for txdelay parameters */ +typedef struct txdelay_params { + uint16 ratio; /* Avg Txdelay Delta */ + uint8 cnt; /* Sample cnt */ + uint8 period; /* Sample period */ + uint8 tune; /* Debug */ +} txdelay_params_t; + +enum { + WNM_SERVICE_DMS = 1, + WNM_SERVICE_FMS = 2, + WNM_SERVICE_TFS = 3 +}; + +/* Definitions for WNM/NPS TCLAS */ +typedef struct wl_tclas { + uint8 user_priority; + uint8 fc_len; + dot11_tclas_fc_t fc; +} wl_tclas_t; + +#define WL_TCLAS_FIXED_SIZE OFFSETOF(wl_tclas_t, fc) + +typedef struct wl_tclas_list { + uint32 num; + wl_tclas_t tclas[1]; +} wl_tclas_list_t; + +/* Definitions for WNM/NPS Traffic Filter Service */ +typedef struct wl_tfs_req { + uint8 tfs_id; + uint8 tfs_actcode; + uint8 tfs_subelem_id; + uint8 send; +} wl_tfs_req_t; + +typedef struct wl_tfs_filter { + uint8 status; /* Status returned by the AP */ + uint8 tclas_proc; /* TCLAS processing value (0:and, 1:or) */ + uint8 tclas_cnt; /* count of all wl_tclas_t in tclas array */ + uint8 tclas[1]; /* VLA of wl_tclas_t */ +} wl_tfs_filter_t; +#define WL_TFS_FILTER_FIXED_SIZE OFFSETOF(wl_tfs_filter_t, tclas) + +typedef struct wl_tfs_fset { + struct ether_addr ea; /* Address of AP/STA involved with this filter set */ + uint8 tfs_id; /* TFS ID field chosen by STA host */ + uint8 status; /* Internal status TFS_STATUS_xxx */ + uint8 actcode; /* Action code DOT11_TFS_ACTCODE_xxx */ + uint8 token; /* Token used in last request frame */ + uint8 notify; /* Notify frame sent/received because of this set */ + uint8 filter_cnt; /* count of all wl_tfs_filter_t in filter array */ + uint8 filter[1]; /* 
VLA of wl_tfs_filter_t */ +} wl_tfs_fset_t; +#define WL_TFS_FSET_FIXED_SIZE OFFSETOF(wl_tfs_fset_t, filter) + +enum { + TFS_STATUS_DISABLED = 0, /* TFS filter set disabled by user */ + TFS_STATUS_DISABLING = 1, /* Empty request just sent to AP */ + TFS_STATUS_VALIDATED = 2, /* Filter set validated by AP (but maybe not enabled!) */ + TFS_STATUS_VALIDATING = 3, /* Filter set just sent to AP */ + TFS_STATUS_NOT_ASSOC = 4, /* STA not associated */ + TFS_STATUS_NOT_SUPPORT = 5, /* TFS not supported by AP */ + TFS_STATUS_DENIED = 6, /* Filter set refused by AP (=> all sets are disabled!) */ +}; + +typedef struct wl_tfs_status { + uint8 fset_cnt; /* count of all wl_tfs_fset_t in fset array */ + wl_tfs_fset_t fset[1]; /* VLA of wl_tfs_fset_t */ +} wl_tfs_status_t; + +typedef struct wl_tfs_set { + uint8 send; /* Immediately register registered sets on AP side */ + uint8 tfs_id; /* ID of a specific set (existing or new), or nul for all */ + uint8 actcode; /* Action code for this filter set */ + uint8 tclas_proc; /* TCLAS processing operator for this filter set */ +} wl_tfs_set_t; + +typedef struct wl_tfs_term { + uint8 del; /* Delete internal set once confirmation received */ + uint8 tfs_id; /* ID of a specific set (existing), or nul for all */ +} wl_tfs_term_t; + + +#define DMS_DEP_PROXY_ARP (1 << 0) + +/* Definitions for WNM/NPS Directed Multicast Service */ +enum { + DMS_STATUS_DISABLED = 0, /* DMS desc disabled by user */ + DMS_STATUS_ACCEPTED = 1, /* Request accepted by AP */ + DMS_STATUS_NOT_ASSOC = 2, /* STA not associated */ + DMS_STATUS_NOT_SUPPORT = 3, /* DMS not supported by AP */ + DMS_STATUS_DENIED = 4, /* Request denied by AP */ + DMS_STATUS_TERM = 5, /* Request terminated by AP */ + DMS_STATUS_REMOVING = 6, /* Remove request just sent */ + DMS_STATUS_ADDING = 7, /* Add request just sent */ + DMS_STATUS_ERROR = 8, /* Non-compliant AP behavior */ + DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */ + DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */ +}; + +typedef struct wl_dms_desc { + uint8 user_id; + uint8 status; + uint8 token; + uint8 dms_id; + uint8 tclas_proc; + uint8 mac_len; /* length of all ether_addr in data array, 0 if STA */ + uint8 tclas_len; /* length of all wl_tclas_t in data array */ + uint8 data[1]; /* VLA of 'ether_addr' and 'wl_tclas_t' (in this order) */ +} wl_dms_desc_t; + +#define WL_DMS_DESC_FIXED_SIZE OFFSETOF(wl_dms_desc_t, data) + +typedef struct wl_dms_status { + uint32 cnt; + wl_dms_desc_t desc[1]; +} wl_dms_status_t; + +typedef struct wl_dms_set { + uint8 send; + uint8 user_id; + uint8 tclas_proc; +} wl_dms_set_t; + +typedef struct wl_dms_term { + uint8 del; + uint8 user_id; +} wl_dms_term_t; + +typedef struct wl_service_term { + uint8 service; + union { + wl_dms_term_t dms; + } u; +} wl_service_term_t; + +/* Definitions for WNM/NPS BSS Transition */ +typedef struct wl_bsstrans_req { + uint16 tbtt; /* time of BSS to end of life, in unit of TBTT */ + uint16 dur; /* time of BSS to keep off, in unit of minute */ + uint8 reqmode; /* request mode of BSS transition request */ + uint8 unicast; /* request by unicast or by broadcast */ +} wl_bsstrans_req_t; + +enum { + BSSTRANS_RESP_AUTO = 0, /* Currently equivalent to ENABLE */ + BSSTRANS_RESP_DISABLE = 1, /* Never answer BSS Trans Req frames */ + BSSTRANS_RESP_ENABLE = 2, /* Always answer Req frames with preset data */ + BSSTRANS_RESP_WAIT = 3, /* Send ind, wait and/or send preset data (NOT IMPL) */ + BSSTRANS_RESP_IMMEDIATE = 4 /* After an ind, set data and send resp (NOT IMPL) */ +}; + 
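Because each wl_dms_desc_t defined above is variable-length, the descriptors in a wl_dms_status_t reply cannot be indexed as a plain array. The sketch below shows one way to step through them; it assumes the descriptors are packed back to back with no extra padding, each one followed by mac_len bytes of ether_addr data and tclas_len bytes of wl_tclas_t data, and the helper name walk_dms_status is purely illustrative.

/* Step through the variable-length descriptors of a wl_dms_status_t.
 * Assumes descriptors are packed back to back: fixed header, then
 * mac_len bytes of ether_addr data, then tclas_len bytes of wl_tclas_t data.
 */
static void walk_dms_status(const wl_dms_status_t *st)
{
	const uint8 *p = (const uint8 *)st->desc;
	uint32 i;

	for (i = 0; i < st->cnt; i++) {
		const wl_dms_desc_t *desc = (const wl_dms_desc_t *)p;

		/* desc->data[0 .. mac_len-1]            : ether_addr entries
		 * desc->data[mac_len .. mac_len+tclas_len-1] : wl_tclas_t entries
		 */
		p += WL_DMS_DESC_FIXED_SIZE + desc->mac_len + desc->tclas_len;
	}
}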
+typedef struct wl_bsstrans_resp { + uint8 policy; + uint8 status; + uint8 delay; + struct ether_addr target; +} wl_bsstrans_resp_t; + +/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception. + * The BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF, + * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan + * mandates different behavior on receiving a BSS-transition request. To accommodate + * such divergent behaviors, these policies have been created. + */ +enum { + WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /* Roam (or disassociate) in all cases */ + WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /* Roam only if requested by Request Mode field */ + WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /* Roam only if Preferred BSS provided */ + WL_BSSTRANS_POLICY_WAIT = 3, /* Wait for deauth and send Accepted status */ + WL_BSSTRANS_POLICY_PRODUCT = 4, /* Policy for real product use cases (non-pf) */ +}; + +/* Definitions for WNM/NPS TIM Broadcast */ +typedef struct wl_timbc_offset { + int16 offset; /* offset in us */ + uint16 fix_intv; /* override interval sent from STA */ + uint16 rate_override; /* use rate override to send high rate TIM broadcast frame */ + uint8 tsf_present; /* show timestamp in TIM broadcast frame */ +} wl_timbc_offset_t; + +typedef struct wl_timbc_set { + uint8 interval; /* Interval in DTIM wished or required. */ + uint8 flags; /* Bitfield described below */ + uint16 rate_min; /* Minimum rate required for High/Low TIM frames. Optional */ + uint16 rate_max; /* Maximum rate required for High/Low TIM frames. Optional */ +} wl_timbc_set_t; + +enum { + WL_TIMBC_SET_TSF_REQUIRED = 1, /* Enable TIMBC only if TSF in TIM frames */ + WL_TIMBC_SET_NO_OVERRIDE = 2, /* ... if AP does not override interval */ + WL_TIMBC_SET_PROXY_ARP = 4, /* ... if AP supports Proxy ARP */ + WL_TIMBC_SET_DMS_ACCEPTED = 8 /* ... 
if all DMS desc have been accepted */ +}; + +typedef struct wl_timbc_status { + uint8 status_sta; /* Status from internal state machine (check below) */ + uint8 status_ap; /* From AP response frame (check 8.4.2.86 from 802.11) */ + uint8 interval; + uint8 pad; + int32 offset; + uint16 rate_high; + uint16 rate_low; +} wl_timbc_status_t; + +enum { + WL_TIMBC_STATUS_DISABLE = 0, /* TIMBC disabled by user */ + WL_TIMBC_STATUS_REQ_MISMATCH = 1, /* AP settings do no match user requirements */ + WL_TIMBC_STATUS_NOT_ASSOC = 2, /* STA not associated */ + WL_TIMBC_STATUS_NOT_SUPPORT = 3, /* TIMBC not supported by AP */ + WL_TIMBC_STATUS_DENIED = 4, /* Req to disable TIMBC sent to AP */ + WL_TIMBC_STATUS_ENABLE = 5 /* TIMBC enabled */ +}; + +/* Definitions for PM2 Dynamic Fast Return To Sleep */ +typedef struct wl_pm2_sleep_ret_ext { + uint8 logic; /* DFRTS logic: see WL_DFRTS_LOGIC_* below */ + uint16 low_ms; /* Low FRTS timeout */ + uint16 high_ms; /* High FRTS timeout */ + uint16 rx_pkts_threshold; /* switching threshold: # rx pkts */ + uint16 tx_pkts_threshold; /* switching threshold: # tx pkts */ + uint16 txrx_pkts_threshold; /* switching threshold: # (tx+rx) pkts */ + uint32 rx_bytes_threshold; /* switching threshold: # rx bytes */ + uint32 tx_bytes_threshold; /* switching threshold: # tx bytes */ + uint32 txrx_bytes_threshold; /* switching threshold: # (tx+rx) bytes */ +} wl_pm2_sleep_ret_ext_t; + +#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */ +#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */ + +/* Values for the passive_on_restricted_mode iovar. When set to non-zero, this iovar + * disables automatic conversions of a channel from passively scanned to + * actively scanned. These values only have an effect for country codes such + * as XZ where some 5 GHz channels are defined to be passively scanned. 
+ */ +#define WL_PASSACTCONV_DISABLE_NONE 0 /* Enable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_ALL 1 /* Disable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_PERM 2 /* Disable only permanent conversions */ + +/* Definitions for Reliable Multicast */ +#define WL_RMC_CNT_VERSION 1 +#define WL_RMC_TR_VERSION 1 +#define WL_RMC_MAX_CLIENT 32 +#define WL_RMC_FLAG_INBLACKLIST 1 +#define WL_RMC_FLAG_ACTIVEACKER 2 +#define WL_RMC_FLAG_RELMCAST 4 +#define WL_RMC_MAX_TABLE_ENTRY 4 + +#define WL_RMC_VER 1 +#define WL_RMC_INDEX_ACK_ALL 255 +#define WL_RMC_NUM_OF_MC_STREAMS 4 +#define WL_RMC_MAX_TRS_PER_GROUP 1 +#define WL_RMC_MAX_TRS_IN_ACKALL 1 +#define WL_RMC_ACK_MCAST0 0x02 +#define WL_RMC_ACK_MCAST_ALL 0x01 +#define WL_RMC_ACTF_TIME_MIN 300 /* time in ms */ +#define WL_RMC_ACTF_TIME_MAX 20000 /* time in ms */ +#define WL_RMC_MAX_NUM_TRS 32 /* maximun transmitters allowed */ +#define WL_RMC_ARTMO_MIN 350 /* time in ms */ +#define WL_RMC_ARTMO_MAX 40000 /* time in ms */ + +/* RMC events in action frames */ +enum rmc_opcodes { + RELMCAST_ENTRY_OP_DISABLE = 0, /* Disable multi-cast group */ + RELMCAST_ENTRY_OP_DELETE = 1, /* Delete multi-cast group */ + RELMCAST_ENTRY_OP_ENABLE = 2, /* Enable multi-cast group */ + RELMCAST_ENTRY_OP_ACK_ALL = 3 /* Enable ACK ALL bit in AMT */ +}; + +/* RMC operational modes */ +enum rmc_modes { + WL_RMC_MODE_RECEIVER = 0, /* Receiver mode by default */ + WL_RMC_MODE_TRANSMITTER = 1, /* Transmitter mode using wl ackreq */ + WL_RMC_MODE_INITIATOR = 2 /* Initiator mode using wl ackreq */ +}; + +/* Each RMC mcast client info */ +typedef struct wl_relmcast_client { + uint8 flag; /* status of client such as AR, R, or blacklisted */ + int16 rssi; /* rssi value of RMC client */ + struct ether_addr addr; /* mac address of RMC client */ +} wl_relmcast_client_t; + +/* RMC Counters */ +typedef struct wl_rmc_cnts { + uint16 version; /* see definition of WL_CNT_T_VERSION */ + uint16 length; /* length of entire structure */ + uint16 dupcnt; /* counter for duplicate rmc MPDU */ + uint16 ackreq_err; /* counter for wl ackreq error */ + uint16 af_tx_err; /* error count for action frame transmit */ + uint16 null_tx_err; /* error count for rmc null frame transmit */ + uint16 af_unicast_tx_err; /* error count for rmc unicast frame transmit */ + uint16 mc_no_amt_slot; /* No mcast AMT entry available */ + /* Unused. Keep for rom compatibility */ + uint16 mc_no_glb_slot; /* No mcast entry available in global table */ + uint16 mc_not_mirrored; /* mcast group is not mirrored */ + uint16 mc_existing_tr; /* mcast group is already taken by transmitter */ + uint16 mc_exist_in_amt; /* mcast group is already programmed in amt */ + /* Unused. Keep for rom compatibility */ + uint16 mc_not_exist_in_gbl; /* mcast group is not in global table */ + uint16 mc_not_exist_in_amt; /* mcast group is not in AMT table */ + uint16 mc_utilized; /* mcast addressed is already taken */ + uint16 mc_taken_other_tr; /* multi-cast addressed is already taken */ + uint32 rmc_rx_frames_mac; /* no of mc frames received from mac */ + uint32 rmc_tx_frames_mac; /* no of mc frames transmitted to mac */ + uint32 mc_null_ar_cnt; /* no. of times NULL AR is received */ + uint32 mc_ar_role_selected; /* no. of times took AR role */ + uint32 mc_ar_role_deleted; /* no. of times AR role cancelled */ + uint32 mc_noacktimer_expired; /* no. 
of times noack timer expired */ + uint16 mc_no_wl_clk; /* no wl clk detected when trying to access amt */ + uint16 mc_tr_cnt_exceeded; /* No of transmitters in the network exceeded */ +} wl_rmc_cnts_t; + +/* RMC Status */ +typedef struct wl_relmcast_st { + uint8 ver; /* version of RMC */ + uint8 num; /* number of clients detected by transmitter */ + wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT]; + uint16 err; /* error status (used in infra) */ + uint16 actf_time; /* action frame time period */ +} wl_relmcast_status_t; + +/* Entry for each STA/node */ +typedef struct wl_rmc_entry { + /* operation on multi-cast entry such add, + * delete, ack-all + */ + int8 flag; + struct ether_addr addr; /* multi-cast group mac address */ +} wl_rmc_entry_t; + +/* RMC table */ +typedef struct wl_rmc_entry_table { + uint8 index; /* index to a particular mac entry in table */ + uint8 opcode; /* opcodes or operation on entry */ + wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY]; +} wl_rmc_entry_table_t; + +typedef struct wl_rmc_trans_elem { + struct ether_addr tr_mac; /* transmitter mac */ + struct ether_addr ar_mac; /* ar mac */ + uint16 artmo; /* AR timeout */ + uint8 amt_idx; /* amt table entry */ + uint16 flag; /* entry will be acked, not acked, programmed, full etc */ +} wl_rmc_trans_elem_t; + +/* RMC transmitters */ +typedef struct wl_rmc_trans_in_network { + uint8 ver; /* version of RMC */ + uint8 num_tr; /* number of transmitters in the network */ + wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS]; +} wl_rmc_trans_in_network_t; + +/* To update vendor specific ie for RMC */ +typedef struct wl_rmc_vsie { + uint8 oui[DOT11_OUI_LEN]; + uint16 payload; /* IE Data Payload */ +} wl_rmc_vsie_t; + + +/* structures & defines for proximity detection */ +enum proxd_method { + PROXD_UNDEFINED_METHOD = 0, + PROXD_RSSI_METHOD = 1, + PROXD_TOF_METHOD = 2 +}; + +/* structures for proximity detection device role */ +#define WL_PROXD_MODE_DISABLE 0 +#define WL_PROXD_MODE_NEUTRAL 1 +#define WL_PROXD_MODE_INITIATOR 2 +#define WL_PROXD_MODE_TARGET 3 + +#define WL_PROXD_ACTION_STOP 0 +#define WL_PROXD_ACTION_START 1 + +#define WL_PROXD_FLAG_TARGET_REPORT 0x1 +#define WL_PROXD_FLAG_REPORT_FAILURE 0x2 +#define WL_PROXD_FLAG_INITIATOR_REPORT 0x4 +#define WL_PROXD_FLAG_NOCHANSWT 0x8 +#define WL_PROXD_FLAG_NETRUAL 0x10 +#define WL_PROXD_FLAG_INITIATOR_RPTRTT 0x20 +#define WL_PROXD_FLAG_ONEWAY 0x40 +#define WL_PROXD_FLAG_SEQ_EN 0x80 + +#define WL_PROXD_RANDOM_WAKEUP 0x8000 +#define WL_PROXD_MAXREPORT 8 + +typedef struct wl_proxd_iovar { + uint16 method; /* Proxmity Detection method */ + uint16 mode; /* Mode (neutral, initiator, target) */ +} wl_proxd_iovar_t; + +/* + * structures for proximity detection parameters + * consists of two parts, common and method specific params + * common params should be placed at the beginning + */ + +/* require strict packing */ +#include + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_common { + chanspec_t chanspec; /* channel spec */ + int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /* tx rate of PD rames (in 500kbps units) */ + uint16 timeout; /* timeout value */ + uint16 interval; /* interval between neighbor finding attempts (in TU) */ + uint16 duration; /* duration of neighbor finding attempts (in ms) */ +} BWL_POST_PACKED_STRUCT wl_proxd_params_common_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_rssi_method { + chanspec_t chanspec; /* chanspec for home channel */ + int16 tx_power; /* tx power of Proximity Detection frames (in dBm) */ 
+ uint16 tx_rate; /* tx rate of PD frames, 500kbps units */ + uint16 timeout; /* state machine wait timeout of the frames (in ms) */ + uint16 interval; /* interval between neighbor finding attempts (in TU) */ + uint16 duration; /* duration of neighbor finding attempts (in ms) */ + /* method specific ones go after this line */ + int16 rssi_thresh; /* RSSI threshold (in dBm) */ + uint16 maxconvergtmo; /* max wait converge timeout (in ms) */ +} wl_proxd_params_rssi_method_t; + +#define Q1_NS 25 /* Q1 time units */ + +#define TOF_BW_NUM 3 /* number of bandwidth that the TOF can support */ +#define TOF_BW_SEQ_NUM (TOF_BW_NUM+2) /* number of total index */ +enum tof_bw_index { + TOF_BW_20MHZ_INDEX = 0, + TOF_BW_40MHZ_INDEX = 1, + TOF_BW_80MHZ_INDEX = 2, + TOF_BW_SEQTX_INDEX = 3, + TOF_BW_SEQRX_INDEX = 4 +}; + +#define BANDWIDTH_BASE 20 /* base value of bandwidth */ +#define TOF_BW_20MHZ (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX) +#define TOF_BW_40MHZ (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX) +#define TOF_BW_80MHZ (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX) +#define TOF_BW_10MHZ 10 + +#define NFFT_BASE 64 /* base size of fft */ +#define TOF_NFFT_20MHZ (NFFT_BASE << TOF_BW_20MHZ_INDEX) +#define TOF_NFFT_40MHZ (NFFT_BASE << TOF_BW_40MHZ_INDEX) +#define TOF_NFFT_80MHZ (NFFT_BASE << TOF_BW_80MHZ_INDEX) + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method { + chanspec_t chanspec; /* chanspec for home channel */ + int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /* tx rate of PD rames (in 500kbps units) */ + uint16 timeout; /* state machine wait timeout of the frames (in ms) */ + uint16 interval; /* interval between neighbor finding attempts (in TU) */ + uint16 duration; /* duration of neighbor finding attempts (in ms) */ + /* specific for the method go after this line */ + struct ether_addr tgt_mac; /* target mac addr for TOF method */ + uint16 ftm_cnt; /* number of the frames txed by initiator */ + uint16 retry_cnt; /* number of retransmit attampts for ftm frames */ + int16 vht_rate; /* ht or vht rate */ + /* add more params required for other methods can be added here */ +} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t; + +typedef struct wl_proxd_seq_config +{ + int16 N_tx_log2; + int16 N_rx_log2; + int16 N_tx_scale; + int16 N_rx_scale; + int16 w_len; + int16 w_offset; +} wl_proxd_seq_config_t; + + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune { + uint32 Ki; /* h/w delay K factor for initiator */ + uint32 Kt; /* h/w delay K factor for target */ + int16 vhtack; /* enable/disable VHT ACK */ + int16 N_log2[TOF_BW_SEQ_NUM]; /* simple threshold crossing */ + int16 w_offset[TOF_BW_NUM]; /* offset of threshold crossing window(per BW) */ + int16 w_len[TOF_BW_NUM]; /* length of threshold crossing window(per BW) */ + int32 maxDT; /* max time difference of T4/T1 or T3/T2 */ + int32 minDT; /* min time difference of T4/T1 or T3/T2 */ + uint8 totalfrmcnt; /* total count of transfered measurement frames */ + uint16 rsv_media; /* reserve media value for TOF */ + uint32 flags; /* flags */ + uint8 core; /* core to use for tx */ + uint8 force_K; /* set to force value of K */ + int16 N_scale[TOF_BW_SEQ_NUM]; /* simple threshold crossing */ + uint8 sw_adj; /* enable sw assisted timestamp adjustment */ + uint8 hw_adj; /* enable hw assisted timestamp adjustment */ + uint8 seq_en; /* enable ranging sequence */ + uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */ + int16 N_log2_2g; /* simple threshold crossing for 2g channel */ + 
int16 N_scale_2g; /* simple threshold crossing for 2g channel */ + wl_proxd_seq_config_t seq_5g20; +} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t; + +typedef struct wl_proxd_params_iovar { + uint16 method; /* Proxmity Detection method */ + union { + /* common params for pdsvc */ + wl_proxd_params_common_t cmn_params; /* common parameters */ + /* method specific */ + wl_proxd_params_rssi_method_t rssi_params; /* RSSI method parameters */ + wl_proxd_params_tof_method_t tof_params; /* TOF meothod parameters */ + /* tune parameters */ + wl_proxd_params_tof_tune_t tof_tune; /* TOF tune parameters */ + } u; /* Method specific optional parameters */ +} wl_proxd_params_iovar_t; + +#define PROXD_COLLECT_GET_STATUS 0 +#define PROXD_COLLECT_SET_STATUS 1 +#define PROXD_COLLECT_QUERY_HEADER 2 +#define PROXD_COLLECT_QUERY_DATA 3 +#define PROXD_COLLECT_QUERY_DEBUG 4 +#define PROXD_COLLECT_REMOTE_REQUEST 5 +#define PROXD_COLLECT_DONE 6 + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query { + uint32 method; /* method */ + uint8 request; /* Query request. */ + uint8 status; /* 0 -- disable, 1 -- enable collection, */ + /* 2 -- enable collection & debug */ + uint16 index; /* The current frame index [0 to total_frames - 1]. */ + uint16 mode; /* Initiator or Target */ + bool busy; /* tof sm is busy */ + bool remote; /* Remote collect data */ +} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header { + uint16 total_frames; /* The totral frames for this collect. */ + uint16 nfft; /* nfft value */ + uint16 bandwidth; /* bandwidth */ + uint16 channel; /* channel number */ + uint32 chanspec; /* channel spec */ + uint32 fpfactor; /* avb timer value factor */ + uint16 fpfactor_shift; /* avb timer value shift bits */ + int32 distance; /* distance calculated by fw */ + uint32 meanrtt; /* mean of RTTs */ + uint32 modertt; /* mode of RTTs */ + uint32 medianrtt; /* median of RTTs */ + uint32 sdrtt; /* standard deviation of RTTs */ + uint32 clkdivisor; /* clock divisor */ + uint16 chipnum; /* chip type */ + uint8 chiprev; /* chip revision */ + uint8 phyver; /* phy version */ + struct ether_addr loaclMacAddr; /* local mac address */ + struct ether_addr remoteMacAddr; /* remote mac address */ + wl_proxd_params_tof_tune_t params; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t; + + +#ifdef WL_NAN +/* ********************** NAN wl interface struct types and defs ******************** */ + +#define WL_NAN_IOCTL_VERSION 0x1 +#define NAN_IOC_BUFSZ 256 /**< some sufficient ioc buff size for our module */ +#define NAN_IOC_BUFSZ_EXT 1024 /* some sufficient ioc buff size for dump commands */ + +/* wl_nan_sub_cmd may also be used in dhd */ +typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t; +typedef int (cmd_handler_t)(void *wl, const wl_nan_sub_cmd_t *cmd, char **argv); +/* nan cmd list entry */ +struct wl_nan_sub_cmd { + char *name; + uint8 version; /* cmd version */ + uint16 id; /* id for the dongle f/w switch/case */ + uint16 type; /* base type of argument */ + cmd_handler_t *handler; /* cmd handler */ +}; + +/* container for nan iovtls & events */ +typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc { + uint16 version; /* interface command or event version */ + uint16 id; /* nan ioctl cmd ID */ + uint16 len; /* total length of all tlv records in data[] */ + uint16 pad; /* pad to be 32 bit aligment */ + uint8 data [1]; /* var len payload of bcm_xtlv_t type */ +} BWL_POST_PACKED_STRUCT wl_nan_ioc_t; + +typedef struct wl_nan_status { + uint8 inited; + uint8 
joined; + uint8 role; + uint8 hop_count; + uint32 chspec; + uint8 amr[8]; /* Anchor Master Rank */ + uint32 cnt_pend_txfrm; /* pending TX frames */ + uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ + struct ether_addr cid; + uint32 chspec_5g; +} wl_nan_status_t; + +typedef struct wl_nan_count { + uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ +} wl_nan_count_t; + +/* various params and ctl swithce for nan_debug instance */ +typedef struct nan_debug_params { + uint8 enabled; /* runtime debuging enabled */ + uint8 collect; /* enables debug svc sdf monitor mode */ + uint16 cmd; /* debug cmd to perform a debug action */ + uint32 msglevel; /* msg level if enabled */ + uint16 status; +} nan_debug_params_t; + +/* time slot */ +#define NAN_MAX_TIMESLOT 32 +typedef struct nan_timeslot { + uint32 abitmap; /* available bitmap */ + uint32 chanlist[NAN_MAX_TIMESLOT]; +} nan_timeslot_t; + +/* nan passive scan params */ +#define NAN_SCAN_MAX_CHCNT 8 +typedef struct nan_scan_params { + uint16 scan_time; + uint16 home_time; + uint16 ms_intvl; /* interval between merge scan */ + uint16 ms_dur; /* duration of merge scan */ + uint16 chspec_num; + uint8 pad[2]; + chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. used 3, 5 rfu */ +} nan_scan_params_t; + +enum wl_nan_role { + WL_NAN_ROLE_AUTO = 0, + WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1, + WL_NAN_ROLE_NON_MASTER_SYNC = 2, + WL_NAN_ROLE_MASTER = 3, + WL_NAN_ROLE_ANCHOR_MASTER = 4 +}; +#define NAN_MASTER_RANK_LEN 8 +/* nan cmd IDs */ +enum wl_nan_cmds { + /* nan cfg /disc & dbg ioctls */ + WL_NAN_CMD_ENABLE = 1, + WL_NAN_CMD_ATTR = 2, + WL_NAN_CMD_NAN_JOIN = 3, + WL_NAN_CMD_LEAVE = 4, + WL_NAN_CMD_MERGE = 5, + WL_NAN_CMD_STATUS = 6, + WL_NAN_CMD_TSRESERVE = 7, + WL_NAN_CMD_TSSCHEDULE = 8, + WL_NAN_CMD_TSRELEASE = 9, + WL_NAN_CMD_OUI = 10, + + WL_NAN_CMD_COUNT = 15, + WL_NAN_CMD_CLEARCOUNT = 16, + + /* discovery engine commands */ + WL_NAN_CMD_PUBLISH = 20, + WL_NAN_CMD_SUBSCRIBE = 21, + WL_NAN_CMD_CANCEL_PUBLISH = 22, + WL_NAN_CMD_CANCEL_SUBSCRIBE = 23, + WL_NAN_CMD_TRANSMIT = 24, + WL_NAN_CMD_CONNECTION = 25, + WL_NAN_CMD_SHOW = 26, + WL_NAN_CMD_STOP = 27, /* stop nan for a given cluster ID */ + /* nan debug iovars & cmds */ + WL_NAN_CMD_SCAN_PARAMS = 46, + WL_NAN_CMD_SCAN = 47, + WL_NAN_CMD_SCAN_RESULTS = 48, + WL_NAN_CMD_EVENT_MASK = 49, + WL_NAN_CMD_EVENT_CHECK = 50, + WL_NAN_CMD_DUMP = 51, + WL_NAN_CMD_CLEAR = 52, + WL_NAN_CMD_RSSI = 53, + + WL_NAN_CMD_DEBUG = 60, + WL_NAN_CMD_TEST1 = 61, + WL_NAN_CMD_TEST2 = 62, + WL_NAN_CMD_TEST3 = 63, + WL_NAN_CMD_DISC_RESULTS = 64 +}; + +/* + * tlv IDs uniquely identifies cmd parameters + * packed into wl_nan_ioc_t container + */ +enum wl_nan_cmd_xtlv_id { + /* 0x00 ~ 0xFF: standard TLV ID whose data format is the same as NAN attribute TLV */ + WL_NAN_XTLV_ZERO = 0, /* used as tlv buf end marker */ +#ifdef NAN_STD_TLV /* rfu, don't use yet */ + WL_NAN_XTLV_MASTER_IND = 1, /* == NAN_ATTR_MASTER_IND, */ + WL_NAN_XTLV_CLUSTER = 2, /* == NAN_ATTR_CLUSTER, */ + WL_NAN_XTLV_VENDOR = 221, /* == NAN_ATTR_VENDOR, */ +#endif + /* 0x02 ~ 0xFF: reserved. 
In case to use with the same data format as NAN attribute TLV */ + /* 0x100 ~ : private TLV ID defined just for NAN command */ + /* common types */ + WL_NAN_XTLV_MAC_ADDR = 0x102, /* used in various cmds */ + WL_NAN_XTLV_REASON = 0x103, + WL_NAN_XTLV_ENABLED = 0x104, + /* explicit types, primarily for discovery engine iovars */ + WL_NAN_XTLV_SVC_PARAMS = 0x120, /* Contains required params: wl_nan_disc_params_t */ + WL_NAN_XTLV_MATCH_RX = 0x121, /* Matching filter to evaluate on receive */ + WL_NAN_XTLV_MATCH_TX = 0x122, /* Matching filter to send */ + WL_NAN_XTLV_SVC_INFO = 0x123, /* Service specific info */ + WL_NAN_XTLV_SVC_NAME = 0x124, /* Optional UTF-8 service name, for debugging. */ + WL_NAN_XTLV_INSTANCE_ID = 0x125, /* Identifies unique publish or subscribe instance */ + WL_NAN_XTLV_PRIORITY = 0x126, /* used in transmit cmd context */ + WL_NAN_XTLV_REQUESTOR_ID = 0x127, /* Requestor instance ID */ + WL_NAN_XTLV_VNDR = 0x128, /* Vendor specific attribute */ + WL_NAN_XTLV_SR_FILTER = 0x129, /* Service Response Filter */ + WL_NAN_XTLV_FOLLOWUP = 0x130, /* Service Info for Follow-Up SDF */ + WL_NAN_XTLV_PEER_INSTANCE_ID = 0x131, /* Used to parse remote instance Id */ + /* explicit types, primarily for NAN MAC iovars */ + WL_NAN_XTLV_DW_LEN = 0x140, /* discovery win length */ + WL_NAN_XTLV_BCN_INTERVAL = 0x141, /* beacon interval, both sync and descovery bcns? */ + WL_NAN_XTLV_CLUSTER_ID = 0x142, + WL_NAN_XTLV_IF_ADDR = 0x143, + WL_NAN_XTLV_MC_ADDR = 0x144, + WL_NAN_XTLV_ROLE = 0x145, + WL_NAN_XTLV_START = 0x146, + + WL_NAN_XTLV_MASTER_PREF = 0x147, + WL_NAN_XTLV_DW_INTERVAL = 0x148, + WL_NAN_XTLV_PTBTT_OVERRIDE = 0x149, + /* nan status command xtlvs */ + WL_NAN_XTLV_MAC_INITED = 0x14a, + WL_NAN_XTLV_MAC_ENABLED = 0x14b, + WL_NAN_XTLV_MAC_CHANSPEC = 0x14c, + WL_NAN_XTLV_MAC_AMR = 0x14d, /* anchormaster rank u8 amr[8] */ + WL_NAN_XTLV_MAC_HOPCNT = 0x14e, + WL_NAN_XTLV_MAC_AMBTT = 0x14f, + WL_NAN_XTLV_MAC_TXRATE = 0x150, + WL_NAN_XTLV_MAC_STATUS = 0x151, /* xtlv payload is nan_status_t */ + WL_NAN_XTLV_NAN_SCANPARAMS = 0x152, /* payload is nan_scan_params_t */ + WL_NAN_XTLV_DEBUGPARAMS = 0x153, /* payload is nan_scan_params_t */ + WL_NAN_XTLV_SUBSCR_ID = 0x154, /* subscriber id */ + WL_NAN_XTLV_PUBLR_ID = 0x155, /* publisher id */ + WL_NAN_XTLV_EVENT_MASK = 0x156, + WL_NAN_XTLV_MASTER_RANK = 0x158, + WL_NAN_XTLV_WARM_UP_TIME = 0x159, + WL_NAN_XTLV_PM_OPTION = 0x15a, + WL_NAN_XTLV_OUI = 0x15b, /* NAN OUI */ + WL_NAN_XTLV_MAC_COUNT = 0x15c, /* xtlv payload is nan_count_t */ + /* nan timeslot management */ + WL_NAN_XTLV_TSRESERVE = 0x160, + WL_NAN_XTLV_TSRELEASE = 0x161, + WL_NAN_XTLV_IDLE_DW_TIMEOUT = 0x162, + WL_NAN_XTLV_IDLE_DW_LEN = 0x163, + WL_NAN_XTLV_RND_FACTOR = 0x164, + WL_NAN_XTLV_SVC_DISC_TXTIME = 0x165, /* svc disc frame tx time in DW */ + WL_NAN_XTLV_OPERATING_BAND = 0x166, + WL_NAN_XTLV_STOP_BCN_TX = 0x167, + WL_NAN_XTLV_CONCUR_SCAN = 0x168, + WL_NAN_XTLV_DUMP_CLR_TYPE = 0x175, /* wl nan dump/clear subtype */ + WL_NAN_XTLV_PEER_RSSI = 0x176, /* xtlv payload for wl nan dump rssi */ + WL_NAN_XTLV_MAC_CHANSPEC_1 = 0x17A, /* to get chanspec[1] */ + WL_NAN_XTLV_DISC_RESULTS = 0x17B, /* get disc results */ + WL_NAN_XTLV_MAC_STATS = 0x17C /* xtlv payload for wl nan dump stats */ +}; + +/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */ +#define WL_NAN_RANGE_LIMITED 0x0040 +/* Bits specific to Publish */ +/* Unsolicited transmissions */ +#define WL_NAN_PUB_UNSOLICIT 0x1000 +/* Solicited transmissions */ +#define WL_NAN_PUB_SOLICIT 0x2000 +#define WL_NAN_PUB_BOTH 
0x3000 +/* Set for broadcast solicited transmission + * Do not set for unicast solicited transmission + */ +#define WL_NAN_PUB_BCAST 0x4000 +/* Generate event on each solicited transmission */ +#define WL_NAN_PUB_EVENT 0x8000 +/* Used for one-time solicited Publish functions to indicate transmision occurred */ +#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 +/* Follow-up frames */ +#define WL_NAN_FOLLOWUP 0x20000 +/* Bits specific to Subscribe */ +/* Active subscribe mode (Leave unset for passive) */ +#define WL_NAN_SUB_ACTIVE 0x1000 + +/* Special values for time to live (ttl) parameter */ +#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF +/* Publish - runs until first transmission + * Subscribe - runs until first DiscoveryResult event + */ +#define WL_NAN_TTL_FIRST 0 + +/* The service hash (service id) is exactly this many bytes. */ +#define WL_NAN_SVC_HASH_LEN 6 + +/* Number of hash functions per bloom filter */ +#define WL_NAN_HASHES_PER_BLOOM 4 + +/* Instance ID type (unique identifier) */ +typedef uint8 wl_nan_instance_id_t; + +/* no. of max last disc results */ +#define WL_NAN_MAX_DISC_RESULTS 3 + +/** Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */ +typedef struct wl_nan_disc_params_s { + /* Periodicity of unsolicited/query transmissions, in DWs */ + uint32 period; + /* Time to live in DWs */ + uint32 ttl; + /* Flag bits */ + uint32 flags; + /* Publish or subscribe service id, i.e. hash of the service name */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; + /* pad to make 4 byte alignment, can be used for something else in the future */ + uint8 pad; + /* Publish or subscribe id */ + wl_nan_instance_id_t instance_id; +} wl_nan_disc_params_t; + +/* recent discovery results */ +typedef struct wl_nan_disc_result_s +{ + wl_nan_instance_id_t instance_id; /* instance id of pub/sub req */ + wl_nan_instance_id_t peer_instance_id; /* peer instance id of pub/sub req/resp */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* service descp string */ + struct ether_addr peer_mac; /* peer mac address */ +} wl_nan_disc_result_t; + +/* list of recent discovery results */ +typedef struct wl_nan_disc_results_s +{ + wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS]; +} wl_nan_disc_results_list_t; + +/* +* desovery interface event structures * +*/ + +/* NAN Ranging */ + +/* Bit defines for global flags */ +#define WL_NAN_RANGING_ENABLE 1 /* enable RTT */ +#define WL_NAN_RANGING_RANGED 2 /* Report to host if ranged as target */ +typedef struct nan_ranging_config { + uint32 chanspec; /* Ranging chanspec */ + uint16 timeslot; /* NAN RTT start time slot 1-511 */ + uint16 duration; /* NAN RTT duration in ms */ + struct ether_addr allow_mac; /* peer initiated ranging: the allowed peer mac + * address, a unicast (for one peer) or + * a broadcast for all. 
Setting it to all zeros + means responding to none, same as not setting + the flag bit NAN_RANGING_RESPOND + */ + uint16 flags; +} wl_nan_ranging_config_t; + +/* list of peers for self initiated ranging */ +/* Bit defines for per peer flags */ +#define WL_NAN_RANGING_REPORT (1<<0) /* Enable reporting range to target */ +typedef struct nan_ranging_peer { + uint32 chanspec; /* desired chanspec for this peer */ + uint32 abitmap; /* available bitmap */ + struct ether_addr ea; /* peer MAC address */ + uint8 frmcnt; /* frame count */ + uint8 retrycnt; /* retry count */ + uint16 flags; /* per peer flags, report or not */ +} wl_nan_ranging_peer_t; +typedef struct nan_ranging_list { + uint8 count; /* number of MAC addresses */ + uint8 num_peers_done; /* host set to 0, when read, shows number of peers + * completed, success or fail + */ + uint8 num_dws; /* time period to do the ranging, specified in dws */ + uint8 reserve; /* reserved field */ + wl_nan_ranging_peer_t rp[1]; /* variable length array of peers */ +} wl_nan_ranging_list_t; + +/* ranging results, a list for self initiated ranging and one for peer initiated ranging */ +/* There will be one structure for each peer */ +#define WL_NAN_RANGING_STATUS_SUCCESS 1 +#define WL_NAN_RANGING_STATUS_FAIL 2 +#define WL_NAN_RANGING_STATUS_TIMEOUT 3 +#define WL_NAN_RANGING_STATUS_ABORT 4 /* with partial results if sounding count > 0 */ +typedef struct nan_ranging_result { + uint8 status; /* 1: Success, 2: Fail, 3: Timeout, 4: Aborted */ + uint8 sounding_count; /* number of measurements completed (0 = failure) */ + struct ether_addr ea; /* initiator MAC address */ + uint32 chanspec; /* Chanspec where the ranging was done */ + uint32 timestamp; /* 32bits of the TSF timestamp ranging was completed at */ + uint32 distance; /* mean distance in meters expressed as Q4 number. + * Only valid when sounding_count > 0. Examples: + * 0x08 = 0.5m + * 0x10 = 1m + * 0x18 = 1.5m + * set to 0xffffffff to indicate invalid number + */ + int32 rtt_var; /* standard deviation in tenths of ns of RTTs measured. 
+ * Only valid when sounding_count > 0 + */ + struct ether_addr tgtea; /* target MAC address */ +} wl_nan_ranging_result_t; +typedef struct nan_ranging_event_data { + uint8 mode; /* 1: Result of host initiated ranging */ + /* 2: Result of peer initiated ranging */ + uint8 reserved; + uint8 success_count; /* number of peers completed successfully */ + uint8 count; /* number of peers in the list */ + wl_nan_ranging_result_t rr[1]; /* variable array of ranging peers */ +} wl_nan_ranging_event_data_t; +enum { + WL_NAN_RSSI_DATA = 1, + WL_NAN_STATS_DATA = 2, +/* + * ***** ADD before this line **** + */ + WL_NAN_INVALID +}; + +typedef struct wl_nan_stats { + /* general */ + uint32 cnt_dw; /* DW slots */ + uint32 cnt_disc_bcn_sch; /* disc beacon slots */ + uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */ + uint32 cnt_bcn_upd; /* count of beacon template updates */ + uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */ + uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */ + uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */ + uint32 cnt_disc_bcn_tx; /* count of disc bcn tx */ + uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */ + uint32 cnt_sdftx_uc; /* count of unicast sdf tx */ + uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */ + uint32 cnt_sdf_rx; /* count of sdf rx */ + /* NAN roles */ + uint32 cnt_am; /* anchor master */ + uint32 cnt_master; /* master */ + uint32 cnt_nms; /* non master sync */ + uint32 cnt_nmns; /* non master non sync */ + /* TX */ + uint32 cnt_err_txtime; /* error in txtime */ + uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */ + uint32 cnt_err_bcn_tx; /* beacon tx error */ + uint32 cnt_sync_bcn_tx_miss; /* no. of times time delta between 2 consecutive + * sync beacons is more than dw interval + */ + /* SCANS */ + uint32 cnt_mrg_scan; /* count of merge scans completed */ + uint32 cnt_err_ms_rej; /* number of merge scans failed */ + uint32 cnt_scan_results; /* no. of nan beacons scanned */ + uint32 cnt_join_scan_rej; /* no. of join scans rejected */ + uint32 cnt_nan_scan_abort; /* no. of nan scans aborted */ + /* enable/disable */ + uint32 cnt_nan_enab; /* no. of times nan feature got enabled */ + uint32 cnt_nan_disab; /* no. 
of times nan feature got disabled */ +} wl_nan_stats_t; + +#define WL_NAN_MAC_MAX_NAN_PEERS 6 +#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER 10 + +typedef struct wl_nan_nbr_rssi { + uint8 rx_chan; /* channel number on which bcn rcvd */ + int rssi_raw; /* received rssi value */ + int rssi_avg; /* normalized rssi value */ +} wl_nan_peer_rssi_t; + +typedef struct wl_nan_peer_rssi_entry { + struct ether_addr mac; /* peer mac address */ + uint8 flags; /* TODO:rssi data order: latest first, oldest first etc */ + uint8 rssi_cnt; /* rssi data sample present */ + wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data frm peer */ +} wl_nan_peer_rssi_entry_t; + +#define WL_NAN_PEER_RSSI 0x1 +#define WL_NAN_PEER_RSSI_LIST 0x2 + +typedef struct wl_nan_nbr_rssi_data { + uint8 flags; /* this is a list or single rssi data */ + uint8 peer_cnt; /* number of peers */ + uint16 pad; /* padding */ + wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */ +} wl_nan_peer_rssi_data_t; + +/* ********************* end of NAN section ******************************** */ +#endif /* WL_NAN */ + + +#define RSSI_THRESHOLD_SIZE 16 +#define MAX_IMP_RESP_SIZE 256 + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias { + int32 version; /* version */ + int32 threshold[RSSI_THRESHOLD_SIZE]; /* threshold */ + int32 peak_offset; /* peak offset */ + int32 bias; /* rssi bias */ + int32 gd_delta; /* GD - GD_ADJ */ + int32 imp_resp[MAX_IMP_RESP_SIZE]; /* (Hi*Hi)+(Hr*Hr) */ +} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias_avg { + int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /* avg threshold */ + int32 avg_peak_offset; /* avg peak offset */ + int32 avg_rssi; /* avg rssi */ + int32 avg_bias; /* avg bias */ +} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_avg_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info { + uint16 type; /* type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */ + uint16 index; /* The current frame index, from 1 to total_frames. 
*/ + uint16 tof_cmd; /* M_TOF_CMD */ + uint16 tof_rsp; /* M_TOF_RSP */ + uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */ + uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */ + uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */ + uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */ + uint16 tof_id; /* M_TOF_ID */ + uint8 tof_frame_type; + uint8 tof_frame_bw; + int8 tof_rssi; + int32 tof_cfo; + int32 gd_adj_ns; /* gound delay */ + int32 gd_h_adj_ns; /* group delay + threshold crossing */ +#ifdef RSSI_REFINE + wl_proxd_rssi_bias_t rssi_bias; /* RSSI refinement info */ +#endif + int16 nfft; /* number of samples stored in H */ + +} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t; + +#define k_tof_collect_H_pad 1 +#define k_tof_collect_H_size (256+16+k_tof_collect_H_pad) +#define k_tof_collect_Hraw_size (2*k_tof_collect_H_size) +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data { + wl_proxd_collect_info_t info; + uint32 H[k_tof_collect_H_size]; /* raw data read from phy used to adjust timestamps */ + +} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_debug_data { + uint8 count; /* number of packets */ + uint8 stage; /* state machone stage */ + uint8 received; /* received or txed */ + uint8 paket_type; /* packet type */ + uint8 category; /* category field */ + uint8 action; /* action field */ + uint8 token; /* token number */ + uint8 follow_token; /* following token number */ + uint16 index; /* index of the packet */ + uint16 tof_cmd; /* M_TOF_CMD */ + uint16 tof_rsp; /* M_TOF_RSP */ + uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */ + uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */ + uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */ + uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */ + uint16 tof_id; /* M_TOF_ID */ + uint16 tof_status0; /* M_TOF_STATUS_0 */ + uint16 tof_status2; /* M_TOF_STATUS_2 */ + uint16 tof_chsm0; /* M_TOF_CHNSM_0 */ + uint16 tof_phyctl0; /* M_TOF_PHYCTL0 */ + uint16 tof_phyctl1; /* M_TOF_PHYCTL1 */ + uint16 tof_phyctl2; /* M_TOF_PHYCTL2 */ + uint16 tof_lsig; /* M_TOF_LSIG */ + uint16 tof_vhta0; /* M_TOF_VHTA0 */ + uint16 tof_vhta1; /* M_TOF_VHTA1 */ + uint16 tof_vhta2; /* M_TOF_VHTA2 */ + uint16 tof_vhtb0; /* M_TOF_VHTB0 */ + uint16 tof_vhtb1; /* M_TOF_VHTB1 */ + uint16 tof_apmductl; /* M_TOF_AMPDU_CTL */ + uint16 tof_apmdudlim; /* M_TOF_AMPDU_DLIM */ + uint16 tof_apmdulen; /* M_TOF_AMPDU_LEN */ +} BWL_POST_PACKED_STRUCT wl_proxd_debug_data_t; + +/* version of the wl_wsec_info structure */ +#define WL_WSEC_INFO_VERSION 0x01 + +/* start enum value for BSS properties */ +#define WL_WSEC_INFO_BSS_BASE 0x0100 + +/* size of len and type fields of wl_wsec_info_tlv_t struct */ +#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data) + +/* Allowed wl_wsec_info properties; not all of them may be supported. 
*/ +typedef enum { + WL_WSEC_INFO_NONE = 0, + WL_WSEC_INFO_MAX_KEYS = 1, + WL_WSEC_INFO_NUM_KEYS = 2, + WL_WSEC_INFO_NUM_HW_KEYS = 3, + WL_WSEC_INFO_MAX_KEY_IDX = 4, + WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5, + WL_WSEC_INFO_SUPPORTED_ALGOS = 6, + WL_WSEC_INFO_MAX_KEY_LEN = 7, + WL_WSEC_INFO_FLAGS = 8, + /* add global/per-wlc properties above */ + WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1), + WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2), + WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3), + WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4), + WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5), + /* add per-BSS properties above */ + WL_WSEC_INFO_MAX = 0xffff +} wl_wsec_info_type_t; + +/* tlv used to return wl_wsec_info properties */ +typedef struct { + uint16 type; + uint16 len; /* data length */ + uint8 data[1]; /* data follows */ +} wl_wsec_info_tlv_t; + +/* input/output data type for wsec_info iovar */ +typedef struct wl_wsec_info { + uint8 version; /* structure version */ + uint8 pad[2]; + uint8 num_tlvs; + wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */ +} wl_wsec_info_t; + +/* + * scan MAC definitions + */ + +/* common iovar struct */ +typedef struct wl_scanmac { + uint16 subcmd_id; /* subcommand id */ + uint16 len; /* total length of data[] */ + uint8 data[1]; /* subcommand data */ +} wl_scanmac_t; + +/* subcommand ids */ +#define WL_SCANMAC_SUBCMD_ENABLE 0 +#define WL_SCANMAC_SUBCMD_BSSCFG 1 /* only GET supported */ +#define WL_SCANMAC_SUBCMD_CONFIG 2 + +/* scanmac enable data struct */ +typedef struct wl_scanmac_enable { + uint8 enable; /* 1 - enable, 0 - disable */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_scanmac_enable_t; + +/* scanmac bsscfg data struct */ +typedef struct wl_scanmac_bsscfg { + uint32 bsscfg; /* bsscfg index */ +} wl_scanmac_bsscfg_t; + +/* scanmac config data struct */ +typedef struct wl_scanmac_config { + struct ether_addr mac; /* 6 bytes of MAC address or MAC prefix (i.e. 
OUI) */ + struct ether_addr random_mask; /* randomized bits on each scan */ + uint16 scan_bitmap; /* scans to use this MAC address */ + uint8 pad[2]; /* 4-byte struct alignment */ +} wl_scanmac_config_t; + +/* scan bitmap */ +#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0) /* unassociated scans */ +#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /* associated roam scans */ +#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /* associated PNO scans */ +#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /* associated host scans */ + +/* no default structure packing */ +#include + +enum rssi_reason { + RSSI_REASON_UNKNOW = 0, + RSSI_REASON_LOWRSSI = 1, + RSSI_REASON_NSYC = 2, + RSSI_REASON_TIMEOUT = 3 +}; + +enum tof_reason { + TOF_REASON_OK = 0, + TOF_REASON_REQEND = 1, + TOF_REASON_TIMEOUT = 2, + TOF_REASON_NOACK = 3, + TOF_REASON_INVALIDAVB = 4, + TOF_REASON_INITIAL = 5, + TOF_REASON_ABORT = 6 +}; + +enum rssi_state { + RSSI_STATE_POLL = 0, + RSSI_STATE_TPAIRING = 1, + RSSI_STATE_IPAIRING = 2, + RSSI_STATE_THANDSHAKE = 3, + RSSI_STATE_IHANDSHAKE = 4, + RSSI_STATE_CONFIRMED = 5, + RSSI_STATE_PIPELINE = 6, + RSSI_STATE_NEGMODE = 7, + RSSI_STATE_MONITOR = 8, + RSSI_STATE_LAST = 9 +}; + +enum tof_state { + TOF_STATE_IDLE = 0, + TOF_STATE_IWAITM = 1, + TOF_STATE_TWAITM = 2, + TOF_STATE_ILEGACY = 3, + TOF_STATE_IWAITCL = 4, + TOF_STATE_TWAITCL = 5, + TOF_STATE_ICONFIRM = 6, + TOF_STATE_IREPORT = 7 +}; + +enum tof_mode_type { + TOF_LEGACY_UNKNOWN = 0, + TOF_LEGACY_AP = 1, + TOF_NONLEGACY_AP = 2 +}; + +enum tof_way_type { + TOF_TYPE_ONE_WAY = 0, + TOF_TYPE_TWO_WAY = 1, + TOF_TYPE_REPORT = 2 +}; + +enum tof_rate_type { + TOF_FRAME_RATE_VHT = 0, + TOF_FRAME_RATE_LEGACY = 1 +}; + +#define TOF_ADJ_TYPE_NUM 4 /* number of assisted timestamp adjustment */ +enum tof_adj_mode { + TOF_ADJ_SOFTWARE = 0, + TOF_ADJ_HARDWARE = 1, + TOF_ADJ_SEQ = 2, + TOF_ADJ_NONE = 3 +}; + +#define FRAME_TYPE_NUM 4 /* number of frame type */ +enum frame_type { + FRAME_TYPE_CCK = 0, + FRAME_TYPE_OFDM = 1, + FRAME_TYPE_11N = 2, + FRAME_TYPE_11AC = 3 +}; + +typedef struct wl_proxd_status_iovar { + uint16 method; /* method */ + uint8 mode; /* mode */ + uint8 peermode; /* peer mode */ + uint8 state; /* state */ + uint8 reason; /* reason code */ + uint32 distance; /* distance */ + uint32 txcnt; /* tx pkt counter */ + uint32 rxcnt; /* rx pkt counter */ + struct ether_addr peer; /* peer mac address */ + int8 avg_rssi; /* average rssi */ + int8 hi_rssi; /* highest rssi */ + int8 low_rssi; /* lowest rssi */ + uint32 dbgstatus; /* debug status */ + uint16 frame_type_cnt[FRAME_TYPE_NUM]; /* frame types */ + uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM]; /* adj types HW/SW */ +} wl_proxd_status_iovar_t; + +#ifdef NET_DETECT +typedef struct net_detect_adapter_features { + bool wowl_enabled; + bool net_detect_enabled; + bool nlo_enabled; +} net_detect_adapter_features_t; + +typedef enum net_detect_bss_type { + nd_bss_any = 0, + nd_ibss, + nd_ess +} net_detect_bss_type_t; + +typedef struct net_detect_profile { + wlc_ssid_t ssid; + net_detect_bss_type_t bss_type; /* Ignore for now since Phase 1 is only for ESS */ + uint32 cipher_type; /* DOT11_CIPHER_ALGORITHM enumeration values */ + uint32 auth_type; /* DOT11_AUTH_ALGORITHM enumeration values */ +} net_detect_profile_t; + +typedef struct net_detect_profile_list { + uint32 num_nd_profiles; + net_detect_profile_t nd_profile[0]; +} net_detect_profile_list_t; + +typedef struct net_detect_config { + bool nd_enabled; + uint32 scan_interval; + uint32 wait_period; + bool wake_if_connected; + bool wake_if_disconnected; + 
net_detect_profile_list_t nd_profile_list; +} net_detect_config_t; + +typedef enum net_detect_wake_reason { + nd_reason_unknown, + nd_net_detected, + nd_wowl_event, + nd_ucode_error +} net_detect_wake_reason_t; + +typedef struct net_detect_wake_data { + net_detect_wake_reason_t nd_wake_reason; + uint32 nd_wake_date_length; + uint8 nd_wake_data[0]; /* Wake data (currently unused) */ +} net_detect_wake_data_t; + +#endif /* NET_DETECT */ + +/* (unversioned, deprecated) */ +typedef struct bcnreq { + uint8 bcn_mode; + int dur; + int channel; + struct ether_addr da; + uint16 random_int; + wlc_ssid_t ssid; + uint16 reps; +} bcnreq_t; + +#define WL_RRM_BCN_REQ_VER 1 +typedef struct bcn_req { + uint8 version; + uint8 bcn_mode; + uint8 pad_1[2]; + int32 dur; + int32 channel; + struct ether_addr da; + uint16 random_int; + wlc_ssid_t ssid; + uint16 reps; + uint8 req_elements; + uint8 pad_2; + chanspec_list_t chspec_list; +} bcn_req_t; + +typedef struct rrmreq { + struct ether_addr da; + uint8 reg; + uint8 chan; + uint16 random_int; + uint16 dur; + uint16 reps; +} rrmreq_t; + +typedef struct framereq { + struct ether_addr da; + uint8 reg; + uint8 chan; + uint16 random_int; + uint16 dur; + struct ether_addr ta; + uint16 reps; +} framereq_t; + +typedef struct statreq { + struct ether_addr da; + struct ether_addr peer; + uint16 random_int; + uint16 dur; + uint8 group_id; + uint16 reps; +} statreq_t; + +#define WL_RRM_RPT_VER 0 +#define WL_RRM_RPT_MAX_PAYLOAD 256 +#define WL_RRM_RPT_MIN_PAYLOAD 7 +#define WL_RRM_RPT_FALG_ERR 0 +#define WL_RRM_RPT_FALG_GRP_ID_PROPR (1 << 0) +#define WL_RRM_RPT_FALG_GRP_ID_0 (1 << 1) +typedef struct { + uint16 ver; /* version */ + struct ether_addr addr; /* STA MAC addr */ + uint32 timestamp; /* timestamp of the report */ + uint16 flag; /* flag */ + uint16 len; /* length of payload data */ + unsigned char data[WL_RRM_RPT_MAX_PAYLOAD]; +} statrpt_t; + +typedef struct wlc_l2keepalive_ol_params { + uint8 flags; + uint8 prio; + uint16 period_ms; +} wlc_l2keepalive_ol_params_t; + +typedef struct wlc_dwds_config { + uint32 enable; + uint32 mode; /* STA/AP interface */ + struct ether_addr ea; +} wlc_dwds_config_t; + +typedef struct wl_el_set_params_s { + uint8 set; /* Set number */ + uint32 size; /* Size to make/expand */ +} wl_el_set_params_t; + +typedef struct wl_el_tag_params_s { + uint16 tag; + uint8 set; + uint8 flags; +} wl_el_tag_params_t; + +/* Video Traffic Interference Monitor config */ +#define INTFER_VERSION 1 +typedef struct wl_intfer_params { + uint16 version; /* version */ + uint8 period; /* sample period */ + uint8 cnt; /* sample cnt */ + uint8 txfail_thresh; /* non-TCP txfail threshold */ + uint8 tcptxfail_thresh; /* tcptxfail threshold */ +} wl_intfer_params_t; + +typedef struct wl_staprio_cfg { + struct ether_addr ea; /* mac addr */ + uint8 prio; /* scb priority */ +} wl_staprio_cfg_t; + +typedef enum wl_stamon_cfg_cmd_type { + STAMON_CFG_CMD_DEL = 0, + STAMON_CFG_CMD_ADD = 1 +} wl_stamon_cfg_cmd_type_t; + +typedef struct wlc_stamon_sta_config { + wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */ + struct ether_addr ea; +} wlc_stamon_sta_config_t; + +#ifdef SR_DEBUG +typedef struct /* pmu_reg */{ + uint32 pmu_control; + uint32 pmu_capabilities; + uint32 pmu_status; + uint32 res_state; + uint32 res_pending; + uint32 pmu_timer1; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 pmu_chipcontrol1[4]; + uint32 pmu_regcontrol[5]; + uint32 pmu_pllcontrol[5]; + uint32 pmu_rsrc_up_down_timer[31]; + uint32 rsrc_dep_mask[31]; +} pmu_reg_t; +#endif /* pmu_reg */ + 
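For readers less familiar with how these configuration structures are consumed, here is a minimal, illustrative host-side sketch (not part of this patch) showing how a user-space tool might fill wlc_stamon_sta_config_t, defined above, to start monitoring a station. The iovar name "sta_monitor" and the wl_iovar_set() helper are assumptions about the surrounding wl utility plumbing, not something defined by this header.

/* Illustrative only -- not part of the patch. Assumes the Broadcom typedefs
 * (struct ether_addr, wlc_stamon_sta_config_t, STAMON_CFG_CMD_ADD) from the
 * header above are in scope, and that the host tool provides an iovar setter
 * such as wl_iovar_set() (hypothetical).
 */
#include <string.h>

extern int wl_iovar_set(void *wl, const char *name, void *buf, int len); /* hypothetical */

static int stamon_add_sta(void *wl, const unsigned char mac[6])
{
	wlc_stamon_sta_config_t cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.cmd = STAMON_CFG_CMD_ADD;		/* STAMON_CFG_CMD_DEL would remove the entry */
	memcpy(&cfg.ea, mac, sizeof(cfg.ea));	/* MAC address of the STA to monitor */

	/* "sta_monitor" is an assumed iovar name; the actual name depends on the driver */
	return wl_iovar_set(wl, "sta_monitor", &cfg, sizeof(cfg));
}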
+typedef struct wl_taf_define { + struct ether_addr ea; /* STA MAC or 0xFF... */ + uint16 version; /* version */ + uint32 sch; /* method index */ + uint32 prio; /* priority */ + uint32 misc; /* used for return value */ + char text[1]; /* used to pass and return ascii text */ +} wl_taf_define_t; + +/* Received Beacons lengths information */ +#define WL_LAST_BCNS_INFO_FIXED_LEN OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring) +typedef struct wlc_bcn_len_hist { + uint16 ver; /* version field */ + uint16 cur_index; /* current pointed index in ring buffer */ + uint32 max_bcnlen; /* Max beacon length received */ + uint32 min_bcnlen; /* Min beacon length received */ + uint32 ringbuff_len; /* Length of the ring buffer 'bcnlen_ring' */ + uint32 bcnlen_ring[1]; /* ring buffer storing received beacon lengths */ +} wlc_bcn_len_hist_t; + +/* WDS net interface types */ +#define WL_WDSIFTYPE_NONE 0x0 /* The interface type is neither WDS nor DWDS. */ +#define WL_WDSIFTYPE_WDS 0x1 /* The interface is WDS type. */ +#define WL_WDSIFTYPE_DWDS 0x2 /* The interface is DWDS type. */ + +typedef struct wl_bssload_static { + bool is_static; + uint16 sta_count; + uint8 chan_util; + uint16 aac; +} wl_bssload_static_t; + + +/* IO Var Operations - the Value of iov_op In wlc_ap_doiovar */ +typedef enum wlc_ap_iov_operation { + WLC_AP_IOV_OP_DELETE = -1, + WLC_AP_IOV_OP_DISABLE = 0, + WLC_AP_IOV_OP_ENABLE = 1, + WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 2, + WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3, + WLC_AP_IOV_OP_MOVE = 4 +} wlc_ap_iov_oper_t; + +/* LTE coex info */ +/* Analogue of HCI Set MWS Signaling cmd */ +typedef struct { + uint16 mws_rx_assert_offset; + uint16 mws_rx_assert_jitter; + uint16 mws_rx_deassert_offset; + uint16 mws_rx_deassert_jitter; + uint16 mws_tx_assert_offset; + uint16 mws_tx_assert_jitter; + uint16 mws_tx_deassert_offset; + uint16 mws_tx_deassert_jitter; + uint16 mws_pattern_assert_offset; + uint16 mws_pattern_assert_jitter; + uint16 mws_inact_dur_assert_offset; + uint16 mws_inact_dur_assert_jitter; + uint16 mws_scan_freq_assert_offset; + uint16 mws_scan_freq_assert_jitter; + uint16 mws_prio_assert_offset_req; +} wci2_config_t; + +/* Analogue of HCI MWS Channel Params */ +typedef struct { + uint16 mws_rx_center_freq; /* MHz */ + uint16 mws_tx_center_freq; + uint16 mws_rx_channel_bw; /* KHz */ + uint16 mws_tx_channel_bw; + uint8 mws_channel_en; + uint8 mws_channel_type; /* Don't care for WLAN? 
*/ +} mws_params_t; + +/* MWS wci2 message */ +typedef struct { + uint8 mws_wci2_data; /* BT-SIG msg */ + uint16 mws_wci2_interval; /* Interval in us */ + uint16 mws_wci2_repeat; /* No of msgs to send */ +} mws_wci2_msg_t; + +typedef struct { + uint32 config; /* MODE: AUTO (-1), Disable (0), Enable (1) */ + uint32 status; /* Current state: Disabled (0), Enabled (1) */ +} wl_config_t; + +#define WLC_RSDB_MODE_AUTO_MASK 0x80 +#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK)))) + +#define WL_IF_STATS_T_VERSION 1 /* current version of wl_if_stats structure */ + +/* per interface counters */ +typedef struct wl_if_stats { + uint16 version; /* version of the structure */ + uint16 length; /* length of the entire structure */ + uint32 PAD; /* padding */ + + /* transmit stat counters */ + uint64 txframe; /* tx data frames */ + uint64 txbyte; /* tx data bytes */ + uint64 txerror; /* tx data errors (derived: sum of others) */ + uint64 txnobuf; /* tx out of buffer errors */ + uint64 txrunt; /* tx runt frames */ + uint64 txfail; /* tx failed frames */ + uint64 txretry; /* tx retry frames */ + uint64 txretrie; /* tx multiple retry frames */ + uint64 txfrmsnt; /* tx sent frames */ + uint64 txmulti; /* tx mulitcast sent frames */ + uint64 txfrag; /* tx fragments sent */ + + /* receive stat counters */ + uint64 rxframe; /* rx data frames */ + uint64 rxbyte; /* rx data bytes */ + uint64 rxerror; /* rx data errors (derived: sum of others) */ + uint64 rxnobuf; /* rx out of buffer errors */ + uint64 rxrunt; /* rx runt frames */ + uint64 rxfragerr; /* rx fragment errors */ + uint64 rxmulti; /* rx multicast frames */ +} +wl_if_stats_t; + +typedef struct wl_band { + uint16 bandtype; /* WL_BAND_2G, WL_BAND_5G */ + uint16 bandunit; /* bandstate[] index */ + uint16 phytype; /* phytype */ + uint16 phyrev; +} +wl_band_t; + +#define WL_WLC_VERSION_T_VERSION 1 /* current version of wlc_version structure */ + +/* wlc interface version */ +typedef struct wl_wlc_version { + uint16 version; /* version of the structure */ + uint16 length; /* length of the entire structure */ + + /* epi version numbers */ + uint16 epi_ver_major; /* epi major version number */ + uint16 epi_ver_minor; /* epi minor version number */ + uint16 epi_rc_num; /* epi RC number */ + uint16 epi_incr_num; /* epi increment number */ + + /* wlc interface version numbers */ + uint16 wlc_ver_major; /* wlc interface major version number */ + uint16 wlc_ver_minor; /* wlc interface minor version number */ +} +wl_wlc_version_t; + +/* Version of WLC interface to be returned as a part of wl_wlc_version structure. + * For the discussion related to versions update policy refer to + * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlShimAbstractionLayer + * For now the policy is to increment WLC_VERSION_MAJOR each time + * there is a change that involves both WLC layer and per-port layer. + * WLC_VERSION_MINOR is currently not in use. 
+ */ +#define WLC_VERSION_MAJOR 3 +#define WLC_VERSION_MINOR 0 + +/* begin proxd definitions */ +#include + +#define WL_PROXD_API_VERSION 0x0300 /* version 3.0 */ + +/* Minimum supported API version */ +#define WL_PROXD_API_MIN_VERSION 0x0300 + +/* proximity detection methods */ +enum { + WL_PROXD_METHOD_NONE = 0, + WL_PROXD_METHOD_RSVD1 = 1, /* backward compatibility - RSSI, not supported */ + WL_PROXD_METHOD_TOF = 2, + WL_PROXD_METHOD_RSVD2 = 3, /* 11v only - if needed */ + WL_PROXD_METHOD_FTM = 4, /* IEEE rev mc/2014 */ + WL_PROXD_METHOD_MAX +}; +typedef int16 wl_proxd_method_t; + +/* global and method configuration flags */ +enum { + WL_PROXD_FLAG_NONE = 0x00000000, + WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /* respond to requests */ + WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /* 11mc range requests enabled */ + WL_PROXD_FLAG_TX_LCI = 0x00000004, /* transmit location, if available */ + WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /* tx civic loc, if available */ + WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /* respond to requests w/o host action */ + WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /* continue requests w/o host action */ + WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /* publish availability */ + WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /* schedule using availability */ + WL_PROXD_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_flags_t; + +#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \ + WL_PROXD_FLAG_AVAIL_SCHEDULE) + +/* session flags */ +enum { + WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /* no flags */ + WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /* local device is initiator */ + WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /* local device is target */ + WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /* (initiated) 1-way rtt */ + WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /* created w/ rx_auto_burst */ + WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /* good until cancelled */ + WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /* rtt detail in results */ + WL_PROXD_SESSION_FLAG_TOF_COMPAT = 0x00000040, /* TOF compatibility - TBD */ + WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /* AOA along w/ RTT */ + WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /* Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /* Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /* Use NAN BSS, if applicable */ + WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /* e.g. 
FTM1 - cap or rx */ + WL_PROXD_SESSION_FLAG_REPORT_FAILURE= 0x00002000, /* report failure to target */ + WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /* report distance to target */ + WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, /* No channel switching */ + WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /* netrual mode */ + WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /* Toast */ + WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /* no param override from target */ + WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /* ASAP session */ + WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /* transmit LCI req */ + WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /* transmit civic loc req */ + WL_PROXD_SESSION_FLAG_COLLECT = 0x80000000, /* debug - collect */ + WL_PROXD_SESSION_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_session_flags_t; + +/* time units - mc supports up to 0.1ns resolution */ +enum { + WL_PROXD_TMU_TU = 0, /* 1024us */ + WL_PROXD_TMU_SEC = 1, + WL_PROXD_TMU_MILLI_SEC = 2, + WL_PROXD_TMU_MICRO_SEC = 3, + WL_PROXD_TMU_NANO_SEC = 4, + WL_PROXD_TMU_PICO_SEC = 5 +}; +typedef int16 wl_proxd_tmu_t; + +/* time interval e.g. 10ns */ +typedef struct wl_proxd_intvl { + uint32 intvl; + wl_proxd_tmu_t tmu; + uint8 pad[2]; +} wl_proxd_intvl_t; + +/* commands that can apply to proxd, method or a session */ +enum { + WL_PROXD_CMD_NONE = 0, + WL_PROXD_CMD_GET_VERSION = 1, + WL_PROXD_CMD_ENABLE = 2, + WL_PROXD_CMD_DISABLE = 3, + WL_PROXD_CMD_CONFIG = 4, + WL_PROXD_CMD_START_SESSION = 5, + WL_PROXD_CMD_BURST_REQUEST = 6, + WL_PROXD_CMD_STOP_SESSION = 7, + WL_PROXD_CMD_DELETE_SESSION = 8, + WL_PROXD_CMD_GET_RESULT = 9, + WL_PROXD_CMD_GET_INFO = 10, + WL_PROXD_CMD_GET_STATUS = 11, + WL_PROXD_CMD_GET_SESSIONS = 12, + WL_PROXD_CMD_GET_COUNTERS = 13, + WL_PROXD_CMD_CLEAR_COUNTERS = 14, + WL_PROXD_CMD_COLLECT = 15, + WL_PROXD_CMD_TUNE = 16, + WL_PROXD_CMD_DUMP = 17, + WL_PROXD_CMD_START_RANGING = 18, + WL_PROXD_CMD_STOP_RANGING = 19, + WL_PROXD_CMD_GET_RANGING_INFO = 20, + WL_PROXD_CMD_IS_TLV_SUPPORTED = 21, + + WL_PROXD_CMD_MAX +}; +typedef int16 wl_proxd_cmd_t; + +/* session ids: + * id 0 is reserved + * ids 1..0x7fff - allocated by host/app + * 0x8000-0xffff - allocated by firmware, used for auto/rx + */ +enum { + WL_PROXD_SESSION_ID_GLOBAL = 0 +}; + +#define WL_PROXD_SID_HOST_MAX 0x7fff +#define WL_PROXD_SID_HOST_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_HOST_MAX) + +/* maximum number sessions that can be allocated, may be less if tunable */ +#define WL_PROXD_MAX_SESSIONS 16 + +typedef uint16 wl_proxd_session_id_t; + +/* status - TBD BCME_ vs proxd status - range reserved for BCME_ */ +enum { + WL_PROXD_E_POLICY = -1045, + WL_PROXD_E_INCOMPLETE = -1044, + WL_PROXD_E_OVERRIDDEN = -1043, + WL_PROXD_E_ASAP_FAILED = -1042, + WL_PROXD_E_NOTSTARTED = -1041, + WL_PROXD_E_INVALIDAVB = -1040, + WL_PROXD_E_INCAPABLE = -1039, + WL_PROXD_E_MISMATCH = -1038, + WL_PROXD_E_DUP_SESSION = -1037, + WL_PROXD_E_REMOTE_FAIL = -1036, + WL_PROXD_E_REMOTE_INCAPABLE = -1035, + WL_PROXD_E_SCHED_FAIL = -1034, + WL_PROXD_E_PROTO = -1033, + WL_PROXD_E_EXPIRED = -1032, + WL_PROXD_E_TIMEOUT = -1031, + WL_PROXD_E_NOACK = -1030, + WL_PROXD_E_DEFERRED = -1029, + WL_PROXD_E_INVALID_SID = -1028, + WL_PROXD_E_REMOTE_CANCEL = -1027, + WL_PROXD_E_CANCELED = -1026, /* local */ + WL_PROXD_E_INVALID_SESSION = -1025, + WL_PROXD_E_BAD_STATE = -1024, + WL_PROXD_E_ERROR = -1, + WL_PROXD_E_OK = 0 +}; +typedef int32 wl_proxd_status_t; + +/* session states */ +enum { + WL_PROXD_SESSION_STATE_NONE = 0, + WL_PROXD_SESSION_STATE_CREATED = 1, + 
WL_PROXD_SESSION_STATE_CONFIGURED = 2, + WL_PROXD_SESSION_STATE_STARTED = 3, + WL_PROXD_SESSION_STATE_DELAY = 4, + WL_PROXD_SESSION_STATE_USER_WAIT = 5, + WL_PROXD_SESSION_STATE_SCHED_WAIT = 6, + WL_PROXD_SESSION_STATE_BURST = 7, + WL_PROXD_SESSION_STATE_STOPPING = 8, + WL_PROXD_SESSION_STATE_ENDED = 9, + WL_PROXD_SESSION_STATE_DESTROYING = -1 +}; +typedef int16 wl_proxd_session_state_t; + +/* RTT sample flags */ +enum { + WL_PROXD_RTT_SAMPLE_NONE = 0x00, + WL_PROXD_RTT_SAMPLE_DISCARD = 0x01 +}; +typedef uint8 wl_proxd_rtt_sample_flags_t; + +typedef struct wl_proxd_rtt_sample { + uint8 id; /* id for the sample - non-zero */ + wl_proxd_rtt_sample_flags_t flags; + int16 rssi; + wl_proxd_intvl_t rtt; /* round trip time */ + uint32 ratespec; +} wl_proxd_rtt_sample_t; + +/* result flags */ +enum { + WL_PRXOD_RESULT_FLAG_NONE = 0x0000, + WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /* LOS - if available */ + WL_PROXD_RESULT_FLAG_LOS = 0x0002, /* NLOS - if available */ + WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /* Fatal error during burst */ + WL_PROXD_RESULT_FLAG_ALL = 0xffff +}; +typedef int16 wl_proxd_result_flags_t; + +/* rtt measurement result */ +typedef struct wl_proxd_rtt_result { + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; /* current state */ + union { + wl_proxd_intvl_t retry_after; /* hint for errors */ + wl_proxd_intvl_t burst_duration; /* burst duration */ + } u; + wl_proxd_rtt_sample_t avg_rtt; + uint32 avg_dist; /* 1/256m units */ + uint16 sd_rtt; /* RTT standard deviation */ + uint8 num_valid_rtt; /* valid rtt cnt */ + uint8 num_ftm; /* actual num of ftm cnt */ + uint16 burst_num; /* in a session */ + uint16 num_rtt; /* 0 if no detail */ + wl_proxd_rtt_sample_t rtt[1]; /* variable */ +} wl_proxd_rtt_result_t; + +/* aoa measurement result */ +typedef struct wl_proxd_aoa_result { + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; + uint16 burst_num; + uint8 pad[2]; + /* wl_proxd_aoa_sample_t sample_avg; TBD */ +} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t; + +/* global stats */ +typedef struct wl_proxd_counters { + uint32 tx; /* tx frame count */ + uint32 rx; /* rx frame count */ + uint32 burst; /* total number of burst */ + uint32 sessions; /* total number of sessions */ + uint32 max_sessions; /* max concurrency */ + uint32 sched_fail; /* scheduling failures */ + uint32 timeouts; /* timeouts */ + uint32 protoerr; /* protocol errors */ + uint32 noack; /* tx w/o ack */ + uint32 txfail; /* any tx falure */ + uint32 lci_req_tx; /* tx LCI requests */ + uint32 lci_req_rx; /* rx LCI requests */ + uint32 lci_rep_tx; /* tx LCI reports */ + uint32 lci_rep_rx; /* rx LCI reports */ + uint32 civic_req_tx; /* tx civic requests */ + uint32 civic_req_rx; /* rx civic requests */ + uint32 civic_rep_tx; /* tx civic reports */ + uint32 civic_rep_rx; /* rx civic reports */ + uint32 rctx; /* ranging contexts created */ + uint32 rctx_done; /* count of ranging done */ + uint32 publish_err; /* availability publishing errors */ + uint32 on_chan; /* count of scheduler onchan */ + uint32 off_chan; /* count of scheduler offchan */ +} wl_proxd_counters_t; + +typedef struct wl_proxd_counters wl_proxd_session_counters_t; + +enum { + WL_PROXD_CAP_NONE = 0x0000, + WL_PROXD_CAP_ALL = 0xffff +}; +typedef int16 wl_proxd_caps_t; + +/* method capabilities */ +enum { + WL_PROXD_FTM_CAP_NONE = 0x0000, + WL_PROXD_FTM_CAP_FTM1 = 
0x0001 +}; +typedef uint16 wl_proxd_ftm_caps_t; + +typedef struct BWL_PRE_PACKED_STRUCT wl_proxd_tlv_id_list { + uint16 num_ids; + uint16 ids[1]; +} BWL_POST_PACKED_STRUCT wl_proxd_tlv_id_list_t; + +typedef struct wl_proxd_session_id_list { + uint16 num_ids; + wl_proxd_session_id_t ids[1]; +} wl_proxd_session_id_list_t; + +/* tlvs returned for get_info on ftm method + * configuration: + * proxd flags + * event mask + * debug mask + * session defaults (session tlvs) + * status tlv - not supported for ftm method + * info tlv + */ +typedef struct wl_proxd_ftm_info { + wl_proxd_ftm_caps_t caps; + uint16 max_sessions; + uint16 num_sessions; + uint16 rx_max_burst; +} wl_proxd_ftm_info_t; + +/* tlvs returned for get_info on session + * session config (tlvs) + * session info tlv + */ +typedef struct wl_proxd_ftm_session_info { + uint16 sid; + uint8 bss_index; + uint8 pad; + struct ether_addr bssid; + wl_proxd_session_state_t state; + wl_proxd_status_t status; + uint16 burst_num; +} wl_proxd_ftm_session_info_t; + +typedef struct wl_proxd_ftm_session_status { + uint16 sid; + wl_proxd_session_state_t state; + wl_proxd_status_t status; + uint16 burst_num; +} wl_proxd_ftm_session_status_t; + +/* rrm range request */ +typedef struct wl_proxd_range_req { + uint16 num_repeat; + uint16 init_delay_range; /* in TUs */ + uint8 pad; + uint8 num_nbr; /* number of (possible) neighbors */ + nbr_element_t nbr[1]; +} wl_proxd_range_req_t; + +#define WL_PROXD_LCI_LAT_OFF 0 +#define WL_PROXD_LCI_LONG_OFF 5 +#define WL_PROXD_LCI_ALT_OFF 10 + +#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \ + unsigned _off = WL_PROXD_LCI_LAT_OFF; \ + _lat_err = (_lci)->data[(_off)] & 0x3f; \ + _lat = (_lci)->data[(_off)+1]; \ + _lat |= (_lci)->data[(_off)+2] << 8; \ + _lat |= (_lci)->data[_(_off)+3] << 16; \ + _lat |= (_lci)->data[(_off)+4] << 24; \ + _lat <<= 2; \ + _lat |= (_lci)->data[(_off)] >> 6; \ +} + +#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \ + unsigned _off = WL_PROXD_LCI_LONG_OFF; \ + _long_err = (_lci)->data[(_off)] & 0x3f; \ + _lcilong = (_lci)->data[(_off)+1]; \ + _lcilong |= (_lci)->data[(_off)+2] << 8; \ + _lcilong |= (_lci)->data[_(_off)+3] << 16; \ + _lcilong |= (_lci)->data[(_off)+4] << 24; \ + __lcilong <<= 2; \ + _lcilong |= (_lci)->data[(_off)] >> 6; \ +} + +#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \ + unsigned _off = WL_PROXD_LCI_ALT_OFF; \ + _alt_type = (_lci)->data[_off] & 0x0f; \ + _alt_err = (_lci)->data[(_off)] >> 4; \ + _alt_err |= ((_lci)->data[(_off)+1] & 0x03) << 4; \ + _alt = (_lci)->data[(_off)+2]; \ + _alt |= (_lci)->data[(_off)+3] << 8; \ + _alt |= (_lci)->data[_(_off)+4] << 16; \ + _alt <<= 6; \ + _alt |= (_lci)->data[(_off) + 1] >> 2; \ +} + +#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6) + +/* availability. 
advertising mechanism bss specific */ +/* availablity flags */ +enum { + WL_PROXD_AVAIL_NONE = 0, + WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001, + WL_PROXD_AVAIL_SCHEDULED = 0x0002 /* scheduled by proxd */ +}; +typedef int16 wl_proxd_avail_flags_t; + +/* time reference */ +enum { + WL_PROXD_TREF_NONE = 0, + WL_PROXD_TREF_DEV_TSF = 1, + WL_PROXD_TREF_NAN_DW = 2, + WL_PROXD_TREF_TBTT = 3, + WL_PROXD_TREF_MAX /* last entry */ +}; +typedef int16 wl_proxd_time_ref_t; + +/* proxd channel-time slot */ +typedef struct { + wl_proxd_intvl_t start; /* from ref */ + wl_proxd_intvl_t duration; /* from start */ + uint32 chanspec; +} wl_proxd_time_slot_t; + +typedef struct wl_proxd_avail24 { + wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /* for query only */ + uint16 num_slots; + wl_proxd_time_slot_t slots[1]; /* ROM compat - not used */ + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t ts0[1]; +} wl_proxd_avail24_t; +#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)]) +#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0) +#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0) +#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\ + WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0))) + +typedef struct wl_proxd_avail { + wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /* for query only */ + uint16 num_slots; + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t slots[1]; +} wl_proxd_avail_t; +#define WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)]) +#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots) + +#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0) +#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\ + WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0))) + +/* collect support TBD */ + +/* debugging */ +enum { + WL_PROXD_DEBUG_NONE = 0x00000000, + WL_PROXD_DEBUG_LOG = 0x00000001, + WL_PROXD_DEBUG_IOV = 0x00000002, + WL_PROXD_DEBUG_EVENT = 0x00000004, + WL_PROXD_DEBUG_SESSION = 0x00000008, + WL_PROXD_DEBUG_PROTO = 0x00000010, + WL_PROXD_DEBUG_SCHED = 0x00000020, + WL_PROXD_DEBUG_RANGING = 0x00000040, + WL_PROXD_DEBUG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_debug_mask_t; + +/* tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum { + WL_PROXD_TLV_ID_NONE = 0, + WL_PROXD_TLV_ID_METHOD = 1, + WL_PROXD_TLV_ID_FLAGS = 2, + WL_PROXD_TLV_ID_CHANSPEC = 3, /* note: uint32 */ + WL_PROXD_TLV_ID_TX_POWER = 4, + WL_PROXD_TLV_ID_RATESPEC = 5, + WL_PROXD_TLV_ID_BURST_DURATION = 6, /* intvl - length of burst */ + WL_PROXD_TLV_ID_BURST_PERIOD = 7, /* intvl - between bursts */ + WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /* intvl - between FTMs */ + WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /* uint16 - per burst */ + WL_PROXD_TLV_ID_NUM_BURST = 10, /* uint16 */ + WL_PROXD_TLV_ID_FTM_RETRIES = 11, /* uint16 at FTM level */ + WL_PROXD_TLV_ID_BSS_INDEX = 12, /* uint8 */ + WL_PROXD_TLV_ID_BSSID = 13, + WL_PROXD_TLV_ID_INIT_DELAY = 14, /* intvl - optional, non-standalone only */ + WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /* expect response within - intvl */ + WL_PROXD_TLV_ID_EVENT_MASK = 16, /* interested events - in/out */ + WL_PROXD_TLV_ID_FLAGS_MASK = 17, /* interested flags - in only */ + WL_PROXD_TLV_ID_PEER_MAC = 18, /* mac address of peer */ + 
WL_PROXD_TLV_ID_FTM_REQ = 19, /* dot11_ftm_req */ + WL_PROXD_TLV_ID_LCI_REQ = 20, + WL_PROXD_TLV_ID_LCI = 21, + WL_PROXD_TLV_ID_CIVIC_REQ = 22, + WL_PROXD_TLV_ID_CIVIC = 23, + WL_PROXD_TLV_ID_AVAIL24 = 24, /* ROM compatibility */ + WL_PROXD_TLV_ID_SESSION_FLAGS = 25, + WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /* in only */ + WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /* uint16 - limit bursts per session */ + WL_PROXD_TLV_ID_RANGING_INFO = 28, /* ranging info */ + WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /* uint16 */ + WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /* uint16, in only */ + WL_PROXD_TLV_ID_NAN_MAP_ID = 31, + WL_PROXD_TLV_ID_DEV_ADDR = 32, + WL_PROXD_TLV_ID_AVAIL = 33, /* wl_proxd_avail_t */ + WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ + WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + + /* output - 512 + x */ + WL_PROXD_TLV_ID_STATUS = 512, + WL_PROXD_TLV_ID_COUNTERS = 513, + WL_PROXD_TLV_ID_INFO = 514, + WL_PROXD_TLV_ID_RTT_RESULT = 515, + WL_PROXD_TLV_ID_AOA_RESULT = 516, + WL_PROXD_TLV_ID_SESSION_INFO = 517, + WL_PROXD_TLV_ID_SESSION_STATUS = 518, + WL_PROXD_TLV_ID_SESSION_ID_LIST = 519, + + /* debug tlvs can be added starting 1024 */ + WL_PROXD_TLV_ID_DEBUG_MASK = 1024, + WL_PROXD_TLV_ID_COLLECT = 1025, /* output only */ + WL_PROXD_TLV_ID_STRBUF = 1026, + + WL_PROXD_TLV_ID_MAX +}; + +typedef struct wl_proxd_tlv { + uint16 id; + uint16 len; + uint8 data[1]; +} wl_proxd_tlv_t; + +/* proxd iovar - applies to proxd, method or session */ +typedef struct wl_proxd_iov { + uint16 version; + uint16 len; + wl_proxd_cmd_t cmd; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 pad[2]; + wl_proxd_tlv_t tlvs[1]; /* variable */ +} wl_proxd_iov_t; + +#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs) + +/* The following event definitions may move to bcmevent.h, but sharing proxd types + * across needs more invasive changes unrelated to proxd + */ +enum { + WL_PROXD_EVENT_NONE = 0, /* not an event, reserved */ + WL_PROXD_EVENT_SESSION_CREATE = 1, + WL_PROXD_EVENT_SESSION_START = 2, + WL_PROXD_EVENT_FTM_REQ = 3, + WL_PROXD_EVENT_BURST_START = 4, + WL_PROXD_EVENT_BURST_END = 5, + WL_PROXD_EVENT_SESSION_END = 6, + WL_PROXD_EVENT_SESSION_RESTART = 7, + WL_PROXD_EVENT_BURST_RESCHED = 8, /* burst rescheduled - e.g. 
partial TSF */ + WL_PROXD_EVENT_SESSION_DESTROY = 9, + WL_PROXD_EVENT_RANGE_REQ = 10, + WL_PROXD_EVENT_FTM_FRAME = 11, + WL_PROXD_EVENT_DELAY = 12, + WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /* (target) rx initiator-report */ + WL_PROXD_EVENT_RANGING = 14, + WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */ + WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */ + + WL_PROXD_EVENT_MAX +}; +typedef int16 wl_proxd_event_type_t; + +/* proxd event mask - upto 32 events for now */ +typedef uint32 wl_proxd_event_mask_t; + +#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe +#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type)) +#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\ + ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0) + +/* proxd event - applies to proxd, method or session */ +typedef struct wl_proxd_event { + uint16 version; + uint16 len; + wl_proxd_event_type_t type; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 pad[2]; + wl_proxd_tlv_t tlvs[1]; /* variable */ +} wl_proxd_event_t; + +enum { + WL_PROXD_RANGING_STATE_NONE = 0, + WL_PROXD_RANGING_STATE_NOTSTARTED = 1, + WL_PROXD_RANGING_STATE_INPROGRESS = 2, + WL_PROXD_RANGING_STATE_DONE = 3 +}; +typedef int16 wl_proxd_ranging_state_t; + +/* proxd ranging flags */ +enum { + WL_PROXD_RANGING_FLAG_NONE = 0x0000, /* no flags */ + WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001, + WL_PROXD_RANGING_FLAG_ALL = 0xffff +}; +typedef uint16 wl_proxd_ranging_flags_t; + +struct wl_proxd_ranging_info { + wl_proxd_status_t status; + wl_proxd_ranging_state_t state; + wl_proxd_ranging_flags_t flags; + uint16 num_sids; + uint16 num_done; +}; +typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t; +#include +/* end proxd definitions */ + +/* require strict packing */ +#include +/* Data returned by the bssload_report iovar. + * This is also the WLC_E_BSS_LOAD event data. + */ +typedef BWL_PRE_PACKED_STRUCT struct wl_bssload { + uint16 sta_count; /* station count */ + uint16 aac; /* available admission capacity */ + uint8 chan_util; /* channel utilization */ +} BWL_POST_PACKED_STRUCT wl_bssload_t; + +/* Maximum number of configurable BSS Load levels. The number of BSS Load + * ranges is always 1 more than the number of configured levels. eg. if + * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges: + * 0-10, 11-20, 21-30, 31-255. A WLC_E_BSS_LOAD event is generated each time + * the utilization level crosses into another range, subject to the rate limit. + */ +#define MAX_BSSLOAD_LEVELS 8 +#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1) + +/* BSS Load event notification configuration. */ +typedef struct wl_bssload_cfg { + uint32 rate_limit_msec; /* # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). + */ + uint8 num_util_levels; /* Number of entries in util_levels[] below */ + uint8 util_levels[MAX_BSSLOAD_LEVELS]; + /* Variable number of BSS Load utilization levels in + * low to high order. An event will be posted each time + * a received beacon's BSS Load IE channel utilization + * value crosses a level. 
+ */ +} wl_bssload_cfg_t; + +/* Multiple roaming profile support */ +#define WL_MAX_ROAM_PROF_BRACKETS 4 + +#define WL_MAX_ROAM_PROF_VER 1 + +#define WL_ROAM_PROF_NONE (0 << 0) +#define WL_ROAM_PROF_LAZY (1 << 0) +#define WL_ROAM_PROF_NO_CI (1 << 1) +#define WL_ROAM_PROF_SUSPEND (1 << 2) +#define WL_ROAM_PROF_SYNC_DTIM (1 << 6) +#define WL_ROAM_PROF_DEFAULT (1 << 7) /* backward compatible single default profile */ + +#define WL_FACTOR_TABLE_MAX_LIMIT 5 + +typedef struct wl_roam_prof { + int8 roam_flags; /* bit flags */ + int8 roam_trigger; /* RSSI trigger level per profile/RSSI bracket */ + int8 rssi_lower; + int8 roam_delta; + int8 rssi_boost_thresh; /* Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /* RSSI boost for AP in the other band */ + uint16 nfscan; /* number of full scans to start with */ + uint16 fullscan_period; + uint16 init_scan_period; + uint16 backoff_multiplier; + uint16 max_scan_period; + uint8 channel_usage; + uint8 cu_avg_calc_dur; +} wl_roam_prof_t; + +typedef struct wl_roam_prof_band { + uint32 band; /* Must be just one band */ + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_t; + +/* Data structures for Interface Create/Remove */ + +#define WL_INTERFACE_CREATE_VER (0) + +/* + * The flags field of the wl_interface_create is designed to be + * a Bit Mask. As of now only Bit 0 and Bit 1 are used as mentioned below. + * The rest of the bits can be used, in case we have to provide + * more information to the dongle + */ + +/* + * Bit 0 of flags field is used to inform whether the interface requested to + * be created is STA or AP. + * 0 - Create a STA interface + * 1 - Create an AP interface + */ +#define WL_INTERFACE_CREATE_STA (0 << 0) +#define WL_INTERFACE_CREATE_AP (1 << 0) + +/* + * Bit 1 of flags field is used to inform whether MAC is present in the + * data structure or not. 
+ * 0 - Ignore mac_addr field + * 1 - Use the mac_addr field + */ +#define WL_INTERFACE_MAC_DONT_USE (0 << 1) +#define WL_INTERFACE_MAC_USE (1 << 1) + +typedef struct wl_interface_create { + uint16 ver; /* version of this struct */ + uint32 flags; /* flags that defines the operation */ + struct ether_addr mac_addr; /* Optional Mac address */ +} wl_interface_create_t; + +typedef struct wl_interface_info { + uint16 ver; /* version of this struct */ + struct ether_addr mac_addr; /* MAC address of the interface */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */ + uint8 bsscfgidx; /* source bsscfg index */ +} wl_interface_info_t; + +/* no default structure packing */ +#include + +#define TBOW_MAX_SSID_LEN 32 +#define TBOW_MAX_PASSPHRASE_LEN 63 + +#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */ +typedef struct tbow_setup_netinfo { + uint32 version; + uint8 opmode; + uint8 pad; + uint8 macaddr[ETHER_ADDR_LEN]; + uint32 ssid_len; + uint8 ssid[TBOW_MAX_SSID_LEN]; + uint8 passphrase_len; + uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN]; + chanspec_t chanspec; +} tbow_setup_netinfo_t; + +typedef enum tbow_ho_opmode { + TBOW_HO_MODE_START_GO = 0, + TBOW_HO_MODE_START_STA, + TBOW_HO_MODE_START_GC, + TBOW_HO_MODE_TEST_GO, + TBOW_HO_MODE_STOP_GO = 0x10, + TBOW_HO_MODE_STOP_STA, + TBOW_HO_MODE_STOP_GC, + TBOW_HO_MODE_TEARDOWN +} tbow_ho_opmode_t; + +/* Beacon trim feature statistics */ +/* Configuration params */ +#define M_BCNTRIM_N (0) /* Enable/Disable Beacon Trim */ +#define M_BCNTRIM_TIMEND (1) /* Waiting time for TIM IE to end */ +#define M_BCNTRIM_TSFTLRN (2) /* TSF tolerance value (usecs) */ +/* PSM internal use */ +#define M_BCNTRIM_PREVBCNLEN (3) /* Beacon length excluding the TIM IE */ +#define M_BCNTRIM_N_COUNTER (4) /* PSM's local beacon trim counter */ +#define M_BCNTRIM_STATE (5) /* PSM's Beacon trim status register */ +#define M_BCNTRIM_TIMLEN (6) /* TIM IE Length */ +#define M_BCNTRIM_BMPCTL (7) /* Bitmap control word */ +#define M_BCNTRIM_TSF_L (8) /* Lower TSF word */ +#define M_BCNTRIM_TSF_ML (9) /* Lower middle TSF word */ +#define M_BCNTRIM_RSSI (10) /* Partial beacon RSSI */ +#define M_BCNTRIM_CHANNEL (11) /* Partial beacon channel */ +/* Trimming Counters */ +#define M_BCNTRIM_SBCNRXED (12) /* Self-BSSID beacon received */ +#define M_BCNTRIM_CANTRIM (13) /* Num of beacons which can be trimmed */ +#define M_BCNTRIM_TRIMMED (14) /* # beacons which were trimmed */ +#define M_BCNTRIM_BCNLENCNG (15) /* # beacons trimmed due to length change */ +#define M_BCNTRIM_TSFADJ (16) /* # beacons not trimmed due to large TSF delta */ +#define M_BCNTRIM_TIMNOTFOUND (17) /* # beacons not trimmed due to TIM missing */ +#define M_RXTSFTMRVAL_WD0 (18) +#define M_RXTSFTMRVAL_WD1 (19) +#define M_RXTSFTMRVAL_WD2 (20) +#define M_RXTSFTMRVAL_WD3 (21) +#define BCNTRIM_STATS_NUMPARAMS (22) /* 16 bit words */ + +#define TXPWRCAP_MAX_NUM_CORES 8 +#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2) + +typedef struct wl_txpwrcap_tbl { + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + /* Stores values for valid antennas */ + int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ + int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ +} wl_txpwrcap_tbl_t; + +/* -------------- dynamic BTCOEX --------------- */ +/* require strict packing */ +#include + +#define DCTL_TROWS 2 /* currently practical number of rows */ +#define DCTL_TROWS_MAX 4 /* 2 extra rows RFU */ +/* DYNCTL profile flags */ +#define DCTL_FLAGS_DYNCTL (1 << 0) /* 1 - enabled, 0 
- legacy only */ +#define DCTL_FLAGS_DESENSE (1 << 1) /* auto desense is enabled */ +#define DCTL_FLAGS_MSWITCH (1 << 2) /* mode switching is enabled */ +/* for now AGG on/off is handled separately */ +#define DCTL_FLAGS_TX_AGG_OFF (1 << 3) /* TBD: allow TX agg Off */ +#define DCTL_FLAGS_RX_AGG_OFF (1 << 4) /* TBD: allow RX agg Off */ +/* used for dry run testing only */ +#define DCTL_FLAGS_DRYRUN (1 << 7) /* Eenables dynctl dry run mode */ +#define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0) +#define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0) +#define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0) +/* desense level currently in use */ +#define DESENSE_OFF 0 +#define DFLT_DESENSE_MID 12 +#define DFLT_DESENSE_HIGH 2 + +/* + * dynctl data points(a set of btpwr & wlrssi thresholds) + * for mode & desense switching + */ +typedef struct btc_thr_data { + int8 mode; /* used by desense sw */ + int8 bt_pwr; /* BT tx power threshold */ + int8 bt_rssi; /* BT rssi threshold */ + /* wl rssi range when mode or desense change may be needed */ + int8 wl_rssi_high; + int8 wl_rssi_low; +} btc_thr_data_t; + +/* dynctl. profile data structure */ +#define DCTL_PROFILE_VER 0x01 +typedef BWL_PRE_PACKED_STRUCT struct dctl_prof { + uint8 version; /* dynctl profile version */ + /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */ + uint8 flags; /* bit[6:3] reserved, bit7 - Dryrun (sim) - On */ + /* wl desense levels to apply */ + uint8 dflt_dsns_level; + uint8 low_dsns_level; + uint8 mid_dsns_level; + uint8 high_dsns_level; + /* mode switching hysteresis in dBm */ + int8 msw_btrssi_hyster; + /* default btcoex mode */ + uint8 default_btc_mode; + /* num of active rows in mode switching table */ + uint8 msw_rows; + /* num of rows in desense table */ + uint8 dsns_rows; + /* dynctl mode switching data table */ + btc_thr_data_t msw_data[DCTL_TROWS_MAX]; + /* dynctl desense switching data table */ + btc_thr_data_t dsns_data[DCTL_TROWS_MAX]; +} BWL_POST_PACKED_STRUCT dctl_prof_t; + +/* dynctl status info */ +typedef BWL_PRE_PACKED_STRUCT struct dynctl_status { + bool sim_on; /* true if simulation is On */ + uint16 bt_pwr_shm; /* BT per/task power as read from ucode */ + int8 bt_pwr; /* BT pwr extracted & converted to dBm */ + int8 bt_rssi; /* BT rssi in dBm */ + int8 wl_rssi; /* last wl rssi reading used by btcoex */ + uint8 dsns_level; /* current desense level */ + uint8 btc_mode; /* current btcoex mode */ + /* add more status items if needed, pad to 4 BB if needed */ +} BWL_POST_PACKED_STRUCT dynctl_status_t; + +/* dynctl simulation (dryrun data) */ +typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim { + bool sim_on; /* simulation mode on/off */ + int8 btpwr; /* simulated BT power in dBm */ + int8 btrssi; /* simulated BT rssi in dBm */ + int8 wlrssi; /* simulated WL rssi in dBm */ +} BWL_POST_PACKED_STRUCT dynctl_sim_t; +/* no default structure packing */ +#include + +/* PTK key maintained per SCB */ +#define RSN_TEMP_ENCR_KEY_LEN 16 +typedef struct wpa_ptk { + uint8 kck[RSN_KCK_LENGTH]; /* EAPOL-Key Key Confirmation Key (KCK) */ + uint8 kek[RSN_KEK_LENGTH]; /* EAPOL-Key Key Encryption Key (KEK) */ + uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 1 (TK1) */ + uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 2 (TK2) */ +} wpa_ptk_t; + +/* GTK key maintained per SCB */ +typedef struct wpa_gtk { + uint32 idx; + uint32 key_len; + uint8 key[DOT11_MAX_KEY_SIZE]; +} wpa_gtk_t; + +/* FBT Auth Response Data structure */ +typedef struct wlc_fbt_auth_resp 
{ + uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 pad[2]; + uint8 pmk_r1_name[WPA2_PMKID_LEN]; + wpa_ptk_t ptk; /* pairwise key */ + wpa_gtk_t gtk; /* group key */ + uint32 ie_len; + uint8 status; /* Status of parsing FBT authentication + Request in application + */ + uint8 ies[1]; /* IEs contains MDIE, RSNIE, + FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID) + */ +} wlc_fbt_auth_resp_t; + +/* FBT Action Response frame */ +typedef struct wlc_fbt_action_resp { + uint16 version; /* structure version */ + uint16 length; /* length of structure */ + uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 data_len; /* len of ie from Category */ + uint8 data[1]; /* data contains category, action, sta address, target ap, + status code,fbt response frame body + */ +} wlc_fbt_action_resp_t; + +#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16 +#define MACDBG_PMAC_OBJ_TYPE_LEN 8 + +typedef struct _wl_macdbg_pmac_param_t { + char type[MACDBG_PMAC_OBJ_TYPE_LEN]; + uint8 step; + uint8 num; + uint32 bitmap; + bool addr_raw; + uint8 addr_num; + uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM]; +} wl_macdbg_pmac_param_t; + +/* IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */ +typedef struct svmp_mem { + uint32 addr; /* offset to read svmp memory from vasip base address */ + uint16 len; /* length in count of uint16's */ + uint16 val; /* set the range of addr/len with a value */ +} svmp_mem_t; + +#define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */ + +/* Definitions of different NAN Bands */ +enum { /* mode selection for reading/writing tx iqlo cal coefficients */ + NAN_BAND_AUTO, + NAN_BAND_B, + NAN_BAND_A, + NAN_BAND_INVALID = 0xFF +}; + +#if defined(WL_LINKSTAT) +typedef struct { + uint32 preamble; + uint32 nss; + uint32 bw; + uint32 rateMcsIdx; + uint32 reserved; + uint32 bitrate; +} wifi_rate; + +typedef struct { + uint16 version; + uint16 length; + uint32 tx_mpdu; + uint32 rx_mpdu; + uint32 mpdu_lost; + uint32 retries; + uint32 retries_short; + uint32 retries_long; + wifi_rate rate; +} wifi_rate_stat_t; + +typedef int32 wifi_radio; + +typedef struct { + uint16 version; + uint16 length; + wifi_radio radio; + uint32 on_time; + uint32 tx_time; + uint32 rx_time; + uint32 on_time_scan; + uint32 on_time_nbd; + uint32 on_time_gscan; + uint32 on_time_roam_scan; + uint32 on_time_pno_scan; + uint32 on_time_hs20; + uint32 num_channels; + uint8 channels[1]; +} wifi_radio_stat; +#endif /* WL_LINKSTAT */ + +#ifdef WL11ULB +/* ULB Mode configured via "ulb_mode" IOVAR */ +enum { + ULB_MODE_DISABLED = 0, + ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */ + ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */ + /* Add all other enums before this */ + MAX_SUPP_ULB_MODES +}; + +/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only. 
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth' + * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in + * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute) + */ +typedef enum { + ULB_BW_DISABLED = 0, + ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */ + ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */ + ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */ + /* Add all other enums before this */ + MAX_SUPP_ULB_BW +} ulb_bw_type_t; +#endif /* WL11ULB */ + +#if defined(WLRCC) +#define MAX_ROAM_CHANNEL 20 + +typedef struct { + int n; + chanspec_t channels[MAX_ROAM_CHANNEL]; +} wl_roam_channel_list_t; +#endif + + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up + * Set RA rate limit interval value(%) + */ +typedef struct nd_ra_ol_limits { + uint16 version; /* version of the iovar buffer */ + uint16 type; /* type of data provided */ + uint16 length; /* length of the entire structure */ + uint16 pad1; /* pad union to 4 byte boundary */ + union { + struct { + uint16 min_time; /* seconds, min time for RA offload hold */ + uint16 lifetime_percent; + /* percent, lifetime percentage for offload hold time */ + } lifetime_relative; + struct { + uint16 hold_time; /* seconds, RA offload hold time */ + uint16 pad2; /* unused */ + } fixed; + } limits; +} nd_ra_ol_limits_t; + +#define ND_RA_OL_LIMITS_VER 1 + +/* nd_ra_ol_limits sub-types */ +#define ND_RA_OL_LIMITS_REL_TYPE 0 /* relative, percent of RA lifetime */ +#define ND_RA_OL_LIMITS_FIXED_TYPE 1 /* fixed time */ + +/* buffer lengths for the different nd_ra_ol_limits types */ +#define ND_RA_OL_LIMITS_REL_TYPE_LEN 12 +#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10 + +#define ND_RA_OL_SET "SET" +#define ND_RA_OL_GET "GET" +#define ND_PARAM_SIZE 50 +#define ND_VALUE_SIZE 5 +#define ND_PARAMS_DELIMETER " " +#define ND_PARAM_VALUE_DELLIMETER '=' +#define ND_LIMIT_STR_FMT ("%50s %50s") + +#define ND_RA_TYPE "TYPE" +#define ND_RA_MIN_TIME "MIN" +#define ND_RA_PER "PER" +#define ND_RA_HOLD "HOLD" + +/* + * Temperature Throttling control mode + */ +typedef struct wl_temp_control { + bool enable; + uint16 control_bit; +} wl_temp_control_t; + +#endif /* _wlioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h b/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h new file mode 100644 index 000000000000..c3fe428580b4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h @@ -0,0 +1,53 @@ +/* + * Custom OID/ioctl related helper functions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * <> + * + * $Id: wlioctl_utils.h 555740 2015-05-11 10:16:23Z $ + */ + +#ifndef _wlioctl_utils_h_ +#define _wlioctl_utils_h_ + +#include + +#ifndef BCMDRIVER +#define CCA_THRESH_MILLI 14 +#define CCA_THRESH_INTERFERE 6 + +extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input, + cca_congest_channel_req_t *avg, bool percent); + +extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans, + uint flags, chanspec_t *answer); +#endif /* BCMDRIVER */ + +extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, + int buflen, uint32 corerev); + +/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */ +#define GET_WLCCNT_FROM_CNTBUF(cntbuf) \ + bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data, \ + ((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \ + NULL, BCM_XTLV_OPTION_ALIGN32) + +#endif /* _wlioctl_utils_h_ */ diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c new file mode 100644 index 000000000000..f2abdb43b0e6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/linux_osl.c @@ -0,0 +1,2652 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linux_osl.c 602478 2015-11-26 04:46:12Z $ + */ + +#define LINUX_PORT + +#include +#include +#include +#include + + +#if !defined(STBLINUX) +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) +#include +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#endif /* STBLINUX */ + +#include + +#include +#include +#include +#include + + +#ifdef BCM_SECURE_DMA +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(__ARM_ARCH_7A__) +#include +#include +#endif +#include +#endif /* BCM_SECURE_DMA */ + +#include + + +#ifdef BCM_OBJECT_TRACE +#include +#endif /* BCM_OBJECT_TRACE */ + +#define PCI_CFG_RETRY 10 + +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ +#define DUMPBUFSZ 1024 + +/* dependancy check */ +#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only" +#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */ + +#ifdef CONFIG_DHD_USE_STATIC_BUF +#ifdef DHD_USE_STATIC_CTRLBUF +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define PREALLOC_FREE_MAGIC 0xFEDC +#define PREALLOC_USED_MAGIC 0xFCDE +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_BUF_MAX_NUM 16 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) +#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) + +typedef struct bcm_static_buf { + struct semaphore static_sem; + unsigned char *buf_ptr; + unsigned char buf_use[STATIC_BUF_MAX_NUM]; +} bcm_static_buf_t; + +static bcm_static_buf_t *bcm_static_buf = 0; + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#elif defined(ENHANCED_STATIC_BUF) +#define STATIC_PKT_4PAGE_NUM 1 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE +#else +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_1PAGE_NUM 0 +#define STATIC_PKT_2PAGE_NUM 64 +#else +#define STATIC_PKT_1PAGE_NUM 8 +#define STATIC_PKT_2PAGE_NUM 8 +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_PKT_1_2PAGE_NUM \ + ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM)) +#define STATIC_PKT_MAX_NUM \ + ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM)) + +typedef struct bcm_static_pkt { +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; + unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM]; + spinlock_t osl_pkt_lock; + uint32 last_allocated_index; +#else + struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM]; + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; +#ifdef ENHANCED_STATIC_BUF + struct sk_buff *skb_16k; +#endif /* ENHANCED_STATIC_BUF */ + struct semaphore osl_pkt_sem; +#endif /* DHD_USE_STATIC_CTRLBUF */ + unsigned char pkt_use[STATIC_PKT_MAX_NUM]; +} bcm_static_pkt_t; + +static bcm_static_pkt_t *bcm_static_skb = 0; + +void* wifi_platform_prealloc(void *adapter, int section, unsigned long size); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +typedef struct bcm_mem_link { + struct bcm_mem_link *prev; + struct bcm_mem_link *next; + uint size; + int line; + void *osh; + char file[BCM_MEM_FILENAME_LEN]; +} bcm_mem_link_t; + +struct osl_cmn_info { + atomic_t malloced; + atomic_t pktalloced; /* Number of allocated packet buffers */ + spinlock_t dbgmem_lock; + bcm_mem_link_t *dbgmem_list; + spinlock_t pktalloc_lock; + atomic_t refcount; /* Number of references to this shared structure. 
*/ +}; +typedef struct osl_cmn_info osl_cmn_t; + +struct osl_info { + osl_pubinfo_t pub; + uint32 flags; /* If specific cases to be handled in the OSL */ +#ifdef CTFPOOL + ctfpool_t *ctfpool; +#endif /* CTFPOOL */ + uint magic; + void *pdev; + uint failed; + uint bustype; + osl_cmn_t *cmn; /* Common OSL related data shred between two OSH's */ + + void *bus_handle; +#ifdef BCMDBG_CTRACE + spinlock_t ctrace_lock; + struct list_head ctrace_list; + int ctrace_num; +#endif /* BCMDBG_CTRACE */ +#ifdef BCM_SECURE_DMA + struct cma_dev *cma; + struct sec_mem_elem *sec_list_512; + struct sec_mem_elem *sec_list_base_512; + struct sec_mem_elem *sec_list_2048; + struct sec_mem_elem *sec_list_base_2048; + struct sec_mem_elem *sec_list_4096; + struct sec_mem_elem *sec_list_base_4096; + phys_addr_t contig_base; + void *contig_base_va; + phys_addr_t contig_base_alloc; + void *contig_base_alloc_va; + phys_addr_t contig_base_alloc_coherent; + void *contig_base_alloc_coherent_va; + phys_addr_t contig_delta_va_pa; + struct { + phys_addr_t pa; + void *va; + bool avail; + } sec_cma_coherent[SEC_CMA_COHERENT_MAX]; + +#endif /* BCM_SECURE_DMA */ +}; +#ifdef BCM_SECURE_DMA +phys_addr_t g_contig_delta_va_pa; +static void osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn); +static int osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn); +static void osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn); +static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, + bool iscache, bool isdecr); +static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size); +static void osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, + sec_mem_elem_t **list); +static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, + void *sec_list_base); +static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, + int direction, struct sec_cma_info *ptr_cma_info, uint offset); +static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem); +static void osl_sec_dma_init_consistent(osl_t *osh); +static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, + ulong *pap); +static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); +#endif /* BCM_SECURE_DMA */ + +#ifdef BCM_OBJECT_TRACE +/* don't clear the first 4 byte that is the pkt sn */ +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) +#else +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) +#endif /* BCM_OBJECT_TRACE */ + +/* PCMCIA attribute space access macros */ + +/* Global ASSERT type flag */ +uint32 g_assert_type = 1; +module_param(g_assert_type, int, 0); + +static int16 linuxbcmerrormap[] = +{ 0, /* 0 */ + -EINVAL, /* BCME_ERROR */ + -EINVAL, /* BCME_BADARG */ + -EINVAL, /* BCME_BADOPTION */ + -EINVAL, /* BCME_NOTUP */ + -EINVAL, /* BCME_NOTDOWN */ + 
-EINVAL, /* BCME_NOTAP */ + -EINVAL, /* BCME_NOTSTA */ + -EINVAL, /* BCME_BADKEYIDX */ + -EINVAL, /* BCME_RADIOOFF */ + -EINVAL, /* BCME_NOTBANDLOCKED */ + -EINVAL, /* BCME_NOCLK */ + -EINVAL, /* BCME_BADRATESET */ + -EINVAL, /* BCME_BADBAND */ + -E2BIG, /* BCME_BUFTOOSHORT */ + -E2BIG, /* BCME_BUFTOOLONG */ + -EBUSY, /* BCME_BUSY */ + -EINVAL, /* BCME_NOTASSOCIATED */ + -EINVAL, /* BCME_BADSSIDLEN */ + -EINVAL, /* BCME_OUTOFRANGECHAN */ + -EINVAL, /* BCME_BADCHAN */ + -EFAULT, /* BCME_BADADDR */ + -ENOMEM, /* BCME_NORESOURCE */ + -EOPNOTSUPP, /* BCME_UNSUPPORTED */ + -EMSGSIZE, /* BCME_BADLENGTH */ + -EINVAL, /* BCME_NOTREADY */ + -EPERM, /* BCME_EPERM */ + -ENOMEM, /* BCME_NOMEM */ + -EINVAL, /* BCME_ASSOCIATED */ + -ERANGE, /* BCME_RANGE */ + -EINVAL, /* BCME_NOTFOUND */ + -EINVAL, /* BCME_WME_NOT_ENABLED */ + -EINVAL, /* BCME_TSPEC_NOTFOUND */ + -EINVAL, /* BCME_ACM_NOTSUPPORTED */ + -EINVAL, /* BCME_NOT_WME_ASSOCIATION */ + -EIO, /* BCME_SDIO_ERROR */ + -ENODEV, /* BCME_DONGLE_DOWN */ + -EINVAL, /* BCME_VERSION */ + -EIO, /* BCME_TXFAIL */ + -EIO, /* BCME_RXFAIL */ + -ENODEV, /* BCME_NODEVICE */ + -EINVAL, /* BCME_NMODE_DISABLED */ + -ENODATA, /* BCME_NONRESIDENT */ + -EINVAL, /* BCME_SCANREJECT */ + -EINVAL, /* BCME_USAGE_ERROR */ + -EIO, /* BCME_IOCTL_ERROR */ + -EIO, /* BCME_SERIAL_PORT_ERR */ + -EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */ + -EIO, /* BCME_DECERR */ + -EIO, /* BCME_ENCERR */ + -EIO, /* BCME_MICERR */ + -ERANGE, /* BCME_REPLAY */ + -EINVAL, /* BCME_IE_NOTFOUND */ + -EINVAL, /* BCME_DATA_NOTFOUND */ + +/* When an new error code is added to bcmutils.h, add os + * specific error translation here as well + */ +/* check if BCME_LAST changed since the last time this function was updated */ +#if BCME_LAST != -53 +#error "You need to add a OS error translation in the linuxbcmerrormap \ + for new error code defined in bcmutils.h" +#endif +}; +uint lmtest = FALSE; + +/* translate bcmerrors into linux errors */ +int +osl_error(int bcmerror) +{ + if (bcmerror > 0) + bcmerror = 0; + else if (bcmerror < BCME_LAST) + bcmerror = BCME_ERROR; + + /* Array bounds covered by ASSERT in osl_attach */ + return linuxbcmerrormap[-bcmerror]; +} +#ifdef SHARED_OSL_CMN +osl_t * +osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn) +{ +#else +osl_t * +osl_attach(void *pdev, uint bustype, bool pkttag) +{ + void **osl_cmn = NULL; +#endif /* SHARED_OSL_CMN */ + osl_t *osh; + gfp_t flags; + + flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; + if (!(osh = kmalloc(sizeof(osl_t), flags))) + return osh; + + ASSERT(osh); + + bzero(osh, sizeof(osl_t)); + + if (osl_cmn == NULL || *osl_cmn == NULL) { + if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) { + kfree(osh); + return NULL; + } + bzero(osh->cmn, sizeof(osl_cmn_t)); + if (osl_cmn) + *osl_cmn = osh->cmn; + atomic_set(&osh->cmn->malloced, 0); + osh->cmn->dbgmem_list = NULL; + spin_lock_init(&(osh->cmn->dbgmem_lock)); + + spin_lock_init(&(osh->cmn->pktalloc_lock)); + + } else { + osh->cmn = *osl_cmn; + } + atomic_add(1, &osh->cmn->refcount); + + bcm_object_trace_init(); + + /* Check that error map has the right number of entries in it */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); + + osh->failed = 0; + osh->pdev = pdev; + osh->pub.pkttag = pkttag; + osh->bustype = bustype; + osh->magic = OS_HANDLE_MAGIC; +#ifdef BCM_SECURE_DMA + + osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); + + osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh, + phys_to_page((u32)osh->contig_base_alloc), + CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE); + + osh->contig_base_alloc_coherent = osh->contig_base_alloc; + osl_sec_dma_init_consistent(osh); + + osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK; + + osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh, + phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE); + osh->contig_base_va = osh->contig_base_alloc_va; + + /* + * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512); + * osh->sec_list_base_512 = osh->sec_list_512; + * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048); + * osh->sec_list_base_2048 = osh->sec_list_2048; + */ + osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096); + osh->sec_list_base_4096 = osh->sec_list_4096; + +#endif /* BCM_SECURE_DMA */ + + switch (bustype) { + case PCI_BUS: + case SI_BUS: + case PCMCIA_BUS: + osh->pub.mmbus = TRUE; + break; + case JTAG_BUS: + case SDIO_BUS: + case USB_BUS: + case SPI_BUS: + case RPC_BUS: + osh->pub.mmbus = FALSE; + break; + default: + ASSERT(FALSE); + break; + } + +#ifdef BCMDBG_CTRACE + spin_lock_init(&osh->ctrace_lock); + INIT_LIST_HEAD(&osh->ctrace_list); + osh->ctrace_num = 0; +#endif /* BCMDBG_CTRACE */ + + + return osh; +} + +int osl_static_mem_init(osl_t *osh, void *adapter) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (!bcm_static_buf && adapter) { + if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter, + 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { + printk("can not alloc static buf!\n"); + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } else { + printk("alloc static buf at %p!\n", bcm_static_buf); + } + + sema_init(&bcm_static_buf->static_sem, 1); + + bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; + } + +#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF) + if (!bcm_static_skb && adapter) { + int i; + void *skb_buff_ptr = 0; + bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); + skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0); + if (!skb_buff_ptr) { + printk("cannot alloc static buf!\n"); + bcm_static_buf = NULL; + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } + + bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * + (STATIC_PKT_MAX_NUM)); + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + bcm_static_skb->pkt_use[i] = 0; + } + +#ifdef 
DHD_USE_STATIC_CTRLBUF + spin_lock_init(&bcm_static_skb->osl_pkt_lock); + bcm_static_skb->last_allocated_index = 0; +#else + sema_init(&bcm_static_skb->osl_pkt_sem, 1); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + return 0; +} + +void osl_set_bus_handle(osl_t *osh, void *bus_handle) +{ + osh->bus_handle = bus_handle; +} + +void* osl_get_bus_handle(osl_t *osh) +{ + return osh->bus_handle; +} + +void +osl_detach(osl_t *osh) +{ + if (osh == NULL) + return; + +#ifdef BCM_SECURE_DMA + osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096); + osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK); +#endif /* BCM_SECURE_DMA */ + + + bcm_object_trace_deinit(); + + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(1, &osh->cmn->refcount); + if (atomic_read(&osh->cmn->refcount) == 0) { + kfree(osh->cmn); + } + kfree(osh); +} + +int osl_static_mem_deinit(osl_t *osh, void *adapter) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } +#ifdef BCMSDIO + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif /* BCMSDIO */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + return 0; +} + +static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len) +{ + struct sk_buff *skb; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) + gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; +#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA) + flags |= GFP_ATOMIC; +#endif +#ifdef DHD_USE_ATOMIC_PKTGET + flags = GFP_ATOMIC; +#endif /* DHD_USE_ATOMIC_PKTGET */ + skb = __dev_alloc_skb(len, flags); +#else + skb = dev_alloc_skb(len); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */ + return skb; +} + +#ifdef CTFPOOL + +#ifdef CTFPOOL_SPINLOCK +#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags) +#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags) +#else +#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock) +#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock) +#endif /* CTFPOOL_SPINLOCK */ +/* + * Allocate and add an object to packet pool. 
+ */ +void * +osl_ctfpool_add(osl_t *osh) +{ + struct sk_buff *skb; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return NULL; + + CTFPOOL_LOCK(osh->ctfpool, flags); + ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj); + + /* No need to allocate more objects */ + if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) { + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + /* Allocate a new skb and add it to the ctfpool */ + skb = osl_alloc_skb(osh, osh->ctfpool->obj_size); + if (skb == NULL) { + printf("%s: skb alloc of len %d failed\n", __FUNCTION__, + osh->ctfpool->obj_size); + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + /* Add to ctfpool */ + skb->next = (struct sk_buff *)osh->ctfpool->head; + osh->ctfpool->head = skb; + osh->ctfpool->fast_frees++; + osh->ctfpool->curr_obj++; + + /* Hijack a skb member to store ptr to ctfpool */ + CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool; + + /* Use bit flag to indicate skb from fast ctfpool */ + PKTFAST(osh, skb) = FASTBUF; + + CTFPOOL_UNLOCK(osh->ctfpool, flags); + + return skb; +} + +/* + * Add new objects to the pool. + */ +void +osl_ctfpool_replenish(osl_t *osh, uint thresh) +{ + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + + /* Do nothing if no refills are required */ + while ((osh->ctfpool->refills > 0) && (thresh--)) { + osl_ctfpool_add(osh); + osh->ctfpool->refills--; + } +} + +/* + * Initialize the packet pool with specified number of objects. + */ +int32 +osl_ctfpool_init(osl_t *osh, uint numobj, uint size) +{ + gfp_t flags; + + flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; + osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags); + ASSERT(osh->ctfpool); + + osh->ctfpool->max_obj = numobj; + osh->ctfpool->obj_size = size; + + spin_lock_init(&osh->ctfpool->lock); + + while (numobj--) { + if (!osl_ctfpool_add(osh)) + return -1; + osh->ctfpool->fast_frees--; + } + + return 0; +} + +/* + * Cleanup the packet pool objects. + */ +void +osl_ctfpool_cleanup(osl_t *osh) +{ + struct sk_buff *skb, *nskb; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + + CTFPOOL_LOCK(osh->ctfpool, flags); + + skb = osh->ctfpool->head; + + while (skb != NULL) { + nskb = skb->next; + dev_kfree_skb(skb); + skb = nskb; + osh->ctfpool->curr_obj--; + } + + ASSERT(osh->ctfpool->curr_obj == 0); + osh->ctfpool->head = NULL; + CTFPOOL_UNLOCK(osh->ctfpool, flags); + + kfree(osh->ctfpool); + osh->ctfpool = NULL; +} + +void +osl_ctfpool_stats(osl_t *osh, void *b) +{ + struct bcmstrbuf *bb; + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } +#ifdef BCMSDIO + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif /* BCMSDIO */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + bb = b; + + ASSERT((osh != NULL) && (bb != NULL)); + + bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n", + osh->ctfpool->max_obj, osh->ctfpool->obj_size, + osh->ctfpool->curr_obj, osh->ctfpool->refills); + bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n", + osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees, + osh->ctfpool->slow_allocs); +} + +static inline struct sk_buff * +osl_pktfastget(osl_t *osh, uint len) +{ + struct sk_buff *skb; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + + /* Try to do fast allocate. 
Return null if ctfpool is not in use + * or if there are no items in the ctfpool. + */ + if (osh->ctfpool == NULL) + return NULL; + + CTFPOOL_LOCK(osh->ctfpool, flags); + if (osh->ctfpool->head == NULL) { + ASSERT(osh->ctfpool->curr_obj == 0); + osh->ctfpool->slow_allocs++; + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + if (len > osh->ctfpool->obj_size) { + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + ASSERT(len <= osh->ctfpool->obj_size); + + /* Get an object from ctfpool */ + skb = (struct sk_buff *)osh->ctfpool->head; + osh->ctfpool->head = (void *)skb->next; + + osh->ctfpool->fast_allocs++; + osh->ctfpool->curr_obj--; + ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head); + CTFPOOL_UNLOCK(osh->ctfpool, flags); + + /* Init skb struct */ + skb->next = skb->prev = NULL; +#if defined(__ARM_ARCH_7A__) + skb->data = skb->head + NET_SKB_PAD; + skb->tail = skb->head + NET_SKB_PAD; +#else + skb->data = skb->head + 16; + skb->tail = skb->head + 16; +#endif /* __ARM_ARCH_7A__ */ + skb->len = 0; + skb->cloned = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) + skb->list = NULL; +#endif + atomic_set(&skb->users, 1); + + PKTSETCLINK(skb, NULL); + PKTCCLRATTR(skb); + PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED); + + return skb; +} +#endif /* CTFPOOL */ + +#if defined(BCM_GMAC3) +/* Account for a packet delivered to downstream forwarder. + * Decrement a GMAC forwarder interface's pktalloced count. + */ +void BCMFASTPATH +osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt) +{ + + atomic_sub(skb_cnt, &osh->cmn->pktalloced); +} + +/* Account for a downstream forwarder delivered packet to a WL/DHD driver. + * Increment a GMAC forwarder interface's pktalloced count. + */ +#ifdef BCMDBG_CTRACE +void BCMFASTPATH +osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file) +#else +void BCMFASTPATH +osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt) +#endif /* BCMDBG_CTRACE */ +{ +#if defined(BCMDBG_CTRACE) + int i; + struct sk_buff *skb; +#endif + +#if defined(BCMDBG_CTRACE) + if (skb_cnt > 1) { + struct sk_buff **skb_array = (struct sk_buff **)skbs; + for (i = 0; i < skb_cnt; i++) { + skb = skb_array[i]; +#if defined(BCMDBG_CTRACE) + ASSERT(!PKTISCHAINED(skb)); + ADD_CTRACE(osh, skb, file, line); +#endif /* BCMDBG_CTRACE */ + } + } else { + skb = (struct sk_buff *)skbs; +#if defined(BCMDBG_CTRACE) + ASSERT(!PKTISCHAINED(skb)); + ADD_CTRACE(osh, skb, file, line); +#endif /* BCMDBG_CTRACE */ + } +#endif + + atomic_add(skb_cnt, &osh->cmn->pktalloced); +} + +#endif /* BCM_GMAC3 */ + +/* Convert a driver packet to native(OS) packet + * In the process, packettag is zeroed out before sending up + * IP code depends on skb->cb to be setup correctly with various options + * In our case, that means it should be 0 + */ +struct sk_buff * BCMFASTPATH +osl_pkt_tonative(osl_t *osh, void *pkt) +{ + struct sk_buff *nskb; +#ifdef BCMDBG_CTRACE + struct sk_buff *nskb1, *nskb2; +#endif + + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(pkt); + + /* Decrement the packet counter */ + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced); + +#ifdef BCMDBG_CTRACE + for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) { + if (PKTISCHAINED(nskb1)) { + nskb2 = PKTCLINK(nskb1); + } + else + nskb2 = NULL; + + DEL_CTRACE(osh, nskb1); + } +#endif /* BCMDBG_CTRACE */ + } + return (struct sk_buff *)pkt; +} + +/* Convert a native(OS) packet to driver packet. 
+ * In the process, native packet is destroyed, there is no copying + * Also, a packettag is zeroed out + */ +#ifdef BCMDBG_CTRACE +void * BCMFASTPATH +osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file) +#else +void * BCMFASTPATH +osl_pkt_frmnative(osl_t *osh, void *pkt) +#endif /* BCMDBG_CTRACE */ +{ + struct sk_buff *nskb; +#ifdef BCMDBG_CTRACE + struct sk_buff *nskb1, *nskb2; +#endif + + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(pkt); + + /* Increment the packet counter */ + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced); + +#ifdef BCMDBG_CTRACE + for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) { + if (PKTISCHAINED(nskb1)) { + nskb2 = PKTCLINK(nskb1); + } + else + nskb2 = NULL; + + ADD_CTRACE(osh, nskb1, file, line); + } +#endif /* BCMDBG_CTRACE */ + } + return (void *)pkt; +} + +/* Return a new packet. zero out pkttag */ +#ifdef BCMDBG_CTRACE +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len, int line, char *file) +#else +#ifdef BCM_OBJECT_TRACE +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len, int line, const char *caller) +#else +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len) +#endif /* BCM_OBJECT_TRACE */ +#endif /* BCMDBG_CTRACE */ +{ + struct sk_buff *skb; + uchar num = 0; + if (lmtest != FALSE) { + get_random_bytes(&num, sizeof(uchar)); + if ((num + 1) <= (256 * lmtest / 100)) + return NULL; + } + +#ifdef CTFPOOL + /* Allocate from local pool */ + skb = osl_pktfastget(osh, len); + if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) { +#else /* CTFPOOL */ + if ((skb = osl_alloc_skb(osh, len))) { +#endif /* CTFPOOL */ + skb->tail += len; + skb->len += len; + skb->priority = 0; + +#ifdef BCMDBG_CTRACE + ADD_CTRACE(osh, skb, file, line); +#endif + atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ + } + + return ((void*) skb); +} + +#ifdef CTFPOOL +static inline void +osl_pktfastfree(osl_t *osh, struct sk_buff *skb) +{ + ctfpool_t *ctfpool; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + skb->tstamp.tv.sec = 0; +#else + skb->stamp.tv_sec = 0; +#endif + + /* We only need to init the fields that we change */ + skb->dev = NULL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) + skb->dst = NULL; +#endif + OSL_PKTTAG_CLEAR(skb); + skb->ip_summed = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + skb_orphan(skb); +#else + skb->destructor = NULL; +#endif + + ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb); + ASSERT(ctfpool != NULL); + + /* Add object to the ctfpool */ + CTFPOOL_LOCK(ctfpool, flags); + skb->next = (struct sk_buff *)ctfpool->head; + ctfpool->head = (void *)skb; + + ctfpool->fast_frees++; + ctfpool->curr_obj++; + + ASSERT(ctfpool->curr_obj <= ctfpool->max_obj); + CTFPOOL_UNLOCK(ctfpool, flags); +} +#endif /* CTFPOOL */ + +/* Free the driver packet. 
Free the tag if present */ +#ifdef BCM_OBJECT_TRACE +void BCMFASTPATH +osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller) +#else +void BCMFASTPATH +osl_pktfree(osl_t *osh, void *p, bool send) +#endif /* BCM_OBJECT_TRACE */ +{ + struct sk_buff *skb, *nskb; + if (osh == NULL) + return; + + skb = (struct sk_buff*) p; + + if (send && osh->pub.tx_fn) + osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); + + PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF) + if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) { + printk("%s: pkt %p is from static pool\n", + __FUNCTION__, p); + dump_stack(); + return; + } + + if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) { + printk("%s: pkt %p is from static pool and not in used\n", + __FUNCTION__, p); + dump_stack(); + return; + } +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */ + + /* perversion: we use skb->next to chain multi-skb packets */ + while (skb) { + nskb = skb->next; + skb->next = NULL; + +#ifdef BCMDBG_CTRACE + DEL_CTRACE(osh, skb); +#endif + + +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line); +#endif /* BCM_OBJECT_TRACE */ + +#ifdef CTFPOOL + if (PKTISFAST(osh, skb)) { + if (atomic_read(&skb->users) == 1) + smp_rmb(); + else if (!atomic_dec_and_test(&skb->users)) + goto next_skb; + osl_pktfastfree(osh, skb); + } else +#endif + { + dev_kfree_skb_any(skb); + } +#ifdef CTFPOOL +next_skb: +#endif + atomic_dec(&osh->cmn->pktalloced); + skb = nskb; + } +} + +#ifdef CONFIG_DHD_USE_STATIC_BUF +void* +osl_pktget_static(osl_t *osh, uint len) +{ + int i = 0; + struct sk_buff *skb; +#ifdef DHD_USE_STATIC_CTRLBUF + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!bcm_static_skb) + return osl_pktget(osh, len); + + if (len > DHD_SKB_MAX_BUFSIZE) { + printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len); + return osl_pktget(osh, len); + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + uint32 index; + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM; + bcm_static_skb->last_allocated_index++; + if (bcm_static_skb->skb_8k[index] && + bcm_static_skb->pkt_use[index] == 0) { + break; + } + } + + if ((i != STATIC_PKT_2PAGE_NUM) && + (index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) { + bcm_static_skb->pkt_use[index] = 1; + skb = bcm_static_skb->skb_8k[index]; + skb->data = skb->head; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, NET_SKB_PAD); +#else + skb->tail = skb->data + NET_SKB_PAD; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->data += NET_SKB_PAD; + skb->cloned = 0; + skb->priority = 0; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + skb->mac_len = PREALLOC_USED_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return skb; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return NULL; +#else + down(&bcm_static_skb->osl_pkt_sem); + + if (len <= DHD_SKB_1PAGE_BUFSIZE) { + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (bcm_static_skb->skb_4k[i] && + bcm_static_skb->pkt_use[i] == 0) { + break; + } + } + + if (i != STATIC_PKT_MAX_NUM) { + bcm_static_skb->pkt_use[i] = 1; + + skb = 
bcm_static_skb->skb_4k[i]; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] && + bcm_static_skb->pkt_use[i] == 0) { + break; + } + } + + if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) { + bcm_static_skb->pkt_use[i] = 1; + skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + +#if defined(ENHANCED_STATIC_BUF) + if (bcm_static_skb->skb_16k && + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1; + + skb = bcm_static_skb->skb_16k; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } +#endif /* ENHANCED_STATIC_BUF */ + + up(&bcm_static_skb->osl_pkt_sem); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return osl_pktget(osh, len); +#endif /* DHD_USE_STATIC_CTRLBUF */ +} + +void +osl_pktfree_static(osl_t *osh, void *p, bool send) +{ + int i; +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb = (struct sk_buff *)p; + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!p) { + return; + } + + if (!bcm_static_skb) { + osl_pktfree(osh, p, send); + return; + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i]) { + if (bcm_static_skb->pkt_use[i] == 0) { + printk("%s: static pkt idx %d(%p) is double free\n", + __FUNCTION__, i, p); + } else { + bcm_static_skb->pkt_use[i] = 0; + } + + if (skb->mac_len != PREALLOC_USED_MAGIC) { + printk("%s: static pkt idx %d(%p) is not in used\n", + __FUNCTION__, i, p); + } + + skb->mac_len = PREALLOC_FREE_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p); +#else + down(&bcm_static_skb->osl_pkt_sem); + for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_4k[i]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } + + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } +#ifdef ENHANCED_STATIC_BUF + if (p == bcm_static_skb->skb_16k) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } +#endif + up(&bcm_static_skb->osl_pkt_sem); + osl_pktfree(osh, p, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ +} +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +uint32 +osl_pci_read_config(osl_t *osh, uint offset, uint size) +{ + uint val = 0; + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + /* only 4byte access supported */ + ASSERT(size == 4); + + do { 
+ pci_read_config_dword(osh->pdev, offset, &val); + if (val != 0xffffffff) + break; + } while (retry--); + + + return (val); +} + +void +osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) +{ + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + /* only 4byte access supported */ + ASSERT(size == 4); + + do { + pci_write_config_dword(osh->pdev, offset, val); + if (offset != PCI_BAR0_WIN) + break; + if (osl_pci_read_config(osh, offset, size) == val) + break; + } while (retry--); + +} + +/* return bus # for the pci device pointed by osh->pdev */ +uint +osl_pci_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + +#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) + return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); +#else + return ((struct pci_dev *)osh->pdev)->bus->number; +#endif +} + +/* return slot # for the pci device pointed by osh->pdev */ +uint +osl_pci_slot(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + +#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1; +#else + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); +#endif +} + +/* return domain # for the pci device pointed by osh->pdev */ +uint +osl_pcie_domain(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); +} + +/* return bus # for the pci device pointed by osh->pdev */ +uint +osl_pcie_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return ((struct pci_dev *)osh->pdev)->bus->number; +} + +/* return the pci device pointed by osh->pdev */ +struct pci_dev * +osl_pci_device(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return osh->pdev; +} + +static void +osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) +{ +} + +void +osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); +} + +void +osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); +} + +void * +osl_malloc(osl_t *osh, uint size) +{ + void *addr; + gfp_t flags; + + /* only ASSERT if osh is defined */ + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + int i = 0; + if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) + { + down(&bcm_static_buf->static_sem); + + for (i = 0; i < STATIC_BUF_MAX_NUM; i++) + { + if (bcm_static_buf->buf_use[i] == 0) + break; + } + + if (i == STATIC_BUF_MAX_NUM) + { + up(&bcm_static_buf->static_sem); + printk("all static buff in use!\n"); + goto original; + } + + bcm_static_buf->buf_use[i] = 1; + up(&bcm_static_buf->static_sem); + + bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); + if (osh) + atomic_add(size, &osh->cmn->malloced); + + return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i)); + } + } +original: +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; + if ((addr = kmalloc(size, flags)) == NULL) { + if (osh) + osh->failed++; + return (NULL); + } + if (osh && osh->cmn) + atomic_add(size, &osh->cmn->malloced); + + return (addr); +} + +void * +osl_mallocz(osl_t *osh, uint size) +{ + void *ptr; + + ptr = osl_malloc(osh, size); + + if (ptr != NULL) { + bzero(ptr, size); + } + + return ptr; +} + +void +osl_mfree(osl_t *osh, void *addr, uint size) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr + <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) + { + int buf_idx = 0; + + buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; + + down(&bcm_static_buf->static_sem); + bcm_static_buf->buf_use[buf_idx] = 0; + up(&bcm_static_buf->static_sem); + + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(size, &osh->cmn->malloced); + } + return; + } + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + + ASSERT(size <= osl_malloced(osh)); + + atomic_sub(size, &osh->cmn->malloced); + } + kfree(addr); +} + +uint +osl_check_memleak(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + if (atomic_read(&osh->cmn->refcount) == 1) + return (atomic_read(&osh->cmn->malloced)); + else + return 0; +} + +uint +osl_malloced(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (atomic_read(&osh->cmn->malloced)); +} + +uint +osl_malloc_failed(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (osh->failed); +} + + +uint +osl_dma_consistent_align(void) +{ + return (PAGE_SIZE); +} + +void* +osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap) +{ + void *va; + uint16 align = (1 << align_bits); + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) + size += align; + *alloced = size; + +#ifndef BCM_SECURE_DMA +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) + va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO); + if (va) + *pap = (ulong)__virt_to_phys((ulong)va); +#else + { + dma_addr_t pap_lin; + struct pci_dev *hwdev = osh->pdev; + gfp_t flags; +#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL + flags = GFP_ATOMIC; +#else + flags = GFP_KERNEL; +#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */ + va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags); +#ifdef BCMDMA64OSL + PHYSADDRLOSET(*pap, pap_lin & 0xffffffff); + PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff); +#else + *pap = (dmaaddr_t)pap_lin; +#endif /* BCMDMA64OSL */ + } +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#else + va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap); +#endif /* BCM_SECURE_DMA */ + return va; +} + +void +osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) +{ +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + +#ifndef BCM_SECURE_DMA +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) + kfree(va); +#else +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); + pci_free_consistent(osh->pdev, size, va, paddr); +#else + pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); +#endif /* BCMDMA64OSL */ +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#else + osl_sec_dma_free_consistent(osh, va, size, pa); +#endif /* BCM_SECURE_DMA */ +} + +dmaaddr_t 
BCMFASTPATH +osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) +{ + int dir; +#ifdef BCMDMA64OSL + dmaaddr_t ret; + dma_addr_t map_addr; +#endif /* BCMDMA64OSL */ + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + + + + +#ifdef BCMDMA64OSL + map_addr = pci_map_single(osh->pdev, va, size, dir); + PHYSADDRLOSET(ret, map_addr & 0xffffffff); + PHYSADDRHISET(ret, (map_addr >> 32) & 0xffffffff); + return ret; +#else + return (pci_map_single(osh->pdev, va, size, dir)); +#endif /* BCMDMA64OSL */ +} + +void BCMFASTPATH +osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction) +{ + int dir; +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + + dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); + pci_unmap_single(osh->pdev, paddr, size, dir); +#else + pci_unmap_single(osh->pdev, (uint32)pa, size, dir); +#endif /* BCMDMA64OSL */ +} + +/* OSL function for CPU relax */ +inline void BCMFASTPATH +osl_cpu_relax(void) +{ + cpu_relax(); +} + + +#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) || \ + defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)) + +#include + +/* + * Note that its gauranteed that the Ring is cache line aligned, but + * the messages are not. And we see that __dma_inv_range in + * arch/arm64/mm/cache.S invalidates only if the request size is + * cache line aligned. If not, it will Clean and invalidate. + * So we'll better invalidate the whole ring. + * + * Also, the latest Kernel versions invoke cache maintenance operations + * from arch/arm64/mm/dma-mapping.c, __swiotlb_sync_single_for_device + * Only if is_device_dma_coherent returns 0. Since we don't have BSP + * source, assuming that its the case, since we pass NULL for the dev ptr + */ +inline void BCMFASTPATH +osl_cache_flush(void *va, uint size) +{ + /* + * using long for address arithmatic is OK, in linux + * 32 bit its 4 bytes and 64 bit its 8 bytes + */ + unsigned long end_cache_line_start; + unsigned long end_addr; + unsigned long next_cache_line_start; + + end_addr = (unsigned long)va + size; + + /* Start address beyond the cache line we plan to operate */ + end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1)); + next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES; + + /* Align the start address to cache line boundary */ + va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1)); + + /* Ensure that size is also aligned and extends partial line to full */ + size = next_cache_line_start - (unsigned long)va; + +#ifndef BCM_SECURE_DMA + +#ifdef CONFIG_ARM64 + /* + * virt_to_dma is not present in arm64/include/dma-mapping.h + * So have to convert the va to pa first and then get the dma addr + * of the same. 
+ */ + { + phys_addr_t pa; + dma_addr_t dma_addr; + pa = virt_to_phys(va); + dma_addr = phys_to_dma(NULL, pa); + if (size > 0) + dma_sync_single_for_device(OSH_NULL, dma_addr, size, DMA_TX); + } +#else + if (size > 0) + dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX); +#endif /* !CONFIG_ARM64 */ +#else + phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa); + if (size > 0) + dma_sync_single_for_device(OSH_NULL, orig_pa, size, DMA_TX); +#endif /* defined BCM_SECURE_DMA */ +} + +inline void BCMFASTPATH +osl_cache_inv(void *va, uint size) +{ + /* + * using long for address arithmatic is OK, in linux + * 32 bit its 4 bytes and 64 bit its 8 bytes + */ + unsigned long end_cache_line_start; + unsigned long end_addr; + unsigned long next_cache_line_start; + + end_addr = (unsigned long)va + size; + + /* Start address beyond the cache line we plan to operate */ + end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1)); + next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES; + + /* Align the start address to cache line boundary */ + va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1)); + + /* Ensure that size is also aligned and extends partial line to full */ + size = next_cache_line_start - (unsigned long)va; + +#ifndef BCM_SECURE_DMA + +#ifdef CONFIG_ARM64 + /* + * virt_to_dma is not present in arm64/include/dma-mapping.h + * So have to convert the va to pa first and then get the dma addr + * of the same. + */ + { + phys_addr_t pa; + dma_addr_t dma_addr; + pa = virt_to_phys(va); + dma_addr = phys_to_dma(NULL, pa); + dma_sync_single_for_cpu(OSH_NULL, dma_addr, size, DMA_RX); + } +#else + dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX); +#endif /* !CONFIG_ARM64 */ +#else + phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa); + dma_sync_single_for_cpu(OSH_NULL, orig_pa, size, DMA_RX); +#endif /* defined BCM_SECURE_DMA */ +} + +inline void osl_prefetch(const void *ptr) +{ + /* PLD instruction is not applicable in ARM 64. We don't care for now */ +#ifndef CONFIG_ARM64 + __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc"); +#endif +} + +int osl_arch_is_coherent(void) +{ + return 0; +} + + +inline int osl_acp_war_enab(void) +{ + return 0; +} + +#endif + +#if defined(BCMASSERT_LOG) +void +osl_assert(const char *exp, const char *file, int line) +{ + char tempbuf[256]; + const char *basename; + + basename = strrchr(file, '/'); + /* skip the '/' */ + if (basename) + basename++; + + if (!basename) + basename = file; + +#ifdef BCMASSERT_LOG + snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n", + exp, basename, line); +#endif /* BCMASSERT_LOG */ + + +#if defined(BCMASSERT_LOG) + switch (g_assert_type) { + case 0: + panic("%s", tempbuf); + break; + case 1: + printk("%s", tempbuf); + break; + case 2: + printk("%s", tempbuf); + BUG(); + break; + default: + break; + } +#endif + +} +#endif + +void +osl_delay(uint usec) +{ + uint d; + + while (usec > 0) { + d = MIN(usec, 1000); + udelay(d); + usec -= d; + } +} + +void +osl_sleep(uint ms) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + if (ms < 20) + usleep_range(ms*1000, ms*1000 + 1000); + else +#endif + msleep(ms); +} + + + +/* Clone a packet. + * The pkttag contents are NOT cloned. 
+ */ +#ifdef BCMDBG_CTRACE +void * +osl_pktdup(osl_t *osh, void *skb, int line, char *file) +#else +#ifdef BCM_OBJECT_TRACE +void * +osl_pktdup(osl_t *osh, void *skb, int line, const char *caller) +#else +void * +osl_pktdup(osl_t *osh, void *skb) +#endif /* BCM_OBJECT_TRACE */ +#endif /* BCMDBG_CTRACE */ +{ + void * p; + + ASSERT(!PKTISCHAINED(skb)); + + /* clear the CTFBUF flag if set and map the rest of the buffer + * before cloning. + */ + PKTCTFMAP(osh, skb); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL) +#else + if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL) +#endif + return NULL; + +#ifdef CTFPOOL + if (PKTISFAST(osh, skb)) { + ctfpool_t *ctfpool; + + /* if the buffer allocated from ctfpool is cloned then + * we can't be sure when it will be freed. since there + * is a chance that we will be losing a buffer + * from our pool, we increment the refill count for the + * object to be alloced later. + */ + ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb); + ASSERT(ctfpool != NULL); + PKTCLRFAST(osh, p); + PKTCLRFAST(osh, skb); + ctfpool->refills++; + } +#endif /* CTFPOOL */ + + /* Clear PKTC context */ + PKTSETCLINK(p, NULL); + PKTCCLRFLAGS(p); + PKTCSETCNT(p, 1); + PKTCSETLEN(p, PKTLEN(osh, skb)); + + /* skb_clone copies skb->cb.. we don't want that */ + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(p); + + /* Increment the packet counter */ + atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ + +#ifdef BCMDBG_CTRACE + ADD_CTRACE(osh, (struct sk_buff *)p, file, line); +#endif + return (p); +} + +#ifdef BCMDBG_CTRACE +int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt) +{ + unsigned long flags; + struct sk_buff *skb; + int ck = FALSE; + + spin_lock_irqsave(&osh->ctrace_lock, flags); + + list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) { + if (pkt == skb) { + ck = TRUE; + break; + } + } + + spin_unlock_irqrestore(&osh->ctrace_lock, flags); + return ck; +} + +void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b) +{ + unsigned long flags; + struct sk_buff *skb; + int idx = 0; + int i, j; + + spin_lock_irqsave(&osh->ctrace_lock, flags); + + if (b != NULL) + bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num); + else + printk(" Total %d sbk not free\n", osh->ctrace_num); + + list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) { + if (b != NULL) + bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb); + else + printk("[%d] skb %p:\n", ++idx, skb); + + for (i = 0; i < skb->ctrace_count; i++) { + j = (skb->ctrace_start + i) % CTRACE_NUM; + if (b != NULL) + bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]); + else + printk(" [%s(%d)]\n", skb->func[j], skb->line[j]); + } + if (b != NULL) + bcm_bprintf(b, "\n"); + else + printk("\n"); + } + + spin_unlock_irqrestore(&osh->ctrace_lock, flags); + + return; +} +#endif /* BCMDBG_CTRACE */ + + +/* + * OSLREGOPS specifies the use of osl_XXX routines to be used for register access + */ + +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. 
+ */ + +uint +osl_pktalloced(osl_t *osh) +{ + if (atomic_read(&osh->cmn->refcount) == 1) + return (atomic_read(&osh->cmn->pktalloced)); + else + return 0; +} + +uint32 +osl_rand(void) +{ + uint32 rand; + + get_random_bytes(&rand, sizeof(rand)); + + return rand; +} + +/* Linux Kernel: File Operations: start */ +void * +osl_os_open_image(char *filename) +{ + struct file *fp; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? + */ + if (IS_ERR(fp)) + fp = NULL; + + return fp; +} + +int +osl_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + + if (!image) + return 0; + + rdlen = kernel_read(fp, buf, len, &fp->f_pos); + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +osl_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + +int +osl_os_image_size(void *image) +{ + int len = 0, curroffset; + + if (image) { + /* store the current offset */ + curroffset = generic_file_llseek(image, 0, 1); + /* goto end of file to get length */ + len = generic_file_llseek(image, 0, 2); + /* restore back the offset */ + generic_file_llseek(image, curroffset, 0); + } + return len; +} + +/* Linux Kernel: File Operations: end */ + + +/* APIs to set/get specific quirks in OSL layer */ +void +osl_flag_set(osl_t *osh, uint32 mask) +{ + osh->flags |= mask; +} + +bool +osl_is_flag_set(osl_t *osh, uint32 mask) +{ + return (osh->flags & mask); +} + +#ifdef BCM_SECURE_DMA + +static void +osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn) +{ + int ret; + +#if defined(__ARM_ARCH_7A__) + if (regn == CONT_ARMREGION) { + ret = osl_sec_dma_alloc_contig_mem(osh, memsize, regn); + if (ret != BCME_OK) + printk("linux_osl.c: CMA memory access failed\n"); + } +#endif + /* implement the MIPS Here */ +} + +static int +osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn) +{ + u64 addr; + + printk("linux_osl.c: The value of cma mem block size = %ld\n", memsize); + osh->cma = cma_dev_get_cma_dev(regn); + printk("The value of cma = %p\n", osh->cma); + if (!osh->cma) { + printk("linux_osl.c:contig_region index is invalid\n"); + return BCME_ERROR; + } + if (cma_dev_get_mem(osh->cma, &addr, (u32)memsize, SEC_DMA_ALIGN) < 0) { + printk("linux_osl.c: contiguous memory block allocation failure\n"); + return BCME_ERROR; + } + osh->contig_base_alloc = (phys_addr_t)addr; + osh->contig_base = (phys_addr_t)osh->contig_base_alloc; + printk("contig base alloc=%lx \n", (ulong)osh->contig_base_alloc); + + return BCME_OK; +} + +static void +osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn) +{ + int ret; + + ret = cma_dev_put_mem(osh->cma, (u64)osh->contig_base, memsize); + if (ret) + printf("%s contig base free failed\n", __FUNCTION__); +} + +static void * +osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr) +{ + + struct page **map; + int order, i; + void *addr = NULL; + + size = PAGE_ALIGN(size); + order = get_order(size); + + map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC); + + if (map == NULL) + return NULL; + + for (i = 0; i < (size >> PAGE_SHIFT); i++) + map[i] = page + i; + + if (iscache) { + addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL)); + if (isdecr) { + osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page)); + g_contig_delta_va_pa = osh->contig_delta_va_pa; + } + } + 
else { + +#if defined(__ARM_ARCH_7A__) + addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, + pgprot_noncached(__pgprot(PAGE_KERNEL))); +#endif + if (isdecr) { + osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page)); + g_contig_delta_va_pa = osh->contig_delta_va_pa; + } + } + + kfree(map); + return (void *)addr; +} + +static void +osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size) +{ + vunmap(contig_base_va); +} + +static void +osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list) +{ + int i; + sec_mem_elem_t *sec_mem_elem; + + if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) { + + *list = sec_mem_elem; + bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max)); + for (i = 0; i < max-1; i++) { + sec_mem_elem->next = (sec_mem_elem + 1); + sec_mem_elem->size = mbsize; + sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc; + sec_mem_elem->vac = osh->contig_base_alloc_va; + + osh->contig_base_alloc += mbsize; + osh->contig_base_alloc_va += mbsize; + + sec_mem_elem = sec_mem_elem + 1; + } + sec_mem_elem->next = NULL; + sec_mem_elem->size = mbsize; + sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc; + sec_mem_elem->vac = osh->contig_base_alloc_va; + + osh->contig_base_alloc += mbsize; + osh->contig_base_alloc_va += mbsize; + + } + else + printf("%s sec mem elem kmalloc failed\n", __FUNCTION__); +} + + +static void +osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base) +{ + if (sec_list_base) + kfree(sec_list_base); +} + +static sec_mem_elem_t * BCMFASTPATH +osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction, + struct sec_cma_info *ptr_cma_info, uint offset) +{ + sec_mem_elem_t *sec_mem_elem = NULL; + + if (size <= 512 && osh->sec_list_512) { + sec_mem_elem = osh->sec_list_512; + osh->sec_list_512 = sec_mem_elem->next; + } + else if (size <= 2048 && osh->sec_list_2048) { + sec_mem_elem = osh->sec_list_2048; + osh->sec_list_2048 = sec_mem_elem->next; + } + else if (osh->sec_list_4096) { + sec_mem_elem = osh->sec_list_4096; + osh->sec_list_4096 = sec_mem_elem->next; + } else { + printf("%s No matching Pool available size=%d \n", __FUNCTION__, size); + return NULL; + } + + if (sec_mem_elem != NULL) { + sec_mem_elem->next = NULL; + + if (ptr_cma_info->sec_alloc_list_tail) { + ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem; + } + + ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; + if (ptr_cma_info->sec_alloc_list == NULL) + ptr_cma_info->sec_alloc_list = sec_mem_elem; + } + return sec_mem_elem; +} + +static void BCMFASTPATH +osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem) +{ + sec_mem_elem->dma_handle = 0x0; + sec_mem_elem->va = NULL; + + if (sec_mem_elem->size == 512) { + sec_mem_elem->next = osh->sec_list_512; + osh->sec_list_512 = sec_mem_elem; + } + else if (sec_mem_elem->size == 2048) { + sec_mem_elem->next = osh->sec_list_2048; + osh->sec_list_2048 = sec_mem_elem; + } + else if (sec_mem_elem->size == 4096) { + sec_mem_elem->next = osh->sec_list_4096; + osh->sec_list_4096 = sec_mem_elem; + } + else + printf("%s free failed size=%d \n", __FUNCTION__, sec_mem_elem->size); +} + +static sec_mem_elem_t * BCMFASTPATH +osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle) +{ + sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list; + sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list; + + if (sec_mem_elem->dma_handle == dma_handle) { + + ptr_cma_info->sec_alloc_list = 
sec_mem_elem->next; + + if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) { + ptr_cma_info->sec_alloc_list_tail = NULL; + ASSERT(ptr_cma_info->sec_alloc_list == NULL); + } + + return sec_mem_elem; + } + + while (sec_mem_elem != NULL) { + + if (sec_mem_elem->dma_handle == dma_handle) { + + sec_prv_elem->next = sec_mem_elem->next; + if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) + ptr_cma_info->sec_alloc_list_tail = sec_prv_elem; + + return sec_mem_elem; + } + sec_prv_elem = sec_mem_elem; + sec_mem_elem = sec_mem_elem->next; + } + return NULL; +} + +static sec_mem_elem_t * +osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info) +{ + sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list; + + if (sec_mem_elem) { + + ptr_cma_info->sec_alloc_list = sec_mem_elem->next; + + if (ptr_cma_info->sec_alloc_list == NULL) + ptr_cma_info->sec_alloc_list_tail = NULL; + + return sec_mem_elem; + + } else + return NULL; +} + +static void * BCMFASTPATH +osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info) +{ + return ptr_cma_info->sec_alloc_list_tail; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info) +{ + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + uint loffset; + void *vaorig = va + size; + dma_addr_t dma_handle = 0x0; + /* packet will be the one added with osl_sec_dma_map() just before this call */ + + sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info); + + if (sec_mem_elem && sec_mem_elem->va == vaorig) { + + pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + + dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size, + (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + } else { + printf("%s: error orig va not found va = 0x%p \n", + __FUNCTION__, vaorig); + } + return dma_handle; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset) +{ + + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + void *pa_cma_kmap_va = NULL; + int *fragva; + uint buflen = 0; + struct sk_buff *skb; + dma_addr_t dma_handle = 0x0; + uint loffset; + int i = 0; + + sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset); + + if (sec_mem_elem == NULL) { + printk("linux_osl.c: osl_sec_dma_map - cma allocation failed\n"); + return 0; + } + sec_mem_elem->va = va; + sec_mem_elem->direction = direction; + pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); + * pa_cma_kmap_va += loffset; + */ + + pa_cma_kmap_va = sec_mem_elem->vac; + + if (direction == DMA_TX) { + + if (p == NULL) { + + memcpy(pa_cma_kmap_va+offset, va, size); + buflen = size; + } + else { + for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) { + if (skb_is_nonlinear(skb)) { + + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + fragva = kmap_atomic(skb_frag_page(f)); + memcpy((pa_cma_kmap_va+offset+buflen), + (fragva + f->page_offset), skb_frag_size(f)); + kunmap_atomic(fragva); + buflen += skb_frag_size(f); + } + } + else { + memcpy((pa_cma_kmap_va+offset+buflen), skb->data, skb->len); + buflen += skb->len; + } + } + + } + if (dmah) { + dmah->nsegs = 1; + dmah->origsize = buflen; + } + } + + else if (direction == DMA_RX) + { + buflen = size; + if ((p != NULL) && (dmah != NULL)) { + dmah->nsegs = 1; + dmah->origsize = buflen; + } + } + if (direction == DMA_RX || direction == DMA_TX) { + + dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset+offset, buflen, + (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + } + if (dmah) { + dmah->segs[0].addr = dma_handle; + dmah->segs[0].length = buflen; + } + sec_mem_elem->dma_handle = dma_handle; + /* kunmap_atomic(pa_cma_kmap_va-loffset); */ + return dma_handle; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map) +{ + + struct page *pa_cma_page; + phys_addr_t pa_cma; + dma_addr_t dma_handle = 0x0; + uint loffset; + + pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa); + pa_cma_page = phys_to_page(pa_cma); + loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1)); + + dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size, + (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + return dma_handle; +} + +void BCMFASTPATH +osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, +void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset) +{ + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + void *pa_cma_kmap_va = NULL; + uint buflen = 0; + dma_addr_t pa_cma; + void *va; + uint loffset = 0; + int read_count = 0; + BCM_REFERENCE(buflen); + BCM_REFERENCE(read_count); + + sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle); + if (sec_mem_elem == NULL) { + printf("%s sec_mem_elem is NULL and dma_handle =0x%lx and dir=%d\n", + __FUNCTION__, (ulong)dma_handle, direction); + return; + } + + va = sec_mem_elem->va; + va -= offset; + pa_cma = sec_mem_elem->pa_cma; + + pa_cma_page = phys_to_page(pa_cma); + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + + if (direction == DMA_RX) { + + if (p == NULL) { + + /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); + * pa_cma_kmap_va += loffset; + */ + + pa_cma_kmap_va = sec_mem_elem->vac; + + dma_unmap_page(osh->cma->dev, pa_cma, size, DMA_FROM_DEVICE); + memcpy(va, pa_cma_kmap_va, size); + /* kunmap_atomic(pa_cma_kmap_va); */ + } + } else { + dma_unmap_page(osh->cma->dev, pa_cma, size+offset, DMA_TO_DEVICE); + } + + osl_sec_dma_free_mem_elem(osh, sec_mem_elem); +} + +void +osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info) +{ + + sec_mem_elem_t *sec_mem_elem; + + sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info); + + while (sec_mem_elem != NULL) { + + dma_unmap_page(osh->cma->dev, sec_mem_elem->pa_cma, sec_mem_elem->size, + sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + osl_sec_dma_free_mem_elem(osh, sec_mem_elem); + + sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info); + } +} + +static void +osl_sec_dma_init_consistent(osl_t *osh) +{ + int i; + void *temp_va = osh->contig_base_alloc_coherent_va; + phys_addr_t temp_pa = osh->contig_base_alloc_coherent; + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + osh->sec_cma_coherent[i].avail = TRUE; + osh->sec_cma_coherent[i].va = temp_va; + osh->sec_cma_coherent[i].pa = temp_pa; + temp_va += SEC_CMA_COHERENT_BLK; + temp_pa += SEC_CMA_COHERENT_BLK; + } +} + +static void * +osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap) +{ + + void *temp_va = NULL; + ulong temp_pa = 0; + int i; + + if (size > SEC_CMA_COHERENT_BLK) { + printf("%s unsupported size\n", __FUNCTION__); + return NULL; + } + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + if (osh->sec_cma_coherent[i].avail == TRUE) { + temp_va = osh->sec_cma_coherent[i].va; + temp_pa = osh->sec_cma_coherent[i].pa; + osh->sec_cma_coherent[i].avail = FALSE; + break; + } + } + + if (i == SEC_CMA_COHERENT_MAX) + printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, + temp_va, (ulong)temp_pa, size); + + *pap = (unsigned long)temp_pa; + return temp_va; +} + +static void +osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) +{ + int i = 0; + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + if (osh->sec_cma_coherent[i].va == va) { + osh->sec_cma_coherent[i].avail = TRUE; + break; + } + } + if (i == SEC_CMA_COHERENT_MAX) + printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, + va, (ulong)pa, size); +} + +#endif /* BCM_SECURE_DMA */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER) +#include +#include +void +osl_pkt_orphan_partial(struct sk_buff *skb) +{ + uint32 fraction; 
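+	/* tcp_wfree() is not referenced directly here; it is identified once at
+	 * runtime by comparing the skb destructor's symbol name (via
+	 * sprint_symbol) and the pointer is cached, so only TSQ-accounted TCP
+	 * skbs are adjusted below.
+	 */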
+ static void *p_tcp_wfree = NULL; + + if (!skb->destructor || skb->destructor == sock_wfree) + return; + + if (unlikely(!p_tcp_wfree)) { + char sym[KSYM_SYMBOL_LEN]; + sprint_symbol(sym, (unsigned long)skb->destructor); + sym[9] = 0; + if (!strcmp(sym, "tcp_wfree")) + p_tcp_wfree = skb->destructor; + else + return; + } + + if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk)) + return; + + /* abstract a certain portion of skb truesize from the socket + * sk_wmem_alloc to allow more skb can be allocated for this + * socket for better cusion meeting WiFi device requirement + */ + fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER; + skb->truesize -= fraction; + atomic_sub(fraction, &skb->sk->sk_wmem_alloc); +} +#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */ diff --git a/drivers/net/wireless/bcmdhd/pcie_core.c b/drivers/net/wireless/bcmdhd/pcie_core.c new file mode 100644 index 000000000000..c36bc62ecdb4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/pcie_core.c @@ -0,0 +1,115 @@ +/** @file pcie_core.c + * + * Contains PCIe related functions that are shared between different driver models (e.g. firmware + * builds, DHD builds, BMAC builds), in order to avoid code duplication. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: pcie_core.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie_core.h" + +/* local prototypes */ + +/* local variables */ + +/* function definitions */ + +#ifdef BCMDRIVER + +void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + uint32 val, i, lsc; + uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR, + PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L, + PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA, + PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL, + PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG, + PCIECFGREG_REG_BAR3_CONFIG}; + sbpcieregs_t *pcie = NULL; + uint32 origidx = si_coreidx(sih); + + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + /* Disable/restore ASPM Control to protect the watchdog reset */ + W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + lsc = R_REG(osh, &sbpcieregs->configdata); + val = lsc & (~PCIE_ASPM_ENAB); + W_REG(osh, &sbpcieregs->configdata, val); + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4); + OSL_DELAY(100000); + + W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + W_REG(osh, &sbpcieregs->configdata, lsc); + + if (sih->buscorerev <= 13) { + /* Write configuration registers back to the shadow registers + * cause shadow registers are cleared out after watchdog reset. + */ + for (i = 0; i < ARRAYSIZE(cfg_offset); i++) { + W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]); + val = R_REG(osh, &sbpcieregs->configdata); + W_REG(osh, &sbpcieregs->configdata, val); + } + } + si_setcoreidx(sih, origidx); +} + + +/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled + * by the L12 state from MAC to save power by putting the + * SerDes analog in IDDQ mode + */ +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + sbpcieregs_t *pcie = NULL; + uint crwlpciegen2_117_disable = 0; + uint32 origidx = si_coreidx(sih); + + crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1; + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + OR_REG(osh, &sbpcieregs->control, + crwlpciegen2_117_disable); + + si_setcoreidx(sih, origidx); +} +#endif /* BCMDRIVER */ diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c new file mode 100644 index 000000000000..0804ef455135 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/sbutils.c @@ -0,0 +1,1108 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbutils.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + + +/* local prototypes */ +static uint _sb_coreidx(si_info_t *sii, uint32 sba); +static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, + uint ncores); +static uint32 _sb_coresba(si_info_t *sii); +static void *_sb_setcoreidx(si_info_t *sii, uint coreidx); +#define SET_SBREG(sii, r, mask, val) \ + W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val))) +#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF) + +/* sonicsrev */ +#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT) +#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT) + +#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr)) +#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v)) +#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v))) +#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v))) + +static uint32 +sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr) +{ + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint8 tmp; + uint32 val, intr_val = 0; + + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). + * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + val = R_REG(sii->osh, sbr); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } + + return (val); +} + +static void +sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v) +{ + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint8 tmp; + volatile uint32 dummy; + uint32 intr_val = 0; + + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). 
+ * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) { + dummy = R_REG(sii->osh, sbr); + BCM_REFERENCE(dummy); + W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff)); + dummy = R_REG(sii->osh, sbr); + BCM_REFERENCE(dummy); + W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff)); + } else + W_REG(sii->osh, sbr, v); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } +} + +uint +sb_coreid(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT); +} + +uint +sb_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + void *corereg; + sbconfig_t *sb; + uint origidx, intflag, intr_val = 0; + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + corereg = si_setcore(sih, CC_CORE_ID, 0); + ASSERT(corereg != NULL); + sb = REGS2SB(corereg); + intflag = R_SBREG(sii, &sb->sbflagst); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + + return intflag; +} + +uint +sb_flag(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK; +} + +void +sb_setint(si_t *sih, int siflag) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 vec; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + if (siflag == -1) + vec = 0; + else + vec = 1 << siflag; + W_SBREG(sii, &sb->sbintvec, vec); +} + +/* return core index of the core with address 'sba' */ +static uint +_sb_coreidx(si_info_t *sii, uint32 sba) +{ + uint i; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + for (i = 0; i < sii->numcores; i ++) + if (sba == cores_info->coresba[i]) + return i; + return BADIDX; +} + +/* return core address of the current core */ +static uint32 +_sb_coresba(si_info_t *sii) +{ + uint32 sbaddr; + + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: { + sbconfig_t *sb = REGS2SB(sii->curmap); + sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0)); + break; + } + + case PCI_BUS: + sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + break; + + case PCMCIA_BUS: { + uint8 tmp = 0; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + sbaddr = (uint32)tmp << 12; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + sbaddr |= (uint32)tmp << 16; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + sbaddr |= (uint32)tmp << 24; + break; + } + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sbaddr = (uint32)(uintptr)sii->curmap; + break; +#endif + + + default: + sbaddr = BADCOREADDR; + break; + } + + return sbaddr; +} + +uint +sb_corevendor(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT); +} + +uint +sb_corerev(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + uint sbidh; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + sbidh = R_SBREG(sii, &sb->sbidhigh); + + return (SBCOREREV(sbidh)); +} + +/* set core-specific control flags */ +void +sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 
val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+ }
+
+ /* return the new value
+ * for a write operation, the following readback ensures the completion of the write operation.
+ */
+ return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+ (val << SBTMH_SISF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatehigh, w);
+ }
+
+ /* return the new value */
+ return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbtmstatelow) &
+ (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers. 
+ */ +uint +sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + if (regoff >= SBCONFIGOFF) { + w = (R_SBREG(sii, r) & ~mask) | val; + W_SBREG(sii, r, w); + } else { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + } + + /* readback */ + if (regoff >= SBCONFIGOFF) + w = R_SBREG(sii, r); + else { + if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) && + (coreidx == SI_CC_IDX) && + (regoff == OFFSETOF(chipcregs_t, watchdog))) { + w = val; + } else + w = R_REG(sii->osh, r); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + sb_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. 
+ */ +uint32 * +sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) + return 0; + + return (r); +} + +/* Scan the enumeration space to find all cores starting from the given + * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba' + * is the default core address at chip POR time and 'regs' is the virtual + * address that the default core is mapped at. 'ncores' is the number of + * cores expected on bus 'sbba'. It returns the total number of cores + * starting from bus 'sbba', inclusive. + */ +#define SB_MAXBUSES 2 +static uint +_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores) +{ + uint next; + uint ncc = 0; + uint i; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (bus >= SB_MAXBUSES) { + SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus)); + return 0; + } + SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores)); + + /* Scan all cores on the bus starting from core 0. + * Core addresses must be contiguous on each bus. + */ + for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) { + cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE); + + /* keep and reuse the initial register mapping */ + if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) { + SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next)); + cores_info->regs[next] = regs; + } + + /* change core to 'next' and read its coreid */ + sii->curmap = _sb_setcoreidx(sii, next); + sii->curidx = next; + + cores_info->coreid[next] = sb_coreid(&sii->pub); + + /* core specific processing... 
*/ + /* chipc provides # cores */ + if (cores_info->coreid[next] == CC_CORE_ID) { + chipcregs_t *cc = (chipcregs_t *)sii->curmap; + uint32 ccrev = sb_corerev(&sii->pub); + + /* determine numcores - this is the total # cores in the chip */ + if (((ccrev == 4) || (ccrev >= 6))) { + ASSERT(cc); + numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> + CID_CC_SHIFT; + } else { + /* Older chips */ + uint chip = CHIPID(sii->pub.chip); + + if (chip == BCM4306_CHIP_ID) /* < 4306c0 */ + numcores = 6; + else if (chip == BCM4704_CHIP_ID) + numcores = 9; + else if (chip == BCM5365_CHIP_ID) + numcores = 7; + else { + SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", + chip)); + ASSERT(0); + numcores = 1; + } + } + SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores, + sii->pub.issim ? "QT" : "")); + } + /* scan bridged SB(s) and add results to the end of the list */ + else if (cores_info->coreid[next] == OCP_CORE_ID) { + sbconfig_t *sb = REGS2SB(sii->curmap); + uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1); + uint nsbcc; + + sii->numcores = next + 1; + + if ((nsbba & 0xfff00000) != SI_ENUM_BASE) + continue; + nsbba &= 0xfffff000; + if (_sb_coreidx(sii, nsbba) != BADIDX) + continue; + + nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16; + nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc); + if (sbba == SI_ENUM_BASE) + numcores -= nsbcc; + ncc += nsbcc; + } + } + + SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba)); + + sii->numcores = i + ncc; + return sii->numcores; +} + +/* scan the sb enumerated space to identify all cores */ +void +sb_scan(si_t *sih, void *regs, uint devid) +{ + uint32 origsba; + sbconfig_t *sb; + si_info_t *sii = SI_INFO(sih); + + sb = REGS2SB(sii->curmap); + + sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT; + + /* Save the current core info and validate it later till we know + * for sure what is good and what is bad. + */ + origsba = _sb_coresba(sii); + + /* scan all SB(s) starting from SI_ENUM_BASE */ + sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +sb_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + + if (coreidx >= sii->numcores) + return (NULL); + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. + */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + sii->curmap = _sb_setcoreidx(sii, coreidx); + sii->curidx = coreidx; + + return (sii->curmap); +} + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. 
+ */ +static void * +_sb_setcoreidx(si_info_t *sii, uint coreidx) +{ + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 sbaddr = cores_info->coresba[coreidx]; + void *regs; + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + regs = cores_info->regs[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr); + regs = sii->curmap; + break; + + case PCMCIA_BUS: { + uint8 tmp = (sbaddr >> 12) & 0x0f; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + tmp = (sbaddr >> 16) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + tmp = (sbaddr >> 24) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + regs = sii->curmap; + break; + } +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = (void *)(uintptr)sbaddr; + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + regs = cores_info->regs[coreidx]; + break; +#endif /* BCMSDIO */ + + + default: + ASSERT(0); + regs = NULL; + break; + } + + return regs; +} + +/* Return the address of sbadmatch0/1/2/3 register */ +static volatile uint32 * +sb_admatch(si_info_t *sii, uint asidx) +{ + sbconfig_t *sb; + volatile uint32 *addrm; + + sb = REGS2SB(sii->curmap); + + switch (asidx) { + case 0: + addrm = &sb->sbadmatch0; + break; + + case 1: + addrm = &sb->sbadmatch1; + break; + + case 2: + addrm = &sb->sbadmatch2; + break; + + case 3: + addrm = &sb->sbadmatch3; + break; + + default: + SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx)); + return 0; + } + + return (addrm); +} + +/* Return the number of address spaces in current core */ +int +sb_numaddrspaces(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + /* + 1 because of enumeration space */ + return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1; +} + +/* Return the address of the nth address space in the current core */ +uint32 +sb_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + +/* Return the size of the nth address space in the current core */ +uint32 +sb_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + + +/* do buffered registers update */ +void +sb_commit(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + + origidx = sii->curidx; + ASSERT(GOODIDX(origidx)); + + INTR_OFF(sii, intr_val); + + /* switch over to chipcommon core if there is one, else use pci */ + if (sii->pub.ccrev != NOREV) { + chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(ccregs != NULL); + + /* do the buffer registers update */ + W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT); + W_REG(sii->osh, &ccregs->broadcastdata, 0x0); + } else + ASSERT(0); + + /* restore core index */ + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} + +void +sb_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); 
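+	/* Disable sequence: assert target (and, if present, initiator) reject,
+	 * wait for pending backplane transactions to drain, put the core in
+	 * reset with its clock forced on so the reset propagates, then leave
+	 * reset and reject asserted.
+	 */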
+ + /* if core is already in reset, just return */ + if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET) + return; + + /* if clocks are not enabled, put into reset and return */ + if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0) + goto disable; + + /* set target reject and spin until busy is clear (preserve core-specific bits) */ + OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000); + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY) + SI_ERROR(("%s: target state still busy\n", __FUNCTION__)); + + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) { + OR_SBREG(sii, &sb->sbimstate, SBIM_RJ); + dummy = R_SBREG(sii, &sb->sbimstate); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000); + } + + /* set reset and reject while enabling the clocks */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_REJ | SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(10); + + /* don't forget to clear the initiator reject bit */ + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) + AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ); + +disable: + /* leave reset and reject asserted */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET)); + OSL_DELAY(1); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +void +sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii; + sbconfig_t *sb; + volatile uint32 dummy; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* + * Must do the disable sequence first to work for arbitrary current core state. + */ + sb_core_disable(sih, (bits | resetbits)); + + /* + * Now do the initialization sequence. + */ + + /* set reset while enabling the clock and forcing them on throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) { + W_SBREG(sii, &sb->sbtmstatehigh, 0); + } + if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) { + AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO)); + } + + /* clear reset and allow it to propagate throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + /* leave clock enabled */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); +} + +/* + * Set the initiator timeout for the "master core". + * The master core is defined to be the core in control + * of the chip and so it issues accesses to non-memory + * locations (Because of dma *any* core can access memeory). 
+ * + * The routine uses the bus to decide who is the master: + * SI_BUS => mips + * JTAG_BUS => chipc + * PCI_BUS => pci or pcie + * PCMCIA_BUS => pcmcia + * SDIO_BUS => pcmcia + * + * This routine exists so callers can disable initiator + * timeouts so accesses to very slow devices like otp + * won't cause an abort. The routine allows arbitrary + * settings of the service and request timeouts, though. + * + * Returns the timeout state before changing it or -1 + * on error. + */ + +#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK) + +uint32 +sb_set_initiator_to(si_t *sih, uint32 to, uint idx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + uint32 tmp, ret = 0xffffffff; + sbconfig_t *sb; + + + if ((to & ~TO_MASK) != 0) + return ret; + + /* Figure out the master core */ + if (idx == BADIDX) { + switch (BUSTYPE(sii->pub.bustype)) { + case PCI_BUS: + idx = sii->pub.buscoreidx; + break; + case JTAG_BUS: + idx = SI_CC_IDX; + break; + case PCMCIA_BUS: +#ifdef BCMSDIO + case SDIO_BUS: +#endif + idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0); + break; + case SI_BUS: + idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0); + break; + default: + ASSERT(0); + } + if (idx == BADIDX) + return ret; + } + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + sb = REGS2SB(sb_setcoreidx(sih, idx)); + + tmp = R_SBREG(sii, &sb->sbimconfiglow); + ret = tmp & TO_MASK; + W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to); + + sb_commit(sih); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret; +} + +uint32 +sb_base(uint32 admatch) +{ + uint32 base; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + base = 0; + + if (type == 0) { + base = admatch & SBAM_BASE0_MASK; + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE1_MASK; + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE2_MASK; + } + + return (base); +} + +uint32 +sb_size(uint32 admatch) +{ + uint32 size; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + size = 0; + + if (type == 0) { + size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1); + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1); + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1); + } + + return (size); +} + +#if defined(BCMDBG_PHYDUMP) +/* print interesting sbconfig registers */ +void +sb_dumpregs(si_t *sih, struct bcmstrbuf *b) +{ + sbconfig_t *sb; + uint origidx, i, intr_val = 0; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + origidx = sii->curidx; + + INTR_OFF(sii, intr_val); + + for (i = 0; i < sii->numcores; i++) { + sb = REGS2SB(sb_setcoreidx(sih, i)); + + bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]); + + if (sii->pub.socirev > SONICS_2_2) + bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n", + sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0), + sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0)); + + bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x " + "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n", + R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh), 
+ R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate), + R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh)); + } + + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} +#endif diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c new file mode 100644 index 000000000000..6ec96d0fab2a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/siutils.c @@ -0,0 +1,3245 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: siutils.c 552034 2015-04-24 19:00:35Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef BCMPCIEDEV +#include +#endif /* BCMPCIEDEV */ +#include +#include +#include +#include +#ifdef BCMSDIO +#include +#include +#include +#include +#include +#include +#endif /* BCMSDIO */ +#include + +#ifdef BCM_SDRBL +#include +#endif /* BCM_SDRBL */ +#ifdef HNDGCI +#include +#endif /* HNDGCI */ + +#include "siutils_priv.h" + +/** + * A set of PMU registers is clocked in the ILP domain, which has an implication on register write + * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb + * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set. 
+ */ +#define PMUREGS_ILP_SENSITIVE(regoff) \ + ((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \ + (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \ + (regoff) == OFFSETOF(pmuregs_t, res_req_timer)) + +#define CHIPCREGS_ILP_SENSITIVE(regoff) \ + ((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \ + (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \ + (regoff) == OFFSETOF(chipcregs_t, res_req_timer)) + +/* local prototypes */ +static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz); +static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); +static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs); + + +static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff); + +#ifdef BCMLTECOEX +static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio, + uint8 gpioctl_mask, uint8 gpioctl_val); +#endif /* BCMLTECOEX */ + + +/* global variable to indicate reservation/release of gpio's */ +static uint32 si_gpioreservation = 0; + +/* global flag to prevent shared resources from being initialized multiple times in si_attach() */ +#ifdef SR_DEBUG +static const uint32 si_power_island_test_array[] = { + 0x0000, 0x0001, 0x0010, 0x0011, + 0x0100, 0x0101, 0x0110, 0x0111, + 0x1000, 0x1001, 0x1010, 0x1011, + 0x1100, 0x1101, 0x1110, 0x1111 +}; +#endif /* SR_DEBUG */ + +int do_4360_pcie2_war = 0; + +/* global kernel resource */ +static si_info_t ksii; +static si_cores_info_t ksii_cores_info; + +/** + * Allocate an si handle. This function may be called multiple times. + * + * devid - pci device id (used to determine chip#) + * osh - opaque OS handle + * regs - virtual address of initial core registers + * bustype - pci/pcmcia/sb/sdio/etc + * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this + * function set 'vars' to NULL, making dereferencing of this parameter undesired. + * varsz - pointer to int to return the size of the vars + */ +si_t * +si_attach(uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + si_info_t *sii; + si_cores_info_t *cores_info; + /* alloc si_info_t */ + if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + return (NULL); + } + + /* alloc si_cores_info_t */ + if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + MFREE(osh, sii, sizeof(si_info_t)); + return (NULL); + } + sii->cores_info = cores_info; + + if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { + MFREE(osh, sii, sizeof(si_info_t)); + MFREE(osh, cores_info, sizeof(si_cores_info_t)); + return (NULL); + } + sii->vars = vars ? *vars : NULL; + sii->varsz = varsz ? *varsz : 0; + + return (si_t *)sii; +} + + +static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */ + +/** generic kernel variant of si_attach() */ +si_t * +si_kattach(osl_t *osh) +{ + static bool ksii_attached = FALSE; + si_cores_info_t *cores_info; + + if (!ksii_attached) { + void *regs = NULL; + regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + cores_info = (si_cores_info_t *)&ksii_cores_info; + ksii.cores_info = cores_info; + + ASSERT(osh); + if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs, + SI_BUS, NULL, + osh != SI_OSH ? &(ksii.vars) : NULL, + osh != SI_OSH ? 
&(ksii.varsz) : NULL) == NULL) { + SI_ERROR(("si_kattach: si_doattach failed\n")); + REG_UNMAP(regs); + return NULL; + } + REG_UNMAP(regs); + + /* save ticks normalized to ms for si_watchdog_ms() */ + if (PMUCTL_ENAB(&ksii.pub)) { + /* based on 32KHz ILP clock */ + wd_msticks = 32; + } else { + wd_msticks = ALP_CLOCK / 1000; + } + + ksii_attached = TRUE; + SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n", + ksii.pub.ccrev, wd_msticks)); + } + + return &ksii.pub; +} + + +static bool +si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) +{ + /* need to set memseg flag for CF card first before any sb registers access */ + if (BUSTYPE(bustype) == PCMCIA_BUS) + sii->memseg = TRUE; + + +#if defined(BCMSDIO) + if (BUSTYPE(bustype) == SDIO_BUS) { + int err; + uint8 clkset; + + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (!err) { + uint8 clkval; + + /* If register supported, wait for ALPAvail and then force ALP */ + clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL); + if ((clkval & ~SBSDIO_AVBITS) == clkset) { + SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if (!SBSDIO_ALPAV(clkval)) { + SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n", + clkval)); + return FALSE; + } + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + clkset, &err); + OSL_DELAY(65); + } + } + + /* Also, disable the extra SDIO pull-ups */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + } + +#endif /* BCMSDIO && BCMDONGLEHOST */ + + return TRUE; +} + +static bool +si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs) +{ + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + bool pci, pcie, pcie_gen2 = FALSE; + uint i; + uint pciidx, pcieidx, pcirev, pcierev; + + /* first, enable backplane timeouts */ + if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) + ai_enable_backplane_timeouts(&sii->pub); + + cc = si_setcoreidx(&sii->pub, SI_CC_IDX); + ASSERT((uintptr)cc); + + /* get chipcommon rev */ + sii->pub.ccrev = (int)si_corerev(&sii->pub); + + /* get chipcommon chipstatus */ + if (sii->pub.ccrev >= 11) + sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); + + /* get chipcommon capabilites */ + sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); + /* get chipcommon extended capabilities */ + + if (sii->pub.ccrev >= 35) + sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext); + + /* get pmu rev and caps */ + if (sii->pub.cccaps & CC_CAP_PMU) { + if (AOB_ENAB(&sii->pub)) { + uint pmucoreidx; + pmuregs_t *pmu; + pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0); + pmu = si_setcoreidx(&sii->pub, pmucoreidx); + sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities); + si_setcoreidx(&sii->pub, SI_CC_IDX); + } else + sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); + + sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; + } + + SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", + sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, + sii->pub.pmucaps)); + + /* figure out bus/orignal core idx */ + sii->pub.buscoretype = NODEV_CORE_ID; + sii->pub.buscorerev = (uint)NOREV; + sii->pub.buscoreidx = BADIDX; + + pci = pcie = FALSE; + 
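+	/* the core scan below locates the host bus core (PCI/PCIE/PCMCIA/SDIO) and the caller's original core index */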
pcirev = pcierev = (uint)NOREV; + pciidx = pcieidx = BADIDX; + + for (i = 0; i < sii->numcores; i++) { + uint cid, crev; + + si_setcoreidx(&sii->pub, i); + cid = si_coreid(&sii->pub); + crev = si_corerev(&sii->pub); + + /* Display cores found */ + SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n", + i, cid, crev, cores_info->coresba[i], cores_info->regs[i])); + + if (BUSTYPE(bustype) == SI_BUS) { + /* now look at the chipstatus register to figure the pacakge */ + /* for SDIO but downloaded on PCIE dev */ + if (cid == PCIE2_CORE_ID) { + if (BCM43602_CHIP(sii->pub.chip) || + (CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) || + (CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) || + ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID || + CHIPID(sii->pub.chip) == BCM43454_CHIP_ID) && + CST4345_CHIPMODE_PCIE(sii->pub.chipst))) { + pcieidx = i; + pcierev = crev; + pcie = TRUE; + pcie_gen2 = TRUE; + } + } + + } + else if (BUSTYPE(bustype) == PCI_BUS) { + if (cid == PCI_CORE_ID) { + pciidx = i; + pcirev = crev; + pci = TRUE; + } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) { + pcieidx = i; + pcierev = crev; + pcie = TRUE; + if (cid == PCIE2_CORE_ID) + pcie_gen2 = TRUE; + } + } else if ((BUSTYPE(bustype) == PCMCIA_BUS) && + (cid == PCMCIA_CORE_ID)) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } +#ifdef BCMSDIO + else if (((BUSTYPE(bustype) == SDIO_BUS) || + (BUSTYPE(bustype) == SPI_BUS)) && + ((cid == PCMCIA_CORE_ID) || + (cid == SDIOD_CORE_ID))) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } +#endif /* BCMSDIO */ + + /* find the core idx before entering this func. */ + if ((savewin && (savewin == cores_info->coresba[i])) || + (regs == cores_info->regs[i])) + *origidx = i; + } + + +#if defined(PCIE_FULL_DONGLE) + if (pcie) { + if (pcie_gen2) + sii->pub.buscoretype = PCIE2_CORE_ID; + else + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; + } + BCM_REFERENCE(pci); + BCM_REFERENCE(pcirev); + BCM_REFERENCE(pciidx); +#else + if (pci) { + sii->pub.buscoretype = PCI_CORE_ID; + sii->pub.buscorerev = pcirev; + sii->pub.buscoreidx = pciidx; + } else if (pcie) { + if (pcie_gen2) + sii->pub.buscoretype = PCIE2_CORE_ID; + else + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; + } +#endif /* defined(PCIE_FULL_DONGLE) */ + + SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, + sii->pub.buscorerev)); + + if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) && + (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3)) + OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL); + + +#if defined(BCMSDIO) + /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was + * already running. + */ + if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) { + if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) || + si_setcore(&sii->pub, ARMCM3_CORE_ID, 0)) + si_core_disable(&sii->pub, 0); + } +#endif /* BCMSDIO && BCMDONGLEHOST */ + + /* return to the original core */ + si_setcoreidx(&sii->pub, *origidx); + + return TRUE; +} + + + + +uint16 +si_chipid(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + return (sii->chipnew) ? 
sii->chipnew : sih->chip; +} + +static void +si_chipid_fixup(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + ASSERT(sii->chipnew == 0); + switch (sih->chip) { + case BCM43567_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43570_CHIP_ID; /* chip class */ + break; + case BCM4358_CHIP_ID: + case BCM43566_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43569_CHIP_ID; /* chip class */ + break; + case BCM4356_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM4354_CHIP_ID; /* chip class */ + break; + default: + break; + } +} + +/** + * Allocate an si handle. This function may be called multiple times. + * + * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this + * function set 'vars' to NULL. + */ +static si_info_t * +si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + struct si_pub *sih = &sii->pub; + uint32 w, savewin; + chipcregs_t *cc; + char *pvars = NULL; + uint origidx; +#if !defined(_CFEZ_) || defined(CFG_WL) +#endif + + ASSERT(GOODREGS(regs)); + + savewin = 0; + + sih->buscoreidx = BADIDX; + + sii->curmap = regs; + sii->sdh = sdh; + sii->osh = osh; + sii->second_bar0win = ~0x0; + + + /* check to see if we are a si core mimic'ing a pci core */ + if ((bustype == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) { + SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " + "devid:0x%x\n", __FUNCTION__, devid)); + bustype = SI_BUS; + } + + /* find Chipcommon address */ + if (bustype == PCI_BUS) { + savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) + savewin = SI_ENUM_BASE; + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE); + if (!regs) + return NULL; + cc = (chipcregs_t *)regs; +#ifdef BCMSDIO + } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) { + cc = (chipcregs_t *)sii->curmap; +#endif + } else { + cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + } + + sih->bustype = bustype; + if (bustype != BUSTYPE(bustype)) { + SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", + bustype, BUSTYPE(bustype))); + return NULL; + } + + /* bus/core/clk setup for register access */ + if (!si_buscore_prep(sii, bustype, devid, sdh)) { + SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); + return NULL; + } + + /* ChipID recognition. + * We assume we can read chipid at offset 0 from the regs arg. + * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), + * some way of recognizing them needs to be added here. 
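+	 * The same register also carries the interconnect (SOCI) type, which selects the
+	 * SB/AI/NAI/UBUS scan routine used further down.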
+ */ + if (!cc) { + SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__)); + return NULL; + } + w = R_REG(osh, &cc->chipid); + if ((w & 0xfffff) == 148277) w -= 65532; + sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; + /* Might as wll fill in chip id rev & pkg */ + sih->chip = w & CID_ID_MASK; + sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; + sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; + + si_chipid_fixup(sih); + + if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && CHIPREV(sih->chiprev == 0) && + (sih->chippkg != BCM4329_289PIN_PKG_ID)) { + sih->chippkg = BCM4329_182PIN_PKG_ID; + } + sih->issim = IS_SIM(sih->chippkg); + + /* scan for cores */ + if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) { + SI_MSG(("Found chip type SB (0x%08x)\n", w)); + sb_scan(&sii->pub, regs, devid); + } else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) || + (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) { + if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) + SI_MSG(("Found chip type AI (0x%08x)\n", w)); + else + SI_MSG(("Found chip type NAI (0x%08x)\n", w)); + /* pass chipc address instead of original core base */ + ai_scan(&sii->pub, (void *)(uintptr)cc, devid); + } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) { + SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip)); + /* pass chipc address instead of original core base */ + ub_scan(&sii->pub, (void *)(uintptr)cc, devid); + } else { + SI_ERROR(("Found chip of unknown type (0x%08x)\n", w)); + return NULL; + } + /* no cores found, bail out */ + if (sii->numcores == 0) { + SI_ERROR(("si_doattach: could not find any cores\n")); + return NULL; + } + /* bus/core/clk setup */ + origidx = SI_CC_IDX; + if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) { + SI_ERROR(("si_doattach: si_buscore_setup failed\n")); + goto exit; + } + +#if !defined(_CFEZ_) || defined(CFG_WL) + if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK) + >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT | + CST4322_SPROM_PRESENT))) { + SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__)); + return NULL; + } + + /* assume current core is CC */ + if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID || + CHIPID(sih->chip) == BCM43235_CHIP_ID || + CHIPID(sih->chip) == BCM43234_CHIP_ID || + CHIPID(sih->chip) == BCM43238_CHIP_ID) && + (CHIPREV(sii->pub.chiprev) <= 2))) { + + if ((cc->chipstatus & CST43236_BP_CLK) != 0) { + uint clkdiv; + clkdiv = R_REG(osh, &cc->clkdiv); + /* otp_clk_div is even number, 120/14 < 9mhz */ + clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT); + W_REG(osh, &cc->clkdiv, clkdiv); + SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv)); + } + OSL_DELAY(10); + } + + if (bustype == PCI_BUS) { + + } +#endif +#ifdef BCM_SDRBL + /* 4360 rom bootloader in PCIE case, if the SDR is enabled, But preotection is + * not turned on, then we want to hold arm in reset. + * Bottomline: In sdrenable case, we allow arm to boot only when protection is + * turned on. + */ + if (CHIP_HOSTIF_PCIE(&(sii->pub))) { + uint32 sflags = si_arm_sflags(&(sii->pub)); + + /* If SDR is enabled but protection is not turned on + * then we want to force arm to WFI. 
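+		 * (the code below simply disables interrupts and parks the CPU in hnd_cpu_wait())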
+ */ + if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) { + disable_arm_irq(); + while (1) { + hnd_cpu_wait(sih); + } + } + } +#endif /* BCM_SDRBL */ + + pvars = NULL; + BCM_REFERENCE(pvars); + + + + if (sii->pub.ccrev >= 20) { + uint32 gpiopullup = 0, gpiopulldown = 0; + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + + /* 4314/43142 has pin muxing, don't clear gpio bits */ + if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) || + (CHIPID(sih->chip) == BCM43142_CHIP_ID)) { + gpiopullup |= 0x402e0; + gpiopulldown |= 0x20500; + } + + + W_REG(osh, &cc->gpiopullup, gpiopullup); + W_REG(osh, &cc->gpiopulldown, gpiopulldown); + si_setcoreidx(sih, origidx); + } + + + /* clear any previous epidiag-induced target abort */ + ASSERT(!si_taclear(sih, FALSE)); + + +#ifdef BOOTLOADER_CONSOLE_OUTPUT + /* Enable console prints */ + si_muxenab(sii, 3); +#endif + + return (sii); + +exit: + + return NULL; +} + +/** may be called with core in reset */ +void +si_detach(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx; + + + if (BUSTYPE(sih->bustype) == SI_BUS) + for (idx = 0; idx < SI_MAXCORES; idx++) + if (cores_info->regs[idx]) { + REG_UNMAP(cores_info->regs[idx]); + cores_info->regs[idx] = NULL; + } + + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) + if (cores_info != &ksii_cores_info) +#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ + MFREE(sii->osh, cores_info, sizeof(si_cores_info_t)); + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) + if (sii != &ksii) +#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ + MFREE(sii->osh, sii, sizeof(si_info_t)); +} + +void * +si_osh(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->osh; +} + +void +si_setosh(si_t *sih, osl_t *osh) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (sii->osh != NULL) { + SI_ERROR(("osh is already set....\n")); + ASSERT(!sii->osh); + } + sii->osh = osh; +} + +/** register driver interrupt disabling and restoring callback functions */ +void +si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + sii->intr_arg = intr_arg; + sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn; + sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn; + sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn; + /* save current core id. when this function called, the current core + * must be the core which provides driver functions(il, et, wl, etc.) 
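+	 * The saved id is later compared against the active core to decide whether the
+	 * registered callbacks actually need to be invoked.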
+ */ + sii->dev_coreid = cores_info->coreid[sii->curidx]; +} + +void +si_deregister_intr_callback(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intrsoff_fn = NULL; + sii->intrsrestore_fn = NULL; + sii->intrsenabled_fn = NULL; +} + +uint +si_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_intflag(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return R_REG(sii->osh, ((uint32 *)(uintptr) + (sii->oob_router + OOB_STATUSA))); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_flag(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_flag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_flag(sih); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag_alt(si_t *sih) +{ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_flag_alt(sih); + else { + ASSERT(0); + return 0; + } +} + +void +si_setint(si_t *sih, int siflag) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_setint(sih, siflag); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_setint(sih, siflag); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_setint(sih, siflag); + else + ASSERT(0); +} + +uint +si_coreid(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + return cores_info->coreid[sii->curidx]; +} + +uint +si_coreidx(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->curidx; +} + +void * +si_d11_switch_addrbase(si_t *sih, uint coreunit) +{ + return si_setcore(sih, D11_CORE_ID, coreunit); +} + +/** return the core-type instantiation # of the current core */ +uint +si_coreunit(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx; + uint coreid; + uint coreunit; + uint i; + + coreunit = 0; + + idx = sii->curidx; + + ASSERT(GOODREGS(sii->curmap)); + coreid = si_coreid(sih); + + /* count the cores of our type */ + for (i = 0; i < idx; i++) + if (cores_info->coreid[i] == coreid) + coreunit++; + + return (coreunit); +} + +uint +si_corevendor(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corevendor(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corevendor(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corevendor(sih); + else { + ASSERT(0); + return 0; + } +} + +bool +si_backplane64(si_t *sih) +{ + return ((sih->cccaps & CC_CAP_BKPLN64) != 0); +} + +uint +si_corerev(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corerev(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corerev(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corerev(sih); + else { + ASSERT(0); + return 0; + } +} + + +/* return index of coreid or BADIDX if not found */ +uint +si_findcoreidx(si_t *sih, uint coreid, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found; + uint i; + + + found = 0; + + for (i = 0; i < sii->numcores; i++) + if (cores_info->coreid[i] == coreid) { + if (found == coreunit) + return (i); + found++; + } + + return (BADIDX); +} + +/** return total coreunit 
of coreid or zero if not found */ +uint +si_numcoreunits(si_t *sih, uint coreid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found = 0; + uint i; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == coreid) { + found++; + } + } + + return found; +} + +/** return total D11 coreunits */ +uint +BCMRAMFN(si_numd11coreunits)(si_t *sih) +{ + uint found = 0; + + found = si_numcoreunits(sih, D11_CORE_ID); + +#if defined(WLRSDB) && defined(WLRSDB_DISABLED) + /* If RSDB functionality is compiled out, + * then ignore any D11 cores beyond the first + * Used in norsdb dongle build variants for rsdb chip. + */ + found = 1; +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ + + return found; +} + +/** return list of found cores */ +uint +si_corelist(si_t *sih, uint coreid[]) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); + return (sii->numcores); +} + +/** return current wrapper mapping */ +void * +si_wrapperregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + + return (sii->curwrap); +} + +/** return current register mapping */ +void * +si_coreregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + + return (sii->curmap); +} + +/** + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +si_setcore(si_t *sih, uint coreid, uint coreunit) +{ + uint idx; + + idx = si_findcoreidx(sih, coreid, coreunit); + if (!GOODIDX(idx)) + return (NULL); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, idx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_setcoreidx(sih, idx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, idx); + else { + ASSERT(0); + return NULL; + } +} + +void * +si_setcoreidx(si_t *sih, uint coreidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, coreidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_setcoreidx(sih, coreidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, coreidx); + else { + ASSERT(0); + return NULL; + } +} + +/** Turn off interrupt as required by sb_setcore, before switch core */ +void * +si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) +{ + void *cc; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (SI_FAST(sii)) { + /* Overloading the origidx variable to remember the coreid, + * this works because the core ids cannot be confused with + * core indices. 
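+		 * (core ids are large fixed constants, while core indices stay below SI_MAXCORES)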
+ */ + *origidx = coreid; + if (coreid == CC_CORE_ID) + return (void *)CCREGS_FAST(sii); + else if (coreid == sih->buscoretype) + return (void *)PCIEREGS(sii); + } + INTR_OFF(sii, *intr_val); + *origidx = sii->curidx; + cc = si_setcore(sih, coreid, 0); + ASSERT(cc != NULL); + + return cc; +} + +/* restore coreidx and restore interrupt */ +void +si_restore_core(si_t *sih, uint coreid, uint intr_val) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) + return; + + si_setcoreidx(sih, coreid); + INTR_RESTORE(sii, intr_val); +} + +int +si_numaddrspaces(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_numaddrspaces(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_numaddrspaces(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_numaddrspaces(sih); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspace(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspace(sih, asidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_addrspace(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspace(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspacesize(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspacesize(sih, asidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_addrspacesize(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspacesize(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +void +si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + /* Only supported for SOCI_AI */ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_coreaddrspaceX(sih, asidx, addr, size); + else + *size = 0; +} + +uint32 +si_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_cflags(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_core_cflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_cflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_cflags_wo(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_cflags_wo(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_cflags_wo(sih, mask, val); + else + ASSERT(0); +} + +uint32 +si_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_sflags(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_core_sflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_sflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +bool +si_iscoreup(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_iscoreup(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_iscoreup(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return 
ub_iscoreup(sih); + else { + ASSERT(0); + return FALSE; + } +} + +uint +si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + /* only for AI back plane chips */ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return (ai_wrap_reg(sih, offset, mask, val)); + return 0; +} +/* si_backplane_access is used to read full backplane address from host for PCIE FD + * it uses secondary bar-0 window which lies at an offset of 16K from primary bar-0 + * Provides support for read/write of 1/2/4 bytes of backplane address + * Can be used to read/write + * 1. core regs + * 2. Wrapper regs + * 3. memory + * 4. BT area + * For accessing any 32 bit backplane address, [31 : 12] of backplane should be given in "region" + * [11 : 0] should be the "regoff" + * for reading 4 bytes from reg 0x200 of d11 core use it like below + * : si_backplane_access(sih, 0x18001000, 0x200, 4, 0, TRUE) + */ +static int si_backplane_addr_sane(uint addr, uint size) +{ + int bcmerror = BCME_OK; + + /* For 2 byte access, address has to be 2 byte aligned */ + if (size == 2) { + if (addr & 0x1) { + bcmerror = BCME_ERROR; + } + } + /* For 4 byte access, address has to be 4 byte aligned */ + if (size == 4) { + if (addr & 0x3) { + bcmerror = BCME_ERROR; + } + } + + return bcmerror; +} +uint +si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read) +{ + uint32 *r = NULL; + uint32 region = 0; + si_info_t *sii = SI_INFO(sih); + + /* Valid only for pcie bus */ + if (BUSTYPE(sih->bustype) != PCI_BUS) { + SI_ERROR(("Valid only for pcie bus \n")); + return BCME_ERROR; + } + + /* Split adrr into region and address offset */ + region = (addr & (0xFFFFF << 12)); + addr = addr & 0xFFF; + + /* check for address and size sanity */ + if (si_backplane_addr_sane(addr, size) != BCME_OK) + return BCME_ERROR; + + /* Update window if required */ + if (sii->second_bar0win != region) { + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region); + sii->second_bar0win = region; + } + + /* Estimate effective address + * sii->curmap : bar-0 virtual address + * PCI_SECOND_BAR0_OFFSET : secondar bar-0 offset + * regoff : actual reg offset + */ + r = (uint32 *)((char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr); + + SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n", + (char*)sii->curmap, region, addr, r, read)); + + switch (size) { + case sizeof(uint8) : + if (read) + *val = R_REG(sii->osh, (uint8*)r); + else + W_REG(sii->osh, (uint8*)r, *val); + break; + case sizeof(uint16) : + if (read) + *val = R_REG(sii->osh, (uint16*)r); + else + W_REG(sii->osh, (uint16*)r, *val); + break; + case sizeof(uint32) : + if (read) + *val = R_REG(sii->osh, (uint32*)r); + else + W_REG(sii->osh, (uint32*)r, *val); + break; + + default : + SI_ERROR(("Invalid size %d \n", size)); + return (BCME_ERROR); + break; + } + + return (BCME_OK); +} +uint +si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg(sih, coreidx, regoff, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corereg(sih, coreidx, regoff, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corereg(sih, coreidx, regoff, mask, val); + else { + ASSERT(0); + return 0; + } +} + +/** ILP sensitive register access needs special treatment to avoid backplane stalls */ +bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff) +{ + if (idx == SI_CC_IDX) { + if 
(CHIPCREGS_ILP_SENSITIVE(regoff)) + return TRUE; + } else if (PMUREGS_ILP_SENSITIVE(regoff)) { + return TRUE; + } + + return FALSE; +} + +/** 'idx' should refer either to the chipcommon core or the PMU core */ +uint +si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val) +{ + int pmustatus_offset; + + /* prevent backplane stall on double write to 'ILP domain' registers in the PMU */ + if (mask != 0 && sih->pmurev >= 22 && + si_pmu_is_ilp_sensitive(idx, regoff)) { + pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) : + OFFSETOF(chipcregs_t, pmustatus); + + while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING) + {}; + } + + return si_corereg(sih, idx, regoff, mask, val); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. + */ +uint32 * +si_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg_addr(sih, coreidx, regoff); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corereg_addr(sih, coreidx, regoff); + else { + return 0; + } +} + +void +si_core_disable(si_t *sih, uint32 bits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_disable(sih, bits); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_disable(sih, bits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_disable(sih, bits); +} + +void +si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_reset(sih, bits, resetbits); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_reset(sih, bits, resetbits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_reset(sih, bits, resetbits); +} + +/** Run bist on current core. Caller needs to take care of core-specific bist hazards */ +int +si_corebist(si_t *sih) +{ + uint32 cflags; + int result = 0; + + /* Read core control flags */ + cflags = si_core_cflags(sih, 0, 0); + + /* Set bist & fgc */ + si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC)); + + /* Wait for bist done */ + SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); + + if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) + result = BCME_ERROR; + + /* Reset core control flags */ + si_core_cflags(sih, 0xffff, cflags); + + return result; +} + +static uint32 +factor6(uint32 x) +{ + switch (x) { + case CC_F6_2: return 2; + case CC_F6_3: return 3; + case CC_F6_4: return 4; + case CC_F6_5: return 5; + case CC_F6_6: return 6; + case CC_F6_7: return 7; + default: return 0; + } +} + +/* + * Divide the clock by the divisor with protection for + * a zero divisor. + */ +static uint32 +divide_clock(uint32 clock, uint32 div) +{ + return div ? 
clock / div : 0; +} + + +/** calculate the speed the SI would run at given a set of clockcontrol values */ +uint32 +si_clock_rate(uint32 pll_type, uint32 n, uint32 m) +{ + uint32 n1, n2, clock, m1, m2, m3, mc; + + n1 = n & CN_N1_MASK; + n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT; + + if (pll_type == PLL_TYPE6) { + if (m & CC_T6_MMASK) + return CC_T6_M1; + else + return CC_T6_M0; + } else if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + n1 = factor6(n1); + n2 += CC_F5_BIAS; + } else if (pll_type == PLL_TYPE2) { + n1 += CC_T2_BIAS; + n2 += CC_T2_BIAS; + ASSERT((n1 >= 2) && (n1 <= 7)); + ASSERT((n2 >= 5) && (n2 <= 23)); + } else if (pll_type == PLL_TYPE5) { + return (100000000); + } else + ASSERT(0); + /* PLL types 3 and 7 use BASE2 (25Mhz) */ + if ((pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE7)) { + clock = CC_CLOCK_BASE2 * n1 * n2; + } else + clock = CC_CLOCK_BASE1 * n1 * n2; + + if (clock == 0) + return 0; + + m1 = m & CC_M1_MASK; + m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT; + m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT; + mc = (m & CC_MC_MASK) >> CC_MC_SHIFT; + + if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + m1 = factor6(m1); + if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3)) + m2 += CC_F5_BIAS; + else + m2 = factor6(m2); + m3 = factor6(m3); + + switch (mc) { + case CC_MC_BYPASS: return (clock); + case CC_MC_M1: return divide_clock(clock, m1); + case CC_MC_M1M2: return divide_clock(clock, m1 * m2); + case CC_MC_M1M2M3: return divide_clock(clock, m1 * m2 * m3); + case CC_MC_M1M3: return divide_clock(clock, m1 * m3); + default: return (0); + } + } else { + ASSERT(pll_type == PLL_TYPE2); + + m1 += CC_T2_BIAS; + m2 += CC_T2M2_BIAS; + m3 += CC_T2_BIAS; + ASSERT((m1 >= 2) && (m1 <= 7)); + ASSERT((m2 >= 3) && (m2 <= 10)); + ASSERT((m3 >= 2) && (m3 <= 7)); + + if ((mc & CC_T2MC_M1BYP) == 0) + clock /= m1; + if ((mc & CC_T2MC_M2BYP) == 0) + clock /= m2; + if ((mc & CC_T2MC_M3BYP) == 0) + clock /= m3; + + return (clock); + } + return 0; +} + +/** + * Some chips could have multiple host interfaces, however only one will be active. + * For a given chip. Depending pkgopt and cc_chipst return the active host interface. + */ +uint +si_chip_hostif(si_t *sih) +{ + uint hosti = 0; + + switch (CHIPID(sih->chip)) { + + CASE_BCM43602_CHIP: + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4360_CHIP_ID: + /* chippkg bit-0 == 0 is PCIE only pkgs + * chippkg bit-0 == 1 has both PCIE and USB cores enabled + */ + if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB)) + hosti = CHIP_HOSTIF_USBMODE; + else + hosti = CHIP_HOSTIF_PCIEMODE; + + break; + + case BCM4335_CHIP_ID: + /* TBD: like in 4360, do we need to check pkg? 
*/ + if (CST4335_CHIPMODE_USB20D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4335_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4345_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4345_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4349_CHIP_GRPID: + if (CST4349_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4349_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + if (CST4350_CHIPMODE_USB20D(sih->chipst) || + CST4350_CHIPMODE_HSIC20D(sih->chipst) || + CST4350_CHIPMODE_USB30D(sih->chipst) || + CST4350_CHIPMODE_USB30D_WL(sih->chipst) || + CST4350_CHIPMODE_HSIC30D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4350_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4350_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + default: + break; + } + + return hosti; +} + + +/** set chip watchdog reset timer to fire in 'ticks' */ +void +si_watchdog(si_t *sih, uint ticks) +{ + uint nb, maxt; + + if (PMUCTL_ENAB(sih)) { + +#if !defined(_CFEZ_) || defined(CFG_WL) + if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) && + (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) { + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2); + si_setcore(sih, USB20D_CORE_ID, 0); + si_core_disable(sih, 1); + si_setcore(sih, CC_CORE_ID, 0); + } +#endif + + nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24); + /* The mips compiler uses the sllv instruction, + * so we specially handle the 32-bit case. 
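+		 * (shifting a 32-bit value by 32 is undefined in C, so 0xffffffff is used directly)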
+ */ + if (nb == 32) + maxt = 0xffffffff; + else + maxt = ((1 << nb) - 1); + + if (ticks == 1) + ticks = 2; + else if (ticks > maxt) + ticks = maxt; + + pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks); + } else { + maxt = (1 << 28) - 1; + if (ticks > maxt) + ticks = maxt; + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks); + } +} + +/** trigger watchdog reset after ms milliseconds */ +void +si_watchdog_ms(si_t *sih, uint32 ms) +{ + si_watchdog(sih, wd_msticks * ms); +} + +uint32 si_watchdog_msticks(void) +{ + return wd_msticks; +} + +bool +si_taclear(si_t *sih, bool details) +{ + return FALSE; +} + + + +/** return the slow clock source - LPO, XTAL, or PCI */ +static uint +si_slowclk_src(si_info_t *sii) +{ + chipcregs_t *cc; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + if (sii->pub.ccrev < 6) { + if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) & + PCI_CFG_GPIO_SCS)) + return (SCC_SS_PCI); + else + return (SCC_SS_XTAL); + } else if (sii->pub.ccrev < 10) { + cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); + ASSERT(cc); + return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); + } else /* Insta-clock */ + return (SCC_SS_XTAL); +} + +/** return the ILP (slowclock) min or max frequency */ +static uint +si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) +{ + uint32 slowclk; + uint div; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + /* shouldn't be here unless we've established the chip has dynamic clk control */ + ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); + + slowclk = si_slowclk_src(sii); + if (sii->pub.ccrev < 6) { + if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); + else + return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); + } else if (sii->pub.ccrev < 10) { + div = 4 * + (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); + if (slowclk == SCC_SS_LPO) + return (max_freq ? LPOMAXFREQ : LPOMINFREQ); + else if (slowclk == SCC_SS_XTAL) + return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div)); + else if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div)); + else + ASSERT(0); + } else { + /* Chipc rev 10 is InstaClock */ + div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT; + div = 4 * (div + 1); + return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div)); + } + return (0); +} + +static void +si_clkctl_setdelay(si_info_t *sii, void *chipcregs) +{ + chipcregs_t *cc = (chipcregs_t *)chipcregs; + uint slowmaxfreq, pll_delay, slowclk; + uint pll_on_delay, fref_sel_delay; + + pll_delay = PLL_DELAY; + + /* If the slow clock is not sourced by the xtal then add the xtal_on_delay + * since the xtal will also be powered down by dynamic clk control logic. + */ + + slowclk = si_slowclk_src(sii); + if (slowclk != SCC_SS_XTAL) + pll_delay += XTAL_ON_DELAY; + + /* Starting with 4318 it is ILP that is used for the delays */ + slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? 
FALSE : TRUE, cc); + + pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; + fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; + + W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay); + W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay); +} + +/** initialize power control delay registers */ +void +si_clkctl_init(si_t *sih) +{ + si_info_t *sii; + uint origidx = 0; + chipcregs_t *cc; + bool fast; + + if (!CCCTL_ENAB(sih)) + return; + + sii = SI_INFO(sih); + fast = SI_FAST(sii); + if (!fast) { + origidx = sii->curidx; + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) + return; + } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) + return; + ASSERT(cc != NULL); + + /* set all Instaclk chip ILP to 1 MHz */ + if (sih->ccrev >= 10) + SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, + (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); + + si_clkctl_setdelay(sii, (void *)(uintptr)cc); + + OSL_DELAY(20000); + + if (!fast) + si_setcoreidx(sih, origidx); +} + + +/** change logical "focus" to the gpio core for optimized access */ +void * +si_gpiosetcore(si_t *sih) +{ + return (si_setcoreidx(sih, SI_CC_IDX)); +} + +/** + * mask & set gpiocontrol bits. + * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin. + * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated + * to some chip-specific purpose. + */ +uint32 +si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiocontrol); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** mask&set gpio output enable bits */ +uint32 +si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioouten); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** mask&set gpio output bits */ +uint32 +si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioout); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** reserve one gpio */ +uint32 +si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + /* only cores on SI_BUS share GPIO's and only applcation users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return 0xffffffff; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return 0xffffffff; + } + + /* already reserved */ + if (si_gpioreservation & gpio_bitmask) + return 0xffffffff; + /* set reservation */ + si_gpioreservation |= gpio_bitmask; + + return si_gpioreservation; +} + +/** + * release one gpio. + * + * releasing the gpio doesn't change the current value on the GPIO last write value + * persists till someone overwrites it. + */ +uint32 +si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + /* only cores on SI_BUS share GPIO's and only applcation users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return 0xffffffff; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return 0xffffffff; + } + + /* already released */ + if (!(si_gpioreservation & gpio_bitmask)) + return 0xffffffff; + + /* clear reservation */ + si_gpioreservation &= ~gpio_bitmask; + + return si_gpioreservation; +} + +/* return the current gpioin register value */ +uint32 +si_gpioin(si_t *sih) +{ + uint regoff; + + regoff = OFFSETOF(chipcregs_t, gpioin); + return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0)); +} + +/* mask&set gpio interrupt polarity bits */ +uint32 +si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointpolarity); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio interrupt mask bits */ +uint32 +si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointmask); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* assign the gpio to an led */ +uint32 +si_gpioled(si_t *sih, uint32 mask, uint32 val) +{ + if (sih->ccrev < 16) + return 0xffffffff; + + /* gpio led powersave reg */ + return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val)); +} + +/* mask&set gpio timer val */ +uint32 +si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval) +{ + if (sih->ccrev < 16) + return 0xffffffff; + + return (si_corereg(sih, SI_CC_IDX, + OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval)); +} + +uint32 +si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val) +{ + uint offs; + + if (sih->ccrev < 20) + return 0xffffffff; + + offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup)); + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +uint32 +si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) +{ + uint offs; + + if (sih->ccrev < 11) + return 0xffffffff; + + if (regtype == GPIO_REGEVT) + offs = OFFSETOF(chipcregs_t, gpioevent); + else if (regtype == GPIO_REGEVT_INTMSK) + offs = OFFSETOF(chipcregs_t, gpioeventintmask); + else if (regtype == GPIO_REGEVT_INTPOL) + offs = OFFSETOF(chipcregs_t, gpioeventintpolarity); + else + return 0xffffffff; + + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +uint32 +si_gpio_int_enable(si_t *sih, bool enable) +{ + uint offs; + + if (sih->ccrev < 11) + return 0xffffffff; + + offs = OFFSETOF(chipcregs_t, intmask); + return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0))); +} + +/** Return the size of the specified SYSMEM bank */ +static uint +sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx, uint8 mem_type) +{ + uint banksize, bankinfo; + uint bankidx = idx | (mem_type << SYSMEM_BANKIDX_MEMTYPE_SHIFT); + + ASSERT(mem_type <= SYSMEM_MEMTYPE_DEVRAM); + + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1); + return banksize; +} + +/** Return the RAM size of the SYSMEM core */ +uint32 +si_sysmem_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + + sysmemregs_t *regs; + bool wasup; + uint32 coreinfo; + uint memsize = 0; + uint8 i; + uint nb; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SYSMEM core */ + if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) + memsize += sysmem_banksize(sii, regs, i, SYSMEM_MEMTYPE_RAM); + + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +/** Return the size of the specified SOCRAM bank */ +static uint +socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type) +{ + uint banksize, bankinfo; + uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + + ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM); + + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + banksize = 
SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1); + return banksize; +} + +void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 16) { + W_REG(sii->osh, ®s->bankidx, bankidx); + W_REG(sii->osh, ®s->bankpda, bankpda); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); +} + +void +si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + if (!set) + *enable = *protect = *remap = 0; + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 10) { + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (set) { + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK; + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK; + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK; + if (*enable) { + bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT); + if (*protect) + bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT); + if ((corerev >= 16) && *remap) + bankinfo |= + (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT); + } + W_REG(sii->osh, ®s->bankinfo, bankinfo); + } + else if (i == 0) { + if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) { + *enable = 1; + if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK) + *protect = 1; + if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) + *remap = 1; + } + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); +} + +bool +si_socdevram_remap_isenb(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup, remap = FALSE; + uint corerev; + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 16) { + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = 
((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) { + remap = TRUE; + break; + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + return remap; +} + +bool +si_socdevram_pkg(si_t *sih) +{ + if (si_socdevram_size(sih) > 0) + return TRUE; + else + return FALSE; +} + +uint32 +si_socdevram_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + uint32 memsize = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 10) { + uint32 extcinfo; + uint8 nb; + uint8 i; + + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); + for (i = 0; i < nb; i++) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +uint32 +si_socdevram_remap_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + uint32 memsize = 0, banksz; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 16) { + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); + + /* + * FIX: A0 Issue: Max addressable is 512KB, instead 640KB + * Only four banks are accessible to ARM + */ + if ((corerev == 16) && (nb == 5)) + nb = 4; + + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) { + banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); + memsize += banksz; + } else { + /* Account only consecutive banks for now */ + break; + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +/** Return the RAM size of the SOCRAM core */ +uint32 +si_socram_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 coreinfo; + 
uint memsize = 0; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + corerev = si_corerev(sih); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + /* Calculate size from coreinfo based on rev */ + if (corerev == 0) + memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK)); + else if (corerev < 3) { + memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK)); + memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } else if ((corerev <= 7) || (corerev == 12)) { + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + uint bsz = (coreinfo & SRCI_SRBSZ_MASK); + uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT; + if (lss != 0) + nb --; + memsize = nb * (1 << (bsz + SR_BSZ_BASE)); + if (lss != 0) + memsize += (1 << ((lss - 1) + SR_BSZ_BASE)); + } else { + uint8 i; + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + + +/** Return the TCM-RAM size of the ARMCR4 core. */ +uint32 +si_tcm_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + uint8 *regs; + bool wasup; + uint32 corecap; + uint memsize = 0; + uint32 nab = 0; + uint32 nbb = 0; + uint32 totb = 0; + uint32 bxinfo = 0; + uint32 idx = 0; + uint32 *arm_cap_reg; + uint32 *arm_bidx; + uint32 *arm_binfo; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to CR4 core */ + if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0))) + goto done; + + /* Get info for determining size. 
If in reset, come out of reset, + * but remain in halt + */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT); + + arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP); + corecap = R_REG(sii->osh, arm_cap_reg); + + nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT; + nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT; + totb = nab + nbb; + + arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX); + arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO); + for (idx = 0; idx < totb; idx++) { + W_REG(sii->osh, arm_bidx, idx); + + bxinfo = R_REG(sii->osh, arm_binfo); + memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT; + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +bool +si_has_flops(si_t *sih) +{ + uint origidx, cr4_rev; + + /* Find out CR4 core revision */ + origidx = si_coreidx(sih); + if (si_setcore(sih, ARMCR4_CORE_ID, 0)) { + cr4_rev = si_corerev(sih); + si_setcoreidx(sih, origidx); + + if (cr4_rev == 1 || cr4_rev >= 3) + return TRUE; + } + return FALSE; +} + +uint32 +si_socram_srmem_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 coreinfo; + uint memsize = 0; + + if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) { + return (32 * 1024); + } + + if (CHIPID(sih->chip) == BCM43430_CHIP_ID) { + return (64 * 1024); + } + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + corerev = si_corerev(sih); + coreinfo = R_REG(sii->osh, &regs->coreinfo); + + /* Calculate size from coreinfo based on rev */ + if (corerev >= 16) { + uint8 i; + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) { + W_REG(sii->osh, &regs->bankidx, i); + if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + + +#if !defined(_CFEZ_) || defined(CFG_WL) +void +si_btcgpiowar(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + chipcregs_t *cc; + + /* Make sure that there is ChipCommon core present && + * UART_TX is strapped to 1 + */ + if (!(sih->cccaps & CC_CAP_UARTGPIO)) + return; + + /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */ + INTR_OFF(sii, intr_val); + + origidx = si_coreidx(sih); + + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + + W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04); + + /* restore the original index */ + si_setcoreidx(sih, origidx); + + INTR_RESTORE(sii, intr_val); +} + +void +si_chipcontrl_btshd0_4331(si_t *sih, bool on) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc; + uint origidx; + uint32 val; + uint intr_val = 0; + + INTR_OFF(sii, intr_val); + + 
origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + val = R_REG(sii->osh, &cc->chipcontrol); + + /* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */ + if (on) { + /* Enable bt_shd0 on gpio4: */ + val |= (CCTRL4331_BT_SHD0_ON_GPIO4); + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4); + W_REG(sii->osh, &cc->chipcontrol, val); + } + + /* restore the original index */ + si_setcoreidx(sih, origidx); + + INTR_RESTORE(sii, intr_val); +} + +void +si_chipcontrl_restore(si_t *sih, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + W_REG(sii->osh, &cc->chipcontrol, val); + si_setcoreidx(sih, origidx); +} + +uint32 +si_chipcontrl_read(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return -1; + } + val = R_REG(sii->osh, &cc->chipcontrol); + si_setcoreidx(sih, origidx); + return val; +} + +void +si_chipcontrl_epa4331(si_t *sih, bool on) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->chipcontrol); + + if (on) { + if (sih->chippkg == 9 || sih->chippkg == 0xb) { + val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); + /* Ext PA Controls for 4331 12x9 Package */ + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + /* Ext PA Controls for 4331 12x12 Package */ + if (CHIPREV(sih->chiprev) > 0) { + W_REG(sii->osh, &cc->chipcontrol, val | + (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2)); + } else { + W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN)); + } + } + } else { + val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5); + W_REG(sii->osh, &cc->chipcontrol, val); + } + + si_setcoreidx(sih, origidx); +} + +/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. 
*/ +void +si_chipcontrl_srom4360(si_t *sih, bool on) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->chipcontrol); + + if (on) { + val &= ~(CCTRL4360_SECI_MODE | + CCTRL4360_BTSWCTRL_MODE | + CCTRL4360_EXTRA_FEMCTRL_MODE | + CCTRL4360_BT_LGCY_MODE | + CCTRL4360_CORE2FEMCTRL4_ON); + + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + } + + si_setcoreidx(sih, origidx); +} + +void +si_clk_srom4365(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->clkdiv2); + W_REG(sii->osh, &cc->clkdiv2, ((val&~0xf) | 0x4)); + + si_setcoreidx(sih, origidx); +} + +void +si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih) +{ +#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) + ai_d11rsdb_core1_alt_reg_clk_dis(sih); +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ +} + +void +si_d11rsdb_core1_alt_reg_clk_en(si_t *sih) +{ +#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) + ai_d11rsdb_core1_alt_reg_clk_en(sih); +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ +} + +void +si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) +{ + si_info_t *sii; + chipcregs_t *cc; + uint origidx; + uint32 val; + bool sel_chip; + + sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) || + (CHIPID(sih->chip) == BCM43431_CHIP_ID); + sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb)); + + if (!sel_chip) + return; + + sii = SI_INFO(sih); + origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + val = R_REG(sii->osh, &cc->chipcontrol); + + if (enter_wowl) { + val |= CCTRL4331_EXTPA_EN; + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); + W_REG(sii->osh, &cc->chipcontrol, val); + } + si_setcoreidx(sih, origidx); +} +#endif + +uint +si_pll_reset(si_t *sih) +{ + uint err = 0; + + return (err); +} + +/** Enable BT-COEX & Ex-PA for 4313 */ +void +si_epa_4313war(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + /* EPA Fix */ + W_REG(sii->osh, &cc->gpiocontrol, + R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK); + + si_setcoreidx(sih, origidx); +} + +void +si_clk_pmu_htavail_set(si_t *sih, bool set_clear) +{ +} + +void +si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag) +{ +} + +/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */ +void +si_pmu_synth_pwrsw_4313_war(si_t *sih) +{ +} + +/** WL/BT control for 4313 btcombo boards >= P250 */ +void +si_btcombo_p250_4313_war(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + W_REG(sii->osh, &cc->gpiocontrol, + R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK); + + W_REG(sii->osh, &cc->gpioouten, + R_REG(sii->osh, 
&cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK); + + si_setcoreidx(sih, origidx); +} +void +si_btc_enable_chipcontrol(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + /* BT fix */ + W_REG(sii->osh, &cc->chipcontrol, + R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK); + + si_setcoreidx(sih, origidx); +} +void +si_btcombo_43228_war(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK); + W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK); + + si_setcoreidx(sih, origidx); +} + +/** check if the device is removed */ +bool +si_deviceremoved(si_t *sih) +{ + uint32 w; + + switch (BUSTYPE(sih->bustype)) { + case PCI_BUS: + ASSERT(SI_INFO(sih)->osh != NULL); + w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32)); + if ((w & 0xFFFF) != VENDOR_BROADCOM) + return TRUE; + break; + } + return FALSE; +} + +bool +si_is_sprom_available(si_t *sih) +{ + if (sih->ccrev >= 31) { + si_info_t *sii; + uint origidx; + chipcregs_t *cc; + uint32 sromctrl; + + if ((sih->cccaps & CC_CAP_SROM) == 0) + return FALSE; + + sii = SI_INFO(sih); + origidx = sii->curidx; + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT(cc); + sromctrl = R_REG(sii->osh, &cc->sromcontrol); + si_setcoreidx(sih, origidx); + return (sromctrl & SRC_PRESENT); + } + + switch (CHIPID(sih->chip)) { + case BCM4312_CHIP_ID: + return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL); + case BCM4325_CHIP_ID: + return (sih->chipst & CST4325_SPROM_SEL) != 0; + case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID: + case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID: + case BCM4342_CHIP_ID: { + uint32 spromotp; + spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >> + CST4322_SPROM_OTP_SEL_SHIFT; + return (spromotp & CST4322_SPROM_PRESENT) != 0; + } + case BCM4329_CHIP_ID: + return (sih->chipst & CST4329_SPROM_SEL) != 0; + case BCM4315_CHIP_ID: + return (sih->chipst & CST4315_SPROM_SEL) != 0; + case BCM4319_CHIP_ID: + return (sih->chipst & CST4319_SPROM_SEL) != 0; + case BCM4336_CHIP_ID: + case BCM43362_CHIP_ID: + return (sih->chipst & CST4336_SPROM_PRESENT) != 0; + case BCM4330_CHIP_ID: + return (sih->chipst & CST4330_SPROM_PRESENT) != 0; + case BCM4313_CHIP_ID: + return (sih->chipst & CST4313_SPROM_PRESENT) != 0; + case BCM4331_CHIP_ID: + case BCM43431_CHIP_ID: + return (sih->chipst & CST4331_SPROM_PRESENT) != 0; + case BCM43239_CHIP_ID: + return ((sih->chipst & CST43239_SPROM_MASK) && + !(sih->chipst & CST43239_SFLASH_MASK)); + case BCM4324_CHIP_ID: + case BCM43242_CHIP_ID: + return ((sih->chipst & CST4324_SPROM_MASK) && + !(sih->chipst & CST4324_SFLASH_MASK)); + case BCM4335_CHIP_ID: + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + return ((sih->chipst & CST4335_SPROM_MASK) && + !(sih->chipst & CST4335_SFLASH_MASK)); + case BCM4349_CHIP_GRPID: + return (sih->chipst & CST4349_SPROM_PRESENT) != 0; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + 
return (sih->chipst & CST4350_SPROM_PRESENT) != 0; + CASE_BCM43602_CHIP: + return (sih->chipst & CST43602_SPROM_PRESENT) != 0; + case BCM43131_CHIP_ID: + case BCM43217_CHIP_ID: + case BCM43227_CHIP_ID: + case BCM43228_CHIP_ID: + case BCM43428_CHIP_ID: + return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT; + default: + return TRUE; + } +} + + +uint32 si_get_sromctl(si_t *sih) +{ + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 sromctl; + osl_t *osh = si_osh(sih); + + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT((uintptr)cc); + + sromctl = R_REG(osh, &cc->sromcontrol); + + /* return to the original core */ + si_setcoreidx(sih, origidx); + return sromctl; +} + +int si_set_sromctl(si_t *sih, uint32 value) +{ + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + osl_t *osh = si_osh(sih); + int ret = BCME_OK; + + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT((uintptr)cc); + + /* get chipcommon rev */ + if (si_corerev(sih) >= 32) { + /* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and + * SpromPresent is 1. + */ + if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 && + (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) { + W_REG(osh, &cc->sromcontrol, value); + } else { + ret = BCME_NODEVICE; + } + } else { + ret = BCME_UNSUPPORTED; + } + + /* return to the original core */ + si_setcoreidx(sih, origidx); + + return ret; +} + +uint +si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val) +{ + uint origidx, intr_val = 0; + uint ret_val; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + origidx = si_coreidx(sih); + + INTR_OFF(sii, intr_val); + si_setcoreidx(sih, coreidx); + + ret_val = si_wrapperreg(sih, offset, mask, val); + + /* return to the original core */ + si_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret_val; +} + + +/* Clean up the timer from the host when the ARM has been halted + * without a chance to clean up its resources. + * If left uncleared, an interrupt from a software timer can still + * request the HT clock while the ARM is halted. 
+ */ +uint32 +si_pmu_res_req_timer_clr(si_t *sih) +{ + uint32 mask; + + mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ; + if (CHIPID(sih->chip) != BCM4328_CHIP_ID) + mask <<= 14; + /* clear mask bits */ + pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0); + /* readback to ensure write completes */ + return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0); +} + +/** turn on/off rfldo */ +void +si_pmu_rfldo(si_t *sih, bool on) +{ +} + + +#ifdef SURVIVE_PERST_ENAB +static uint32 +si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + if (!PCIE(sii)) + return (0); + + return pcie_survive_perst(sii->pch, mask, val); +} + +static void +si_watchdog_reset(si_t *sih) +{ + uint32 i; + + /* issue a watchdog reset */ + pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2); + /* do busy wait for 20ms */ + for (i = 0; i < 2000; i++) { + OSL_DELAY(10); + } +} +#endif /* SURVIVE_PERST_ENAB */ + +void +si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val) +{ +#ifdef SURVIVE_PERST_ENAB + if (BUSTYPE(sih->bustype) != PCI_BUS) + return; + + if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) || + (CHIPREV(sih->chiprev) >= 4)) + return; + + if (reset) { + si_info_t *sii = SI_INFO(sih); + uint32 bar0win, bar0win_after; + + /* save the bar0win */ + bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + + si_watchdog_reset(sih); + + bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (bar0win_after != bar0win) { + SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n", + __FUNCTION__, bar0win, bar0win_after)); + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win); + } + } + if (sperst_mask) { + /* enable survive perst */ + si_pcie_survive_perst(sih, sperst_mask, sperst_val); + } +#endif /* SURVIVE_PERST_ENAB */ +} + +void +si_pcie_ltr_war(si_t *sih) +{ +} + +void +si_pcie_hw_LTR_war(si_t *sih) +{ +} + +void +si_pciedev_reg_pm_clk_period(si_t *sih) +{ +} + +void +si_pciedev_crwlpciegen2(si_t *sih) +{ +} + +void +si_pcie_prep_D3(si_t *sih, bool enter_D3) +{ +} + + + +void +si_pll_sr_reinit(si_t *sih) +{ +} + +void +si_pll_closeloop(si_t *sih) +{ +#if defined(SAVERESTORE) + uint32 data; + + /* disable PLL open loop operation */ + switch (CHIPID(sih->chip)) { +#ifdef SAVERESTORE + case BCM43430_CHIP_ID: + if (SR_ENAB() && sr_isenab(sih)) { + /* read back the pll openloop state */ + data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0); + /* current mode is openloop (possible POR) */ + if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) { + si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, + PMU1_PLLCTL8_OPENLOOP_MASK, 0); + si_pmu_pllupd(sih); + } + } + break; +#endif /* SAVERESTORE */ + default: + /* any unsupported chip bail */ + return; + } +#endif +} diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h new file mode 100644 index 000000000000..090d7913631c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/siutils_priv.h @@ -0,0 +1,290 @@ +/* + * Include file private to the SOC Interconnect support files. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: siutils_priv.h 520760 2014-12-15 00:54:16Z $ + */ + +#ifndef _siutils_priv_h_ +#define _siutils_priv_h_ + +#define SI_ERROR(args) + +#define SI_MSG(args) + +#ifdef BCMDBG_SI +#define SI_VMSG(args) printf args +#else +#define SI_VMSG(args) +#endif + +#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) + +typedef uint32 (*si_intrsoff_t)(void *intr_arg); +typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); +typedef bool (*si_intrsenabled_t)(void *intr_arg); + + +#define SI_GPIO_MAX 16 + +typedef struct gci_gpio_item { + void *arg; + uint8 gci_gpio; + uint8 status; + gci_gpio_handler_t handler; + struct gci_gpio_item *next; +} gci_gpio_item_t; + + +typedef struct si_cores_info { + void *regs[SI_MAXCORES]; /* other regs va */ + + uint coreid[SI_MAXCORES]; /* id of each core */ + uint32 coresba[SI_MAXCORES]; /* backplane address of each core */ + void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */ + uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */ + uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */ + uint32 coresba2_size[SI_MAXCORES]; /* second address space size */ + + void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + + void *wrappers2[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba2[SI_MAXCORES]; /* address of controlling wrapper */ + + uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ + uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */ +} si_cores_info_t; + +/* misc si info needed by some of the routines */ +typedef struct si_info { + struct si_pub pub; /* back plane public state (must be first field) */ + + void *osh; /* osl os handle */ + void *sdh; /* bcmsdh handle */ + + uint dev_coreid; /* the core provides driver functions */ + void *intr_arg; /* interrupt callback function arg */ + si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */ + si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */ + si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */ + + void *pch; /* PCI/E core handle */ + + bool memseg; /* flag to toggle MEM_SEG register */ + + char *vars; + uint varsz; + + void *curmap; /* current regs va */ + + uint curidx; /* current core index */ + uint numcores; /* # discovered 
cores */ + + void *curwrap; /* current wrapper va */ + + uint32 oob_router; /* oob router registers for axi */ + + void *cores_info; + gci_gpio_item_t *gci_gpio_head; /* gci gpio interrupts head */ + uint chipnew; /* new chip number */ + uint second_bar0win; /* Backplane region */ + uint num_br; /* # discovered bridges */ + uint32 br_wrapba[SI_MAXBR]; /* address of bridge controlling wrapper */ + uint32 xtalfreq; +} si_info_t; + + +#define SI_INFO(sih) ((si_info_t *)(uintptr)sih) + +#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ + ISALIGNED((x), SI_CORE_SIZE)) +#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE)) +#define BADCOREADDR 0 +#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES) +#define NOREV -1 /* Invalid rev */ + +#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCI_CORE_ID)) + +#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCIE_CORE_ID)) + +#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCIE2_CORE_ID)) + +#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si)) + +#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE)) + +/* Newer chips can access PCI/PCIE and CC core without requiring to change + * PCI BAR0 WIN + */ +#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13))) + +#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) +#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) + +/* + * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/ + * after core switching to avoid invalid register accesss inside ISR. 
+ */ +#define INTR_OFF(si, intr_val) \ + if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \ + intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); } +#define INTR_RESTORE(si, intr_val) \ + if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \ + (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); } + +/* dynamic clock control defines */ +#define LPOMINFREQ 25000 /* low power oscillator min */ +#define LPOMAXFREQ 43000 /* low power oscillator max */ +#define XTALMINFREQ 19800000 /* 20 MHz - 1% */ +#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */ +#define PCIMINFREQ 25000000 /* 25 MHz */ +#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */ + +#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */ +#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */ + +/* Force fast clock for 4360b0 */ +#define PCI_FORCEHT(si) \ + (((PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4311_CHIP_ID) && \ + ((CHIPREV(si->pub.chiprev) <= 1))) || \ + ((PCI(si) || PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4321_CHIP_ID)) || \ + (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4716_CHIP_ID)) || \ + (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4748_CHIP_ID))) + +/* GPIO Based LED powersave defines */ +#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */ +#define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */ + +#ifndef DEFAULT_GPIOTIMERVAL +#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME) +#endif + +/* Silicon Backplane externs */ +extern void sb_scan(si_t *sih, void *regs, uint devid); +extern uint sb_coreid(si_t *sih); +extern uint sb_intflag(si_t *sih); +extern uint sb_flag(si_t *sih); +extern void sb_setint(si_t *sih, int siflag); +extern uint sb_corevendor(si_t *sih); +extern uint sb_corerev(si_t *sih); +extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern bool sb_iscoreup(si_t *sih); +extern void *sb_setcoreidx(si_t *sih, uint coreidx); +extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_commit(si_t *sih); +extern uint32 sb_base(uint32 admatch); +extern uint32 sb_size(uint32 admatch); +extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void sb_core_disable(si_t *sih, uint32 bits); +extern uint32 sb_addrspace(si_t *sih, uint asidx); +extern uint32 sb_addrspacesize(si_t *sih, uint asidx); +extern int sb_numaddrspaces(si_t *sih); + +extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx); + +extern bool sb_taclear(si_t *sih, bool details); + +#if defined(BCMDBG_PHYDUMP) +extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool sb_pci_pmecap(si_t *sih); +struct osl_info; +extern bool sb_pci_fastpmecap(struct osl_info *osh); +extern bool sb_pci_pmeclr(si_t *sih); +extern void sb_pci_pmeen(si_t *sih); +extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset); + +/* AMBA Interconnect exported externs */ +extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *ai_kattach(osl_t *osh); +extern void ai_scan(si_t *sih, void *regs, uint devid); + +extern uint ai_flag(si_t *sih); +extern uint ai_flag_alt(si_t *sih); +extern void ai_setint(si_t *sih, int siflag); +extern uint ai_coreidx(si_t *sih); +extern 
uint ai_corevendor(si_t *sih); +extern uint ai_corerev(si_t *sih); +extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern bool ai_iscoreup(si_t *sih); +extern void *ai_setcoreidx(si_t *sih, uint coreidx); +extern void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx); +extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits, + uint32 resetbits, void *p, void *s); +extern void ai_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern void ai_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); + +extern void ai_core_disable(si_t *sih, uint32 bits); +extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits, + aidmp_t *pmacai, aidmp_t *smacai); +extern int ai_numaddrspaces(si_t *sih); +extern uint32 ai_addrspace(si_t *sih, uint asidx); +extern uint32 ai_addrspacesize(si_t *sih, uint asidx); +extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern void ai_enable_backplane_timeouts(si_t *sih); +extern void ai_clear_backplane_to(si_t *sih); + +#if defined(BCMDBG_PHYDUMP) +extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif + + +#define ub_scan(a, b, c) do {} while (0) +#define ub_flag(a) (0) +#define ub_setint(a, b) do {} while (0) +#define ub_coreidx(a) (0) +#define ub_corevendor(a) (0) +#define ub_corerev(a) (0) +#define ub_iscoreup(a) (0) +#define ub_setcoreidx(a, b) (0) +#define ub_core_cflags(a, b, c) (0) +#define ub_core_cflags_wo(a, b, c) do {} while (0) +#define ub_core_sflags(a, b, c) (0) +#define ub_corereg(a, b, c, d, e) (0) +#define ub_core_reset(a, b, c) do {} while (0) +#define ub_core_disable(a, b) do {} while (0) +#define ub_numaddrspaces(a) (0) +#define ub_addrspace(a, b) (0) +#define ub_addrspacesize(a, b) (0) +#define ub_view(a, b) do {} while (0) +#define ub_dumpregs(a, b) do {} while (0) + +#endif /* _siutils_priv_h_ */ diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h new file mode 100644 index 000000000000..0d04a9d86037 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/uamp_api.h @@ -0,0 +1,181 @@ +/* + * Name: uamp_api.h + * + * Description: Universal AMP API + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: uamp_api.h 514727 2014-11-12 03:02:48Z $ + * + */ + + +#ifndef UAMP_API_H +#define UAMP_API_H + + +#include "typedefs.h" + + +/***************************************************************************** +** Constant and Type Definitions +****************************************************************************** +*/ + +#define BT_API + +/* Types. */ +typedef bool BOOLEAN; +typedef uint8 UINT8; +typedef uint16 UINT16; + + +/* UAMP identifiers */ +#define UAMP_ID_1 1 +#define UAMP_ID_2 2 +typedef UINT8 tUAMP_ID; + +/* UAMP event ids (used by UAMP_CBACK) */ +#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */ +#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */ +#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */ +typedef UINT8 tUAMP_EVT; + + +/* UAMP Channels */ +#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */ +#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */ +#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */ +typedef UINT8 tUAMP_CH; + +/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */ +typedef union { + tUAMP_CH channel; /* UAMP_EVT_RX_READY: channel for which rx occured */ +} tUAMP_EVT_DATA; + + +/***************************************************************************** +** +** Function: UAMP_CBACK +** +** Description: Callback for events. Register callback using UAMP_Init. +** +** Parameters amp_id: AMP device identifier that generated the event +** amp_evt: event id +** p_amp_evt_data: pointer to event-specific data +** +****************************************************************************** +*/ +typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data); + +/***************************************************************************** +** external function declarations +****************************************************************************** +*/ +#ifdef __cplusplus +extern "C" +{ +#endif + +/***************************************************************************** +** +** Function: UAMP_Init +** +** Description: Initialize UAMP driver +** +** Parameters p_cback: Callback function for UAMP event notification +** +****************************************************************************** +*/ +BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback); + + +/***************************************************************************** +** +** Function: UAMP_Open +** +** Description: Open connection to local AMP device. +** +** Parameters app_id: Application specific AMP identifer. This value +** will be included in AMP messages sent to the +** BTU task, to identify source of the message +** +****************************************************************************** +*/ +BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id); + +/***************************************************************************** +** +** Function: UAMP_Close +** +** Description: Close connection to local AMP device. +** +** Parameters app_id: Application specific AMP identifer. 
+** +****************************************************************************** +*/ +BT_API void UAMP_Close(tUAMP_ID amp_id); + + +/***************************************************************************** +** +** Function: UAMP_Write +** +** Description: Send buffer to AMP device. Frees GKI buffer when done. +** +** +** Parameters: app_id: AMP identifer. +** p_buf: pointer to buffer to write +** num_bytes: number of bytes to write +** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_CMD +** +** Returns: number of bytes written +** +****************************************************************************** +*/ +BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel); + +/***************************************************************************** +** +** Function: UAMP_Read +** +** Description: Read incoming data from AMP. Call after receiving a +** UAMP_EVT_RX_READY callback event. +** +** Parameters: app_id: AMP identifer. +** p_buf: pointer to buffer for holding incoming AMP data +** buf_size: size of p_buf +** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_EVT +** +** Returns: number of bytes read +** +****************************************************************************** +*/ +BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel); + +#ifdef __cplusplus +} +#endif + +#endif /* UAMP_API_H */ diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c new file mode 100644 index 000000000000..a1f4ba77fac2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_android.c @@ -0,0 +1,3357 @@ +/* + * Linux cfg80211 driver - Android related functions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: wl_android.c 608788 2015-12-29 10:59:33Z $ + */ + +#include +#include +#include +#ifdef CONFIG_COMPAT +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif +#ifdef BCMSDIO +#include +#endif +#ifdef WL_CFG80211 +#include +#endif +#ifdef WL_NAN +#include +#endif /* WL_NAN */ +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* + * Android private command strings, PLEASE define new private commands here + * so they can be updated easily in the future (if needed) + */ + +#define CMD_START "START" +#define CMD_STOP "STOP" +#define CMD_SCAN_ACTIVE "SCAN-ACTIVE" +#define CMD_SCAN_PASSIVE "SCAN-PASSIVE" +#define CMD_RSSI "RSSI" +#define CMD_LINKSPEED "LINKSPEED" +#define CMD_RXFILTER_START "RXFILTER-START" +#define CMD_RXFILTER_STOP "RXFILTER-STOP" +#define CMD_RXFILTER_ADD "RXFILTER-ADD" +#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE" +#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START" +#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP" +#define CMD_BTCOEXMODE "BTCOEXMODE" +#define CMD_SETSUSPENDOPT "SETSUSPENDOPT" +#define CMD_SETSUSPENDMODE "SETSUSPENDMODE" +#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR" +#define CMD_SETFWPATH "SETFWPATH" +#define CMD_SETBAND "SETBAND" +#define CMD_GETBAND "GETBAND" +#define CMD_COUNTRY "COUNTRY" +#define CMD_P2P_SET_NOA "P2P_SET_NOA" +#if !defined WL_ENABLE_P2P_IF +#define CMD_P2P_GET_NOA "P2P_GET_NOA" +#endif /* WL_ENABLE_P2P_IF */ +#define CMD_P2P_SD_OFFLOAD "P2P_SD_" +#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_" +#define CMD_P2P_SET_PS "P2P_SET_PS" +#define CMD_P2P_ECSA "P2P_ECSA" +#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" +#define CMD_SETROAMMODE "SETROAMMODE" +#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA" +#define CMD_MIRACAST "MIRACAST" +#ifdef WL_NAN +#define CMD_NAN "NAN_" +#endif /* WL_NAN */ +#define CMD_COUNTRY_DELIMITER "/" +#ifdef WL11ULB +#define CMD_ULB_MODE "ULB_MODE" +#define CMD_ULB_BW "ULB_BW" +#endif /* WL11ULB */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */ +#define CMD_CHANSPEC "CHANSPEC" +#define CMD_DATARATE "DATARATE" +#define CMD_ASSOC_CLIENTS "ASSOCLIST" +#define CMD_SET_CSA "SETCSA" +#ifdef WL_SUPPORT_AUTO_CHANNEL +#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef SUPPORT_SET_LPC +#define CMD_HAPD_LPC_ENABLED "HAPD_LPC_ENABLED" +#endif /* SUPPORT_SET_LPC */ +#ifdef SUPPORT_TRIGGER_HANG_EVENT +#define CMD_TEST_FORCE_HANG "TEST_FORCE_HANG" +#endif /* SUPPORT_TRIGGER_HANG_EVENT */ +#ifdef TEST_TX_POWER_CONTROL +#define CMD_TEST_SET_TX_POWER "TEST_SET_TX_POWER" +#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER" +#endif /* TEST_TX_POWER_CONTROL */ +#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING" +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ +#define CMD_KEEP_ALIVE "KEEPALIVE" + + +#ifdef PNO_SUPPORT +#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR" +#define CMD_PNOSETUP_SET "PNOSETUP " +#define CMD_PNOENABLE_SET "PNOFORCE" +#define CMD_PNODEBUG_SET "PNODEBUG" +#define CMD_WLS_BATCHING "WLS_BATCHING" +#endif /* PNO_SUPPORT */ + +#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER" + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + + +#if defined(SUPPORT_RANDOM_MAC_SCAN) +#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC" +#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC" +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + 
+ +#define CMD_CHANGE_RL "CHANGE_RL" +#define CMD_RESTORE_RL "RESTORE_RL" + +#define CMD_SET_RMC_ENABLE "SETRMCENABLE" +#define CMD_SET_RMC_TXRATE "SETRMCTXRATE" +#define CMD_SET_RMC_ACTPERIOD "SETRMCACTIONPERIOD" +#define CMD_SET_RMC_IDLEPERIOD "SETRMCIDLEPERIOD" +#define CMD_SET_RMC_LEADER "SETRMCLEADER" +#define CMD_SET_RMC_EVENT "SETRMCEVENT" + +#define CMD_SET_SCSCAN "SETSINGLEANT" +#define CMD_GET_SCSCAN "GETSINGLEANT" + +/* FCC_PWR_LIMIT_2G */ +#define CUSTOMER_HW4_ENABLE 0 +#define CUSTOMER_HW4_DISABLE -1 +#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1) + +#ifdef WLTDLS +#define CMD_TDLS_RESET "TDLS_RESET" +#endif /* WLTDLS */ + +#ifdef IPV6_NDO_SUPPORT +#define CMD_NDRA_LIMIT "NDRA_LIMIT" +#endif /* IPV6_NDO_SUPPORT */ + +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + + +#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD" +#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST" +#define CMD_INTERFACE_CREATE "INTERFACE_CREATE" +#define CMD_INTERFACE_DELETE "INTERFACE_DELETE" + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define CMD_GET_BSS_INFO "GETBSSINFO" +#define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO" +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +#ifdef P2PRESP_WFDIE_SRC +#define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP" +#define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP" +#endif /* P2PRESP_WFDIE_SRC */ + +#define CMD_DFS_AP_MOVE "DFS_AP_MOVE" +#define CMD_WBTEXT_ENABLE "WBTEXT_ENABLE" +#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG" +#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG" +#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG" +#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG" + +#ifdef WLWFDS +#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH" +#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH" +#endif /* WLWFDS */ + +#ifdef SET_RPS_CPUS +#define CMD_RPSMODE "RPSMODE" +#endif /* SET_RPS_CPUS */ + +#ifdef BT_WIFI_HANDOVER +#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN" +#endif /* BT_WIFI_HANDOVER */ + +#define CMD_MURX_BFE_CAP "MURX_BFE_CAP" + +/* miracast related definition */ +#define MIRACAST_MODE_OFF 0 +#define MIRACAST_MODE_SOURCE 1 +#define MIRACAST_MODE_SINK 2 + +#ifndef MIRACAST_AMPDU_SIZE +#define MIRACAST_AMPDU_SIZE 8 +#endif + +#ifndef MIRACAST_MCHAN_ALGO +#define MIRACAST_MCHAN_ALGO 1 +#endif + +#ifndef MIRACAST_MCHAN_BW +#define MIRACAST_MCHAN_BW 25 +#endif + +#ifdef CONNECTION_STATISTICS +#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS" + +struct connection_stats { + u32 txframe; + u32 txbyte; + u32 txerror; + u32 rxframe; + u32 rxbyte; + u32 txfail; + u32 txretry; + u32 txretrie; + u32 txrts; + u32 txnocts; + u32 txexptime; + u32 txrate; + u8 chan_idle; +}; +#endif /* CONNECTION_STATISTICS */ + +static LIST_HEAD(miracast_resume_list); +static u8 miracast_cur_mode; + +#ifdef DHD_LOG_DUMP +#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP" +extern void dhd_schedule_log_dump(dhd_pub_t *dhdp); +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_TRACE_WAKE_LOCK +extern void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif /* DHD_TRACE_WAKE_LOCK */ + +struct io_cfg { + s8 *iovar; + s32 param; + u32 ioctl; + void *arg; + u32 len; + struct list_head list; +}; + +typedef struct _android_wifi_priv_cmd { + char *buf; + int used_len; + int total_len; +} android_wifi_priv_cmd; + +#ifdef CONFIG_COMPAT +typedef struct _compat_android_wifi_priv_cmd { + compat_caddr_t buf; + int used_len; + int total_len; +} compat_android_wifi_priv_cmd; +#endif /* CONFIG_COMPAT */ + +#if defined(BCMFW_ROAM_ENABLE) +#define CMD_SET_ROAMPREF "SET_ROAMPREF" + +#define 
MAX_NUM_SUITES 10 +#define WIDTH_AKM_SUITE 8 +#define JOIN_PREF_RSSI_LEN 0x02 +#define JOIN_PREF_RSSI_SIZE 4 /* RSSI pref header size in bytes */ +#define JOIN_PREF_WPA_HDR_SIZE 4 /* WPA pref header size in bytes */ +#define JOIN_PREF_WPA_TUPLE_SIZE 12 /* Tuple size in bytes */ +#define JOIN_PREF_MAX_WPA_TUPLES 16 +#define MAX_BUF_SIZE (JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + \ + (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES)) +#endif /* BCMFW_ROAM_ENABLE */ + + +/** + * Extern function declarations (TODO: move them to dhd_linux.h) + */ +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +int dhd_dev_init_ioctl(struct net_device *dev); +#ifdef WL_CFG80211 +int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command); +#else +int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) +{ return 0; } +int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) +{ return 0; } +#endif /* WK_CFG80211 */ + + +#ifdef ENABLE_4335BT_WAR +extern int bcm_bt_lock(int cookie); +extern void bcm_bt_unlock(int cookie); +static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */ +#endif /* ENABLE_4335BT_WAR */ + +extern bool ap_fw_loaded; +extern char iface_name[IFNAMSIZ]; + +/** + * Local (static) functions and variables + */ + +/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first + * time (only) in dhd_open, subsequential wifi on will be handled by + * wl_android_wifi_on + */ +static int g_wifi_on = TRUE; + +/** + * Local (static) function definitions + */ + +#ifdef WLWFDS +static int wl_android_set_wfds_hash( + struct net_device *dev, char *command, int total_len, bool enable) +{ + int error = 0; + wl_p2p_wfds_hash_t *wfds_hash = NULL; + char *smbuf = NULL; + smbuf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + + if (smbuf == NULL) { + DHD_ERROR(("%s: failed to allocated memory %d bytes\n", + __FUNCTION__, WLC_IOCTL_MAXLEN)); + return -ENOMEM; + } + + if (enable) { + wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1); + error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash, + sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL); + } + else { + wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1); + error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash, + sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL); + } + + if (error) { + DHD_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error)); + } + + if (smbuf) + kfree(smbuf); + return error; +} +#endif /* WLWFDS */ + +static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len) +{ + int link_speed; + int bytes_written; + int error; + + error = wldev_get_link_speed(net, &link_speed); + if (error) + return -1; + + /* Convert Kbps to Android Mbps */ + link_speed = link_speed / 1000; + bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed); + DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command)); + return bytes_written; +} + +static int wl_android_get_rssi(struct net_device *net, char *command, int total_len) +{ + wlc_ssid_t 
ssid = {0}; + int bytes_written = 0; + int error = 0; + scb_val_t scbval; + char *delim = NULL; + + delim = strchr(command, ' '); + /* For Ap mode rssi command would be + * driver rssi + * for STA/GC mode + * driver rssi + */ + if (delim) { + /* Ap/GO mode + * driver rssi + */ + DHD_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim)); + /* skip space from delim after finding char */ + delim++; + if (!(bcm_ether_atoe((delim), &scbval.ea))) + { + DHD_ERROR(("%s:address err\n", __FUNCTION__)); + return -1; + } + scbval.val = htod32(0); + DHD_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet))); + } + else { + /* STA/GC mode */ + memset(&scbval, 0, sizeof(scb_val_t)); + } + + error = wldev_get_rssi(net, &scbval); + if (error) + return -1; + + error = wldev_get_ssid(net, &ssid); + if (error) + return -1; + if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) { + DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__)); + } else { + memcpy(command, ssid.SSID, ssid.SSID_len); + bytes_written = ssid.SSID_len; + } + bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", scbval.val); + DHD_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); + return bytes_written; +} + +static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len) +{ + int suspend_flag; + int ret_now; + int ret = 0; + + suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0'; + + if (suspend_flag != 0) { + suspend_flag = 1; + } + ret_now = net_os_set_suspend_disable(dev, suspend_flag); + + if (ret_now != suspend_flag) { + if (!(ret = net_os_set_suspend(dev, ret_now, 1))) { + DHD_INFO(("%s: Suspend Flag %d -> %d\n", + __FUNCTION__, ret_now, suspend_flag)); + } else { + DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + } + + return ret; +} + +static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len) +{ + int ret = 0; + +#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND) + int suspend_flag; + + suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0'; + if (suspend_flag != 0) + suspend_flag = 1; + + if (!(ret = net_os_set_suspend(dev, suspend_flag, 0))) + DHD_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag)); + else + DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); +#endif + + return ret; +} + +int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len) +{ + uint8 mode[4]; + int error = 0; + int bytes_written = 0; + + error = wldev_get_mode(dev, mode); + if (error) + return -1; + + DHD_INFO(("%s: mode:%s\n", __FUNCTION__, mode)); + bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode); + DHD_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + return bytes_written; + +} + +extern chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec); +int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int chsp = {0}; + uint16 band = 0; + uint16 bw = 0; + uint16 channel = 0; + u32 sb = 0; + chanspec_t chanspec; + + /* command is + * driver chanspec + */ + error = wldev_iovar_getint(dev, "chanspec", &chsp); + if (error) + return -1; + + chanspec = wl_chspec_driver_to_host(chsp); + DHD_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec)); + + channel = chanspec & WL_CHANSPEC_CHAN_MASK; + band = chanspec & WL_CHANSPEC_BAND_MASK; + bw = chanspec & WL_CHANSPEC_BW_MASK; + + DHD_INFO(("%s:channel:%d band:%d bandwidth:%d\n", 
__FUNCTION__, channel, band, bw)); + + if (bw == WL_CHANSPEC_BW_80) + bw = WL_CH_BANDWIDTH_80MHZ; + else if (bw == WL_CHANSPEC_BW_40) + bw = WL_CH_BANDWIDTH_40MHZ; + else if (bw == WL_CHANSPEC_BW_20) + bw = WL_CH_BANDWIDTH_20MHZ; + else + bw = WL_CH_BANDWIDTH_20MHZ; + + if (bw == WL_CH_BANDWIDTH_40MHZ) { + if (CHSPEC_SB_UPPER(chanspec)) { + channel += CH_10MHZ_APART; + } else { + channel -= CH_10MHZ_APART; + } + } + else if (bw == WL_CH_BANDWIDTH_80MHZ) { + sb = chanspec & WL_CHANSPEC_CTL_SB_MASK; + if (sb == WL_CHANSPEC_CTL_SB_LL) { + channel -= (CH_10MHZ_APART + CH_20MHZ_APART); + } else if (sb == WL_CHANSPEC_CTL_SB_LU) { + channel -= CH_10MHZ_APART; + } else if (sb == WL_CHANSPEC_CTL_SB_UL) { + channel += CH_10MHZ_APART; + } else { + /* WL_CHANSPEC_CTL_SB_UU */ + channel += (CH_10MHZ_APART + CH_20MHZ_APART); + } + } + bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC, + channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw); + + DHD_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + return bytes_written; + +} + +/* returns current datarate datarate returned from firmware are in 500kbps */ +int wl_android_get_datarate(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int datarate = 0; + int bytes_written = 0; + + error = wldev_get_datarate(dev, &datarate); + if (error) + return -1; + + DHD_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2)); + return bytes_written; +} +int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + uint i; + char mac_buf[MAX_NUM_OF_ASSOCLIST * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST); + + error = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), false); + if (error) + return -1; + + assoc_maclist->count = dtoh32(assoc_maclist->count); + bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:", + CMD_ASSOC_CLIENTS, assoc_maclist->count); + + for (i = 0; i < assoc_maclist->count; i++) { + bytes_written += snprintf(command + bytes_written, total_len, " " MACDBG, + MAC2STRDBG(assoc_maclist->ea[i].octet)); + } + return bytes_written; + +} +extern chanspec_t +wl_chspec_host_to_driver(chanspec_t chanspec); +static int wl_android_set_csa(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + char smbuf[WLC_IOCTL_SMLEN]; + wl_chan_switch_t csa_arg; + u32 chnsp = 0; + int err = 0; + + DHD_INFO(("%s: command:%s\n", __FUNCTION__, command)); + + command = (command + strlen(CMD_SET_CSA)); + /* Order is mode, count channel */ + if (!*++command) { + DHD_ERROR(("%s:error missing arguments\n", __FUNCTION__)); + return -1; + } + csa_arg.mode = bcm_atoi(command); + + if (csa_arg.mode != 0 && csa_arg.mode != 1) { + DHD_ERROR(("Invalid mode\n")); + return -1; + } + + if (!*++command) { + DHD_ERROR(("%s:error missing count\n", __FUNCTION__)); + return -1; + } + command++; + csa_arg.count = bcm_atoi(command); + + csa_arg.reg = 0; + csa_arg.chspec = 0; + command += 2; + if (!*command) { + DHD_ERROR(("%s:error missing channel\n", __FUNCTION__)); + return -1; + } + + chnsp = wf_chspec_aton(command); + if (chnsp == 0) { + DHD_ERROR(("%s:chsp is not correct\n", __FUNCTION__)); + return -1; + } + chnsp = wl_chspec_host_to_driver(chnsp); + 
csa_arg.chspec = chnsp; + + if (chnsp & WL_CHANSPEC_BAND_5G) { + u32 chanspec = chnsp; + err = wldev_iovar_getint(dev, "per_chan_info", &chanspec); + if (!err) { + if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) { + DHD_ERROR(("Channel is radar sensitive\n")); + return -1; + } + if (chanspec == 0) { + DHD_ERROR(("Invalid hw channel\n")); + return -1; + } + } else { + DHD_ERROR(("does not support per_chan_info\n")); + return -1; + } + DHD_INFO(("non radar sensitivity\n")); + } + error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), + smbuf, sizeof(smbuf), NULL); + if (error) { + DHD_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error)); + return -1; + } + return 0; +} +static int wl_android_get_band(struct net_device *dev, char *command, int total_len) +{ + uint band; + int bytes_written; + int error; + + error = wldev_get_band(dev, &band); + if (error) + return -1; + bytes_written = snprintf(command, total_len, "Band %d", band); + return bytes_written; +} + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + +#ifdef FCC_PWR_LIMIT_2G +int +wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int enable = 0; + + sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable); + + if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) { + DHD_ERROR(("%s: Invalid data\n", __FUNCTION__)); + return BCME_ERROR; + } + + CUSTOMER_HW4_EN_CONVERT(enable); + + DHD_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable)); + error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable); + if (error) { + DHD_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error)); + return BCME_ERROR; + } + + return error; +} + +int +wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int enable = 0; + int bytes_written = 0; + + error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable); + if (error) { + DHD_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error)); + return BCME_ERROR; + } + DHD_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable); + + return bytes_written; +} +#endif /* FCC_PWR_LIMIT_2G */ + +#ifdef IPV6_NDO_SUPPORT +int +wl_android_nd_ra_limit(struct net_device *dev, char *command, int total_len) +{ + int err = 0; + int bytes_written = 0; + uint tokens; + char *pos, *token, *delim; + char smbuf[WLC_IOCTL_SMLEN]; + char param[ND_PARAM_SIZE+1], value[ND_VALUE_SIZE+1]; + uint16 type = 0xff, min = 0, per = 0, hold = 0; + nd_ra_ol_limits_t ra_ol_limit; + + WL_TRACE(("command=%s, len=%d\n", command, total_len)); + pos = command + strlen(CMD_NDRA_LIMIT) + 1; + memset(&ra_ol_limit, 0, sizeof(nd_ra_ol_limits_t)); + + if (!strncmp(pos, ND_RA_OL_SET, strlen(ND_RA_OL_SET))) { + WL_TRACE(("SET NDRA_LIMIT\n")); + pos += strlen(ND_RA_OL_SET) + 1; + while ((token = strsep(&pos, ND_PARAMS_DELIMETER)) != NULL) { + memset(param, 0, sizeof(param)); + memset(value, 0, sizeof(value)); + + delim = strchr(token, ND_PARAM_VALUE_DELLIMETER); + if (delim != NULL) + *delim = ' '; + + tokens = sscanf(token, ND_LIMIT_STR_FMT, param, value); + if (!strncmp(param, ND_RA_TYPE, strlen(ND_RA_TYPE))) { + type = simple_strtol(value, NULL, 0); + } else if (!strncmp(param, ND_RA_MIN_TIME, strlen(ND_RA_MIN_TIME))) { + min = simple_strtol(value, NULL, 0); + } else if (!strncmp(param, ND_RA_PER, strlen(ND_RA_PER))) { + per = simple_strtol(value, NULL, 0); + if (per > 100) { + 
WL_ERR(("Invalid PERCENT %d\n", per)); + err = BCME_BADARG; + goto exit; + } + } else if (!strncmp(param, ND_RA_HOLD, strlen(ND_RA_HOLD))) { + hold = simple_strtol(value, NULL, 0); + } + } + + ra_ol_limit.version = htod32(ND_RA_OL_LIMITS_VER); + ra_ol_limit.type = htod32(type); + if (type == ND_RA_OL_LIMITS_REL_TYPE) { + if ((min == 0) || (per == 0)) { + WL_ERR(("Invalid min_time %d, percent %d\n", min, per)); + err = BCME_BADARG; + goto exit; + } + ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_REL_TYPE_LEN); + ra_ol_limit.limits.lifetime_relative.min_time = htod32(min); + ra_ol_limit.limits.lifetime_relative.lifetime_percent = htod32(per); + } else if (type == ND_RA_OL_LIMITS_FIXED_TYPE) { + if (hold == 0) { + WL_ERR(("Invalid hold_time %d\n", hold)); + err = BCME_BADARG; + goto exit; + } + ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_FIXED_TYPE_LEN); + ra_ol_limit.limits.fixed.hold_time = htod32(hold); + } else { + WL_ERR(("unknown TYPE %d\n", type)); + err = BCME_BADARG; + goto exit; + } + + err = wldev_iovar_setbuf(dev, "nd_ra_limit_intv", &ra_ol_limit, + sizeof(nd_ra_ol_limits_t), smbuf, sizeof(smbuf), NULL); + if (err) { + WL_ERR(("Failed to set nd_ra_limit_intv, error = %d\n", err)); + goto exit; + } + + WL_TRACE(("TYPE %d, MIN %d, PER %d, HOLD %d\n", type, min, per, hold)); + } else if (!strncmp(pos, ND_RA_OL_GET, strlen(ND_RA_OL_GET))) { + WL_TRACE(("GET NDRA_LIMIT\n")); + err = wldev_iovar_getbuf(dev, "nd_ra_limit_intv", NULL, 0, + smbuf, sizeof(smbuf), NULL); + if (err) { + WL_ERR(("Failed to get nd_ra_limit_intv, error = %d\n", err)); + goto exit; + } + + memcpy(&ra_ol_limit, (uint8 *)smbuf, sizeof(nd_ra_ol_limits_t)); + type = ra_ol_limit.type; + if (ra_ol_limit.version != ND_RA_OL_LIMITS_VER) { + WL_ERR(("Invalid Version %d\n", ra_ol_limit.version)); + err = BCME_VERSION; + goto exit; + } + + if (ra_ol_limit.type == ND_RA_OL_LIMITS_REL_TYPE) { + min = ra_ol_limit.limits.lifetime_relative.min_time; + per = ra_ol_limit.limits.lifetime_relative.lifetime_percent; + WL_ERR(("TYPE %d, MIN %d, PER %d\n", type, min, per)); + bytes_written = snprintf(command, total_len, + "%s GET TYPE %d, MIN %d, PER %d", CMD_NDRA_LIMIT, type, min, per); + } else if (ra_ol_limit.type == ND_RA_OL_LIMITS_FIXED_TYPE) { + hold = ra_ol_limit.limits.fixed.hold_time; + WL_ERR(("TYPE %d, HOLD %d\n", type, hold)); + bytes_written = snprintf(command, total_len, + "%s GET TYPE %d, HOLD %d", CMD_NDRA_LIMIT, type, hold); + } else { + WL_ERR(("unknown TYPE %d\n", type)); + err = BCME_ERROR; + goto exit; + } + + return bytes_written; + } else { + WL_ERR(("unknown command\n")); + err = BCME_ERROR; + goto exit; + } + +exit: + return err; +} +#endif /* IPV6_NDO_SUPPORT */ +#ifdef WLTDLS +int wl_android_tdls_reset(struct net_device *dev) +{ + int ret = 0; + ret = dhd_tdls_enable(dev, false, false, NULL); + if (ret < 0) { + DHD_ERROR(("Disable tdls failed. %d\n", ret)); + return ret; + } + ret = dhd_tdls_enable(dev, true, true, NULL); + if (ret < 0) { + DHD_ERROR(("enable tdls failed. 
%d\n", ret)); + return ret; + } + return 0; +} +#endif /* WLTDLS */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ +static int wl_android_wbtext(struct net_device *dev, char *command, int total_len) +{ + int error = 0, argc = 0; + int data, bytes_written; + + argc = sscanf(command+sizeof("WBTEXT_ENABLE"), "%d", &data); + if (!argc) { + error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data); + if (error) { + DHD_ERROR(("%s: Failed to set wbtext error = %d\n", + __FUNCTION__, error)); + } + bytes_written = snprintf(command, total_len, "WBTEXT %s\n", + (data == WL_BSSTRANS_POLICY_PRODUCT)? "ENABLED" : "DISABLED"); + return bytes_written; + } else { + if (data) + data = WL_BSSTRANS_POLICY_PRODUCT; + + error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data); + if (error) { + DHD_ERROR(("%s: Failed to set wbtext error = %d\n", + __FUNCTION__, error)); + } + } + return error; +} + +#ifdef PNO_SUPPORT +#define PNO_PARAM_SIZE 50 +#define VALUE_SIZE 50 +#define LIMIT_STR_FMT ("%50s %50s") +static int +wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) +{ + int err = BCME_OK; + uint i, tokens; + char *pos, *pos2, *token, *token2, *delim; + char param[PNO_PARAM_SIZE], value[VALUE_SIZE]; + struct dhd_pno_batch_params batch_params; + DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + if (total_len < strlen(CMD_WLS_BATCHING)) { + DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + err = BCME_ERROR; + goto exit; + } + pos = command + strlen(CMD_WLS_BATCHING) + 1; + memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params)); + + if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) { + pos += strlen(PNO_BATCHING_SET) + 1; + while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) { + memset(param, 0, sizeof(param)); + memset(value, 0, sizeof(value)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER); + if (delim != NULL) + *delim = ' '; + + tokens = sscanf(token, LIMIT_STR_FMT, param, value); + if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) { + batch_params.scan_fr = simple_strtol(value, NULL, 0); + DHD_PNO(("scan_freq : %d\n", batch_params.scan_fr)); + } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) { + batch_params.bestn = simple_strtol(value, NULL, 0); + DHD_PNO(("bestn : %d\n", batch_params.bestn)); + } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) { + batch_params.mscan = simple_strtol(value, NULL, 0); + DHD_PNO(("mscan : %d\n", batch_params.mscan)); + } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) { + i = 0; + pos2 = value; + tokens = sscanf(value, "<%s>", value); + if (tokens != 1) { + err = BCME_ERROR; + DHD_ERROR(("%s : invalid format for channel" + " <> params\n", __FUNCTION__)); + goto exit; + } + while ((token2 = strsep(&pos2, + PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { + if (token2 == NULL || !*token2) + break; + if (*token2 == '\0') + continue; + if (*token2 == 'A' || *token2 == 'B') { + batch_params.band = (*token2 == 'A')? + WLC_BAND_5G : WLC_BAND_2G; + DHD_PNO(("band : %s\n", + (*token2 == 'A')? 
"A" : "B")); + } else { + batch_params.chan_list[i++] = + simple_strtol(token2, NULL, 0); + batch_params.nchan++; + DHD_PNO(("channel :%d\n", + batch_params.chan_list[i-1])); + } + } + } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) { + batch_params.rtt = simple_strtol(value, NULL, 0); + DHD_PNO(("rtt : %d\n", batch_params.rtt)); + } else { + DHD_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param)); + err = BCME_ERROR; + goto exit; + } + } + err = dhd_dev_pno_set_for_batch(dev, &batch_params); + if (err < 0) { + DHD_ERROR(("failed to configure batch scan\n")); + } else { + memset(command, 0, total_len); + err = snprintf(command, total_len, "%d", err); + } + } else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) { + err = dhd_dev_pno_get_for_batch(dev, command, total_len); + if (err < 0) { + DHD_ERROR(("failed to getting batching results\n")); + } else { + err = strlen(command); + } + } else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) { + err = dhd_dev_pno_stop_for_batch(dev); + if (err < 0) { + DHD_ERROR(("failed to stop batching scan\n")); + } else { + memset(command, 0, total_len); + err = snprintf(command, total_len, "OK"); + } + } else { + DHD_ERROR(("%s : unknown command\n", __FUNCTION__)); + err = BCME_ERROR; + goto exit; + } +exit: + return err; +} +#ifndef WL_SCHED_SCAN +static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len) +{ + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; + int res = -1; + int nssid = 0; + cmd_tlv_t *cmd_tlv_temp; + char *str_ptr; + int tlv_size_left; + int pno_time = 0; + int pno_repeat = 0; + int pno_freq_expo_max = 0; + +#ifdef PNO_SET_DEBUG + int i; + char pno_in_example[] = { + 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', + 'S', '1', '2', '0', + 'S', + 0x05, + 'd', 'l', 'i', 'n', 'k', + 'S', + 0x04, + 'G', 'O', 'O', 'G', + 'T', + '0', 'B', + 'R', + '2', + 'M', + '2', + 0x00 + }; +#endif /* PNO_SET_DEBUG */ + DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + + if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) { + DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + goto exit_proc; + } +#ifdef PNO_SET_DEBUG + memcpy(command, pno_in_example, sizeof(pno_in_example)); + total_len = sizeof(pno_in_example); +#endif + str_ptr = command + strlen(CMD_PNOSETUP_SET); + tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET); + + cmd_tlv_temp = (cmd_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && + (cmd_tlv_temp->version == PNO_TLV_VERSION) && + (cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) { + + str_ptr += sizeof(cmd_tlv_t); + tlv_size_left -= sizeof(cmd_tlv_t); + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, + MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) { + DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } else { + if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) { + DHD_ERROR(("%s scan duration corrupted field size %d\n", + __FUNCTION__, tlv_size_left)); + goto exit_proc; + } + str_ptr++; + pno_time = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + + if (str_ptr[0] != 0) { + if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { + DHD_ERROR(("%s pno repeat : corrupted field\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, 
pno_repeat)); + if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { + DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_PNO(("%s: pno_freq_expo_max=%d\n", + __FUNCTION__, pno_freq_expo_max)); + } + } + } else { + DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat, + pno_freq_expo_max, NULL, 0); +exit_proc: + return res; +} +#endif /* !WL_SCHED_SCAN */ +#endif /* PNO_SUPPORT */ + +static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len) +{ + int ret; + int bytes_written = 0; + + ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command); + if (ret) + return 0; + bytes_written = sizeof(struct ether_addr); + return bytes_written; +} + + +int +wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist) +{ + int i, j, match; + int ret = 0; + char mac_buf[MAX_NUM_OF_ASSOCLIST * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + /* set filtering mode */ + if ((ret = wldev_ioctl(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode), true)) != 0) { + DHD_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret)); + return ret; + } + if (macmode != MACLIST_MODE_DISABLED) { + /* set the MAC filter list */ + if ((ret = wldev_ioctl(dev, WLC_SET_MACLIST, maclist, + sizeof(int) + sizeof(struct ether_addr) * maclist->count, true)) != 0) { + DHD_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret)); + return ret; + } + /* get the current list of associated STAs */ + assoc_maclist->count = MAX_NUM_OF_ASSOCLIST; + if ((ret = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, + sizeof(mac_buf), false)) != 0) { + DHD_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret)); + return ret; + } + /* do we have any STA associated? 
*/ + if (assoc_maclist->count) { + /* iterate each associated STA */ + for (i = 0; i < assoc_maclist->count; i++) { + match = 0; + /* compare with each entry */ + for (j = 0; j < maclist->count; j++) { + DHD_INFO(("%s : associated="MACDBG " list="MACDBG "\n", + __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet), + MAC2STRDBG(maclist->ea[j].octet))); + if (memcmp(assoc_maclist->ea[i].octet, + maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) { + match = 1; + break; + } + } + /* do conditional deauth */ + /* "if not in the allow list" or "if in the deny list" */ + if ((macmode == MACLIST_MODE_ALLOW && !match) || + (macmode == MACLIST_MODE_DENY && match)) { + scb_val_t scbval; + + scbval.val = htod32(1); + memcpy(&scbval.ea, &assoc_maclist->ea[i], + ETHER_ADDR_LEN); + if ((ret = wldev_ioctl(dev, + WLC_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scb_val_t), true)) != 0) + DHD_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n", + __FUNCTION__, ret)); + } + } + } + } + return ret; +} + +/* + * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2 + * + */ +static int +wl_android_set_mac_address_filter(struct net_device *dev, const char* str) +{ + int i; + int ret = 0; + int macnum = 0; + int macmode = MACLIST_MODE_DISABLED; + struct maclist *list; + char eabuf[ETHER_ADDR_STR_LEN]; + char *token; + + /* string should look like below (macmode/macnum/maclist) */ + /* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */ + + /* get the MAC filter mode */ + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macmode = bcm_atoi(token); + + if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) { + DHD_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode)); + return -1; + } + + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macnum = bcm_atoi(token); + if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { + DHD_ERROR(("%s : invalid number of MAC address entries %d\n", + __FUNCTION__, macnum)); + return -1; + } + /* allocate memory for the MAC list */ + list = (struct maclist*)kmalloc(sizeof(int) + + sizeof(struct ether_addr) * macnum, GFP_KERNEL); + if (!list) { + DHD_ERROR(("%s : failed to allocate memory\n", __FUNCTION__)); + return -1; + } + /* prepare the MAC list */ + list->count = htod32(macnum); + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + for (i = 0; i < list->count; i++) { + strncpy(eabuf, strsep((char**)&str, " "), ETHER_ADDR_STR_LEN - 1); + if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) { + DHD_ERROR(("%s : mac parsing err index=%d, addr=%s\n", + __FUNCTION__, i, eabuf)); + list->count--; + break; + } + DHD_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf)); + } + /* set the list */ + if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0) + DHD_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); + + kfree(list); + + return 0; +} + +/** + * Global function definitions (declared in wl_android.h) + */ + +int wl_android_wifi_on(struct net_device *dev) +{ + int ret = 0; + int retry = POWERUP_MAX_RETRY; + + DHD_ERROR(("%s in\n", __FUNCTION__)); + if (!dev) { + DHD_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + dhd_net_if_lock(dev); + if (!g_wifi_on) { + do { + dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY); +#ifdef BCMSDIO + ret = dhd_net_bus_resume(dev, 0); +#endif /* BCMSDIO */ +#ifdef BCMPCIE + ret = dhd_net_bus_devreset(dev, FALSE); +#endif /* BCMPCIE */ + if (ret == 0) { + break; + } + DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n", + retry)); 
+#ifdef BCMPCIE + dhd_net_bus_devreset(dev, TRUE); +#endif /* BCMPCIE */ + dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); + } while (retry-- > 0); + if (ret != 0) { + DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n")); + goto exit; + } +#ifdef BCMSDIO + ret = dhd_net_bus_devreset(dev, FALSE); + dhd_net_bus_resume(dev, 1); +#endif /* BCMSDIO */ + +#ifndef BCMPCIE + if (!ret) { + if (dhd_dev_init_ioctl(dev) < 0) { + ret = -EFAULT; + } + } +#endif /* !BCMPCIE */ + g_wifi_on = TRUE; + } + +exit: + dhd_net_if_unlock(dev); + + return ret; +} + +int wl_android_wifi_off(struct net_device *dev, bool on_failure) +{ + int ret = 0; + + DHD_ERROR(("%s in\n", __FUNCTION__)); + if (!dev) { + DHD_TRACE(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + dhd_net_if_lock(dev); + if (g_wifi_on || on_failure) { +#if defined(BCMSDIO) || defined(BCMPCIE) + ret = dhd_net_bus_devreset(dev, TRUE); +#ifdef BCMSDIO + dhd_net_bus_suspend(dev); +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMPCIE */ + dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); + g_wifi_on = FALSE; + } + dhd_net_if_unlock(dev); + + return ret; +} + +static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len) +{ + if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN) + return -1; + return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1); +} + +#ifdef CONNECTION_STATISTICS +static int +wl_chanim_stats(struct net_device *dev, u8 *chan_idle) +{ + int err; + wl_chanim_stats_t *list; + /* Parameter _and_ returned buffer of chanim_stats. */ + wl_chanim_stats_t param; + u8 result[WLC_IOCTL_SMLEN]; + chanim_stats_t *stats; + + memset(¶m, 0, sizeof(param)); + memset(result, 0, sizeof(result)); + + param.buflen = htod32(sizeof(wl_chanim_stats_t)); + param.count = htod32(WL_CHANIM_COUNT_ONE); + + if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)¶m, sizeof(wl_chanim_stats_t), + (char*)result, sizeof(result), 0)) < 0) { + WL_ERR(("Failed to get chanim results %d \n", err)); + return err; + } + + list = (wl_chanim_stats_t*)result; + + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + + if (list->buflen == 0) { + list->version = 0; + list->count = 0; + } else if (list->version != WL_CHANIM_STATS_VERSION) { + WL_ERR(("Sorry, firmware has wl_chanim_stats version %d " + "but driver supports only version %d.\n", + list->version, WL_CHANIM_STATS_VERSION)); + list->buflen = 0; + list->count = 0; + } + + stats = list->stats; + stats->glitchcnt = dtoh32(stats->glitchcnt); + stats->badplcp = dtoh32(stats->badplcp); + stats->chanspec = dtoh16(stats->chanspec); + stats->timestamp = dtoh32(stats->timestamp); + stats->chan_idle = dtoh32(stats->chan_idle); + + WL_INFORM(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n", + stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle, + stats->timestamp)); + + *chan_idle = stats->chan_idle; + + return (err); +} + +static int +wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len) +{ + wl_cnt_t* cnt = NULL; +#ifndef DISABLE_IF_COUNTERS + wl_if_stats_t* if_stats = NULL; +#endif /* DISABLE_IF_COUNTERS */ + + int link_speed = 0; + struct connection_stats *output; + unsigned int bufsize = 0; + int bytes_written = -1; + int ret = 0; + + WL_INFORM(("%s: enter Get Connection Stats\n", __FUNCTION__)); + + if (total_len <= 0) { + WL_ERR(("%s: invalid buffer size %d\n", __FUNCTION__, 
total_len)); + goto error; + } + + bufsize = total_len; + if (bufsize < sizeof(struct connection_stats)) { + WL_ERR(("%s: not enough buffer size, provided=%u, requires=%zu\n", + __FUNCTION__, bufsize, + sizeof(struct connection_stats))); + goto error; + } + + output = (struct connection_stats *)command; + +#ifndef DISABLE_IF_COUNTERS + if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) { + WL_ERR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); + goto error; + } + memset(if_stats, 0, sizeof(*if_stats)); + + ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0, + (char *)if_stats, sizeof(*if_stats), NULL); + if (ret) { + WL_ERR(("%s: if_counters not supported ret=%d\n", + __FUNCTION__, ret)); + + /* In case if_stats IOVAR is not supported, get information from counters. */ +#endif /* DISABLE_IF_COUNTERS */ + if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) { + WL_ERR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); + goto error; + } + memset(cnt, 0, sizeof(*cnt)); + + ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, + (char *)cnt, sizeof(wl_cnt_t), NULL); + if (ret) { + WL_ERR(("%s: wldev_iovar_getbuf() failed, ret=%d\n", + __FUNCTION__, ret)); + goto error; + } + + if (dtoh16(cnt->version) > WL_CNT_T_VERSION) { + WL_ERR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n", + __FUNCTION__, WL_CNT_T_VERSION, cnt->version)); + goto error; + } + + output->txframe = dtoh32(cnt->txframe); + output->txbyte = dtoh32(cnt->txbyte); + output->txerror = dtoh32(cnt->txerror); + output->rxframe = dtoh32(cnt->rxframe); + output->rxbyte = dtoh32(cnt->rxbyte); + output->txfail = dtoh32(cnt->txfail); + output->txretry = dtoh32(cnt->txretry); + output->txretrie = dtoh32(cnt->txretrie); + output->txrts = dtoh32(cnt->txrts); + output->txnocts = dtoh32(cnt->txnocts); + output->txexptime = dtoh32(cnt->txexptime); +#ifndef DISABLE_IF_COUNTERS + } else { + /* Populate from if_stats. */ + if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) { + WL_ERR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n", + __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version)); + goto error; + } + + output->txframe = (uint32)dtoh64(if_stats->txframe); + output->txbyte = (uint32)dtoh64(if_stats->txbyte); + output->txerror = (uint32)dtoh64(if_stats->txerror); + output->rxframe = (uint32)dtoh64(if_stats->rxframe); + output->rxbyte = (uint32)dtoh64(if_stats->rxbyte); + output->txfail = (uint32)dtoh64(if_stats->txfail); + output->txretry = (uint32)dtoh64(if_stats->txretry); + output->txretrie = (uint32)dtoh64(if_stats->txretrie); + /* Unavailable */ + output->txrts = 0; + output->txnocts = 0; + output->txexptime = 0; + } +#endif /* DISABLE_IF_COUNTERS */ + + /* link_speed is in kbps */ + ret = wldev_get_link_speed(dev, &link_speed); + if (ret || link_speed < 0) { + WL_ERR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n", + __FUNCTION__, ret, link_speed)); + goto error; + } + + output->txrate = link_speed; + + /* Channel idle ratio. 
*/ + if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) { + output->chan_idle = 0; + }; + + bytes_written = sizeof(struct connection_stats); + +error: +#ifndef DISABLE_IF_COUNTERS + if (if_stats) { + kfree(if_stats); + } +#endif /* DISABLE_IF_COUNTERS */ + if (cnt) { + kfree(cnt); + } + + return bytes_written; +} +#endif /* CONNECTION_STATISTICS */ + + +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +/* SoftAP feature */ +#define APCS_BAND_2G_LEGACY1 20 +#define APCS_BAND_2G_LEGACY2 0 +#define APCS_BAND_AUTO "band=auto" +#define APCS_BAND_2G "band=2g" +#define APCS_BAND_5G "band=5g" +#define APCS_MAX_2G_CHANNELS 11 +#define APCS_MAX_RETRY 10 +#define APCS_DEFAULT_2G_CH 1 +#define APCS_DEFAULT_5G_CH 149 +#if defined(WL_SUPPORT_AUTO_CHANNEL) +static int +wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, + char* command, int total_len) +{ + int channel = 0; + int chosen = 0; + int retry = 0; + int ret = 0; + int spect = 0; + u8 *reqbuf = NULL; + uint32 band = WLC_BAND_2G; + uint32 buf_size; + + if (cmd_str) { + WL_INFORM(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str))); + if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) { + band = WLC_BAND_AUTO; + } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) { + band = WLC_BAND_5G; + } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) { + band = WLC_BAND_2G; + } else { + /* + * For backward compatibility: some platforms used to issue argument 20 or 0 + * to enforce the 2G channel selection + */ + channel = bcm_atoi(cmd_str); + if ((channel == APCS_BAND_2G_LEGACY1) || + (channel == APCS_BAND_2G_LEGACY2)) { + band = WLC_BAND_2G; + } else { + WL_ERR(("Invalid argument\n")); + return -EINVAL; + } + } + } else { + /* If no argument is provided, default to 2G */ + WL_ERR(("No argument given, default to 2.4G scan\n")); + band = WLC_BAND_2G; + } + WL_INFORM(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band)); + + if ((ret = wldev_ioctl(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect), false)) < 0) { + WL_ERR(("ACS: error getting the spect\n")); + goto done; + } + + if (spect > 0) { + /* If STA is connected, return the STA channel; otherwise ACS can be issued: + * set spect to 0 and proceed with ACS + */ + channel = wl_cfg80211_get_sta_channel(); + if (channel) { + channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH; + goto done2; + } + + if ((ret = wl_cfg80211_set_spect(dev, 0) < 0)) { + WL_ERR(("ACS: error while setting spect\n")); + goto done; + } + } + + reqbuf = kzalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL); + if (reqbuf == NULL) { + WL_ERR(("failed to allocate chanspec buffer\n")); + return -ENOMEM; + } + + if (band == WLC_BAND_AUTO) { + WL_INFORM(("ACS full channel scan \n")); + reqbuf[0] = htod32(0); + } else if (band == WLC_BAND_5G) { + WL_INFORM(("ACS 5G band scan \n")); + if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { + WL_ERR(("ACS 5g chanspec retrieval failed! \n")); + goto done; + } + } else if (band == WLC_BAND_2G) { + /* + * If the channel argument is not provided or argument 20 is provided, + * restrict the channel to 2GHz, 20MHz BW, no SB + */ + WL_INFORM(("ACS 2G band scan \n")); + if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { + WL_ERR(("ACS 2g chanspec retrieval failed! \n")); + goto done; + } + } else { + WL_ERR(("ACS: No band chosen\n")); + goto done2; + } + + buf_size = (band == WLC_BAND_AUTO) ? 
sizeof(int) : CHANSPEC_BUF_SIZE; + ret = wldev_ioctl(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, + buf_size, true); + if (ret < 0) { + WL_ERR(("can't start auto channel scan, err = %d\n", ret)); + channel = 0; + goto done; + } + + /* Wait for auto channel selection, max 3000 ms */ + if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) { + OSL_SLEEP(500); + } else { + /* + * Full channel scan at the minimum takes 1.2secs + * even with parallel scan. max wait time: 3500ms + */ + OSL_SLEEP(1000); + } + + retry = APCS_MAX_RETRY; + while (retry--) { + ret = wldev_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, + sizeof(chosen), false); + if (ret < 0) { + chosen = 0; + } else { + chosen = dtoh32(chosen); + } + + if (chosen) { + int chosen_band; + int apcs_band; +#ifdef D11AC_IOTYPES + if (wl_cfg80211_get_ioctl_version() == 1) { + channel = LCHSPEC_CHANNEL((chanspec_t)chosen); + } else { + channel = CHSPEC_CHANNEL((chanspec_t)chosen); + } +#else + channel = CHSPEC_CHANNEL((chanspec_t)chosen); +#endif /* D11AC_IOTYPES */ + apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band; + chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G; + if (apcs_band == chosen_band) { + WL_ERR(("selected channel = %d\n", channel)); + break; + } + } + WL_INFORM(("%d tried, ret = %d, chosen = 0x%x\n", + (APCS_MAX_RETRY - retry), ret, chosen)); + OSL_SLEEP(250); + } + +done: + if ((retry == 0) || (ret < 0)) { + /* On failure, fallback to a default channel */ + if ((band == WLC_BAND_5G)) { + channel = APCS_DEFAULT_5G_CH; + } else { + channel = APCS_DEFAULT_2G_CH; + } + WL_ERR(("ACS failed. Fall back to default channel (%d) \n", channel)); + } +done2: + if (spect > 0) { + if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) { + WL_ERR(("ACS: error while setting spect\n")); + } + } + + if (reqbuf) { + kfree(reqbuf); + } + + if (channel) { + snprintf(command, 4, "%d", channel); + WL_INFORM(("command result is %s \n", command)); + return strlen(command); + } else { + return ret; + } +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + + +#ifdef SUPPORT_SET_LPC +static int +wl_android_set_lpc(struct net_device *dev, const char* string_num) +{ + int lpc_enabled, ret; + s32 val = 1; + + lpc_enabled = bcm_atoi(string_num); + DHD_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled)); + + ret = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true); + if (ret < 0) + DHD_ERROR(("WLC_DOWN error %d\n", ret)); + + wldev_iovar_setint(dev, "lpc", lpc_enabled); + + ret = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true); + if (ret < 0) + DHD_ERROR(("WLC_UP error %d\n", ret)); + + return 1; +} +#endif /* SUPPORT_SET_LPC */ + +static int +wl_android_ch_res_rl(struct net_device *dev, bool change) +{ + int error = 0; + s32 srl = 7; + s32 lrl = 4; + printk("%s enter\n", __FUNCTION__); + if (change) { + srl = 4; + lrl = 2; + } + error = wldev_ioctl(dev, WLC_SET_SRL, &srl, sizeof(s32), true); + if (error) { + DHD_ERROR(("Failed to set SRL, error = %d\n", error)); + } + error = wldev_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(s32), true); + if (error) { + DHD_ERROR(("Failed to set LRL, error = %d\n", error)); + } + return error; +} + + +static int +wl_android_rmc_enable(struct net_device *net, int rmc_enable) +{ + int err; + + err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable); + return err; +} + +static int +wl_android_rmc_set_leader(struct net_device *dev, const char* straddr) +{ + int error = BCME_OK; + char smbuf[WLC_IOCTL_SMLEN]; + wl_rmc_entry_t rmc_entry; + DHD_INFO(("%s: Set new RMC leader 
%s\n", __FUNCTION__, straddr)); + + memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t)); + if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) { + if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) { + DHD_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__)); + memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t)); + } else { + DHD_ERROR(("%s: No valid mac address provided\n", + __FUNCTION__)); + return BCME_ERROR; + } + } + + error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t), + smbuf, sizeof(smbuf), NULL); + + if (error != BCME_OK) { + DHD_ERROR(("%s: Unable to set RMC leader, error = %d\n", + __FUNCTION__, error)); + } + + return error; +} + +static int wl_android_set_rmc_event(struct net_device *dev, char *command, int total_len) +{ + int err = 0; + int pid = 0; + + if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) { + WL_ERR(("Failed to get Parameter from : %s\n", command)); + return -1; + } + + /* set pid, and if the event was happened, let's send a notification through netlink */ + wl_cfg80211_set_rmc_pid(pid); + + WL_DBG(("RMC pid=%d\n", pid)); + + return err; +} + +int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int mode = 0; + + error = wldev_iovar_getint(dev, "scan_ps", &mode); + if (error) { + DHD_ERROR(("%s: Failed to get single core scan Mode, error = %d\n", + __FUNCTION__, error)); + return -1; + } + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode); + + return bytes_written; +} + +int wl_android_set_singlecore_scan(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "scan_ps", mode); + if (error) { + DHD_ERROR(("%s[1]: Failed to set Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return -1; + } + + return error; +} +#ifdef TEST_TX_POWER_CONTROL +static int +wl_android_set_tx_power(struct net_device *dev, const char* string_num) +{ + int err = 0; + s32 dbm; + enum nl80211_tx_power_setting type; + + dbm = bcm_atoi(string_num); + + if (dbm < -1) { + DHD_ERROR(("%s: dbm is negative...\n", __FUNCTION__)); + return -EINVAL; + } + + if (dbm == -1) + type = NL80211_TX_POWER_AUTOMATIC; + else + type = NL80211_TX_POWER_FIXED; + + err = wl_set_tx_power(dev, type, dbm); + if (unlikely(err)) { + DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + + return 1; +} + +static int +wl_android_get_tx_power(struct net_device *dev, char *command, int total_len) +{ + int err; + int bytes_written; + s32 dbm = 0; + + err = wl_get_tx_power(dev, &dbm); + if (unlikely(err)) { + DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + + bytes_written = snprintf(command, total_len, "%s %d", + CMD_TEST_GET_TX_POWER, dbm); + + DHD_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm)); + + return bytes_written; +} +#endif /* TEST_TX_POWER_CONTROL */ + +static int +wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num) +{ + int err = 0; + int setval = 0; + s32 mode = bcm_atoi(string_num); + + /* As Samsung specific and their requirement, '0' means activate sarlimit + * and '-1' means back to normal state (deactivate sarlimit) + */ + if (mode == 0) { + DHD_INFO(("%s: SAR limit control activated\n", __FUNCTION__)); + setval = 1; + } else if (mode == -1) { + DHD_INFO(("%s: SAR limit control deactivated\n", 
__FUNCTION__)); + setval = 0; + } else { + return -EINVAL; + } + + err = wldev_iovar_setint(dev, "sar_enable", setval); + if (unlikely(err)) { + DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + return 1; +} +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "roam_off", mode); + if (error) { + DHD_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return -1; + } + else + DHD_ERROR(("%s: succeeded to set roaming Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return 0; +} + +int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len) +{ + char ie_buf[VNDR_IE_MAX_LEN]; + char *ioctl_buf = NULL; + char hex[] = "XX"; + char *pcmd = NULL; + int ielen = 0, datalen = 0, idx = 0, tot_len = 0; + vndr_ie_setbuf_t *vndr_ie = NULL; + s32 iecount; + uint32 pktflag; + u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + s32 err = BCME_OK; + + /* Check the VSIE (Vendor Specific IE) which was added. + * If exist then send IOVAR to delete it + */ + if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) { + return -EINVAL; + } + + pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1; + for (idx = 0; idx < DOT11_OUI_LEN; idx++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16); + } + pcmd++; + while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16); + datalen++; + } + tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1); + vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags); + if (!vndr_ie) { + WL_ERR(("IE memory alloc failed\n")); + return -ENOMEM; + } + /* Copy the vndr_ie SET command ("add"/"del") to the buffer */ + strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1); + vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Set the IE count - the buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32)); + + /* Set packet flag to indicate that BEACON's will contain this IE */ + pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG); + memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, + sizeof(u32)); + /* Set the IE ID */ + vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID; + + memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf, + DOT11_OUI_LEN); + memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, + &ie_buf[DOT11_OUI_LEN], datalen); + + ielen = DOT11_OUI_LEN + datalen; + vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen; + + ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL); + if (!ioctl_buf) { + WL_ERR(("ioctl memory alloc failed\n")); + if (vndr_ie) { + kfree(vndr_ie); + } + return -ENOMEM; + } + memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */ + err = wldev_iovar_setbuf(dev, "ie", vndr_ie, tot_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + + + if (err != BCME_OK) { + err = -EINVAL; + if (vndr_ie) { + kfree(vndr_ie); + } + } + else { + /* do NOT free 'vndr_ie' for the next process */ + wl_cfg80211_ibss_vsie_set_buffer(vndr_ie, tot_len); + } + + if (ioctl_buf) { + kfree(ioctl_buf); + } + + return err; +} + 
+#if defined(BCMFW_ROAM_ENABLE) +static int +wl_android_set_roampref(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + char smbuf[WLC_IOCTL_SMLEN]; + uint8 buf[MAX_BUF_SIZE]; + uint8 *pref = buf; + char *pcmd; + int num_ucipher_suites = 0; + int num_akm_suites = 0; + wpa_suite_t ucipher_suites[MAX_NUM_SUITES]; + wpa_suite_t akm_suites[MAX_NUM_SUITES]; + int num_tuples = 0; + int total_bytes = 0; + int total_len_left; + int i, j; + char hex[] = "XX"; + + pcmd = command + strlen(CMD_SET_ROAMPREF) + 1; + total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1; + + num_akm_suites = simple_strtoul(pcmd, NULL, 16); + /* Increment for number of AKM suites field + space */ + pcmd += 3; + total_len_left -= 3; + + /* check to make sure pcmd does not overrun */ + if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE)) + return -1; + + memset(buf, 0, sizeof(buf)); + memset(akm_suites, 0, sizeof(akm_suites)); + memset(ucipher_suites, 0, sizeof(ucipher_suites)); + + /* Save the AKM suites passed in the command */ + for (i = 0; i < num_akm_suites; i++) { + /* Store the MSB first, as required by join_pref */ + for (j = 0; j < 4; j++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + buf[j] = (uint8)simple_strtoul(hex, NULL, 16); + } + memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32)); + } + + total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE); + num_ucipher_suites = simple_strtoul(pcmd, NULL, 16); + /* Increment for number of cipher suites field + space */ + pcmd += 3; + total_len_left -= 3; + + if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE)) + return -1; + + /* Save the cipher suites passed in the command */ + for (i = 0; i < num_ucipher_suites; i++) { + /* Store the MSB first, as required by join_pref */ + for (j = 0; j < 4; j++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + buf[j] = (uint8)simple_strtoul(hex, NULL, 16); + } + memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32)); + } + + /* Join preference for RSSI + * Type : 1 byte (0x01) + * Length : 1 byte (0x02) + * Value : 2 bytes (reserved) + */ + *pref++ = WL_JOIN_PREF_RSSI; + *pref++ = JOIN_PREF_RSSI_LEN; + *pref++ = 0; + *pref++ = 0; + + /* Join preference for WPA + * Type : 1 byte (0x02) + * Length : 1 byte (not used) + * Value : (variable length) + * reserved: 1 byte + * count : 1 byte (no of tuples) + * Tuple1 : 12 bytes + * akm[4] + * ucipher[4] + * mcipher[4] + * Tuple2 : 12 bytes + * Tuplen : 12 bytes + */ + num_tuples = num_akm_suites * num_ucipher_suites; + if (num_tuples != 0) { + if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) { + *pref++ = WL_JOIN_PREF_WPA; + *pref++ = 0; + *pref++ = 0; + *pref++ = (uint8)num_tuples; + total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + + (JOIN_PREF_WPA_TUPLE_SIZE * num_tuples); + } else { + DHD_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__)); + return -1; + } + } else { + /* No WPA config, configure only RSSI preference */ + total_bytes = JOIN_PREF_RSSI_SIZE; + } + + /* akm-ucipher-mcipher tuples in the format required for join_pref */ + for (i = 0; i < num_ucipher_suites; i++) { + for (j = 0; j < num_akm_suites; j++) { + memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + /* Set to 0 to match any available multicast cipher */ + memset(pref, 0, WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + } + } + + prhex("join pref", (uint8 *)buf, total_bytes); + error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, 
sizeof(smbuf), NULL); + if (error) { + DHD_ERROR(("Failed to set join_pref, error = %d\n", error)); + } + return error; +} +#endif /* defined(BCMFW_ROAM_ENABLE */ + +static int +wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config) +{ + struct io_cfg *resume_cfg; + s32 ret; + + resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL); + if (!resume_cfg) + return -ENOMEM; + + if (config->iovar) { + ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param); + if (ret) { + DHD_ERROR(("%s: Failed to get current %s value\n", + __FUNCTION__, config->iovar)); + goto error; + } + + ret = wldev_iovar_setint(dev, config->iovar, config->param); + if (ret) { + DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, + config->iovar, config->param)); + goto error; + } + + resume_cfg->iovar = config->iovar; + } else { + resume_cfg->arg = kzalloc(config->len, GFP_KERNEL); + if (!resume_cfg->arg) { + ret = -ENOMEM; + goto error; + } + ret = wldev_ioctl(dev, config->ioctl, resume_cfg->arg, config->len, false); + if (ret) { + DHD_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__, + config->ioctl)); + goto error; + } + ret = wldev_ioctl(dev, config->ioctl + 1, config->arg, config->len, true); + if (ret) { + DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, + config->iovar, config->param)); + goto error; + } + if (config->ioctl + 1 == WLC_SET_PM) + wl_cfg80211_update_power_mode(dev); + resume_cfg->ioctl = config->ioctl; + resume_cfg->len = config->len; + } + + list_add(&resume_cfg->list, head); + + return 0; +error: + kfree(resume_cfg->arg); + kfree(resume_cfg); + return ret; +} + +static void +wl_android_iolist_resume(struct net_device *dev, struct list_head *head) +{ + struct io_cfg *config; + struct list_head *cur, *q; + s32 ret = 0; + + list_for_each_safe(cur, q, head) { + config = list_entry(cur, struct io_cfg, list); + if (config->iovar) { + if (!ret) + ret = wldev_iovar_setint(dev, config->iovar, + config->param); + } else { + if (!ret) + ret = wldev_ioctl(dev, config->ioctl + 1, + config->arg, config->len, true); + if (config->ioctl + 1 == WLC_SET_PM) + wl_cfg80211_update_power_mode(dev); + kfree(config->arg); + } + list_del(cur); + kfree(config); + } +} +#ifdef WL11ULB +static int +wl_android_set_ulb_mode(struct net_device *dev, char *command, int total_len) +{ + int mode = 0; + + DHD_INFO(("set ulb mode (%s) \n", command)); + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + return wl_cfg80211_set_ulb_mode(dev, mode); +} +static int +wl_android_set_ulb_bw(struct net_device *dev, char *command, int total_len) +{ + int bw = 0; + u8 *pos; + char *ifname = NULL; + DHD_INFO(("set ulb bw (%s) \n", command)); + + /* + * For sta/ap: IFNAME= DRIVER ULB_BW ifname + * For p2p: IFNAME=wlan0 DRIVER ULB_BW p2p-dev-wlan0 + */ + if (total_len < strlen(CMD_ULB_BW) + 2) + return -EINVAL; + + pos = command + strlen(CMD_ULB_BW) + 1; + bw = bcm_atoi(pos); + + if ((strlen(pos) >= 5)) { + ifname = pos + 2; + } + + DHD_INFO(("[ULB] ifname:%s ulb_bw:%d \n", ifname, bw)); + return wl_cfg80211_set_ulb_bw(dev, bw, ifname); +} +#endif /* WL11ULB */ +static int +wl_android_set_miracast(struct net_device *dev, char *command, int total_len) +{ + int mode, val; + int ret = 0; + struct io_cfg config; + + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + DHD_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode)); + + if 
(miracast_cur_mode == mode) { + return 0; + } + + wl_android_iolist_resume(dev, &miracast_resume_list); + miracast_cur_mode = MIRACAST_MODE_OFF; + + switch (mode) { + case MIRACAST_MODE_SOURCE: + /* setting mchan_algo to platform specific value */ + config.iovar = "mchan_algo"; + + ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false); + if (!ret && val > 100) { + config.param = 0; + DHD_ERROR(("%s: Connected station's beacon interval: " + "%d and set mchan_algo to %d \n", + __FUNCTION__, val, config.param)); + } else { + config.param = MIRACAST_MCHAN_ALGO; + } + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + + /* setting mchan_bw to platform specific value */ + config.iovar = "mchan_bw"; + config.param = MIRACAST_MCHAN_BW; + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + + /* setting ampdu_mpdu to platform specific value */ + config.iovar = "ampdu_mpdu"; + config.param = MIRACAST_AMPDU_SIZE; + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + /* FALLTHROUGH */ + /* Source mode shares most configurations with sink mode. + * Fall through here to avoid code duplication + */ + case MIRACAST_MODE_SINK: + /* disable internal roaming */ + config.iovar = "roam_off"; + config.param = 1; + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + + /* turn off PM */ + ret = wldev_ioctl(dev, WLC_GET_PM, &val, sizeof(val), false); + if (ret) { + goto resume; + } + + if (val != PM_OFF) { + val = PM_OFF; + config.iovar = NULL; + config.ioctl = WLC_GET_PM; + config.arg = &val; + config.len = sizeof(int); + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + } + break; + case MIRACAST_MODE_OFF: + default: + break; + } + miracast_cur_mode = mode; + + return 0; + +resume: + DHD_ERROR(("%s: turn off miracast mode because of err %d\n", __FUNCTION__, ret)); + wl_android_iolist_resume(dev, &miracast_resume_list); + return ret; +} + +#define NETLINK_OXYGEN 30 +#define AIBSS_BEACON_TIMEOUT 10 + +static struct sock *nl_sk = NULL; + +static void wl_netlink_recv(struct sk_buff *skb) +{ + WL_ERR(("netlink_recv called\n")); +} + +static int wl_netlink_init(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + struct netlink_kernel_cfg cfg = { + .input = wl_netlink_recv, + }; +#endif + + if (nl_sk != NULL) { + WL_ERR(("nl_sk already exists\n")); + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, + 0, wl_netlink_recv, NULL, THIS_MODULE); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg); +#else + nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg); +#endif + + if (nl_sk == NULL) { + WL_ERR(("nl_sk is not ready\n")); + return BCME_ERROR; + } + + return BCME_OK; +} + +static void wl_netlink_deinit(void) +{ + if (nl_sk) { + netlink_kernel_release(nl_sk); + nl_sk = NULL; + } +} + +s32 +wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh = NULL; + int ret = -1; + + if (nl_sk == NULL) { + WL_ERR(("nl_sk was not initialized\n")); + goto nlmsg_failure; + } + + skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC); + if (skb == NULL) { + WL_ERR(("failed to allocate memory\n")); + goto nlmsg_failure; + } + + nlh = nlmsg_put(skb, 
0, 0, 0, size, 0); + if (nlh == NULL) { + WL_ERR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n", + skb_tailroom(skb), nlmsg_total_size(size))); + dev_kfree_skb(skb); + goto nlmsg_failure; + } + + memcpy(nlmsg_data(nlh), data, size); + nlh->nlmsg_seq = seq; + nlh->nlmsg_type = type; + + /* netlink_unicast() takes ownership of the skb and frees it itself. */ + ret = netlink_unicast(nl_sk, skb, pid, 0); + WL_DBG(("netlink_unicast() pid=%d, ret=%d\n", pid, ret)); + +nlmsg_failure: + return ret; +} + + +int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len) +{ + char buf[256]; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + uint period_msec = 0; + + if (extra == NULL) + { + DHD_ERROR(("%s: extra is NULL\n", __FUNCTION__)); + return -1; + } + if (sscanf(extra, "%d", &period_msec) != 1) + { + DHD_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__)); + return -EINVAL; + } + DHD_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec)); + + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(buf, str, str_len); + buf[ str_len ] = '\0'; + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); + mkeep_alive_pkt.period_msec = period_msec; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0) + { + DHD_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res)); + } + else + { + DHD_ERROR(("%s:keep_alive set ok. 
res[%d]\n", __FUNCTION__, res)); + } + + return res; +} + +static const char * +get_string_by_separator(char *result, int result_len, const char *src, char separator) +{ + char *end = result + result_len - 1; + while ((result != end) && (*src != separator) && (*src)) { + *result++ = *src++; + } + *result = 0; + if (*src == separator) { + ++src; + } + return src; +} + +int +wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd) +{ + char sbuf[32]; + int i, cnt, size, err, ioctl_buf_len; + roamoffl_bssid_list_t *bssid_list; + const char *str = cmd; + char *ioctl_buf; + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(); + + str = get_string_by_separator(sbuf, 32, str, ','); + cnt = bcm_atoi(sbuf); + cnt = MIN(cnt, MAX_ROAMOFFL_BSSID_NUM); + + if ((cnt > 0) && + (((dhdp->op_mode & DHD_FLAG_STA_MODE) && (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) || + FALSE)) { + WL_ERR(("Can't set ROAMOFFL_BSSID when enabled STA-SoftAP or WES\n")); + return -EINVAL; + } + + size = sizeof(int32) + sizeof(struct ether_addr) * cnt; + WL_ERR(("ROAM OFFLOAD BSSID LIST %d BSSIDs, size %d\n", cnt, size)); + bssid_list = kmalloc(size, GFP_KERNEL); + if (bssid_list == NULL) { + WL_ERR(("%s: memory alloc for bssid list(%d) failed\n", + __FUNCTION__, size)); + return -ENOMEM; + } + ioctl_buf_len = size + 64; + ioctl_buf = kmalloc(ioctl_buf_len, GFP_KERNEL); + if (ioctl_buf == NULL) { + WL_ERR(("%s: memory alloc for ioctl_buf(%d) failed\n", + __FUNCTION__, ioctl_buf_len)); + kfree(bssid_list); + return -ENOMEM; + } + + for (i = 0; i < cnt; i++) { + str = get_string_by_separator(sbuf, 32, str, ','); + bcm_ether_atoe(sbuf, &bssid_list->bssid[i]); + } + + bssid_list->cnt = (int32)cnt; + err = wldev_iovar_setbuf(dev, "roamoffl_bssid_list", + bssid_list, size, ioctl_buf, ioctl_buf_len, NULL); + kfree(bssid_list); + kfree(ioctl_buf); + + return err; +} + +#ifdef P2PRESP_WFDIE_SRC +static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int only_resp_wfdsrc = 0; + + error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc); + if (error) { + DHD_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n", + __FUNCTION__, error)); + return -1; + } + + bytes_written = snprintf(command, total_len, "%s %d", + CMD_P2P_GET_WFDIE_RESP, only_resp_wfdsrc); + + return bytes_written; +} + +static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsrc) +{ + int error = 0; + + error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc); + if (error) { + DHD_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n", + __FUNCTION__, only_resp_wfdsrc, error)); + return -1; + } + + return 0; +} +#endif /* P2PRESP_WFDIE_SRC */ + +#ifdef BT_WIFI_HANDOVER +static int +wl_tbow_teardown(struct net_device *dev, char *command, int total_len) +{ + int err = BCME_OK; + char buf[WLC_IOCTL_SMLEN]; + tbow_setup_netinfo_t netinfo; + memset(&netinfo, 0, sizeof(netinfo)); + netinfo.opmode = TBOW_HO_MODE_TEARDOWN; + + err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo, + sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL); + if (err < 0) { + WL_ERR(("tbow_doho iovar error %d\n", err)); + return err; + } + return err; +} +#endif /* BT_WIFI_HANOVER */ + +#ifdef SET_RPS_CPUS +static int +wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len) +{ + int error, enable; + + enable = command[strlen(CMD_RPSMODE) + 1] - '0'; + error = dhd_rps_cpus_enable(dev, enable); + +#if 
defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211) + if (!error) { + void *dhdp = wl_cfg80211_get_dhdp(); + if (enable) { + DHD_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD); + } else { + DHD_TRACE(("%s : clear ack suppress.\n", __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } + } +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE && WL_CFG80211 */ + + return error; +} +#endif /* SET_RPS_CPUS */ +#ifdef P2P_LISTEN_OFFLOADING +s32 +wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len) +{ + int ret = 0; + + WL_ERR(("Entry cmd:%s arg_len:%d \n", cmd, len)); + + if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) { + ret = wl_cfg80211_p2plo_listen_start(dev, buf, len); + } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) { + ret = wl_cfg80211_p2plo_listen_stop(dev); + } else { + WL_ERR(("Request for Unsupported CMD:%s \n", buf)); + ret = -EINVAL; + } + return ret; +} +#endif /* P2P_LISTEN_OFFLOADING */ + +int +wl_android_murx_bfe_cap(struct net_device *dev, int val) +{ + int err = BCME_OK; + int iface_count = wl_cfg80211_iface_count(); + + if (iface_count > 1) { + DHD_ERROR(("%s: murx_bfe_cap change is not allowed when " + "there are multiple interfaces\n", __FUNCTION__)); + return -EINVAL; + } + /* Now there is only a single interface */ + err = wldev_iovar_setint(dev, "murx_bfe_cap", val); + if (err) { + DHD_ERROR(("%s: Failed to set murx_bfe_cap IOVAR to %d, " + "error %d\n", __FUNCTION__, val, err)); + err = -EINVAL; + } + return err; +} + +int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) +{ +#define PRIVATE_COMMAND_MAX_LEN 8192 + int ret = 0; + char *command = NULL; + int bytes_written = 0; + android_wifi_priv_cmd priv_cmd; + + net_os_wake_lock(net); + + if (!ifr->ifr_data) { + ret = -EINVAL; + goto exit; + } + +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + compat_android_wifi_priv_cmd compat_priv_cmd; + if (copy_from_user(&compat_priv_cmd, ifr->ifr_data, + sizeof(compat_android_wifi_priv_cmd))) { + ret = -EFAULT; + goto exit; + + } + priv_cmd.buf = compat_ptr(compat_priv_cmd.buf); + priv_cmd.used_len = compat_priv_cmd.used_len; + priv_cmd.total_len = compat_priv_cmd.total_len; + } else +#endif /* CONFIG_COMPAT */ + { + if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) { + ret = -EFAULT; + goto exit; + } + } + if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) { + DHD_ERROR(("%s: private command too long\n", __FUNCTION__)); + ret = -EINVAL; + goto exit; + } + command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL); + if (!command) + { + DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__)); + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) { + ret = -EFAULT; + goto exit; + } + command[priv_cmd.total_len] = '\0'; + + DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name)); + + if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) { + DHD_INFO(("%s, Received regular START command\n", __FUNCTION__)); + bytes_written = wl_android_wifi_on(net); + } + else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) { + bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len); + } + + if (!g_wifi_on) { + DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n", + __FUNCTION__, command, ifr->ifr_name)); + ret = 0; + goto exit; + } + + if 
(strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) { + bytes_written = wl_android_wifi_off(net, FALSE); + } + else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) { + wl_cfg80211_set_passive_scan(net, command); + } + else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) { + wl_cfg80211_set_passive_scan(net, command); + } + else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) { + bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) { + bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len); + } +#ifdef PKT_FILTER_SUPPORT + else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) { + bytes_written = net_os_enable_packet_filter(net, 1); + } + else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) { + bytes_written = net_os_enable_packet_filter(net, 0); + } + else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) { + int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0'; + bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num); + } + else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) { + int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0'; + bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num); + } +#endif /* PKT_FILTER_SUPPORT */ + else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) { + /* TBD: BTCOEXSCAN-START */ + } + else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) { + /* TBD: BTCOEXSCAN-STOP */ + } + else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) { +#ifdef WL_CFG80211 + void *dhdp = wl_cfg80211_get_dhdp(); + bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command); +#else +#ifdef PKT_FILTER_SUPPORT + uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0'; + + if (mode == 1) + net_os_enable_packet_filter(net, 0); /* DHCP starts */ + else + net_os_enable_packet_filter(net, 1); /* DHCP ends */ +#endif /* PKT_FILTER_SUPPORT */ +#endif /* WL_CFG80211 */ + } + else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) { + bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) { + bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) { + uint band = *(command + strlen(CMD_SETBAND) + 1) - '0'; + bytes_written = wldev_set_band(net, band); + } + else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) { + bytes_written = wl_android_get_band(net, command, priv_cmd.total_len); + } +#ifdef WL_CFG80211 + /* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */ + else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) { + /* + * Usage examples: + * DRIVER COUNTRY US + * DRIVER COUNTRY US/7 + */ + char *country_code = command + strlen(CMD_COUNTRY) + 1; + char *rev_info_delim = country_code + 2; /* 2 bytes of country code */ + int revinfo = -1; + if ((rev_info_delim) && + (strnicmp(rev_info_delim, CMD_COUNTRY_DELIMITER, + strlen(CMD_COUNTRY_DELIMITER)) == 0) && + (rev_info_delim + 1)) { + revinfo = bcm_atoi(rev_info_delim + 1); + } + bytes_written = wldev_set_country(net, country_code, true, true, revinfo); +#ifdef FCC_PWR_LIMIT_2G + 
if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) { + DHD_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__)); + } +#endif /* FCC_PWR_LIMIT_2G */ + } +#endif /* WL_CFG80211 */ + else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) { + bytes_written = wl_android_set_csa(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) { + bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) { + bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) { + bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len); + } else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) { + bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len); + } + +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef WLTDLS + else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) { + bytes_written = wl_android_tdls_reset(net); + } +#endif /* WLTDLS */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +#ifdef PNO_SUPPORT + else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) { + bytes_written = dhd_dev_pno_stop_for_ssid(net); + } +#ifndef WL_SCHED_SCAN + else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) { + bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len); + } +#endif /* !WL_SCHED_SCAN */ + else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) { + int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0'; + bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net); + } + else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) { + bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len); + } +#endif /* PNO_SUPPORT */ + else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) { + bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) { + int skip = strlen(CMD_P2P_SET_NOA) + 1; + bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip, + priv_cmd.total_len - skip); + } +#ifdef P2P_LISTEN_OFFLOADING + else if (strnicmp(command, CMD_P2P_LISTEN_OFFLOAD, strlen(CMD_P2P_LISTEN_OFFLOAD)) == 0) { + u8 *sub_command = strchr(command, ' '); + bytes_written = wl_cfg80211_p2plo_offload(net, command, sub_command, + sub_command ? 
strlen(sub_command) : 0); + } +#endif /* P2P_LISTEN_OFFLOADING */ +#ifdef WL_NAN + else if (strnicmp(command, CMD_NAN, strlen(CMD_NAN)) == 0) { + bytes_written = wl_cfg80211_nan_cmd_handler(net, command, + priv_cmd.total_len); + } +#endif /* WL_NAN */ +#if !defined WL_ENABLE_P2P_IF + else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) { + bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len); + } +#endif /* WL_ENABLE_P2P_IF */ + else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) { + int skip = strlen(CMD_P2P_SET_PS) + 1; + bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip, + priv_cmd.total_len - skip); + } + else if (strnicmp(command, CMD_P2P_ECSA, strlen(CMD_P2P_ECSA)) == 0) { + int skip = strlen(CMD_P2P_ECSA) + 1; + bytes_written = wl_cfg80211_set_p2p_ecsa(net, command + skip, + priv_cmd.total_len - skip); + } +#ifdef WL_CFG80211 + else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE, + strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) { + int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3; + bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip, + priv_cmd.total_len - skip, *(command + skip - 2) - '0'); + } +#endif /* WL_CFG80211 */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) + else if (strnicmp(command, CMD_GET_BEST_CHANNELS, + strlen(CMD_GET_BEST_CHANNELS)) == 0) { + bytes_written = wl_cfg80211_get_best_channels(net, command, + priv_cmd.total_len); + } +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) + else if (strnicmp(command, CMD_SET_HAPD_AUTO_CHANNEL, + strlen(CMD_SET_HAPD_AUTO_CHANNEL)) == 0) { + int skip = strlen(CMD_SET_HAPD_AUTO_CHANNEL) + 1; + bytes_written = wl_android_set_auto_channel(net, (const char*)command+skip, command, + priv_cmd.total_len); + } +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef SUPPORT_SET_LPC + else if (strnicmp(command, CMD_HAPD_LPC_ENABLED, + strlen(CMD_HAPD_LPC_ENABLED)) == 0) { + int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3; + wl_android_set_lpc(net, (const char*)command+skip); + } +#endif /* SUPPORT_SET_LPC */ +#ifdef SUPPORT_TRIGGER_HANG_EVENT + else if (strnicmp(command, CMD_TEST_FORCE_HANG, + strlen(CMD_TEST_FORCE_HANG)) == 0) { + int skip = strlen(CMD_TEST_FORCE_HANG) + 1; + net_os_send_hang_message_reason(net, (const char*)command+skip); + } +#endif /* SUPPORT_TRIGGER_HANG_EVENT */ + else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0) + bytes_written = wl_android_ch_res_rl(net, true); + else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0) + bytes_written = wl_android_ch_res_rl(net, false); + else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) { + int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0'; + bytes_written = wl_android_rmc_enable(net, rmc_enable); + } + else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) { + int rmc_txrate; + sscanf(command, "%*s %10d", &rmc_txrate); + bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2); + } + else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) { + int actperiod; + sscanf(command, "%*s %10d", &actperiod); + bytes_written = wldev_iovar_setint(net, "rmc_actf_time", actperiod); + } + else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) { + int acktimeout; + sscanf(command, "%*s %10d", &acktimeout); + acktimeout *= 1000; + bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout); + } + else 
if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) { + int skip = strlen(CMD_SET_RMC_LEADER) + 1; + bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_SET_RMC_EVENT, + strlen(CMD_SET_RMC_EVENT)) == 0) + bytes_written = wl_android_set_rmc_event(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) { + bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) { + bytes_written = wl_android_set_singlecore_scan(net, command, priv_cmd.total_len); + } +#ifdef TEST_TX_POWER_CONTROL + else if (strnicmp(command, CMD_TEST_SET_TX_POWER, + strlen(CMD_TEST_SET_TX_POWER)) == 0) { + int skip = strlen(CMD_TEST_SET_TX_POWER) + 1; + wl_android_set_tx_power(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_TEST_GET_TX_POWER, + strlen(CMD_TEST_GET_TX_POWER)) == 0) { + wl_android_get_tx_power(net, command, priv_cmd.total_len); + } +#endif /* TEST_TX_POWER_CONTROL */ + else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL, + strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) { + int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1; + wl_android_set_sarlimit_txctrl(net, (const char*)command+skip); + } +#ifdef IPV6_NDO_SUPPORT + else if (strnicmp(command, CMD_NDRA_LIMIT, strlen(CMD_NDRA_LIMIT)) == 0) { + bytes_written = wl_android_nd_ra_limit(net, command, priv_cmd.total_len); + } +#endif /* IPV6_NDO_SUPPORT */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) { + int skip = strlen(CMD_HAPD_MAC_FILTER) + 1; + wl_android_set_mac_address_filter(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0) + bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len); +#if defined(BCMFW_ROAM_ENABLE) + else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) { + bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len); + } +#endif /* BCMFW_ROAM_ENABLE */ + else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0) + bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len); +#ifdef WL11ULB + else if (strnicmp(command, CMD_ULB_MODE, strlen(CMD_ULB_MODE)) == 0) + bytes_written = wl_android_set_ulb_mode(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_ULB_BW, strlen(CMD_ULB_BW)) == 0) + bytes_written = wl_android_set_ulb_bw(net, command, priv_cmd.total_len); +#endif /* WL11ULB */ + else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0) + bytes_written = wl_android_set_ibss_beacon_ouidata(net, + command, priv_cmd.total_len); + else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) { + int skip = strlen(CMD_KEEP_ALIVE) + 1; + bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip); + } + else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) { + int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0'; + bytes_written = wl_cfg80211_enable_roam_offload(net, enable); + } + else if (strnicmp(command, CMD_ROAM_OFFLOAD_APLIST, strlen(CMD_ROAM_OFFLOAD_APLIST)) == 0) { + bytes_written = wl_android_set_roam_offload_bssid_list(net, + command + strlen(CMD_ROAM_OFFLOAD_APLIST) + 1); + } +#if defined(WL_VIRTUAL_APSTA) + else if (strnicmp(command, CMD_INTERFACE_CREATE, 
strlen(CMD_INTERFACE_CREATE)) == 0) { + char *name = (command + strlen(CMD_INTERFACE_CREATE) +1); + WL_INFORM(("Creating %s interface\n", name)); + bytes_written = wl_cfg80211_interface_create(net, name); + } + else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) { + char *name = (command + strlen(CMD_INTERFACE_DELETE) +1); + WL_INFORM(("Deleteing %s interface\n", name)); + bytes_written = wl_cfg80211_interface_delete(net, name); + } +#endif /* defined (WL_VIRTUAL_APSTA) */ +#ifdef P2PRESP_WFDIE_SRC + else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP, + strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) { + int mode = *(command + strlen(CMD_P2P_SET_WFDIE_RESP) + 1) - '0'; + bytes_written = wl_android_set_wfdie_resp(net, mode); + } else if (strnicmp(command, CMD_P2P_GET_WFDIE_RESP, + strlen(CMD_P2P_GET_WFDIE_RESP)) == 0) { + bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len); + } +#endif /* P2PRESP_WFDIE_SRC */ + else if (strnicmp(command, CMD_DFS_AP_MOVE, strlen(CMD_DFS_AP_MOVE)) == 0) { + char *data = (command + strlen(CMD_DFS_AP_MOVE) +1); + bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) { + bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG, + strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG, + strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_weight_config(net, data, + command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG, + strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_table_config(net, data, + command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG, + strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) { + char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1); + bytes_written = wl_cfg80211_wbtext_delta_config(net, data, + command, priv_cmd.total_len); + } +#ifdef SET_RPS_CPUS + else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) { + bytes_written = wl_android_set_rps_cpus(net, command, priv_cmd.total_len); + } +#endif /* SET_RPS_CPUS */ +#ifdef WLWFDS + else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) { + bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 1); + } + else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) { + bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 0); + } +#endif /* WLWFDS */ +#ifdef BT_WIFI_HANDOVER + else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) { + ret = wl_tbow_teardown(net, command, priv_cmd.total_len); + } +#endif /* BT_WIFI_HANDOVER */ +#ifdef FCC_PWR_LIMIT_2G + else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G, + strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) { + bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G, + strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) { + bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command, priv_cmd.total_len); + 
} +#endif /* FCC_PWR_LIMIT_2G */ + else if (strnicmp(command, CMD_MURX_BFE_CAP, + strlen(CMD_MURX_BFE_CAP)) == 0) { + uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0'; + bytes_written = wl_android_murx_bfe_cap(net, val); + } +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) { + bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO)) + == 0) { + bytes_written = wl_cfg80211_get_connect_failed_status(net, command, + priv_cmd.total_len); + } +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ +#if defined(SUPPORT_RANDOM_MAC_SCAN) + else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) { + bytes_written = wl_cfg80211_set_random_mac(net, TRUE); + } else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) { + bytes_written = wl_cfg80211_set_random_mac(net, FALSE); + } +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +#ifdef DHD_LOG_DUMP + else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP, + strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) { + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(); +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_stats_dump(dhdp); +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd_schedule_log_dump(dhdp); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP; + dhd_bus_mem_dump(dhdp); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + } +#endif /* DHD_LOG_DUMP */ + else { + DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command)); + snprintf(command, 3, "OK"); + bytes_written = strlen("OK"); + } + + if (bytes_written >= 0) { + if ((bytes_written == 0) && (priv_cmd.total_len > 0)) + command[0] = '\0'; + if (bytes_written >= priv_cmd.total_len) { + DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written)); + bytes_written = priv_cmd.total_len; + } else { + bytes_written++; + } + priv_cmd.used_len = bytes_written; + if (copy_to_user(priv_cmd.buf, command, bytes_written)) { + DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__)); + ret = -EFAULT; + } + } +#ifdef CONNECTION_STATISTICS + else if (strnicmp(command, CMD_GET_CONNECTION_STATS, + strlen(CMD_GET_CONNECTION_STATS)) == 0) { + bytes_written = wl_android_get_connection_stats(net, command, + priv_cmd.total_len); + } +#endif + else { + ret = bytes_written; + } + +exit: + net_os_wake_unlock(net); + if (command) { + kfree(command); + } + + return ret; +} + +int wl_android_init(void) +{ + int ret = 0; + +#ifdef ENABLE_INSMOD_NO_FW_LOAD + dhd_download_fw_on_driverload = FALSE; +#endif /* ENABLE_INSMOD_NO_FW_LOAD */ + if (!iface_name[0]) { + memset(iface_name, 0, IFNAMSIZ); + bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ); + } + + wl_netlink_init(); + + return ret; +} + +int wl_android_exit(void) +{ + int ret = 0; + struct io_cfg *cur, *q; + + wl_netlink_deinit(); + + list_for_each_entry_safe(cur, q, &miracast_resume_list, list) { + list_del(&cur->list); + kfree(cur); + } + + return ret; +} + +void wl_android_post_init(void) +{ + +#ifdef ENABLE_4335BT_WAR + bcm_bt_unlock(lock_cookie_wifi); + printk("%s: btlock released\n", __FUNCTION__); +#endif /* ENABLE_4335BT_WAR */ + + if (!dhd_download_fw_on_driverload) + g_wifi_on = FALSE; +} diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h new file mode 100644 index 000000000000..14ffc6820a44 --- /dev/null +++ 
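Editorial aside, not part of the patch: wl_android_priv_cmd() above is the kernel end of Android's "DRIVER ..." private-command channel. Userspace fills an android_wifi_priv_cmd descriptor (buf / used_len / total_len, mirroring the compat handling copied from ifr_data above), the driver copies the command string in, dispatches it by strnicmp() prefix, and writes any reply text back into the same buffer. The sketch below shows the caller side under stated assumptions: the struct layout and the ioctl number (SIOCDEVPRIVATE + 1, which bcmdhd's dhd_ioctl_entry() conventionally routes to wl_android_priv_cmd()) are taken from typical bcmdhd trees, not from this hunk, and should be checked against the target tree.

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>

/* Assumed to mirror the kernel's android_wifi_priv_cmd (non-compat layout). */
struct android_wifi_priv_cmd_user {
	char *buf;       /* command in, NUL-terminated reply out */
	int used_len;    /* bytes of valid data in buf */
	int total_len;   /* capacity of buf; must not exceed PRIVATE_COMMAND_MAX_LEN (8192) */
};

/* Send one "DRIVER"-style private command (e.g. "GETBAND", "COUNTRY US/7")
 * to ifname and leave the reply, if any, in cmd_buf. Returns the ioctl result.
 */
static int wl_send_priv_cmd(const char *ifname, char *cmd_buf, int buf_len)
{
	struct android_wifi_priv_cmd_user priv;
	struct ifreq ifr;
	int sock, ret;

	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	priv.buf = cmd_buf;
	priv.used_len = (int)strlen(cmd_buf) + 1;
	priv.total_len = buf_len;
	ifr.ifr_data = (char *)&priv;

	ret = ioctl(sock, SIOCDEVPRIVATE + 1, &ifr);  /* assumed command number */
	close(sock);
	return ret;
}

For example, char buf[64] = "GETBAND"; wl_send_priv_cmd("wlan0", buf, sizeof(buf)); would return with the band string written over buf, matching the copy_to_user() path at the end of wl_android_priv_cmd().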
b/drivers/net/wireless/bcmdhd/wl_android.h @@ -0,0 +1,80 @@ +/* + * Linux cfg80211 driver - Android related functions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_android.h 608194 2015-12-24 04:34:35Z $ + */ + +#include +#include +#include + +/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL + * automatically + */ +#if defined(BT_WIFI_HANDOVER) || defined(WL_NAN) +#define WL_GENL +#endif + + + +/** + * Android platform dependent functions, feel free to add Android specific functions here + * (save the macros in dhd). Please do NOT declare functions that are NOT exposed to dhd + * or cfg, define them as static in wl_android.c + */ + +/** + * wl_android_init will be called from module init function (dhd_module_init now), similarly + * wl_android_exit will be called from module exit function (dhd_module_cleanup now) + */ +int wl_android_init(void); +int wl_android_exit(void); +void wl_android_post_init(void); +int wl_android_wifi_on(struct net_device *dev); +int wl_android_wifi_off(struct net_device *dev, bool on_failure); +int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd); + +s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size); + +/* hostap mac mode */ +#define MACLIST_MODE_DISABLED 0 +#define MACLIST_MODE_DENY 1 +#define MACLIST_MODE_ALLOW 2 + +/* max number of assoc list */ +#define MAX_NUM_OF_ASSOCLIST 64 + +/* Bandwidth */ +#define WL_CH_BANDWIDTH_20MHZ 20 +#define WL_CH_BANDWIDTH_40MHZ 40 +#define WL_CH_BANDWIDTH_80MHZ 80 +/* max number of mac filter list + * restrict max number to 10 as maximum cmd string size is 255 + */ +#define MAX_NUM_MAC_FILT 10 + +int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist); +int wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd); diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c new file mode 100644 index 000000000000..90e7a9c2f1a6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c @@ -0,0 +1,17335 @@ +/* + * Linux cfg80211 driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfg80211.c 610196 2016-01-06 11:20:45Z $ + */ +/* */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif /* PNO_SUPPORT */ + +#if defined(WL_VENDOR_EXT_SUPPORT) +#include +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + +#ifdef WL_NAN +#include +#endif /* WL_NAN */ + +#ifdef PROP_TXSTATUS +#include +#endif + +#ifdef BCMPCIE +#include +#endif + +#ifdef WL11U +#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF) +#error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \ + according to Kernel version and is supported only in Android-JB +#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */ +#endif /* WL11U */ + + +#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) + +static struct device *cfg80211_parent_dev = NULL; +/* g_bcm_cfg should be static. 
Do not change */ +static struct bcm_cfg80211 *g_bcm_cfg = NULL; +#ifdef CUSTOMER_HW4_DEBUG +u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION; +#else +u32 wl_dbg_level = WL_DBG_ERR; +#endif /* CUSTOMER_HW4_DEBUG */ + +#define MAX_WAIT_TIME 1500 +#ifdef WLAIBSS_MCHAN +#define IBSS_IF_NAME "ibss%d" +#endif /* WLAIBSS_MCHAN */ + +#ifdef VSDB +/* sleep time to keep STA's connecting or connection for continuous af tx or finding a peer */ +#define DEFAULT_SLEEP_TIME_VSDB 120 +#define OFF_CHAN_TIME_THRESHOLD_MS 200 +#define AF_RETRY_DELAY_TIME 40 + +/* if sta is connected or connecting, sleep for a while before retry af tx or finding a peer */ +#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg) \ + do { \ + if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) || \ + wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) { \ + OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB); \ + } \ + } while (0) +#else /* VSDB */ +/* if not VSDB, do nothing */ +#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg) +#endif /* VSDB */ + +#ifdef WL_CFG80211_SYNC_GON +#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \ + (wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \ + wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) +#else +#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM) +#endif /* WL_CFG80211_SYNC_GON */ + +#define DNGL_FUNC(func, parameters) func parameters +#define COEX_DHCP + +#define WLAN_EID_SSID 0 +#define CH_MIN_5G_CHANNEL 34 +#define CH_MIN_2G_CHANNEL 1 +#define ACTIVE_SCAN 1 +#define PASSIVE_SCAN 0 + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +(entry) = list_first_entry((ptr), type, member); \ +_Pragma("GCC diagnostic pop") \ + +#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +entry = container_of((ptr), type, member); \ +_Pragma("GCC diagnostic pop") \ + +#else +#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \ +(entry) = list_first_entry((ptr), type, member); \ + +#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \ +entry = container_of((ptr), type, member); \ + +#endif /* STRICT_GCC_WARNINGS */ + +enum rmc_event_type { + RMC_EVENT_NONE, + RMC_EVENT_LEADER_CHECK_FAIL +}; + +/* This is to override regulatory domains defined in cfg80211 module (reg.c) + * By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN + * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165). + * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels. + * All the chnages in world regulatory domain are to be done here. 
+ * + * this definition reuires disabling missing-field-initializer warning + * as the ieee80211_regdomain definition differs in plain linux and in Android + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") +#endif +static const struct ieee80211_regdomain brcm_regdom = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), + /* If any */ + /* IEEE 802.11 channel 14 - Only JP enables + * this and for 802.11b only + */ + REG_RULE(2484-10, 2484+10, 20, 6, 20, 0), + /* IEEE 802.11a, channel 36..64 */ + REG_RULE(5150-10, 5350+10, 40, 6, 20, 0), + /* IEEE 802.11a, channel 100..165 */ + REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } +}; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ + (defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)) +static const struct ieee80211_iface_limit common_if_limits[] = { + { + /* + * Driver can support up to 2 AP's + */ + .max = 2, + .types = BIT(NL80211_IFTYPE_AP), + }, + { + /* + * During P2P-GO removal, P2P-GO is first changed to STA and later only + * removed. So setting maximum possible number of STA interfaces according + * to kernel version. + * + * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x) + * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x) + */ +#ifdef WL_ENABLE_P2P_IF + .max = 3, +#else + .max = 2, +#endif /* WL_ENABLE_P2P_IF */ + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT), + }, +#if defined(WL_CFG80211_P2P_DEV_IF) + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +#endif /* WL_CFG80211_P2P_DEV_IF */ + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC), + }, +}; +#ifdef BCM4330_CHIP +#define NUM_DIFF_CHANNELS 1 +#else +#define NUM_DIFF_CHANNELS 2 +#endif +static const struct ieee80211_iface_combination +common_iface_combinations[] = { + { + .num_different_channels = NUM_DIFF_CHANNELS, + /* + * max_interfaces = 4 + * The max no of interfaces will be used in dual p2p case. + * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we + * will not be using the STA functionality in this case, it + * will remain registered as it is the primary interface. 
+ */ + .max_interfaces = 4, + .limits = common_if_limits, + .n_limits = ARRAY_SIZE(common_if_limits), + }, +}; +#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */ + +/* Data Element Definitions */ +#define WPS_ID_CONFIG_METHODS 0x1008 +#define WPS_ID_REQ_TYPE 0x103A +#define WPS_ID_DEVICE_NAME 0x1011 +#define WPS_ID_VERSION 0x104A +#define WPS_ID_DEVICE_PWD_ID 0x1012 +#define WPS_ID_REQ_DEV_TYPE 0x106A +#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053 +#define WPS_ID_PRIM_DEV_TYPE 0x1054 + +/* Device Password ID */ +#define DEV_PW_DEFAULT 0x0000 +#define DEV_PW_USER_SPECIFIED 0x0001, +#define DEV_PW_MACHINE_SPECIFIED 0x0002 +#define DEV_PW_REKEY 0x0003 +#define DEV_PW_PUSHBUTTON 0x0004 +#define DEV_PW_REGISTRAR_SPECIFIED 0x0005 + +/* Config Methods */ +#define WPS_CONFIG_USBA 0x0001 +#define WPS_CONFIG_ETHERNET 0x0002 +#define WPS_CONFIG_LABEL 0x0004 +#define WPS_CONFIG_DISPLAY 0x0008 +#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010 +#define WPS_CONFIG_INT_NFC_TOKEN 0x0020 +#define WPS_CONFIG_NFC_INTERFACE 0x0040 +#define WPS_CONFIG_PUSHBUTTON 0x0080 +#define WPS_CONFIG_KEYPAD 0x0100 +#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280 +#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480 +#define WPS_CONFIG_VIRT_DISPLAY 0x2008 +#define WPS_CONFIG_PHY_DISPLAY 0x4008 + +#define PM_BLOCK 1 +#define PM_ENABLE 0 + + +#define WL_AKM_SUITE_SHA256_1X 0x000FAC05 +#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06 + +#ifndef IBSS_COALESCE_ALLOWED +#define IBSS_COALESCE_ALLOWED 0 +#endif + +#ifndef IBSS_INITIAL_SCAN_ALLOWED +#define IBSS_INITIAL_SCAN_ALLOWED 0 +#endif + +#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */ +#define LONG_LISTEN_TIME 2000 + +#define MAX_SCAN_ABORT_WAIT_CNT 20 +#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10 + +#define IDSUP_4WAY_HANDSHAKE_TIMEOUT 10000 +enum idsup_event_type { + IDSUP_EVENT_SUCCESS = 0, + IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT +}; +/* + * cfg80211_ops api/callback list + */ +static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, + const struct ether_addr *sa, const struct ether_addr *bssid, + u8 **pheader, u32 *body_len, u8 *pbody); +static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid); +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 +wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request); +#else +static s32 +wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request); +#endif /* WL_CFG80211_P2P_DEV_IF */ +static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed); +#ifdef WLAIBSS_MCHAN +static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name); +static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev); +#endif /* WLAIBSS_MCHAN */ +static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params); +static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, + struct net_device *dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_get_station(struct wiphy *wiphy, + struct net_device *dev, const u8 *mac, + struct station_info *sinfo); +#else +static s32 wl_cfg80211_get_station(struct wiphy *wiphy, + struct net_device *dev, u8 *mac, + struct station_info *sinfo); +#endif +static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, bool enabled, + s32 timeout); +static int 
wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code); +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, s32 mbm); +#else +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, s32 dbm); +#endif /* WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, + struct wireless_dev *wdev, s32 *dbm); +#else +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm); +#endif /* WL_CFG80211_P2P_DEV_IF */ +static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy, + struct net_device *dev, + u8 key_idx, bool unicast, bool multicast); +static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + struct key_params *params); +static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr); +static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + void *cookie, void (*callback) (void *cookie, + struct key_params *params)); +static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *dev, u8 key_idx); +static s32 wl_cfg80211_resume(struct wiphy *wiphy); +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) +static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + bcm_struct_cfgdev *cfgdev, u64 cookie); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) +static s32 wl_cfg80211_del_station( + struct wiphy *wiphy, struct net_device *ndev, + struct station_del_parameters *params); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_del_station(struct wiphy *wiphy, + struct net_device *ndev, const u8* mac_addr); +#else +static s32 wl_cfg80211_del_station(struct wiphy *wiphy, + struct net_device *ndev, u8* mac_addr); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_change_station(struct wiphy *wiphy, + struct net_device *dev, const u8 *mac, struct station_parameters *params); +#else +static s32 wl_cfg80211_change_station(struct wiphy *wiphy, + struct net_device *dev, u8 *mac, struct station_parameters *params); +#endif +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) +static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow); +#else +static s32 wl_cfg80211_suspend(struct wiphy *wiphy); +#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELES */ +static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa); +static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa); +static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy, + struct net_device *dev); +static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg); +static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg); +static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, + struct net_device *ndev, bool aborted, bool fw_abort); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) +#if (defined(CONFIG_ARCH_MSM) && 
defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len); +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, const u8 *data, size_t len); +#else +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data, + size_t len); +#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper); +#else +static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, enum nl80211_tdls_operation oper); +#endif +#endif +#ifdef WL_SCHED_SCAN +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid); +#else +static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev); +#endif +#endif +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) +bcm_struct_cfgdev* +wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype + iface_type, u8 *mac_addr, const char *name); +s32 +wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev); +#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */ + +s32 wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr); +s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr); +chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec); +chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); +#ifdef WL11ULB +static s32 wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev); +static chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx); +static s32 wl_cfg80211_ulbbw_to_ulbchspec(u32 ulb_bw); +#else +static inline chanspec_t wl_cfg80211_ulb_get_min_bw_chspec( + struct wireless_dev *wdev, s32 bssidx) +{ + return WL_CHANSPEC_BW_20; +} +#endif /* WL11ULB */ + +/* + * event & event Q handlers for cfg80211 interfaces + */ +static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg); +static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg); +static s32 wl_event_handler(void *data); +static void wl_init_eq(struct bcm_cfg80211 *cfg); +static void wl_flush_eq(struct bcm_cfg80211 *cfg); +static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg); +static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags); +static void wl_init_eq_lock(struct bcm_cfg80211 *cfg); +static void wl_init_event_handler(struct bcm_cfg80211 *cfg); +static struct wl_event_q 
*wl_deq_event(struct bcm_cfg80211 *cfg); +static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type, + const wl_event_msg_t *msg, void *data); +static void wl_put_event(struct wl_event_q *e); +static void wl_wakeup_event(struct bcm_cfg80211 *cfg); +static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); +static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); +static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data, bool completed); +static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#ifdef BT_WIFI_HANDOVER +static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); +#endif /* BT_WIFI_HANDOVER */ +#ifdef WL_SCHED_SCAN +static s32 +wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +#endif /* WL_SCHED_SCAN */ +#ifdef PNO_SUPPORT +static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#endif /* PNO_SUPPORT */ +#ifdef GSCAN_SUPPORT +static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#endif /* GSCAN_SUPPORT */ +static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info, + enum wl_status state, bool set); +#ifdef DHD_LOSSLESS_ROAMING +static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data); +static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg); +#endif /* DHD_LOSSLESS_ROAMING */ +#ifdef CUSTOM_EVENT_PM_WAKE +static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#endif /* CUSTOM_EVENT_PM_WAKE */ + +#ifdef WLTDLS +static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#endif /* WLTDLS */ +/* + * register/deregister parent device + */ +static void wl_cfg80211_clear_parent_dev(void); +/* + * ioctl utilites + */ + +/* + * cfg80211 set_wiphy_params utilities + */ +static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold); +static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold); +static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l); + +/* + * cfg profile utilities + */ +static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, const void *data, s32 item); +static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item); +static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev); + +/* + * cfg80211 connect utilites + */ +static s32 wl_set_wpa_version(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_auth_type(struct net_device *dev, + struct 
cfg80211_connect_params *sme); +static s32 wl_set_set_cipher(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_key_mgmt(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_set_sharedkey(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev); +static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, + struct wl_join_params *join_params, size_t *join_params_size); +void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg); + +/* + * information element utilities + */ +static void wl_rst_ie(struct bcm_cfg80211 *cfg); +static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v); +static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size, + bool roam); +static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size); +static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size); +static u32 wl_get_ielen(struct bcm_cfg80211 *cfg); + +#ifdef WL11U +bcm_tlv_t * +wl_cfg80211_find_interworking_ie(u8 *parse, u32 len); +static s32 +wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag, + uint8 ie_id, uint8 *data, uint8 data_len); +#endif /* WL11U */ + +static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data); +static void wl_free_wdev(struct bcm_cfg80211 *cfg); +#ifdef CONFIG_CFG80211_INTERNAL_REGDB +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static int +#else +static void +#endif /* kernel version < 3.10.11 */ +wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); +#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ + +static s32 wl_inform_bss(struct bcm_cfg80211 *cfg); +static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam); +static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam); +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); +s32 wl_cfg80211_channel_to_freq(u32 channel); + + +static void wl_cfg80211_work_handler(struct work_struct *work); +static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, + struct key_params *params); +/* + * key indianess swap utilities + */ +static void swap_key_from_BE(struct wl_wsec_key *key); +static void swap_key_to_BE(struct wl_wsec_key *key); + +/* + * bcm_cfg80211 memory init/deinit utilities + */ +static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg); +static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg); + +static void wl_delay(u32 ms); + +/* + * ibss mode utilities + */ +static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev); +static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg); + +/* + * link up/down , default configuration utilities + */ +static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg); +static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg); +static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e); +static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, + struct net_device *ndev); +static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e); +static void wl_link_up(struct bcm_cfg80211 *cfg); +static void wl_link_down(struct bcm_cfg80211 *cfg); +static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype); +static void wl_init_conf(struct wl_conf *conf); +static s32 
wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info, + struct net_device* ndev); + +int wl_cfg80211_get_ioctl_version(void); + +/* + * find most significant bit set + */ +static __used u32 wl_find_msb(u16 bit16); + +/* + * rfkill support + */ +static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup); +static int wl_rfkill_set(void *data, bool blocked); +#ifdef DEBUGFS_CFG80211 +static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg); +static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg); +#endif + +static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel, + int nprobes, int *out_params_size); +static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role); + +#ifdef WL_CFG80211_ACL +/* ACL */ +static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev, + const struct cfg80211_acl_data *acl); +#endif /* WL_CFG80211_ACL */ + +/* + * Some external functions, TODO: move them to dhd_linux.h + */ +int dhd_add_monitor(char *name, struct net_device **new_ndev); +int dhd_del_monitor(struct net_device *ndev); +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); +int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); + + +#ifdef DHD_IFDEBUG +void wl_dump_ifinfo(struct bcm_cfg80211 *cfg); +#endif + +#ifdef P2P_LISTEN_OFFLOADING +s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg); +#endif /* P2P_LISTEN_OFFLOADING */ + +static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const struct ether_addr *bssid); + +static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ, + WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ }; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) +#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \ + cfg80211_disconnected(dev, reason, ie, len, gfp); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) +#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \ + cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp); +#endif + +#ifdef RSSI_OFFSET +static s32 wl_rssi_offset(s32 rssi) +{ + rssi += RSSI_OFFSET; + if (rssi > 0) + rssi = 0; + return rssi; +} +#else +#define wl_rssi_offset(x) x +#endif + +#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \ + (akm) == RSN_AKM_UNSPECIFIED || \ + (akm) == RSN_AKM_PSK) + + +extern int dhd_wait_pend8021x(struct net_device *dev); +#ifdef PROP_TXSTATUS_VSDB +extern int disable_proptx; +#endif /* PROP_TXSTATUS_VSDB */ + + +extern int passive_channel_skip; + +static s32 +wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +static s32 +wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0)) && (LINUX_VERSION_CODE <= (3, 7, \ + 0))) +struct chan_info { + int freq; + int chan_type; +}; +#endif + + +#if (WL_DBG_LEVEL > 0) +#define WL_DBG_ESTR_MAX 50 +static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = { + "SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND", + "DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC", + "REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END", + "BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM", + "TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH", + "EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND", + "BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND", + "PFN_NET_LOST", + "RESET_COMPLETE", "JOIN_START", "ROAM_START", 
"ASSOC_START", + "IBSS_ASSOC", + "RADIO", "PSM_WATCHDOG", + "WLC_E_XXX_ASSOC_START", "WLC_E_XXX_ASSOC_ABORT", + "PROBREQ_MSG", + "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED", + "EXCEEDED_MEDIUM_TIME", "ICV_ERROR", + "UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE", + "WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE", + "RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG", + "ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND", + "WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED", + "WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT", + "WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE", + "WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP", + "WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE" +}; +#endif /* WL_DBG_LEVEL */ + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_2GHZ , \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = NL80211_BAND_5GHZ , \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .bitrate = RATE_TO_BASE100KBPS(_rateid), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate __wl_rates[] = { + RATETAB_ENT(DOT11_RATE_1M, 0), + RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(DOT11_RATE_6M, 0), + RATETAB_ENT(DOT11_RATE_9M, 0), + RATETAB_ENT(DOT11_RATE_12M, 0), + RATETAB_ENT(DOT11_RATE_18M, 0), + RATETAB_ENT(DOT11_RATE_24M, 0), + RATETAB_ENT(DOT11_RATE_36M, 0), + RATETAB_ENT(DOT11_RATE_48M, 0), + RATETAB_ENT(DOT11_RATE_54M, 0) +}; + +#define wl_a_rates (__wl_rates + 4) +#define wl_a_rates_size 8 +#define wl_g_rates (__wl_rates + 0) +#define wl_g_rates_size 12 + +static struct ieee80211_channel __wl_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0) +}; + +static struct ieee80211_channel __wl_5ghz_a_channels[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(144, 0), + CHAN5G(149, 0), CHAN5G(153, 0), + CHAN5G(157, 0), CHAN5G(161, 0), + CHAN5G(165, 0) +}; + +static struct ieee80211_supported_band __wl_band_2ghz = { + .band = NL80211_BAND_2GHZ , + .channels = __wl_2ghz_channels, + .n_channels = ARRAY_SIZE(__wl_2ghz_channels), + .bitrates = wl_g_rates, + .n_bitrates = wl_g_rates_size +}; + +static struct ieee80211_supported_band __wl_band_5ghz_a = { + .band = NL80211_BAND_5GHZ , + .channels = __wl_5ghz_a_channels, + .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels), + .bitrates = wl_a_rates, + 
.n_bitrates = wl_a_rates_size +}; + +static const u32 __wl_cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, +}; + +#ifdef WL_SUPPORT_ACS +/* + * The firmware code required for this feature to work is currently under + * BCMINTERNAL flag. In future if this is to enabled we need to bring the + * required firmware code out of the BCMINTERNAL flag. + */ +struct wl_dump_survey { + u32 obss; + u32 ibss; + u32 no_ctg; + u32 no_pckt; + u32 tx; + u32 idle; +}; +#endif /* WL_SUPPORT_ACS */ + + +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) +static int maxrxpktglom = 0; +#endif + +/* IOCtl version read from targeted driver */ +static int ioctl_version; +#ifdef DEBUGFS_CFG80211 +#define S_SUBLOGLEVEL 20 +static const struct { + u32 log_level; + char *sublogname; +} sublogname_map[] = { + {WL_DBG_ERR, "ERR"}, + {WL_DBG_INFO, "INFO"}, + {WL_DBG_DBG, "DBG"}, + {WL_DBG_SCAN, "SCAN"}, + {WL_DBG_TRACE, "TRACE"}, + {WL_DBG_P2P_ACTION, "P2PACTION"} +}; +#endif + +#ifdef CUSTOMER_HW4_DEBUG +uint prev_dhd_console_ms = 0; +u32 prev_wl_dbg_level = 0; +bool wl_scan_timeout_dbg_enabled = 0; +static void wl_scan_timeout_dbg_set(void); +static void wl_scan_timeout_dbg_clear(void); + +static void wl_scan_timeout_dbg_set(void) +{ + WL_ERR(("Enter \n")); + prev_dhd_console_ms = dhd_console_ms; + prev_wl_dbg_level = wl_dbg_level; + + dhd_console_ms = 1; + wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN); + + wl_scan_timeout_dbg_enabled = 1; +} +static void wl_scan_timeout_dbg_clear(void) +{ + WL_ERR(("Enter \n")); + dhd_console_ms = prev_dhd_console_ms; + wl_dbg_level = prev_wl_dbg_level; + + wl_scan_timeout_dbg_enabled = 0; +} +#endif /* CUSTOMER_HW4_DEBUG */ + +/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */ +uint32 fw_assoc_watchdog_ms = 0; +bool fw_assoc_watchdog_started = 0; +#define FW_ASSOC_WATCHDOG_TIME 10 * 1000 /* msec */ + +#ifdef DHD_IFDEBUG + +void wl_dump_ifinfo(struct bcm_cfg80211 *cfg) +{ + WL_ERR(("cfg=%p\n", cfg)); + if (cfg) { + WL_ERR(("cfg->wdev=%p\n", bcmcfg_to_prmry_wdev(cfg))); + if (bcmcfg_to_prmry_wdev(cfg)) { + WL_ERR(("cfg->wdev->wiphy=%p\n", bcmcfg_to_wiphy(cfg))); + WL_ERR(("cfg->wdev->netdev=%p\n", bcmcfg_to_prmry_ndev(cfg))); + } + } +} +#endif + +static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, + enum wl_pm_workq_act_type type) +{ + u16 wq_duration = 0; + + if (cfg == NULL) + return; + + mutex_lock(&cfg->pm_sync); + /* + * Make cancel and schedule work part mutually exclusive + * so that while cancelling, we are sure that there is no + * work getting scheduled. 
+ */ + if (delayed_work_pending(&cfg->pm_enable_work)) { + cancel_delayed_work_sync(&cfg->pm_enable_work); + DHD_OS_WAKE_UNLOCK(cfg->pub); + } + + if (type == WL_PM_WORKQ_SHORT) { + wq_duration = WL_PM_ENABLE_TIMEOUT; + } else if (type == WL_PM_WORKQ_LONG) { + wq_duration = (WL_PM_ENABLE_TIMEOUT*2); + } + if (wq_duration) { + DHD_OS_WAKE_LOCK(cfg->pub); + schedule_delayed_work(&cfg->pm_enable_work, + msecs_to_jiffies((const unsigned int)wq_duration)); + } + mutex_unlock(&cfg->pm_sync); +} + +/* Return a new chanspec given a legacy chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_from_legacy(chanspec_t legacy_chspec) +{ + chanspec_t chspec; + + /* get the channel number */ + chspec = LCHSPEC_CHANNEL(legacy_chspec); + + /* convert the band */ + if (LCHSPEC_IS2G(legacy_chspec)) { + chspec |= WL_CHANSPEC_BAND_2G; + } else { + chspec |= WL_CHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (LCHSPEC_IS20(legacy_chspec)) { + chspec |= WL_CHANSPEC_BW_20; + } else { + chspec |= WL_CHANSPEC_BW_40; + if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) { + chspec |= WL_CHANSPEC_CTL_SB_L; + } else { + chspec |= WL_CHANSPEC_CTL_SB_U; + } + } + + if (wf_chspec_malformed(chspec)) { + WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + return chspec; +} + +/* Return a legacy chanspec given a new chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_to_legacy(chanspec_t chspec) +{ + chanspec_t lchspec; + + if (wf_chspec_malformed(chspec)) { + WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + /* get the channel number */ + lchspec = CHSPEC_CHANNEL(chspec); + + /* convert the band */ + if (CHSPEC_IS2G(chspec)) { + lchspec |= WL_LCHANSPEC_BAND_2G; + } else { + lchspec |= WL_LCHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (CHSPEC_IS20(chspec)) { + lchspec |= WL_LCHANSPEC_BW_20; + lchspec |= WL_LCHANSPEC_CTL_SB_NONE; + } else if (CHSPEC_IS40(chspec)) { + lchspec |= WL_LCHANSPEC_BW_40; + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) { + lchspec |= WL_LCHANSPEC_CTL_SB_LOWER; + } else { + lchspec |= WL_LCHANSPEC_CTL_SB_UPPER; + } + } else { + /* cannot express the bandwidth */ + char chanbuf[CHANSPEC_STR_LEN]; + WL_ERR(( + "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) " + "to pre-11ac format\n", + wf_chspec_ntoa(chspec, chanbuf), chspec)); + return INVCHANSPEC; + } + + return lchspec; +} + +/* given a chanspec value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +chanspec_t +wl_chspec_host_to_driver(chanspec_t chanspec) +{ + if (ioctl_version == 1) { + chanspec = wl_chspec_to_legacy(chanspec); + if (chanspec == INVCHANSPEC) { + return chanspec; + } + } + chanspec = htodchanspec(chanspec); + + return chanspec; +} + +/* given a channel value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +chanspec_t +wl_ch_host_to_driver(s32 bssidx, u16 channel) +{ + chanspec_t chanspec; + + chanspec = channel & WL_CHANSPEC_CHAN_MASK; + + if (channel <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= wl_cfg80211_ulb_get_min_bw_chspec(NULL, bssidx); + + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + return wl_chspec_host_to_driver(chanspec); +} + +/* given a chanspec value from the driver, do the endian and chanspec 
version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec) +{ + chanspec = dtohchanspec(chanspec); + if (ioctl_version == 1) { + chanspec = wl_chspec_from_legacy(chanspec); + } + + return chanspec; +} + +/* + * convert ASCII string to MAC address (colon-delimited format) + * eg: 00:11:22:33:44:55 + */ +int +wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n) +{ + char *c = NULL; + int count = 0; + + memset(n, 0, ETHER_ADDR_LEN); + for (;;) { + n->octet[count++] = (uint8)simple_strtoul(a, &c, 16); + if (!*c++ || count == ETHER_ADDR_LEN) + break; + a = c; + } + return (count == ETHER_ADDR_LEN); +} + +/* There isn't a lot of sense in it, but you can transmit anything you like */ +static const struct ieee80211_txrx_stypes +wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_ADHOC] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_AP_VLAN] = { + /* copy AP */ + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, +#if defined(WL_CFG80211_P2P_DEV_IF) + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, +#endif /* WL_CFG80211_P2P_DEV_IF */ +}; + +static void swap_key_from_BE(struct wl_wsec_key *key) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + key->iv_initialized = htod32(key->iv_initialized); +} + +static void swap_key_to_BE(struct wl_wsec_key *key) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + +/* Dump the contents of the encoded wps ie buffer and get pbc value */ +static void +wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc) +{ + #define WPS_IE_FIXED_LEN 6 + u16 len; + u8 *subel = NULL; + u16 subelt_id; + u16 subelt_len; + u16 val; + u8 *valptr = (uint8*) &val; + if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) { + WL_ERR(("invalid argument : NULL\n")); + return; + } + len = (u16)wps_ie[TLV_LEN_OFF]; + + if (len > wps_ie_len) { 
+ WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len)); + return; + } + WL_DBG(("wps_ie len=%d\n", len)); + len -= 4; /* for the WPS IE's OUI, oui_type fields */ + subel = wps_ie + WPS_IE_FIXED_LEN; + while (len >= 4) { /* must have attr id, attr len fields */ + valptr[0] = *subel++; + valptr[1] = *subel++; + subelt_id = HTON16(val); + + valptr[0] = *subel++; + valptr[1] = *subel++; + subelt_len = HTON16(val); + + len -= 4; /* for the attr id, attr len fields */ + len -= subelt_len; /* for the remaining fields in this attribute */ + WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n", + subel, subelt_id, subelt_len)); + + if (subelt_id == WPS_ID_VERSION) { + WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel)); + } else if (subelt_id == WPS_ID_REQ_TYPE) { + WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel)); + } else if (subelt_id == WPS_ID_CONFIG_METHODS) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val))); + } else if (subelt_id == WPS_ID_DEVICE_NAME) { + char devname[100]; + size_t namelen = MIN(subelt_len, sizeof(devname)); + if (namelen) { + memcpy(devname, subel, namelen); + devname[namelen - 1] = '\0'; + WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n", + devname, subelt_len)); + } + } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val))); + *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false; + } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val))); + valptr[0] = *(subel + 6); + valptr[1] = *(subel + 7); + WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val))); + } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val))); + valptr[0] = *(subel + 6); + valptr[1] = *(subel + 7); + WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val))); + } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS" + ": cat=%u\n", HTON16(val))); + } else { + WL_DBG((" unknown attr 0x%x\n", subelt_id)); + } + + subel += subelt_len; + } +} + +s32 wl_set_tx_power(struct net_device *dev, + enum nl80211_tx_power_setting type, s32 dbm) +{ + s32 err = 0; + s32 disable = 0; + s32 txpwrqdbm; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + /* Make sure radio is off or on as far as software is concerned */ + disable = WL_RADIO_SW_DISABLE << 16; + disable = htod32(disable); + err = wldev_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable), true); + if (unlikely(err)) { + WL_ERR(("WLC_SET_RADIO error (%d)\n", err)); + return err; + } + + if (dbm > 0xffff) + dbm = 0xffff; + txpwrqdbm = dbm * 4; + err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm, + sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (unlikely(err)) + WL_ERR(("qtxpower error (%d)\n", err)); + else + WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm)); + + return err; +} + +s32 wl_get_tx_power(struct net_device *dev, s32 *dbm) +{ + s32 err = 0; + s32 txpwrdbm; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + 
+ memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm)); + txpwrdbm = dtoh32(txpwrdbm); + *dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4; + + WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm)); + + return err; +} + +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy) +{ + chanspec_t chspec; + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + struct ether_addr bssid; + struct wl_bss_info *bss = NULL; + s32 bssidx = 0; /* Explicitly set to primary bssidx */ + + if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) { + /* STA interface is not associated. So start the new interface on a temp + * channel . Later proper channel will be applied by the above framework + * via set_channel (cfg80211 API). + */ + WL_DBG(("Not associated. Return a temp channel. \n")); + return wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN); + } + + + *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX); + if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, + WL_EXTRA_BUF_MAX, false))) { + WL_ERR(("Failed to get associated bss info, use temp channel \n")); + chspec = wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN); + } + else { + bss = (struct wl_bss_info *) (cfg->extra_buf + 4); + chspec = bss->chanspec; + + WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec)); + } + return chspec; +} + +static bcm_struct_cfgdev * +wl_cfg80211_add_monitor_if(char *name) +{ +#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) + WL_INFORM(("wl_cfg80211_add_monitor_if: No more support monitor interface\n")); + return ERR_PTR(-EOPNOTSUPP); +#else + struct net_device* ndev = NULL; + + dhd_add_monitor(name, &ndev); + WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev)); + return ndev_to_cfgdev(ndev); +#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */ +} + +static bcm_struct_cfgdev * +wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, +#if defined(WL_CFG80211_P2P_DEV_IF) + const char *name, +#else + char *name, +#endif /* WL_CFG80211_P2P_DEV_IF */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + unsigned char name_assign_type, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + enum nl80211_iftype type, +#else + enum nl80211_iftype type, u32 *flags, +#endif + struct vif_params *params) +{ + s32 err = -ENODEV; + s32 timeout = -1; + s32 wlif_type = -1; + s32 mode = 0; + s32 val = 0; + s32 cfg_type; + s32 dhd_mode = 0; + chanspec_t chspec; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev; + struct net_device *new_ndev; + struct ether_addr primary_mac; +#ifdef WL_VIRTUAL_APSTA + bcm_struct_cfgdev *new_cfgdev; +#endif /* WL_VIRTUAL_APSTA */ +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + s32 up = 1; + dhd_pub_t *dhd; + bool enabled; +#endif +#endif /* PROP_TXSTATUS_VSDB */ +#if defined(SUPPORT_AP_POWERSAVE) + dhd_pub_t *dhd; +#endif /* SUPPORT_AP_POWERSAVE */ + bool hang_required = false; + + if (!cfg) + return ERR_PTR(-EINVAL); + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd = (dhd_pub_t *)(cfg->pub); +#endif +#endif /* PROP_TXSTATUS_VSDB */ +#if defined(SUPPORT_AP_POWERSAVE) + dhd = (dhd_pub_t *)(cfg->pub); +#endif /* SUPPORT_AP_POWERSAVE */ + + /* Use primary I/F for sending cmds down to firmware */ + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) { + WL_ERR(("device is not ready\n")); + return ERR_PTR(-ENODEV); + } + + 
WL_DBG(("if name: %s, type: %d\n", name, type)); + switch (type) { + case NL80211_IFTYPE_ADHOC: +#ifdef WLAIBSS_MCHAN + return bcm_cfg80211_add_ibss_if(wiphy, (char *)name); +#endif /* WLAIBSS_MCHAN */ + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + WL_ERR(("Unsupported interface type\n")); + mode = WL_MODE_IBSS; + return NULL; + case NL80211_IFTYPE_MONITOR: + return wl_cfg80211_add_monitor_if((char *)name); +#if defined(WL_CFG80211_P2P_DEV_IF) + case NL80211_IFTYPE_P2P_DEVICE: + cfg->down_disc_if = FALSE; + return wl_cfgp2p_add_p2p_disc_if(cfg); +#endif /* WL_CFG80211_P2P_DEV_IF */ + case NL80211_IFTYPE_STATION: +#ifdef WL_VIRTUAL_APSTA +#ifdef WLAIBSS_MCHAN + if (cfg->ibss_cfgdev) { + WL_ERR(("AIBSS is already operational. " + " AIBSS & DUALSTA can't be used together \n")); + return ERR_PTR(-ENOMEM); + } +#endif /* WLAIBSS_MCHAN */ + if (!name) { + WL_ERR(("Interface name not provided \n")); + return ERR_PTR(-ENODEV); + } + + if (wl_cfgp2p_vif_created(cfg)) { + WL_ERR(("Could not create new iface." + "Already one p2p interface is running")); + return ERR_PTR(-ENODEV); + } + new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy, + NL80211_IFTYPE_STATION, NULL, name); + if (!new_cfgdev) + return ERR_PTR(-ENOMEM); + else + return new_cfgdev; +#endif /* WL_VIRTUAL_APSTA */ + case NL80211_IFTYPE_P2P_CLIENT: + wlif_type = WL_P2P_IF_CLIENT; + mode = WL_MODE_BSS; + break; + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + wlif_type = WL_P2P_IF_GO; + mode = WL_MODE_AP; + break; + default: + WL_ERR(("Unsupported interface type\n")); + return ERR_PTR(-ENODEV); + break; + } + + if (!name) { + WL_ERR(("name is NULL\n")); + return ERR_PTR(-ENODEV); + } + if (cfg->p2p_supported && (wlif_type != -1)) { + ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */ + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (!dhd) + return ERR_PTR(-ENODEV); +#endif +#endif /* PROP_TXSTATUS_VSDB */ + if (!cfg->p2p) + return ERR_PTR(-ENODEV); + + if (cfg->cfgdev_bssidx != -1) { + WL_ERR(("Failed to start p2p, Maximum no of interface reached")); + return ERR_PTR(-ENODEV); + } + + if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) { + p2p_on(cfg) = true; + wl_cfgp2p_set_firm_p2p(cfg); + wl_cfgp2p_init_discovery(cfg); + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + } + + strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1); + cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0'; + + wl_cfg80211_scan_abort(cfg); +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (!cfg->wlfc_on && !disable_proptx) { + dhd_wlfc_get_enable(dhd, &enabled); + if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_init(dhd); + err = wldev_ioctl(primary_ndev, WLC_UP, &up, sizeof(s32), true); + if (err < 0) + WL_ERR(("WLC_UP return err:%d\n", err)); + } + cfg->wlfc_on = true; + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + + /* Dual p2p doesn't support multiple P2PGO interfaces, + * p2p_go_count is the counter for GO creation + * requests. + */ + if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) { + WL_ERR(("Fw doesnot support multiple Go")); + return ERR_PTR(-ENOMEM); + } + /* In concurrency case, STA may be already associated in a particular channel. + * so retrieve the current channel of primary interface and then start the virtual + * interface on that. 
+ */ + chspec = wl_cfg80211_get_shared_freq(wiphy); + + /* For P2P mode, use P2P-specific driver features to create the + * bss: "cfg p2p_ifadd" + */ + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return ERR_PTR(-ENOMEM); + } + wl_set_p2p_status(cfg, IF_ADDING); + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + if (wlif_type == WL_P2P_IF_GO) + wldev_iovar_setint(primary_ndev, "mpc", 0); + cfg_type = wl_cfgp2p_get_conn_idx(cfg); + if (cfg_type == BCME_ERROR) { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR(("Failed to get connection idx for p2p interface")); + goto fail; + } + err = wl_cfgp2p_ifadd(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type), + htod32(wlif_type), chspec); + if (unlikely(err)) { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR((" virtual iface add failed (%d) \n", err)); + return ERR_PTR(-ENOMEM); + } + + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + ((wl_get_p2p_status(cfg, IF_ADDING) == false) && + (cfg->if_event_info.valid)), + msecs_to_jiffies(MAX_WAIT_TIME)); + + if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) { + struct wireless_dev *vwdev; + int pm_mode = PM_ENABLE; + wl_if_event_info *event = &cfg->if_event_info; + /* IF_ADD event has come back, we can proceed to to register + * the new interface now, use the interface name provided by caller (thus + * ignore the one from wlc) + */ + new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname, + event->mac, event->bssidx, event->name); + if (new_ndev == NULL) + goto fail; + + wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev; + wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx; + vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); + if (unlikely(!vwdev)) { + WL_ERR(("Could not allocate wireless device\n")); + err = -ENOMEM; + goto fail; + } + vwdev->wiphy = cfg->wdev->wiphy; + WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname)); + if (type == NL80211_IFTYPE_P2P_GO) { + cfg->p2p->p2p_go_count++; + } + vwdev->iftype = type; +#ifdef DHD_IFDEBUG + WL_ERR(("new_ndev: %p\n", new_ndev)); +#endif + vwdev->netdev = new_ndev; + new_ndev->ieee80211_ptr = vwdev; + SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy)); + wl_set_drv_status(cfg, READY, new_ndev); + wl_set_mode_by_netdev(cfg, new_ndev, mode); + + if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) { + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + err = -ENODEV; + goto fail; + } + err = wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode, event->bssidx); + if (unlikely(err != 0)) { + WL_ERR(("Allocation of netinfo failed (%d) \n", err)); + goto fail; + } + val = 1; + /* Disable firmware roaming for P2P interface */ + wldev_iovar_setint(new_ndev, "roam_off", val); +#ifdef WL11ULB + if (cfg->p2p_wdev && is_p2p_group_iface(new_ndev->ieee80211_ptr)) { + u32 ulb_bw = wl_cfg80211_get_ulb_bw(cfg->p2p_wdev); + if (ulb_bw) { + /* Apply ULB BW settings on the newly spawned interface */ + WL_DBG(("[ULB] Applying ULB BW for the newly" + "created P2P interface \n")); + if (wl_cfg80211_set_ulb_bw(new_ndev, + ulb_bw, new_ndev->name) < 0) { + /* + * If ulb_bw set failed, fail the iface creation. + * wl_dealloc_netinfo_by_wdev will be called by the + * unregister notifier. 
+ */ + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + err = -EINVAL; + goto fail; + } + } + } +#endif /* WL11ULB */ + + if (mode != WL_MODE_AP) + wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1); + + WL_ERR((" virtual interface(%s) is " + "created net attach done\n", cfg->p2p->vir_ifname)); + if (mode == WL_MODE_AP) + wl_set_drv_status(cfg, CONNECTED, new_ndev); +#ifdef SUPPORT_AP_POWERSAVE + if (mode == WL_MODE_AP) { + dhd_set_ap_powersave(dhd, 0, TRUE); + } +#endif /* SUPPORT_AP_POWERSAVE */ + if (type == NL80211_IFTYPE_P2P_CLIENT) + dhd_mode = DHD_FLAG_P2P_GC_MODE; + else if (type == NL80211_IFTYPE_P2P_GO) + dhd_mode = DHD_FLAG_P2P_GO_MODE; + DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode)); + /* reinitialize completion to clear previous count */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + INIT_COMPLETION(cfg->iface_disable); +#else + init_completion(&cfg->iface_disable); +#endif + return ndev_to_cfgdev(new_ndev); + } else { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname)); + + WL_ERR(("left timeout : %d\n", timeout)); + WL_ERR(("IF_ADDING status : %d\n", wl_get_p2p_status(cfg, IF_ADDING))); + WL_ERR(("event valid : %d\n", cfg->if_event_info.valid)); + + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + wl_set_p2p_status(cfg, IF_DELETING); + + err = wl_cfgp2p_ifdel(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type)); + if (err == BCME_OK) { + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + ((wl_get_p2p_status(cfg, IF_DELETING) == false) && + (cfg->if_event_info.valid)), + msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) && + cfg->if_event_info.valid) { + /* + * Should indicate upper layer this failure case of p2p + * interface creation + */ + WL_ERR(("IFDEL operation done\n")); + } else { + WL_ERR(("IFDEL didn't complete properly\n")); + hang_required = true; + } + } else { + hang_required = true; + } + + if (hang_required) { + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n", + err, ndev->name)); + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(ndev); + } + + memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); + wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1; +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + /* + * Returns -ENODEV to upperlayer to indicate that DHD + * failed to create p2p interface + */ + err = -ENODEV; + } + } +fail: + if (wlif_type == WL_P2P_IF_GO) + wldev_iovar_setint(primary_ndev, "mpc", 1); + return ERR_PTR(err); +} + +static s32 +wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) +{ + struct net_device *dev = NULL; + struct ether_addr p2p_mac; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 timeout = -1; + s32 ret = 0; + s32 index = -1; + s32 type = -1; +#ifdef CUSTOM_SET_CPUCORE + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* CUSTOM_SET_CPUCORE */ + WL_DBG(("Enter\n")); + +#ifdef CUSTOM_SET_CPUCORE + dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE; + if (!(dhd->chan_isvht80)) + dhd_set_cpucore(dhd, FALSE); +#endif /* CUSTOM_SET_CPUCORE */ +#ifdef WL_CFG80211_P2P_DEV_IF + if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { + 
if (dhd_download_fw_on_driverload) { + return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg); + } else { + cfg->down_disc_if = TRUE; + return 0; + } + } +#endif /* WL_CFG80211_P2P_DEV_IF */ + dev = cfgdev_to_wlc_ndev(cfgdev, cfg); + +#ifdef WLAIBSS_MCHAN + if (cfgdev == cfg->ibss_cfgdev) + return bcm_cfg80211_del_ibss_if(wiphy, cfgdev); +#endif /* WLAIBSS_MCHAN */ + +#ifdef WL_VIRTUAL_APSTA + if (cfgdev == cfg->bss_cfgdev) + return wl_cfg80211_del_iface(wiphy, cfgdev); +#endif /* WL_VIRTUAL_APSTA */ + if ((index = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) { + WL_ERR(("Find p2p index from wdev failed\n")); + return BCME_ERROR; + } + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return BCME_ERROR; + } + if (cfg->p2p_supported) { + if (wl_cfgp2p_find_type(cfg, index, &type) != BCME_OK) + return BCME_ERROR; + memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, ETHER_ADDR_LEN); + + /* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases + */ + WL_DBG(("P2P: GO_NEG_PHASE status cleared ")); + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + if (wl_cfgp2p_vif_created(cfg)) { + if (wl_get_drv_status(cfg, SCANNING, dev)) { + wl_notify_escan_complete(cfg, dev, true, true); + } + wldev_iovar_setint(dev, "mpc", 1); + /* Delete pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); + + /* for GC */ + if (wl_get_drv_status(cfg, DISCONNECTING, dev) && + (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) { + WL_ERR(("Wait for Link Down event for GC !\n")); + wait_for_completion_timeout + (&cfg->iface_disable, msecs_to_jiffies(500)); + } + + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + wl_set_p2p_status(cfg, IF_DELETING); + DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg)); + + /* for GO */ + if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false); + cfg->p2p->p2p_go_count--; + /* disable interface before bsscfg free */ + ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac); + /* if fw doesn't support "ifdis", + do not wait for link down of ap mode + */ + if (ret == 0) { + WL_ERR(("Wait for Link Down event for GO !!!\n")); + wait_for_completion_timeout(&cfg->iface_disable, + msecs_to_jiffies(500)); + } else if (ret != BCME_UNSUPPORTED) { + msleep(300); + } + } + wl_cfg80211_clear_per_bss_ies(cfg, index); + + if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) + wldev_iovar_setint(dev, "buf_key_b4_m4", 0); + memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, + ETHER_ADDR_LEN); + CFGP2P_INFO(("primary idx %d : cfg p2p_ifdis "MACDBG"\n", + dev->ifindex, MAC2STRDBG(p2p_mac.octet))); + + /* delete interface after link down */ + ret = wl_cfgp2p_ifdel(cfg, &p2p_mac); + if (ret != BCME_OK) { + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n", + ret, ndev->name)); + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(ndev); + } else { + /* Wait for IF_DEL operation to be finished */ + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + ((wl_get_p2p_status(cfg, IF_DELETING) == false) && + (cfg->if_event_info.valid)), + msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) && + cfg->if_event_info.valid) { + + WL_DBG(("IFDEL operation done\n")); + wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev); + } else { + WL_ERR(("IFDEL didn't complete properly\n")); + } + } + + ret = 
dhd_del_monitor(dev); + if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub)); + } + } + } + return ret; +} + +static s32 +wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + enum nl80211_iftype type, +#else + enum nl80211_iftype type, u32 *flags, +#endif + struct vif_params *params) +{ + s32 ap = 0; + s32 infra = 0; + s32 ibss = 0; + s32 wlif_type; + s32 mode = 0; + s32 err = BCME_OK; + s32 index; + s32 conn_idx = -1; + chanspec_t chspec; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_DBG(("Enter type %d\n", type)); + switch (type) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + ap = 1; + WL_ERR(("type (%d) : currently we do not support this type\n", + type)); + break; + case NL80211_IFTYPE_ADHOC: + mode = WL_MODE_IBSS; + ibss = 1; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + mode = WL_MODE_BSS; + infra = 1; + break; + case NL80211_IFTYPE_AP: + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + /* intentional fall through */ + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_P2P_GO: + mode = WL_MODE_AP; + ap = 1; + break; + default: + return -EINVAL; + } + if (!dhd) + return -EINVAL; + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + WL_ERR(("Scan in progress. Aborting the scan!\n")); + wl_cfg80211_scan_abort(cfg); + while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (wl_get_drv_status_all(cfg, SCANNING)) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + } + + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return -EINVAL; + } + if (ap) { + wl_set_mode_by_netdev(cfg, ndev, mode); + if (is_p2p_group_iface(ndev->ieee80211_ptr) && + cfg->p2p && wl_cfgp2p_vif_created(cfg)) { + WL_DBG(("p2p_vif_created p2p_on (%d)\n", p2p_on(cfg))); + wldev_iovar_setint(ndev, "mpc", 0); + wl_notify_escan_complete(cfg, ndev, true, true); + + /* Dual p2p doesn't support multiple P2PGO interfaces, + * p2p_go_count is the counter for GO creation + * requests. + */ + if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) { + wl_set_mode_by_netdev(cfg, ndev, WL_MODE_BSS); + WL_ERR(("Fw doesnot support multiple GO ")); + return BCME_ERROR; + } + /* In concurrency case, STA may be already associated in a particular + * channel. so retrieve the current channel of primary interface and + * then start the virtual interface on that. 
+ */ + chspec = wl_cfg80211_get_shared_freq(wiphy); + index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (index < 0) { + WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev)); + return BCME_ERROR; + } + if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) + return BCME_ERROR; + + wlif_type = WL_P2P_IF_GO; + WL_DBG(("%s : ap (%d), infra (%d), iftype (%d) conn_idx (%d)\n", + ndev->name, ap, infra, type, conn_idx)); + wl_set_p2p_status(cfg, IF_CHANGING); + wl_clr_p2p_status(cfg, IF_CHANGED); + wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx), + htod32(wlif_type), chspec, conn_idx); + wait_event_interruptible_timeout(cfg->netif_change_event, + (wl_get_p2p_status(cfg, IF_CHANGED) == true), + msecs_to_jiffies(MAX_WAIT_TIME)); + wl_set_mode_by_netdev(cfg, ndev, mode); + dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE; + dhd->op_mode |= DHD_FLAG_P2P_GO_MODE; + wl_clr_p2p_status(cfg, IF_CHANGING); + wl_clr_p2p_status(cfg, IF_CHANGED); + if (mode == WL_MODE_AP) + wl_set_drv_status(cfg, CONNECTED, ndev); +#ifdef SUPPORT_AP_POWERSAVE + dhd_set_ap_powersave(dhd, 0, TRUE); +#endif /* SUPPORT_AP_POWERSAVE */ + } else if (((ndev == primary_ndev) || + (ndev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) && + !wl_get_drv_status(cfg, AP_CREATED, ndev)) { + wl_set_drv_status(cfg, AP_CREATING, ndev); + } else { + WL_ERR(("Cannot change the interface for GO or SOFTAP\n")); + return -EINVAL; + } + } else { + /* P2P GO interface deletion is handled on the basis of role type (AP). + * So avoid changing role for p2p type. + */ + if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) + wl_set_mode_by_netdev(cfg, ndev, mode); + WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA")); +#ifdef SUPPORT_AP_POWERSAVE + dhd_set_ap_powersave(dhd, 0, FALSE); +#endif /* SUPPORT_AP_POWERSAVE */ + } + + if (ibss) { + infra = 0; + wl_set_mode_by_netdev(cfg, ndev, mode); + err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET Adhoc error %d\n", err)); + return -EINVAL; + } + } + + ndev->ieee80211_ptr->iftype = type; + return 0; +} + +s32 +wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx) +{ + bool ifadd_expected = FALSE; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + /* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd") + * redirect the IF_ADD event to ifchange as it is not a real "new" interface + */ + if (wl_get_p2p_status(cfg, IF_CHANGING)) + return wl_cfg80211_notify_ifchange(ifidx, name, mac, bssidx); + + /* Okay, we are expecting IF_ADD (as IF_ADDING is true) */ + if (wl_get_p2p_status(cfg, IF_ADDING)) { + ifadd_expected = TRUE; + wl_clr_p2p_status(cfg, IF_ADDING); + } else if (cfg->bss_pending_op) { + ifadd_expected = TRUE; + cfg->bss_pending_op = FALSE; + } + + if (ifadd_expected) { + wl_if_event_info *if_event_info = &cfg->if_event_info; + + if_event_info->valid = TRUE; + if_event_info->ifidx = ifidx; + if_event_info->bssidx = bssidx; + strncpy(if_event_info->name, name, IFNAMSIZ); + if_event_info->name[IFNAMSIZ] = '\0'; + if (mac) + memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN); + wake_up_interruptible(&cfg->netif_change_event); + return BCME_OK; + } + + return BCME_ERROR; +} + +s32 +wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx) +{ + bool ifdel_expected = FALSE; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_if_event_info *if_event_info = &cfg->if_event_info; + + if (wl_get_p2p_status(cfg, IF_DELETING)) { + ifdel_expected = TRUE; + 
wl_clr_p2p_status(cfg, IF_DELETING); + } else if (cfg->bss_pending_op) { + ifdel_expected = TRUE; + cfg->bss_pending_op = FALSE; + } + + if (ifdel_expected) { + if_event_info->valid = TRUE; + if_event_info->ifidx = ifidx; + if_event_info->bssidx = bssidx; + wake_up_interruptible(&cfg->netif_change_event); + return BCME_OK; + } + + return BCME_ERROR; +} + +s32 +wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (wl_get_p2p_status(cfg, IF_CHANGING)) { + wl_set_p2p_status(cfg, IF_CHANGED); + wake_up_interruptible(&cfg->netif_change_event); + return BCME_OK; + } + + return BCME_ERROR; +} + +static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info, + struct net_device* ndev) +{ + s32 type = -1; + s32 bssidx = -1; +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + bool enabled; +#endif +#endif /* PROP_TXSTATUS_VSDB */ + + bssidx = if_event_info->bssidx; + if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) && + bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2)) { + WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx)); + return BCME_ERROR; + } + + if (p2p_is_on(cfg) && wl_cfgp2p_vif_created(cfg)) { + if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) { + /* Abort any pending scan requests */ + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + WL_DBG(("ESCAN COMPLETED\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false); + } + + memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); + if (wl_cfgp2p_find_type(cfg, bssidx, &type) == BCME_OK) { + /* Update P2P data */ + wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type)); + wl_to_p2p_bss_ndev(cfg, type) = NULL; + wl_to_p2p_bss_bssidx(cfg, type) = -1; + } else if (wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr) < 0) { + WL_ERR(("bssidx not known for the given ndev as per net_info data \n")); + return BCME_ERROR; + } + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + } + + dhd_net_if_lock(ndev); + wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev); + dhd_net_if_unlock(ndev); + + return BCME_OK; +} + +/* Find listen channel */ +static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg, + const u8 *ie, u32 ie_len) +{ + wifi_p2p_ie_t *p2p_ie; + u8 *end, *pos; + s32 listen_channel; + +/* unfortunately const cast required here - function is + * a callback so its signature must not be changed + * and cascade of changing wl_cfgp2p_find_p2pie + * causes need for const cast in other places + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + pos = (u8 *)ie; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len); + + if (p2p_ie == NULL) + return 0; + + pos = p2p_ie->subelts; + end = p2p_ie->subelts + (p2p_ie->len - 4); + + CFGP2P_DBG((" found p2p ie ! 
lenth %d \n", + p2p_ie->len)); + + while (pos < end) { + uint16 attr_len; + if (pos + 2 >= end) { + CFGP2P_DBG((" -- Invalid P2P attribute")); + return 0; + } + attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0])); + + if (pos + 3 + attr_len > end) { + CFGP2P_DBG(("P2P: Attribute underflow " + "(len=%u left=%d)", + attr_len, (int) (end - pos - 3))); + return 0; + } + + /* if Listen Channel att id is 6 and the vailue is valid, + * return the listen channel + */ + if (pos[0] == 6) { + /* listen channel subel length format + * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num) + */ + listen_channel = pos[1 + 2 + 3 + 1]; + + if (listen_channel == SOCIAL_CHAN_1 || + listen_channel == SOCIAL_CHAN_2 || + listen_channel == SOCIAL_CHAN_3) { + CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel)); + return listen_channel; + } + } + pos += 3 + attr_len; + } + return 0; +} + +static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request) +{ + u32 n_ssids; + u32 n_channels; + u16 channel; + chanspec_t chanspec; + s32 i = 0, j = 0, offset; + char *ptr; + wlc_ssid_t ssid; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev; + + memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = 0; + params->nprobes = -1; + params->active_time = -1; + params->passive_time = -1; + params->home_time = -1; + params->channel_num = 0; + memset(¶ms->ssid, 0, sizeof(wlc_ssid_t)); + + WL_SCAN(("Preparing Scan request\n")); + WL_SCAN(("nprobes=%d\n", params->nprobes)); + WL_SCAN(("active_time=%d\n", params->active_time)); + WL_SCAN(("passive_time=%d\n", params->passive_time)); + WL_SCAN(("home_time=%d\n", params->home_time)); + WL_SCAN(("scan_type=%d\n", params->scan_type)); + + params->nprobes = htod32(params->nprobes); + params->active_time = htod32(params->active_time); + params->passive_time = htod32(params->passive_time); + params->home_time = htod32(params->home_time); + + /* if request is null just exit so it will be all channel broadcast scan */ + if (!request) + return; + + n_ssids = request->n_ssids; + n_channels = request->n_channels; + + /* Copy channel array if applicable */ + WL_SCAN(("### List of channelspecs to scan ###\n")); + if (n_channels > 0) { + for (i = 0; i < n_channels; i++) { + chanspec = 0; + channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq); + /* SKIP DFS channels for Secondary interface */ + if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) && + (request->channels[i]->flags & +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN))) +#else + (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */ + continue; + +#if defined(WL_CFG80211_P2P_DEV_IF) + wdev = request->wdev; +#else + wdev = request->dev->ieee80211_ptr; +#endif /* WL_CFG80211_P2P_DEV_IF */ + chanspec = wl_cfg80211_ulb_get_min_bw_chspec(wdev, -1); + if (chanspec == INVCHANSPEC) { + WL_ERR(("Invalid chanspec! 
Skipping channel\n")); + continue; + } + + if (request->channels[i]->band == NL80211_BAND_2GHZ ) { + chanspec |= WL_CHANSPEC_BAND_2G; + } else { + chanspec |= WL_CHANSPEC_BAND_5G; + } + params->channel_list[j] = channel; + params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK; + params->channel_list[j] |= chanspec; + WL_SCAN(("Chan : %d, Channel spec: %x \n", + channel, params->channel_list[j])); + params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]); + j++; + } + } else { + WL_SCAN(("Scanning all channels\n")); + } + n_channels = j; + /* Copy ssid array if applicable */ + WL_SCAN(("### List of SSIDs to scan ###\n")); + if (n_ssids > 0) { + offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + ptr = (char*)params + offset; + for (i = 0; i < n_ssids; i++) { + memset(&ssid, 0, sizeof(wlc_ssid_t)); + ssid.SSID_len = request->ssids[i].ssid_len; + memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len); + if (!ssid.SSID_len) + WL_SCAN(("%d: Broadcast scan\n", i)); + else + WL_SCAN(("%d: scan for %s size =%d\n", i, + ssid.SSID, ssid.SSID_len)); + memcpy(ptr, &ssid, sizeof(wlc_ssid_t)); + ptr += sizeof(wlc_ssid_t); + } + } else { + WL_SCAN(("Broadcast scan\n")); + } + /* Adding mask to channel numbers */ + params->channel_num = + htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & WL_SCAN_PARAMS_COUNT_MASK)); + + if (n_channels == 1) { + params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS); + params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS); + } +} + +static s32 +wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size) +{ + wl_uint32_list_t *list; + s32 err = BCME_OK; + if (valid_chan_list == NULL || size <= 0) + return -ENOMEM; + + memset(valid_chan_list, 0, size); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false); + if (err != 0) { + WL_ERR(("get channels failed with %d\n", err)); + } + + return err; +} + +#if defined(USE_INITIAL_SHORT_DWELL_TIME) +#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40 +bool g_first_broadcast_scan = TRUE; +#endif + +static s32 +wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct cfg80211_scan_request *request, uint16 action) +{ + s32 err = BCME_OK; + u32 n_channels; + u32 n_ssids; + s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params)); + wl_escan_params_t *params = NULL; + u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + u32 num_chans = 0; + s32 channel; + u32 n_valid_chan; + s32 search_state = WL_P2P_DISC_ST_SCAN; + u32 i, j, n_nodfs = 0; + u16 *default_chan_list = NULL; + wl_uint32_list_t *list; + s32 bssidx = -1; + struct net_device *dev = NULL; +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + bool is_first_init_2g_scan = false; +#endif + p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN; + + WL_DBG(("Enter \n")); + + /* scan request can come with empty request : perform all default scan */ + if (!cfg) { + err = -EINVAL; + goto exit; + } + if (!cfg->p2p_supported || !p2p_scan(cfg)) { + /* LEGACY SCAN TRIGGER */ + WL_ERR((" LEGACY E-SCAN START\n")); + +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + if (!request) { + err = -EINVAL; + goto exit; + } + if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) { + is_first_init_2g_scan = true; + g_first_broadcast_scan = false; + } +#endif + + /* if scan request is not empty parse 
scan request paramters */ + if (request != NULL) { + n_channels = request->n_channels; + n_ssids = request->n_ssids; + if (n_channels % 2) + /* If n_channels is odd, add a padd of u16 */ + params_size += sizeof(u16) * (n_channels + 1); + else + params_size += sizeof(u16) * n_channels; + + /* Allocate space for populating ssids in wl_escan_params_t struct */ + params_size += sizeof(struct wlc_ssid) * n_ssids; + } + params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL); + if (params == NULL) { + err = -ENOMEM; + goto exit; + } + wl_scan_prep(¶ms->params, request); + +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + /* Override active_time to reduce scan time if it's first bradcast scan. */ + if (is_first_init_2g_scan) + params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS; +#endif + + params->version = htod32(ESCAN_REQ_VERSION); + params->action = htod16(action); + wl_escan_set_sync_id(params->sync_id, cfg); + wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY); + if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) { + WL_ERR(("ioctl buffer length not sufficient\n")); + kfree(params); + err = -ENOMEM; + goto exit; + } + if (cfg->active_scan == PASSIVE_SCAN) { + params->params.scan_type = DOT11_SCANTYPE_PASSIVE; + WL_DBG(("Passive scan_type %d \n", params->params.scan_type)); + } + + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + + err = wldev_iovar_setbuf(ndev, "escan", params, params_size, + cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + WL_ERR(("LEGACY_SCAN sync ID: %d, bssidx: %d\n", params->sync_id, bssidx)); + if (unlikely(err)) { + if (err == BCME_EPERM) + /* Scan Not permitted at this point of time */ + WL_DBG((" Escan not permitted at this time (%d)\n", err)); + else + WL_ERR((" Escan set error (%d)\n", err)); + } + kfree(params); + } + else if (p2p_is_on(cfg) && p2p_scan(cfg)) { + /* P2P SCAN TRIGGER */ + s32 _freq = 0; + n_nodfs = 0; + if (request && request->n_channels) { + num_chans = request->n_channels; + WL_ERR((" chann number : %d\n", num_chans)); + default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list), + GFP_KERNEL); + if (default_chan_list == NULL) { + WL_ERR(("channel list allocation failed \n")); + err = -ENOMEM; + goto exit; + } + if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) { +#ifdef P2P_SKIP_DFS + int is_printed = false; +#endif /* P2P_SKIP_DFS */ + list = (wl_uint32_list_t *) chan_buf; + n_valid_chan = dtoh32(list->count); + for (i = 0; i < num_chans; i++) + { + _freq = request->channels[i]->center_freq; + channel = ieee80211_frequency_to_channel(_freq); + + /* ignore DFS channels */ + if (request->channels[i]->flags & +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + (IEEE80211_CHAN_NO_IR + | IEEE80211_CHAN_RADAR)) +#else + (IEEE80211_CHAN_RADAR + | IEEE80211_CHAN_PASSIVE_SCAN)) +#endif + continue; +#ifdef P2P_SKIP_DFS + if (channel >= 52 && channel <= 144) { + if (is_printed == false) { + WL_ERR(("SKIP DFS CHANs(52~144)\n")); + is_printed = true; + } + continue; + } +#endif /* P2P_SKIP_DFS */ + + for (j = 0; j < n_valid_chan; j++) { + /* allows only supported channel on + * current reguatory + */ + if (channel == (dtoh32(list->element[j]))) + default_chan_list[n_nodfs++] = + channel; + } + + } + } + if (num_chans == SOCIAL_CHAN_CNT && ( + (default_chan_list[0] == SOCIAL_CHAN_1) && + (default_chan_list[1] == SOCIAL_CHAN_2) && + (default_chan_list[2] == SOCIAL_CHAN_3))) { + /* SOCIAL CHANNELS 1, 6, 11 */ + search_state = WL_P2P_DISC_ST_SEARCH; + p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL; + WL_INFORM(("P2P 
SEARCH PHASE START \n")); + } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) && + (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) || + ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) && + (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) { + /* If you are already a GO, then do SEARCH only */ + WL_INFORM(("Already a GO. Do SEARCH Only")); + search_state = WL_P2P_DISC_ST_SEARCH; + num_chans = n_nodfs; + p2p_scan_purpose = P2P_SCAN_NORMAL; + + } else if (num_chans == 1) { + p2p_scan_purpose = P2P_SCAN_CONNECT_TRY; + } else if (num_chans == SOCIAL_CHAN_CNT + 1) { + /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by + * the supplicant + */ + p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL; + } else { + WL_INFORM(("P2P SCAN STATE START \n")); + num_chans = n_nodfs; + p2p_scan_purpose = P2P_SCAN_NORMAL; + } + } else { + err = -EINVAL; + goto exit; + } + err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list, + search_state, action, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL, + p2p_scan_purpose); + + if (!err) + cfg->p2p->search_state = search_state; + + kfree(default_chan_list); + } +exit: + if (unlikely(err)) { + /* Don't print Error incase of Scan suppress */ + if ((err == BCME_EPERM) && cfg->scan_suppressed) + WL_DBG(("Escan failed: Scan Suppressed \n")); + else + WL_ERR(("error (%d)\n", err)); + } + return err; +} + + +static s32 +wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +{ + s32 err = BCME_OK; + s32 passive_scan; + s32 passive_scan_time; + s32 passive_scan_time_org; + wl_scan_results_t *results; + WL_SCAN(("Enter \n")); + + results = wl_escan_get_buf(cfg, FALSE); + results->version = 0; + results->count = 0; + results->buflen = WL_SCAN_RESULTS_FIXED_SIZE; + + cfg->escan_info.ndev = ndev; + cfg->escan_info.wiphy = wiphy; + cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING; + passive_scan = cfg->active_scan ? 0 : 1; + err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan), true); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + goto exit; + } + + if (passive_channel_skip) { + + err = wldev_ioctl(ndev, WLC_GET_SCAN_PASSIVE_TIME, + &passive_scan_time_org, sizeof(passive_scan_time_org), false); + if (unlikely(err)) { + WL_ERR(("== error (%d)\n", err)); + goto exit; + } + + WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org)); + + passive_scan_time = 0; + err = wldev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME, + &passive_scan_time, sizeof(passive_scan_time), true); + if (unlikely(err)) { + WL_ERR(("== error (%d)\n", err)); + goto exit; + } + + WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n", + passive_channel_skip)); + } + + err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START); + + if (passive_channel_skip) { + err = wldev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME, + &passive_scan_time_org, sizeof(passive_scan_time_org), true); + if (unlikely(err)) { + WL_ERR(("== error (%d)\n", err)); + goto exit; + } + + WL_SCAN(("PASSIVE SCAN RECOVERED!! 
(passive_scan_time_org:%d) \n", + passive_scan_time_org)); + } + +exit: + return err; +} + +static s32 +__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct cfg80211_ssid *ssids; + struct ether_addr primary_mac; + bool p2p_ssid; +#ifdef WL11U + bcm_tlv_t *interworking_ie; +#endif + s32 err = 0; + s32 bssidx = -1; + s32 i; + + unsigned long flags; + static s32 busy_count = 0; +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + struct net_device *remain_on_channel_ndev = NULL; +#endif + + /* + * Hostapd triggers scan before starting automatic channel selection + * to collect channel characteristics. However firmware scan engine + * doesn't support any channel characteristics collection along with + * scan. Hence return scan success. + */ + if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) { + WL_INFORM(("Scan Command on SoftAP Interface. Ignoring...\n")); + return 0; + } + + ndev = ndev_to_wlc_ndev(ndev, cfg); + + if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) { + WL_ERR(("Sending Action Frames. Try it again.\n")); + return -EAGAIN; + } + + WL_DBG(("Enter wiphy (%p)\n", wiphy)); + if (wl_get_drv_status_all(cfg, SCANNING)) { + if (cfg->scan_request == NULL) { + wl_clr_drv_status_all(cfg, SCANNING); + WL_DBG(("<<<<<<<<<<>>>>>>>>>>\n")); + } else { + WL_ERR(("Scanning already\n")); + return -EAGAIN; + } + } + if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) { + WL_ERR(("Scanning being aborted\n")); + return -EAGAIN; + } + if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) { + WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n")); + return -EOPNOTSUPP; + } + +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("P2P_FIND: Discovery offload is in progress\n")); + return -EAGAIN; + } +#endif /* P2P_LISTEN_OFFLOADING */ + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg); + if (remain_on_channel_ndev) { + WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n")); + wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true); + } +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + + + /* Arm scan timeout timer */ + mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS)); + if (request) { /* scan bss */ + ssids = request->ssids; + p2p_ssid = false; + for (i = 0; i < request->n_ssids; i++) { + if (ssids[i].ssid_len && + IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) { + p2p_ssid = true; + break; + } + } + if (p2p_ssid) { + if (cfg->p2p_supported) { + /* p2p scan trigger */ + if (p2p_on(cfg) == false) { + /* p2p on at the first time */ + p2p_on(cfg) = true; + wl_cfgp2p_set_firm_p2p(cfg); + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti = false; +#endif + } + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + p2p_scan(cfg) = true; + } + } else { + /* legacy scan trigger + * So, we have to disable p2p discovery if p2p discovery is on + */ + if (cfg->p2p_supported) { + p2p_scan(cfg) = false; + /* If Netdevice is not equals to primary and p2p is on + * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE. 
+ */ + + if (p2p_scan(cfg) == false) { + if (wl_get_p2p_status(cfg, DISCOVERY_ON)) { + err = wl_cfgp2p_discover_enable_search(cfg, + false); + if (unlikely(err)) { + goto scan_out; + } + + } + } + } + if (!cfg->p2p_supported || !p2p_scan(cfg)) { + if ((bssidx = wl_get_bssidx_by_wdev(cfg, + ndev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from ndev(%p) failed\n", + ndev)); + err = BCME_ERROR; + goto scan_out; + } +#ifdef WL11U + if ((interworking_ie = wl_cfg80211_find_interworking_ie( + (u8 *)request->ie, request->ie_len)) != NULL) { + err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx, + VNDR_IE_CUSTOM_FLAG, interworking_ie->id, + interworking_ie->data, interworking_ie->len); + + if (unlikely(err)) { + WL_ERR(("Failed to add interworking IE")); + } + } else if (cfg->iw_ie_len != 0) { + /* we have to clear IW IE and disable gratuitous APR */ + wl_cfg80211_add_iw_ie(cfg, ndev, bssidx, + VNDR_IE_CUSTOM_FLAG, + DOT11_MNG_INTERWORKING_ID, + 0, 0); + + (void)wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0, + bssidx); + cfg->wl11u = FALSE; + cfg->iw_ie_len = 0; + memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN); + /* we don't care about error */ + } +#endif /* WL11U */ + err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev), + bssidx, VNDR_IE_PRBREQ_FLAG, request->ie, + request->ie_len); + + if (unlikely(err)) { + goto scan_out; + } + + } + } + } else { /* scan in ibss */ + ssids = this_ssid; + } + + if (request && cfg->p2p_supported && !p2p_scan(cfg)) { + WL_TRACE_HW4(("START SCAN\n")); + DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub), + SCAN_WAKE_LOCK_TIMEOUT); + DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub)); + } + + if (cfg->p2p_supported) { + if (p2p_on(cfg) && p2p_scan(cfg)) { + + /* find my listen channel */ + cfg->afx_hdl->my_listen_chan = + wl_find_listen_channel(cfg, request->ie, + request->ie_len); + err = wl_cfgp2p_enable_discovery(cfg, ndev, + request->ie, request->ie_len); + + if (unlikely(err)) { + goto scan_out; + } + } + } + err = wl_do_escan(cfg, wiphy, ndev, request); + if (likely(!err)) + goto scan_success; + else + goto scan_out; + +scan_success: + busy_count = 0; + cfg->scan_request = request; + wl_set_drv_status(cfg, SCANNING, ndev); + + return 0; + +scan_out: + if (err == BCME_BUSY || err == BCME_NOTREADY) { + WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY)); + err = -EBUSY; + } else if ((err == BCME_EPERM) && cfg->scan_suppressed) { + WL_ERR(("Scan not permitted due to scan suppress\n")); + err = -EPERM; + } else { + /* For all other fw errors, use a generic error code as return + * value to cfg80211 stack + */ + err = -EAGAIN; + } + +#define SCAN_EBUSY_RETRY_LIMIT 20 + if (err == -EBUSY) { + if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) { + struct ether_addr bssid; + s32 ret = 0; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + busy_count = 0; + WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n", + wl_get_drv_status(cfg, SCANNING, ndev), + wl_get_drv_status(cfg, SCAN_ABORTING, ndev), + wl_get_drv_status(cfg, CONNECTING, ndev), + wl_get_drv_status(cfg, CONNECTED, ndev), + wl_get_drv_status(cfg, DISCONNECTING, ndev), + wl_get_drv_status(cfg, AP_CREATING, ndev), + wl_get_drv_status(cfg, AP_CREATED, ndev), + wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev), + wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev))); + +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_type 
= DUMP_TYPE_SCAN_BUSY; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + + bzero(&bssid, sizeof(bssid)); + if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID, + &bssid, ETHER_ADDR_LEN, false)) == 0) + WL_ERR(("FW is connected with " MACDBG "/n", + MAC2STRDBG(bssid.octet))); + else + WL_ERR(("GET BSSID failed with %d\n", ret)); + + wl_cfg80211_scan_abort(cfg); + + } else { + /* Hold the context for 400msec, so that 10 subsequent scans + * can give a buffer of 4sec which is enough to + * cover any on-going scan in the firmware + */ + WL_DBG(("Enforcing delay for EBUSY case \n")); + msleep(500); + } + } else { + busy_count = 0; + } + + wl_clr_drv_status(cfg, SCANNING, ndev); + if (timer_pending(&cfg->scan_timeout)) + del_timer_sync(&cfg->scan_timeout); + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + cfg->scan_request = NULL; + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + + return err; +} + +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 +wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) +#else +static s32 +wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +#endif /* WL_CFG80211_P2P_DEV_IF */ +{ + s32 err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); +#if defined(WL_CFG80211_P2P_DEV_IF) + struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + WL_DBG(("Enter\n")); + RETURN_EIO_IF_NOT_UP(cfg); + + if (ndev == bcmcfg_to_prmry_ndev(cfg)) { + if (wl_cfg_multip2p_operational(cfg)) { + WL_ERR(("wlan0 scan failed, p2p devices are operational")); + return -ENODEV; + } + } + + mutex_lock(&cfg->usr_sync); + err = __wl_cfg80211_scan(wiphy, ndev, request, NULL); + if (unlikely(err)) { + WL_ERR(("scan error (%d)\n", err)); + } + mutex_unlock(&cfg->usr_sync); + + return err; +} + +static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold) +{ + s32 err = 0; + + err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + return err; +} + +static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold) +{ + s32 err = 0; + + err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + return err; +} + +static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l) +{ + s32 err = 0; + u32 cmd = (l ? 
WLC_SET_LRL : WLC_SET_SRL); + + retry = htod32(retry); + err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), true); + if (unlikely(err)) { + WL_ERR(("cmd (%d) , error (%d)\n", cmd, err)); + return err; + } + return err; +} + +static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; + + RETURN_EIO_IF_NOT_UP(cfg); + WL_DBG(("Enter\n")); + if (changed & WIPHY_PARAM_RTS_THRESHOLD && + (cfg->conf->rts_threshold != wiphy->rts_threshold)) { + cfg->conf->rts_threshold = wiphy->rts_threshold; + err = wl_set_rts(ndev, cfg->conf->rts_threshold); + if (!err) + return err; + } + if (changed & WIPHY_PARAM_FRAG_THRESHOLD && + (cfg->conf->frag_threshold != wiphy->frag_threshold)) { + cfg->conf->frag_threshold = wiphy->frag_threshold; + err = wl_set_frag(ndev, cfg->conf->frag_threshold); + if (!err) + return err; + } + if (changed & WIPHY_PARAM_RETRY_LONG && + (cfg->conf->retry_long != wiphy->retry_long)) { + cfg->conf->retry_long = wiphy->retry_long; + err = wl_set_retry(ndev, cfg->conf->retry_long, true); + if (!err) + return err; + } + if (changed & WIPHY_PARAM_RETRY_SHORT && + (cfg->conf->retry_short != wiphy->retry_short)) { + cfg->conf->retry_short = wiphy->retry_short; + err = wl_set_retry(ndev, cfg->conf->retry_short, false); + if (!err) { + return err; + } + } + + return err; +} +static chanspec_t +channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + u8 *buf = NULL; + wl_uint32_list_t *list; + int err = BCME_OK; + chanspec_t c = 0, ret_c = 0; + int bw = 0, tmp_bw = 0; + int i; + u32 tmp_c; + u16 kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; +#define LOCAL_BUF_SIZE 1024 + buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags); + if (!buf) { + WL_ERR(("buf memory alloc failed\n")); + goto exit; + } + list = (wl_uint32_list_t *)(void *)buf; + list->count = htod32(WL_NUMCHANSPECS); + err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL, + 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync); + if (err != BCME_OK) { + WL_ERR(("get chanspecs failed with %d\n", err)); + goto exit; + } + for (i = 0; i < dtoh32(list->count); i++) { + c = dtoh32(list->element[i]); + if (channel <= CH_MAX_2G_CHANNEL) { + if (!CHSPEC_IS20(c)) + continue; + if (channel == CHSPEC_CHANNEL(c)) { + ret_c = c; + bw = 20; + goto exit; + } + } + tmp_c = wf_chspec_ctlchan(c); + tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT]; + if (tmp_c != channel) + continue; + + if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) { + bw = tmp_bw; + ret_c = c; + if (bw == bw_cap) + goto exit; + } + } +exit: + if (buf) + kfree(buf); +#undef LOCAL_BUF_SIZE + WL_INFORM(("return chanspec %x %d\n", ret_c, bw)); + return ret_c; +} + +void +wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (cfg != NULL && ibss_vsie != NULL) { + if (cfg->ibss_vsie != NULL) { + kfree(cfg->ibss_vsie); + } + cfg->ibss_vsie = ibss_vsie; + cfg->ibss_vsie_len = ibss_vsie_len; + } +} + +static void +wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg) +{ + /* free & initiralize VSIE (Vendor Specific IE) */ + if (cfg->ibss_vsie != NULL) { + kfree(cfg->ibss_vsie); + cfg->ibss_vsie = NULL; + cfg->ibss_vsie_len = 0; + } +} + +s32 +wl_cfg80211_ibss_vsie_delete(struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + char *ioctl_buf = NULL; + s32 ret = BCME_OK; + + if (cfg != NULL && cfg->ibss_vsie != NULL) { + ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL); + if (!ioctl_buf) { + WL_ERR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* change the command from "add" to "del" */ + strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1); + cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + ret = wldev_iovar_setbuf(dev, "ie", + cfg->ibss_vsie, cfg->ibss_vsie_len, + ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + WL_ERR(("ret=%d\n", ret)); + + if (ret == BCME_OK) { + /* free & initiralize VSIE */ + kfree(cfg->ibss_vsie); + cfg->ibss_vsie = NULL; + cfg->ibss_vsie_len = 0; + } + + if (ioctl_buf) { + kfree(ioctl_buf); + } + } + + return ret; +} + +#ifdef WLAIBSS_MCHAN +static bcm_struct_cfgdev* +bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct wireless_dev* wdev = NULL; + struct net_device *new_ndev = NULL; + struct net_device *primary_ndev = NULL; + s32 timeout; + wl_aibss_if_t aibss_if; + wl_if_event_info *event = NULL; + + if (cfg->ibss_cfgdev != NULL) { + WL_ERR(("IBSS interface %s already exists\n", name)); + return NULL; + } + + WL_ERR(("Try to create IBSS interface %s\n", name)); + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + /* generate a new MAC address for the IBSS interface */ + get_primary_mac(cfg, &cfg->ibss_if_addr); + cfg->ibss_if_addr.octet[4] ^= 0x40; + memset(&aibss_if, sizeof(aibss_if), 0); + memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr)); + aibss_if.chspec = 0; + aibss_if.len = sizeof(aibss_if); + + cfg->bss_pending_op = TRUE; + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if, + sizeof(aibss_if), cfg->ioctl_buf, 
WLC_IOCTL_MAXLEN, NULL); + if (err) { + WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err)); + goto fail; + } + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout <= 0 || cfg->bss_pending_op) + goto fail; + + event = &cfg->if_event_info; + /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control + * over this net_device interface to dhd_linux, hence the interface is managed by dhd_liux + * and will be freed by dhd_detach unless it gets unregistered before that. The + * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will + * be freed by wl_dealloc_netinfo + */ + new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name, + event->mac, event->bssidx, event->name); + if (new_ndev == NULL) + goto fail; + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (wdev == NULL) + goto fail; + wdev->wiphy = wiphy; + wdev->iftype = NL80211_IFTYPE_ADHOC; + wdev->netdev = new_ndev; + new_ndev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy)); + + /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if + * needs to be modified to take one parameter (bool need_rtnl_lock) + */ + ASSERT_RTNL(); + if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) + goto fail; + + wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE, event->bssidx); + cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev); + WL_ERR(("IBSS interface %s created\n", new_ndev->name)); + return cfg->ibss_cfgdev; + +fail: + WL_ERR(("failed to create IBSS interface %s \n", name)); + cfg->bss_pending_op = FALSE; + if (new_ndev) + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + if (wdev) + kfree(wdev); + return NULL; +} + +static s32 +bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = NULL; + struct net_device *primary_ndev = NULL; + s32 timeout; + + if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet)) + return -EINVAL; + ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev); + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + cfg->bss_pending_op = TRUE; + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr, + sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL); + if (err) { + WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err)); + goto fail; + } + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout <= 0 || cfg->bss_pending_op) { + WL_ERR(("timeout in waiting IF_DEL event\n")); + goto fail; + } + + wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev); + cfg->ibss_cfgdev = NULL; + return 0; + +fail: + cfg->bss_pending_op = FALSE; + return -1; +} +#endif /* WLAIBSS_MCHAN */ + +s32 +wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr) +{ + wl_interface_create_t iface; + s32 ret; + wl_interface_info_t *info; + + bzero(&iface, sizeof(wl_interface_create_t)); + + iface.ver = WL_INTERFACE_CREATE_VER; + + if (iface_type == NL80211_IFTYPE_AP) + iface.flags = WL_INTERFACE_CREATE_AP; + else + iface.flags = WL_INTERFACE_CREATE_STA; + + if (del) { + ret = wldev_iovar_setbuf(ndev, 
"interface_remove", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + } else { + if (addr) { + memcpy(&iface.mac_addr.octet, addr, ETH_ALEN); + iface.flags |= WL_INTERFACE_MAC_USE; + } + ret = wldev_iovar_getbuf(ndev, "interface_create", + &iface, sizeof(wl_interface_create_t), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret == 0) { + /* success */ + info = (wl_interface_info_t *)cfg->ioctl_buf; + WL_DBG(("wl interface create success!! bssidx:%d \n", + info->bsscfgidx)); + ret = info->bsscfgidx; + } + } + + if (ret < 0) + WL_ERR(("Interface %s failed!! ret %d\n", + del ? "remove" : "create", ret)); + + return ret; +} + + +s32 +wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr) +{ + s32 ret = BCME_OK; + s32 val = 0; + + struct { + s32 cfg; + s32 val; + struct ether_addr ea; + } bss_setbuf; + + WL_INFORM(("iface_type:%d del:%d \n", iface_type, del)); + + bzero(&bss_setbuf, sizeof(bss_setbuf)); + + /* AP=3, STA=2, up=1, down=0, val=-1 */ + if (del) { + val = -1; + } else if (iface_type == NL80211_IFTYPE_AP) { + /* AP Interface */ + WL_DBG(("Adding AP Interface \n")); + val = 3; + } else if (iface_type == NL80211_IFTYPE_STATION) { + WL_DBG(("Adding STA Interface \n")); + val = 2; + } else { + WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type)); + return -EINVAL; + } + + bss_setbuf.cfg = htod32(bsscfg_idx); + bss_setbuf.val = htod32(val); + + if (addr) { + memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN); + } + + ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret != 0) + WL_ERR(("'bss %d' failed with %d\n", val, ret)); + + return ret; +} + +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) +/* Create a Generic Network Interface and initialize it depending up on + * the interface type + */ +bcm_struct_cfgdev* +wl_cfg80211_create_iface(struct wiphy *wiphy, + enum nl80211_iftype iface_type, + u8 *mac_addr, const char *name) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *new_ndev = NULL; + struct net_device *primary_ndev = NULL; + s32 ret = BCME_OK; + s32 bsscfg_idx = 0; + u32 timeout; + wl_if_event_info *event = NULL; + struct wireless_dev *wdev = NULL; + u8 addr[ETH_ALEN]; + + WL_DBG(("Enter\n")); + + if (!name) { + WL_ERR(("Interface name not provided\n")); + return NULL; + } + + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + +#ifdef DHD_IFDEBUG + WL_ERR(("cfg=%p, primary_ndev=%p, ifname=%s\n", cfg, primary_ndev, name)); +#endif + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + WL_ERR(("Scan in progress. 
Aborting the scan!\n")); + wl_cfg80211_scan_abort(cfg); + while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (!wait_cnt && wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Failed to abort scan\n")); + return NULL; + } + } + + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + if (likely(!mac_addr)) { + /* Use primary MAC with the locally administered bit for the + * Secondary STA I/F + */ + memcpy(addr, primary_ndev->dev_addr, ETH_ALEN); + addr[0] |= 0x02; + } else { + /* Use the application provided mac address (if any) */ + memcpy(addr, mac_addr, ETH_ALEN); + } + + if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) { + WL_ERR(("IFACE type:%d not supported. STA " + "or AP IFACE is only supported\n", iface_type)); + return NULL; + } + + cfg->bss_pending_op = TRUE; + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + + /* De-initialize the p2p discovery interface, if operational */ + if (p2p_is_on(cfg)) { + WL_DBG(("Disabling P2P Discovery Interface \n")); +#ifdef WL_CFG80211_P2P_DEV_IF + ret = wl_cfg80211_scan_stop(bcmcfg_to_p2p_wdev(cfg)); +#else + ret = wl_cfg80211_scan_stop(cfg->p2p_net); +#endif + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret)); + } + +#ifdef DHD_IFDEBUG + WL_ERR(("call wl_cfgp2p_disable_discovery()\n")); +#endif + wl_cfgp2p_disable_discovery(cfg); + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0; + p2p_on(cfg) = false; + } + + /* + * Intialize the firmware I/F. + */ + ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx, + NL80211_IFTYPE_STATION, 0, addr); + if (ret == BCME_UNSUPPORTED) { + /* Use bssidx 1 by default */ + bsscfg_idx = 1; + if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev, + bsscfg_idx, iface_type, 0, addr)) < 0) { + return NULL; + } + } else if (ret < 0) { + WL_ERR(("Interface create failed!! ret:%d \n", ret)); + goto fail; + } else { + /* Success */ + bsscfg_idx = ret; + } + + WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx)); + + /* + * Wait till the firmware send a confirmation event back. + */ + WL_DBG(("Wait for the FW I/F Event\n")); + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout <= 0 || cfg->bss_pending_op) { + WL_ERR(("ADD_IF event, didn't come. Return \n")); + goto fail; + } + + /* + * Since FW operation is successful,we can go ahead with the + * the host interface creation. + */ + event = &cfg->if_event_info; + new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, + (char*)name, addr, event->bssidx, event->name); + if (!new_ndev) { + WL_ERR(("I/F allocation failed! \n")); + goto fail; + } else + WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n", + event->ifidx, event->bssidx)); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (!wdev) { + WL_ERR(("wireless_dev alloc failed! \n")); + goto fail; + } + + wdev->wiphy = wiphy; + wdev->iftype = iface_type; + new_ndev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy)); + +#ifdef DHD_IFDEBUG + WL_ERR(("wdev=%p, new_ndev=%p\n", wdev, new_ndev)); +#endif + + /* RTNL lock must have been acquired. 
*/ + ASSERT_RTNL(); + + /* Set the locally administed mac addr, if not applied already */ + if (memcmp(addr, event->mac, ETH_ALEN) != 0) { + ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", + addr, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + event->bssidx, &cfg->ioctl_buf_sync); + if (unlikely(ret)) { + WL_ERR(("set cur_etheraddr Error (%d)\n", ret)); + goto fail; + } + memcpy(new_ndev->dev_addr, addr, ETH_ALEN); + } + + if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) { + WL_ERR(("IFACE register failed \n")); + goto fail; + } + + /* Initialize with the station mode params */ + wl_alloc_netinfo(cfg, new_ndev, wdev, + (iface_type == NL80211_IFTYPE_STATION) ? + WL_MODE_BSS : WL_MODE_AP, PM_ENABLE, event->bssidx); + cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev); + cfg->cfgdev_bssidx = event->bssidx; + + WL_DBG(("Host Network Interface for Secondary I/F created")); + +#ifdef DHD_IFDEBUG + WL_ERR(("cfg->bss_cfgdev=%p\n", cfg->bss_cfgdev)); +#endif + + return cfg->bss_cfgdev; + +fail: + cfg->bss_pending_op = FALSE; + cfg->cfgdev_bssidx = -1; + if (wdev) + kfree(wdev); + if (new_ndev) + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + +#ifdef DHD_IFDEBUG + WL_ERR(("failed!!!\n")); +#endif + + return NULL; +} + +s32 +wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = NULL; + struct net_device *primary_ndev = NULL; + s32 ret = BCME_OK; + s32 bsscfg_idx = 1; + u32 timeout; + u32 ifidx; + enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION; + + WL_ERR(("Enter\n")); + + if (!cfg->bss_cfgdev) + return 0; + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Scan in progress. Aborting the scan!\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + + ndev = (struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev); + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + +#ifdef DHD_IFDEBUG + WL_ERR(("cfg->bss_cfgdev=%p, ndev=%p, primary_ndev=%p\n", + cfg->bss_cfgdev, ndev, primary_ndev)); +#endif + + cfg->bss_pending_op = TRUE; + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + + /* Delete the firmware interface. 
"interface_remove" command + * should go on the interface to be deleted + */ + ret = wl_cfg80211_interface_ops(cfg, ndev, cfg->cfgdev_bssidx, + NL80211_IFTYPE_STATION, 1, NULL); + if (ret == BCME_UNSUPPORTED) { + if ((ret = wl_cfg80211_add_del_bss(cfg, ndev, + bsscfg_idx, iface_type, true, NULL)) < 0) { + WL_ERR(("DEL bss failed ret:%d \n", ret)); + goto exit; + } + } else if (ret < 0) { + WL_ERR(("Interface DEL failed ret:%d \n", ret)); + goto exit; + } + + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout <= 0 || cfg->bss_pending_op) { + WL_ERR(("timeout in waiting IF_DEL event\n")); + } + +exit: + ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev); + wl_cfg80211_remove_if(cfg, ifidx, ndev); + cfg->bss_cfgdev = NULL; + cfg->cfgdev_bssidx = -1; + cfg->bss_pending_op = FALSE; + + WL_ERR(("IF_DEL Done.\n")); + + return ret; +} +#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */ + +static s32 +wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct cfg80211_bss *bss; + struct ieee80211_channel *chan; + struct wl_join_params join_params; + int scan_suppress; + struct cfg80211_ssid ssid; + s32 scan_retry = 0; + s32 err = 0; + size_t join_params_size; + chanspec_t chanspec = 0; + u32 param[2] = {0, 0}; + u32 bw_cap = 0; + + WL_TRACE(("In\n")); + RETURN_EIO_IF_NOT_UP(cfg); + WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid))); + if (!params->ssid || params->ssid_len <= 0) { + WL_ERR(("Invalid parameter\n")); + return -EINVAL; + } +#if defined(WL_CFG80211_P2P_DEV_IF) + chan = params->chandef.chan; +#else + chan = params->channel; +#endif /* WL_CFG80211_P2P_DEV_IF */ + if (chan) + cfg->channel = ieee80211_frequency_to_channel(chan->center_freq); + if (wl_get_drv_status(cfg, CONNECTED, dev)) { + struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID); + u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID); + u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN); + if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) && + (memcmp(params->ssid, lssid->SSID, lssid->SSID_len) == 0) && + (*channel == cfg->channel))) { + WL_ERR(("Connection already existed to " MACDBG "\n", + MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID)))); + return -EISCONN; + } + WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n", + lssid->SSID, MAC2STRDBG(bssid))); + } + + /* remove the VSIE */ + wl_cfg80211_ibss_vsie_delete(dev); + + bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len); + if (!bss) { + if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) { + memcpy(ssid.ssid, params->ssid, params->ssid_len); + ssid.ssid_len = params->ssid_len; + do { + if (unlikely + (__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) == + -EBUSY)) { + wl_delay(150); + } else { + break; + } + } while (++scan_retry < WL_SCAN_RETRY_MAX); + + /* rtnl lock code is removed here. don't see why rtnl lock + * needs to be released. + */ + + /* wait 4 secons till scan done.... 
*/ + schedule_timeout_interruptible(msecs_to_jiffies(4000)); + + bss = cfg80211_get_ibss(wiphy, NULL, + params->ssid, params->ssid_len); + } + } + if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) || + ((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid && + !memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) { + cfg->ibss_starter = false; + WL_DBG(("Found IBSS\n")); + } else { + cfg->ibss_starter = true; + } + if (chan) { + if (chan->band == NL80211_BAND_5GHZ ) + param[0] = WLC_BAND_5G; + else if (chan->band == NL80211_BAND_2GHZ ) + param[0] = WLC_BAND_2G; + err = wldev_iovar_getint(dev, "bw_cap", param); + if (unlikely(err)) { + WL_ERR(("Get bw_cap Failed (%d)\n", err)); + return err; + } + bw_cap = param[0]; + chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap); + } + /* + * Join with specific BSSID and cached SSID + * If SSID is zero join based on BSSID only + */ + memset(&join_params, 0, sizeof(join_params)); + memcpy((void *)join_params.ssid.SSID, (void *)params->ssid, + params->ssid_len); + join_params.ssid.SSID_len = htod32(params->ssid_len); + if (params->bssid) { + memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN); + err = wldev_ioctl(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid, + ETHER_ADDR_LEN, true); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + } else + memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN); + wldev_iovar_setint(dev, "ibss_coalesce_allowed", IBSS_COALESCE_ALLOWED); + + if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) { + scan_suppress = TRUE; + /* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */ + err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS, + &scan_suppress, sizeof(int), true); + if (unlikely(err)) { + WL_ERR(("Scan Suppress Setting Failed (%d)\n", err)); + return err; + } + } + + join_params.params.chanspec_list[0] = chanspec; + join_params.params.chanspec_num = 1; + wldev_iovar_setint(dev, "chanspec", chanspec); + join_params_size = sizeof(join_params); + + /* Disable Authentication, IBSS will add key if it required */ + wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED); + wldev_iovar_setint(dev, "wsec", 0); + + + err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, + join_params_size, true); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + + if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) { + scan_suppress = FALSE; + /* Reset the SCAN SUPPRESS Flag */ + err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS, + &scan_suppress, sizeof(int), true); + if (unlikely(err)) { + WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err)); + return err; + } + } + wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID); + wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN); + cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */ + return err; +} + +static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + scb_val_t scbval; + u8 *curbssid; + + RETURN_EIO_IF_NOT_UP(cfg); + wl_link_down(cfg); + + WL_ERR(("Leave IBSS\n")); + curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID); + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = 0; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error(%d)\n", err)); + return err; + } + + /* remove the VSIE */ + wl_cfg80211_ibss_vsie_delete(dev); + + return err; +} + + +static s32 
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) + val = WPA_AUTH_PSK | + WPA_AUTH_UNSPECIFIED; + else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) + val = WPA2_AUTH_PSK| + WPA2_AUTH_UNSPECIFIED; + else + val = WPA_AUTH_DISABLED; + + if (is_wps_conn(sme)) + val = WPA_AUTH_DISABLED; + + WL_DBG(("setting wpa_auth to 0x%0x\n", val)); + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set wpa_auth failed (%d)\n", err)); + return err; + } + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->wpa_versions = sme->crypto.wpa_versions; + return err; +} + + +static s32 +wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + switch (sme->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + val = WL_AUTH_OPEN_SYSTEM; + WL_DBG(("open system\n")); + break; + case NL80211_AUTHTYPE_SHARED_KEY: + val = WL_AUTH_SHARED_KEY; + WL_DBG(("shared key\n")); + break; + case NL80211_AUTHTYPE_AUTOMATIC: + val = WL_AUTH_OPEN_SHARED; + WL_DBG(("automatic\n")); + break; + default: + val = 2; + WL_ERR(("invalid auth type (%d)\n", sme->auth_type)); + break; + } + + err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set auth failed (%d)\n", err)); + return err; + } + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->auth_type = sme->auth_type; + return err; +} + +static s32 +wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 pval = 0; + s32 gval = 0; + s32 err = 0; + s32 wsec_val = 0; + + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (sme->crypto.n_ciphers_pairwise) { + switch (sme->crypto.ciphers_pairwise[0]) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + pval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + pval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_AES_CMAC: + pval = AES_ENABLED; + break; + default: + WL_ERR(("invalid cipher pairwise (%d)\n", + sme->crypto.ciphers_pairwise[0])); + return -EINVAL; + } + } + if (sme->crypto.cipher_group) { + switch (sme->crypto.cipher_group) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + gval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + gval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + gval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + gval = AES_ENABLED; + break; + default: + WL_ERR(("invalid cipher group (%d)\n", + sme->crypto.cipher_group)); + return -EINVAL; + } + } + + WL_DBG(("pval (%d) gval (%d)\n", pval, gval)); + + if (is_wps_conn(sme)) { + if (sme->privacy) + err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx); + else + /* WPS-2.0 
allows no security */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx); + } else { + WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC")); + wsec_val = pval | gval; + + WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val)); + err = wldev_iovar_setint_bsscfg(dev, "wsec", + wsec_val, bssidx); + } + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; + sec->cipher_group = sme->crypto.cipher_group; + + return err; +} + +static s32 +wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (sme->crypto.n_akm_suites) { + err = wldev_iovar_getint(dev, "wpa_auth", &val); + if (unlikely(err)) { + WL_ERR(("could not get wpa_auth (%d)\n", err)); + return err; + } + if (val & (WPA_AUTH_PSK | + WPA_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA_AUTH_PSK; + break; + default: + WL_ERR(("invalid akm suite (0x%x)\n", + sme->crypto.akm_suites[0])); + return -EINVAL; + } + } else if (val & (WPA2_AUTH_PSK | + WPA2_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA2_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA2_AUTH_PSK; + break; + default: + WL_ERR(("invalid akm suite (0x%x)\n", + sme->crypto.akm_suites[0])); + return -EINVAL; + } + } + + + WL_DBG(("setting wpa_auth to 0x%x\n", val)); + + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("could not set wpa_auth (%d)\n", err)); + return err; + } + } + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->wpa_auth = sme->crypto.akm_suites[0]; + + return err; +} + +static s32 +wl_set_set_sharedkey(struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + struct wl_wsec_key key; + s32 val; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + WL_DBG(("key len (%d)\n", sme->key_len)); + if (sme->key_len) { + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n", + sec->wpa_versions, sec->cipher_pairwise)); + if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 | + NL80211_WPA_VERSION_2)) && + (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 | + WLAN_CIPHER_SUITE_WEP104))) + { + memset(&key, 0, sizeof(key)); + key.len = (u32) sme->key_len; + key.index = (u32) sme->key_idx; + if (unlikely(key.len > sizeof(key.data))) { + WL_ERR(("Too long key length (%u)\n", key.len)); + return -EINVAL; + } + memcpy(key.data, sme->key, key.len); + key.flags = WL_PRIMARY_KEY; + switch (sec->cipher_pairwise) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + break; + default: + WL_ERR(("Invalid algorithm (%d)\n", + sme->crypto.ciphers_pairwise[0])); + return -EINVAL; + } + /* Set the new key/index */ + WL_DBG(("key length (%d) key index (%d) algo (%d)\n", + key.len, key.index, 
key.algo)); + WL_DBG(("key \"%s\"\n", key.data)); + swap_key_from_BE(&key); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { + WL_DBG(("set auth_type to shared key\n")); + val = WL_AUTH_SHARED_KEY; /* shared key */ + err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set auth failed (%d)\n", err)); + return err; + } + } + } + } + return err; +} + +#if defined(ESCAN_RESULT_PATCH) +static u8 connect_req_bssid[6]; +static u8 broad_bssid[6]; +#endif /* ESCAN_RESULT_PATCH */ + + + +#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX) +static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd) +{ + u32 chanspec = 0; + bool isvht80 = 0; + + if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK) + chanspec = wl_chspec_driver_to_host(chanspec); + + isvht80 = chanspec & WL_CHANSPEC_BW_80; + WL_INFO(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80)); + + return isvht80; +} +#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */ + +static s32 +wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct ieee80211_channel *chan = sme->channel; + wl_extjoin_params_t *ext_join_params; + struct wl_join_params join_params; + size_t join_params_size; + s32 err = 0; + wpa_ie_fixed_t *wpa_ie; + bcm_tlv_t *wpa2_ie; + u8* wpaie = 0; + u32 wpaie_len = 0; + u32 chan_cnt = 0; + struct ether_addr bssid; + s32 bssidx = -1; + int ret; + int wait_cnt; + + WL_DBG(("In\n")); + +#if defined(SUPPORT_RANDOM_MAC_SCAN) + wl_cfg80211_set_random_mac(dev, FALSE); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + if (sme->channel_hint) { + chan = sme->channel_hint; + WL_DBG(("channel_hint (%d), channel_hint center_freq (%d)\n", + ieee80211_frequency_to_channel(sme->channel_hint->center_freq), + sme->channel_hint->center_freq)); + } + if (sme->bssid_hint) { + sme->bssid = sme->bssid_hint; + WL_DBG(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint))); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */ + + if (unlikely(!sme->ssid)) { + WL_ERR(("Invalid ssid\n")); + return -EOPNOTSUPP; + } + + if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) { + WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n", + sme->ssid, sme->ssid_len)); + return -EINVAL; + } + + RETURN_EIO_IF_NOT_UP(cfg); + + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. + */ +#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH)) + if (cfg->scan_request) { + WL_TRACE_HW4(("Aborting the scan! 
\n")); + wl_cfg80211_scan_abort(cfg); + wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + while (wl_get_drv_status(cfg, SCANNING, dev) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (wl_get_drv_status(cfg, SCANNING, dev)) { + wl_notify_escan_complete(cfg, dev, true, true); + } + } +#endif +#ifdef WL_SCHED_SCAN + if (cfg->sched_scan_req) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg), 0); +#else + wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg)); +#endif + } +#endif +#if defined(ESCAN_RESULT_PATCH) + if (sme->bssid) + memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN); + else + bzero(connect_req_bssid, ETHER_ADDR_LEN); + bzero(broad_bssid, ETHER_ADDR_LEN); +#endif +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) + maxrxpktglom = 0; +#endif + bzero(&bssid, sizeof(bssid)); + if (!wl_get_drv_status(cfg, CONNECTED, dev)&& + (ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) { + if (!ETHER_ISNULLADDR(&bssid)) { + scb_val_t scbval; + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = DOT11_RC_DISASSOC_LEAVING; + memcpy(&scbval.ea, &bssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + + WL_DBG(("drv status CONNECTED is not set, but connected in FW!" MACDBG "/n", + MAC2STRDBG(bssid.octet))); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } + wait_cnt = 500/10; + while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) { + WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", + wait_cnt)); + wait_cnt--; + OSL_SLEEP(10); + } + } else + WL_DBG(("Currently not associated!\n")); + } else { + /* if status is DISCONNECTING, wait for disconnection terminated max 500 ms */ + wait_cnt = 200/10; + while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) { + WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(10); + } + if (wl_get_drv_status(cfg, DISCONNECTING, dev)) { + WL_ERR(("Force clear DISCONNECTING status!\n")); + wl_clr_drv_status(cfg, DISCONNECTING, dev); + } + } + + /* Clean BSSID */ + bzero(&bssid, sizeof(bssid)); + if (!wl_get_drv_status(cfg, DISCONNECTING, dev)) + wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID); + + if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) { + /* we only allow to connect using virtual interface in case of P2P */ + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", + dev->ieee80211_ptr)); + return BCME_ERROR; + } + wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); + } else if (dev == bcmcfg_to_prmry_ndev(cfg)) { + /* find the RSN_IE */ + if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE is found\n")); + } + /* find the WPA_IE */ + if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie, + sme->ie_len)) != NULL) { + WL_DBG((" WPA IE is found\n")); + } + if (wpa_ie != NULL || wpa2_ie != NULL) { + wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie; + wpaie_len = (wpa_ie != NULL) ? 
wpa_ie->length : wpa2_ie->len; + wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN; + err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("wpaie set error (%d)\n", err)); + return err; + } + } else { + err = wldev_iovar_setbuf(dev, "wpaie", NULL, 0, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("wpaie set error (%d)\n", err)); + return err; + } + } + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len); + if (unlikely(err)) { + return err; + } + } + if (chan) { + /* If RCC is not enabled, use the channel provided by userspace */ + cfg->channel = ieee80211_frequency_to_channel(chan->center_freq); + chan_cnt = 1; + WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel, + chan->center_freq, chan_cnt)); + } else { + /* + * No channel information from user space. if RCC is enabled, the RCC + * would prepare the channel list, else no channel would be provided + * and firmware would need to do a full channel scan. + */ + WL_DBG(("No channel info from user space\n")); + cfg->channel = 0; + } + WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len)); + WL_DBG(("3. set wpa version \n")); + err = wl_set_wpa_version(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid wpa_version\n")); + return err; + } + err = wl_set_auth_type(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid auth type\n")); + return err; + } + + err = wl_set_set_cipher(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid ciper\n")); + return err; + } + + err = wl_set_key_mgmt(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid key mgmt\n")); + return err; + } + + err = wl_set_set_sharedkey(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid shared key\n")); + return err; + } + + /* + * Join with specific BSSID and cached SSID + * If SSID is zero join based on BSSID only + */ + join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE + + chan_cnt * sizeof(chanspec_t); + ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL); + if (ext_join_params == NULL) { + err = -ENOMEM; + wl_clr_drv_status(cfg, CONNECTING, dev); + goto exit; + } + ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len); + memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len); + wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID); + ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len); + /* increate dwell time to receive probe response or detect Beacon + * from target AP at a noisy air only during connect command + */ + ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1; + ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1; + /* Set up join scan parameters */ + ext_join_params->scan.scan_type = -1; + ext_join_params->scan.nprobes = chan_cnt ? 
+ (ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1; + ext_join_params->scan.home_time = -1; + + if (sme->bssid) + memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN); + else + memcpy(&ext_join_params->assoc.bssid, ðer_bcast, ETH_ALEN); + ext_join_params->assoc.chanspec_num = chan_cnt; + if (chan_cnt) { + if (cfg->channel) { + /* + * Use the channel provided by userspace + */ + u16 channel, band, bw, ctl_sb; + chanspec_t chspec; + channel = cfg->channel; + band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G + : WL_CHANSPEC_BAND_5G; + + /* Get min_bw set for the interface */ + bw = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx); + if (bw == INVCHANSPEC) { + WL_ERR(("Invalid chanspec \n")); + kfree(ext_join_params); + return BCME_ERROR; + } + + ctl_sb = WL_CHANSPEC_CTL_SB_NONE; + chspec = (channel | band | bw | ctl_sb); + ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + ext_join_params->assoc.chanspec_list[0] |= chspec; + ext_join_params->assoc.chanspec_list[0] = + wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]); + } + } + ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num); + if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { + WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID, + ext_join_params->ssid.SSID_len)); + } + wl_set_drv_status(cfg, CONNECTING, dev); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + kfree(ext_join_params); + return BCME_ERROR; + } + err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + + if (cfg->rcc_enabled) { + WL_ERR(("Connecting with" MACDBG " ssid \"%s\", len (%d) with rcc channels \n\n", + MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), + ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len)); + } else { + WL_ERR(("Connecting with" MACDBG " ssid \"%s\", len (%d) channel=%d\n\n", + MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), + ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel)); + } + + kfree(ext_join_params); + if (err) { + wl_clr_drv_status(cfg, CONNECTING, dev); + if (err == BCME_UNSUPPORTED) { + WL_DBG(("join iovar is not supported\n")); + goto set_ssid; + } else { + WL_ERR(("error (%d)\n", err)); + goto exit; + } + } else + goto exit; + +set_ssid: + memset(&join_params, 0, sizeof(join_params)); + join_params_size = sizeof(join_params.ssid); + + join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len); + memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len); + join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len); + wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID); + if (sme->bssid) + memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN); + else + memcpy(&join_params.params.bssid, ðer_bcast, ETH_ALEN); + + if (wl_ch_to_chanspec(dev, cfg->channel, &join_params, &join_params_size) < 0) { + WL_ERR(("Invalid chanspec\n")); + return -EINVAL; + } + + WL_DBG(("join_param_size %zu\n", join_params_size)); + + if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { + WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID, + join_params.ssid.SSID_len)); + } + wl_set_drv_status(cfg, CONNECTING, dev); + err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true); + if (err) { + WL_ERR(("error (%d)\n", err)); + 
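/* WLC_SET_SSID fallback failed: clear the CONNECTING flag before returning the error */ +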
wl_clr_drv_status(cfg, CONNECTING, dev); + } +exit: + return err; +} + +static s32 +wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + scb_val_t scbval; + bool act = false; + s32 err = 0; + u8 *curbssid; +#ifdef CUSTOM_SET_CPUCORE + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* CUSTOM_SET_CPUCORE */ + WL_ERR(("Reason %d\n", reason_code)); + RETURN_EIO_IF_NOT_UP(cfg); + act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT); + curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID); +#ifdef ESCAN_RESULT_PATCH + if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid && + (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) { + WL_ERR(("Disconnecting from connecting device: " MACDBG "\n", + MAC2STRDBG(curbssid))); + act = true; + } +#endif /* ESCAN_RESULT_PATCH */ + + if (act) { + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. + */ +#if !defined(ESCAN_RESULT_PATCH) + /* Let scan aborted by F/W */ + if (cfg->scan_request) { + WL_TRACE_HW4(("Aborting the scan! \n")); + wl_notify_escan_complete(cfg, dev, true, true); + } +#endif /* ESCAN_RESULT_PATCH */ + if (wl_get_drv_status(cfg, CONNECTING, dev) || + wl_get_drv_status(cfg, CONNECTED, dev)) { + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = reason_code; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } +#if defined(BCM4358_CHIP) + WL_ERR(("Wait for complete of disconnecting \n")); + OSL_SLEEP(200); +#endif /* BCM4358_CHIP */ + } + } +#ifdef CUSTOM_SET_CPUCORE + /* set default cpucore */ + if (dev == bcmcfg_to_prmry_ndev(cfg)) { + dhd->chan_isvht80 &= ~DHD_FLAG_STA_MODE; + if (!(dhd->chan_isvht80)) + dhd_set_cpucore(dhd, FALSE); + } +#endif /* CUSTOM_SET_CPUCORE */ + + return err; +} + +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, s32 mbm) +#else +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, s32 dbm) +#endif /* WL_CFG80211_P2P_DEV_IF */ +{ + + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; +#if defined(WL_CFG80211_P2P_DEV_IF) + s32 dbm = MBM_TO_DBM(mbm); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \ + defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES) + dbm = MBM_TO_DBM(dbm); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + RETURN_EIO_IF_NOT_UP(cfg); + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + break; + case NL80211_TX_POWER_LIMITED: + if (dbm < 0) { + WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n")); + return -EINVAL; + } + break; + case NL80211_TX_POWER_FIXED: + if (dbm < 0) { + WL_ERR(("TX_POWER_FIXED - dbm is negative..\n")); + return -EINVAL; + } + break; + } + + err = wl_set_tx_power(ndev, type, dbm); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + + cfg->conf->tx_power = dbm; + + return err; +} + +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, + struct wireless_dev *wdev, s32 *dbm) +#else +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) +#endif /* WL_CFG80211_P2P_DEV_IF */ +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); 
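+ /* the tx power query is always issued on the primary interface */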
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; + + RETURN_EIO_IF_NOT_UP(cfg); + err = wl_get_tx_power(ndev, dbm); + if (unlikely(err)) + WL_ERR(("error (%d)\n", err)); + + return err; +} + +static s32 +wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool unicast, bool multicast) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + u32 index; + s32 wsec; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + WL_DBG(("key index (%d)\n", key_idx)); + RETURN_EIO_IF_NOT_UP(cfg); + err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); + return err; + } + if (wsec == WEP_ENABLED) { + /* Just select a new current key */ + index = (u32) key_idx; + index = htod32(index); + err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index, + sizeof(index), true); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + } + } + return err; +} + +static s32 +wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, struct key_params *params) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct wl_wsec_key key; + s32 err = 0; + s32 bssidx; + s32 mode = wl_get_mode_by_netdev(cfg, dev); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + memset(&key, 0, sizeof(key)); + key.index = (u32) key_idx; + + if (!ETHER_ISMULTI(mac_addr)) + memcpy((char *)&key.ea, (const void *)mac_addr, ETHER_ADDR_LEN); + key.len = (u32) params->key_len; + + /* check for key index change */ + if (key.len == 0) { + /* key delete */ + swap_key_from_BE(&key); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("key delete error (%d)\n", err)); + return err; + } + } else { + if (key.len > sizeof(key.data)) { + WL_ERR(("Invalid key length (%d)\n", key.len)); + return -EINVAL; + } + WL_DBG(("Setting the key index %d\n", key.index)); + memcpy(key.data, params->key, key.len); + + if ((mode == WL_MODE_BSS) && + (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { + u8 keybuf[8]; + memcpy(keybuf, &key.data[24], sizeof(keybuf)); + memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); + memcpy(&key.data[16], keybuf, sizeof(keybuf)); + } + + /* if IW_ENCODE_EXT_RX_SEQ_VALID set */ + if (params->seq && params->seq_len == 6) { + /* rx iv */ + u8 *ivptr; + ivptr = (u8 *) params->seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = true; + } + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); + break; + case WLAN_CIPHER_SUITE_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + key.algo = CRYPTO_ALGO_AES_CCM; + WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); + break; + case WLAN_CIPHER_SUITE_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); + break; + default: + WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); + 
return -EINVAL; + } + swap_key_from_BE(&key); + /* need to guarantee EAPOL 4/4 send out before set key */ + dhd_wait_pend8021x(dev); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + } + return err; +} + +int +wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable) +{ + int err; + wl_eventmsg_buf_t ev_buf; + + if (dev != bcmcfg_to_prmry_ndev(g_bcm_cfg)) { + /* roam offload is only for the primary device */ + return -1; + } + err = wldev_iovar_setint(dev, "roam_offload", enable); + if (err) + return err; + + if (enable) { + err = wldev_iovar_setint(dev, "sup_wpa_tmo", IDSUP_4WAY_HANDSHAKE_TIMEOUT); + if (err) { + WL_INFORM(("Setting 'sup_wpa_tmo' failed, err=%d\n", err)); + } + } + + bzero(&ev_buf, sizeof(wl_eventmsg_buf_t)); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable); + err = wl_cfg80211_apply_eventbuffer(dev, g_bcm_cfg, &ev_buf); + if (!err) { + g_bcm_cfg->roam_offload = enable; + } + return err; +} + +#if defined(WL_VIRTUAL_APSTA) +int +wl_cfg80211_interface_create(struct net_device *dev, char *name) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + bcm_struct_cfgdev *new_cfgdev; + + new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy, + NL80211_IFTYPE_STATION, NULL, name); + if (!new_cfgdev) { + return BCME_ERROR; + } + else { + WL_DBG(("Iface %s created successfuly\n", name)); + return BCME_OK; + } +} + +int +wl_cfg80211_interface_delete(struct net_device *dev, char *name) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *iter, *next; + int err = BCME_ERROR; + + if (name == NULL) { + return BCME_ERROR; + } + + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (strcmp(iter->ndev->name, name) == 0) { + err = wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev); + break; + } + } + } + if (!err) { + WL_DBG(("Iface %s deleted successfuly", name)); + } + return err; +} +#endif /* defined (WL_VIRTUAL_APSTA) */ + +static s32 +wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + struct key_params *params) +{ + struct wl_wsec_key key; + s32 val = 0; + s32 wsec = 0; + s32 err = 0; + u8 keybuf[8]; + s32 bssidx = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 mode = wl_get_mode_by_netdev(cfg, dev); + WL_DBG(("key index (%d)\n", key_idx)); + RETURN_EIO_IF_NOT_UP(cfg); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (mac_addr && + ((params->cipher != WLAN_CIPHER_SUITE_WEP40) && + (params->cipher != WLAN_CIPHER_SUITE_WEP104))) { + wl_add_keyext(wiphy, dev, key_idx, mac_addr, params); + goto exit; + } + memset(&key, 0, sizeof(key)); + /* Clear any buffered wep key */ + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + + key.len = (u32) params->key_len; + key.index = (u32) key_idx; + + if (unlikely(key.len > sizeof(key.data))) { + WL_ERR(("Too long key length (%u)\n", key.len)); + return -EINVAL; + } + memcpy(key.data, params->key, key.len); + + key.flags = WL_PRIMARY_KEY; + switch (params->cipher) { + 
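/* map the cfg80211 cipher suite onto the firmware crypto algorithm and the matching wsec bit */ +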
case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + val = WEP_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + val = WEP_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); + break; + case WLAN_CIPHER_SUITE_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + val = TKIP_ENABLED; + /* wpa_supplicant switches the third and fourth quarters of the TKIP key */ + if (mode == WL_MODE_BSS) { + bcopy(&key.data[24], keybuf, sizeof(keybuf)); + bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); + bcopy(keybuf, &key.data[16], sizeof(keybuf)); + } + WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + key.algo = CRYPTO_ALGO_AES_CCM; + val = AES_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); + break; + case WLAN_CIPHER_SUITE_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + val = AES_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); + break; + default: + WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); + return -EINVAL; + } + + /* Set the new key/index */ + if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) { + WL_ERR(("IBSS KEY setted\n")); + wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE); + } + swap_key_from_BE(&key); + if ((params->cipher == WLAN_CIPHER_SUITE_WEP40) || + (params->cipher == WLAN_CIPHER_SUITE_WEP104)) { + /* + * For AP role, since we are doing a wl down before bringing up AP, + * the plumbed keys will be lost. So for AP once we bring up AP, we + * need to plumb keys again. So buffer the keys for future use. This + * is more like a WAR. If firmware later has the capability to do + * interface upgrade without doing a "wl down" and "wl apsta 0", then + * this will not be required. + */ + WL_DBG(("Buffering WEP Keys \n")); + memcpy(&cfg->wep_key, &key, sizeof(struct wl_wsec_key)); + } + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + +exit: + err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("get wsec error (%d)\n", err)); + return err; + } + + wsec |= val; + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("set wsec error (%d)\n", err)); + return err; + } + + return err; +} + +static s32 +wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr) +{ + struct wl_wsec_key key; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + s32 bssidx; + + WL_DBG(("Enter. 
key_idx: %d\n", key_idx));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+
+ if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+ return -EINVAL;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+ memset(&key, 0, sizeof(key));
+
+ key.flags = WL_PRIMARY_KEY;
+ key.algo = CRYPTO_ALGO_OFF;
+ key.index = (u32) key_idx;
+
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ if (err == -EINVAL) {
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ WL_DBG(("invalid key index (%d)\n", key_idx));
+ }
+ } else {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ }
+ return err;
+ }
+ return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie, struct key_params * params))
+{
+ struct key_params params;
+ struct wl_wsec_key key;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx;
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+ return BCME_ERROR;
+ }
+ WL_DBG(("key index (%d)\n", key_idx));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ memset(&key, 0, sizeof(key));
+ key.index = key_idx;
+ swap_key_to_BE(&key);
+ memset(&params, 0, sizeof(params));
+ params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+ memcpy((void *)params.key, key.data, params.key_len);
+
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ switch (WSEC_ENABLED(wsec)) {
+ case WEP_ENABLED:
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ }
+ break;
+ case TKIP_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case AES_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ default:
+ WL_ERR(("Invalid algo (0x%x)\n", wsec));
+ return -EINVAL;
+ }
+
+ callback(cookie, &params);
+ return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx)
+{
+ WL_INFORM(("Not supported\n"));
+ return -EOPNOTSUPP;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
+#else
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+#endif
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ scb_val_t scb_val;
+ s32 rssi;
+ s32 rate;
+ s32 err = 0;
+ sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool fw_assoc_state = FALSE;
+ u32 dhd_assoc_state = 0;
+ RETURN_EIO_IF_NOT_UP(cfg);
+ if
(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { + err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac, + ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("GET STA INFO failed, %d\n", err)); + return err; + } + sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME); + sta = (sta_info_t *)cfg->ioctl_buf; + sta->len = dtoh16(sta->len); + sta->cap = dtoh16(sta->cap); + sta->flags = dtoh32(sta->flags); + sta->idle = dtoh32(sta->idle); + sta->in = dtoh32(sta->in); + sinfo->inactive_time = sta->idle * 1000; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (sta->flags & WL_STA_ASSOC) { + sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME); + sinfo->connected_time = sta->in; + } + WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n", + bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time, + sta->idle * 1000)); +#endif + } else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS || + wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) { + get_pktcnt_t pktcnt; + u8 *curmacp; + + if (cfg->roam_offload) { + struct ether_addr bssid; + err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + if (err) { + WL_ERR(("Failed to get current BSSID\n")); + } else { + if (!ETHER_ISNULLADDR(&bssid.octet) && + memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) { + /* roaming is detected */ + err = wl_cfg80211_delayed_roam(cfg, dev, &bssid); + if (err) + WL_ERR(("Failed to handle the delayed roam, " + "err=%d", err)); + mac = (u8 *)bssid.octet; + } + } + } + dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev); + fw_assoc_state = dhd_is_associated(dhd, 0, &err); + if (!dhd_assoc_state || !fw_assoc_state) { + WL_ERR(("NOT assoc\n")); + if (err == -ERESTARTSYS) + return err; + if (!dhd_assoc_state) { + WL_TRACE_HW4(("drv state is not connected \n")); + } + if (!fw_assoc_state) { + WL_TRACE_HW4(("fw state is not associated \n")); + } + /* Disconnect due to fw is not associated for FW_ASSOC_WATCHDOG_TIME ms. + * 'err == 0' of dhd_is_associated() and '!fw_assoc_state' + * means that BSSID is null. + */ + if (dhd_assoc_state && !fw_assoc_state && !err) { + if (!fw_assoc_watchdog_started) { + fw_assoc_watchdog_ms = OSL_SYSUPTIME(); + fw_assoc_watchdog_started = TRUE; + WL_TRACE_HW4(("fw_assoc_watchdog_started \n")); + } else { + if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms > + FW_ASSOC_WATCHDOG_TIME) { + fw_assoc_watchdog_started = FALSE; + err = -ENODEV; + WL_TRACE_HW4(("fw is not associated for %d ms \n", + (OSL_SYSUPTIME() - fw_assoc_watchdog_ms))); + goto get_station_err; + } + } + } + err = -ENODEV; + return err; + } + fw_assoc_watchdog_started = FALSE; + curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID); + if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) { + WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n", + MAC2STRDBG(mac), MAC2STRDBG(curmacp))); + } + + /* Report the current tx rate */ + err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false); + if (err) { + WL_ERR(("Could not get rate (%d)\n", err)); + } else { +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) + int rxpktglom; +#endif + rate = dtoh32(rate); + sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE); + sinfo->txrate.legacy = rate * 5; + WL_DBG(("Rate %d Mbps\n", (rate / 2))); +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) + rxpktglom = ((rate/2) > 150) ? 
20 : 10; + + if (maxrxpktglom != rxpktglom) { + maxrxpktglom = rxpktglom; + WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2), + maxrxpktglom)); + err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom", + (char*)&maxrxpktglom, 4, cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, NULL); + if (err < 0) { + WL_ERR(("set bus:maxtxpktglom failed, %d\n", err)); + } + } +#endif + } + + memset(&scb_val, 0, sizeof(scb_val)); + scb_val.val = 0; + err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, + sizeof(scb_val_t), false); + if (err) { + WL_ERR(("Could not get rssi (%d)\n", err)); + goto get_station_err; + } + rssi = wl_rssi_offset(dtoh32(scb_val.val)); + sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL); + sinfo->signal = rssi; + WL_DBG(("RSSI %d dBm\n", rssi)); + err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt, + sizeof(pktcnt), false); + if (!err) { + sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) | + STA_INFO_BIT(INFO_RX_DROP_MISC) | + STA_INFO_BIT(INFO_TX_PACKETS) | + STA_INFO_BIT(INFO_TX_FAILED)); + sinfo->rx_packets = pktcnt.rx_good_pkt; + sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt; + sinfo->tx_packets = pktcnt.tx_good_pkt; + sinfo->tx_failed = pktcnt.tx_bad_pkt; + } +get_station_err: + if (err && (err != -ERESTARTSYS)) { + /* Disconnect due to zero BSSID or error to get RSSI */ + WL_ERR(("force cfg80211_disconnected: %d\n", err)); + wl_clr_drv_status(cfg, CONNECTED, dev); + CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL); + wl_link_down(cfg); + } + } + else { + WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev))); + } + + return err; +} + +static s32 +wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, s32 timeout) +{ + s32 pm; + s32 err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev); + + RETURN_EIO_IF_NOT_UP(cfg); + WL_DBG(("Enter\n")); + if (cfg->p2p_net == dev || _net_info == NULL || + !wl_get_drv_status(cfg, CONNECTED, dev) || + (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_BSS && + wl_get_mode_by_netdev(cfg, dev) != WL_MODE_IBSS)) { + return err; + } + /* Enlarge pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_LONG); + + pm = enabled ? PM_FAST : PM_OFF; + if (_net_info->pm_block) { + WL_ERR(("%s:Do not enable the power save for pm_block %d\n", + dev->name, _net_info->pm_block)); + pm = PM_OFF; + } + pm = htod32(pm); + WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled"))); + err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true); + if (unlikely(err)) { + if (err == -ENODEV) + WL_DBG(("net_device is not ready yet\n")); + else + WL_ERR(("error (%d)\n", err)); + return err; + } + wl_cfg80211_update_power_mode(dev); + return err; +} + +void wl_cfg80211_update_power_mode(struct net_device *dev) +{ + int err, pm = -1; + + err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), true); + if (err) + WL_ERR(("%s:error (%d)\n", __FUNCTION__, err)); + else if (pm != -1 && dev->ieee80211_ptr) + dev->ieee80211_ptr->ps = (pm == PM_OFF) ? 
false : true; +} + +void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (strcmp(command, "SCAN-ACTIVE") == 0) { + cfg->active_scan = 1; + } else if (strcmp(command, "SCAN-PASSIVE") == 0) { + cfg->active_scan = 0; + } else + WL_ERR(("Unknown command \n")); +} + +static __used u32 wl_find_msb(u16 bit16) +{ + u32 ret = 0; + + if (bit16 & 0xff00) { + ret += 8; + bit16 >>= 8; + } + + if (bit16 & 0xf0) { + ret += 4; + bit16 >>= 4; + } + + if (bit16 & 0xc) { + ret += 2; + bit16 >>= 2; + } + + if (bit16 & 2) + ret += bit16 & 2; + else if (bit16) + ret += bit16; + + return ret; +} + +static s32 wl_cfg80211_resume(struct wiphy *wiphy) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = BCME_OK; + + if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) { + WL_INFORM(("device is not ready\n")); + return err; + } + + return err; +} + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) +static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) +#else +static s32 wl_cfg80211_suspend(struct wiphy *wiphy) +#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELES */ +{ + s32 err = BCME_OK; +#ifdef DHD_CLEAR_ON_SUSPEND + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_info *iter, *next; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct cfg80211_scan_info info = {}; + unsigned long flags; + if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) { + WL_INFORM(("device is not ready : status (%d)\n", + (int)cfg->status)); + return err; + } + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface doesn't have a ndev associated with it (for kernel > 3.8) */ + if (iter->ndev) + wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + if (cfg->scan_request) { + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + } + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + wl_clr_drv_status(cfg, SCANNING, iter->ndev); + wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) { + wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false); + } + } + } +#endif /* DHD_CLEAR_ON_SUSPEND */ + + + return err; +} + +static s32 +wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list, + s32 err) +{ + int i, j; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg); + + if (!pmk_list) { + WL_INFORM(("pmk_list is NULL\n")); + return -EINVAL; + } + /* pmk list is supported only for STA interface i.e. 
primary interface + * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init + */ + if (primary_dev != dev) { + WL_INFORM(("Not supporting Flushing pmklist on virtual" + " interfaces than primary interface\n")); + return err; + } + + WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid)); + for (i = 0; i < pmk_list->pmkids.npmkid; i++) { + WL_DBG(("PMKID[%d]: %pM =\n", i, + &pmk_list->pmkids.pmkid[i].BSSID)); + for (j = 0; j < WPA2_PMKID_LEN; j++) { + WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j])); + } + } + if (likely(!err)) { + err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list, + sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + } + + return err; +} + +static s32 +wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + int i; + + RETURN_EIO_IF_NOT_UP(cfg); + for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++) + if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + if (i < WL_NUM_PMKIDS_MAX) { + memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid, + ETHER_ADDR_LEN); + memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid, + WPA2_PMKID_LEN); + if (i == cfg->pmk_list->pmkids.npmkid) + cfg->pmk_list->pmkids.npmkid++; + } else { + err = -EINVAL; + } + WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", + &cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID)); + for (i = 0; i < WPA2_PMKID_LEN; i++) { + WL_DBG(("%02x\n", + cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1]. + PMKID[i])); + } + + err = wl_update_pmklist(dev, cfg->pmk_list, err); + + return err; +} + +static s32 +wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + struct _pmkid_list pmkid = {.npmkid = 0}; + s32 err = 0; + int i; + + RETURN_EIO_IF_NOT_UP(cfg); + memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN); + memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN); + + WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", + &pmkid.pmkid[0].BSSID)); + for (i = 0; i < WPA2_PMKID_LEN; i++) { + WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i])); + } + + for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++) + if (!memcmp + (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + + if ((cfg->pmk_list->pmkids.npmkid > 0) && + (i < cfg->pmk_list->pmkids.npmkid)) { + memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t)); + for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) { + memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, + &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID, + ETHER_ADDR_LEN); + memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, + &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID, + WPA2_PMKID_LEN); + } + cfg->pmk_list->pmkids.npmkid--; + } else { + err = -EINVAL; + } + + err = wl_update_pmklist(dev, cfg->pmk_list, err); + + return err; + +} + +static s32 +wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + RETURN_EIO_IF_NOT_UP(cfg); + memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list)); + err = wl_update_pmklist(dev, cfg->pmk_list, err); + return err; + +} + +static wl_scan_params_t * +wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size) +{ + wl_scan_params_t *params; + int params_size; + int num_chans; + int bssidx = 0; + + *out_params_size = 0; + + /* Our scan params only 
need space for 1 channel and 0 ssids */
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+ return params;
+ }
+ memset(params, 0, params_size);
+ params->nprobes = nprobes;
+
+ num_chans = (channel == 0) ? 0 : 1;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(10);
+ if (channel == -1)
+ params->channel_list[0] = htodchanspec(channel);
+ else
+ params->channel_list[0] = wl_ch_host_to_driver(bssidx, channel);
+
+ /* Our scan params have 1 channel and 0 ssids */
+ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ *out_params_size = params_size; /* rtn size to the caller */
+ return params;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel * channel,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+ s32 target_channel;
+ s32 err = BCME_OK;
+ struct ether_addr primary_mac;
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
+ ieee80211_frequency_to_channel(channel->center_freq),
+ duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
+
+ if (!cfg->p2p) {
+ WL_ERR(("cfg->p2p is not initialized\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+ return -EAGAIN;
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ }
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+ target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+ memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+ cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
+ *cookie = wl_cfg80211_get_new_roc_id(cfg);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+ struct timer_list *_timer;
+ WL_DBG(("scan is running.
go to fake listen state\n")); + + if (duration > LONG_LISTEN_TIME) { + wl_cfg80211_scan_abort(cfg); + } else { + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); + + if (timer_pending(&cfg->p2p->listen_timer)) { + WL_DBG(("cancel current listen timer \n")); + del_timer_sync(&cfg->p2p->listen_timer); + } + + _timer = &cfg->p2p->listen_timer; + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); + + cfg->p2p->bcm_cfg = cfg; + INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0); + + err = BCME_OK; + goto exit; + } + } +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +#ifdef WL_CFG80211_SYNC_GON + if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) { + /* do not enter listen mode again if we are in listen mode already for next af. + * remain on channel completion will be returned by waiting next af completion. + */ +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); +#else + wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + goto exit; + } +#endif /* WL_CFG80211_SYNC_GON */ + if (cfg->p2p && !cfg->p2p->on) { + /* In case of p2p_listen command, supplicant send remain_on_channel + * without turning on P2P + */ + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + p2p_on(cfg) = true; + } + + if (p2p_is_on(cfg)) { + err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0); + if (unlikely(err)) { + goto exit; + } +#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); +#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + err = wl_cfgp2p_discover_listen(cfg, target_channel, duration); + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + if (err == BCME_OK) { + wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); + } else { + /* if failed, firmware may be internal scanning state. 
+ * so other scan request shall not abort it + */ + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); + } +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + /* WAR: set err = ok to prevent cookie mismatch in wpa_supplicant + * and expire timer will send a completion to the upper layer + */ + err = BCME_OK; + } + +exit: + if (err == BCME_OK) { + WL_INFORM(("Success\n")); +#if defined(WL_CFG80211_P2P_DEV_IF) + cfg80211_ready_on_channel(cfgdev, *cookie, channel, + duration, GFP_KERNEL); +#else + cfg80211_ready_on_channel(cfgdev, *cookie, channel, + channel_type, duration, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ + } else { + WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie)); + } + return err; +} + +static s32 +wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, + bcm_struct_cfgdev *cfgdev, u64 cookie) +{ + s32 err = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + +#ifdef P2PLISTEN_AP_SAMECHN + struct net_device *dev; +#endif /* P2PLISTEN_AP_SAMECHN */ + + RETURN_EIO_IF_NOT_UP(cfg); +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { + WL_DBG((" enter ) on P2P dedicated discover interface\n")); + } +#else + WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex)); +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#ifdef P2PLISTEN_AP_SAMECHN + if (cfg && cfg->p2p_resp_apchn_status) { + dev = bcmcfg_to_prmry_ndev(cfg); + wl_cfg80211_set_p2p_resp_ap_chn(dev, 0); + cfg->p2p_resp_apchn_status = false; + WL_DBG(("p2p_resp_apchn_status Turn OFF \n")); + } +#endif /* P2PLISTEN_AP_SAMECHN */ + + if (cfg->last_roc_id == cookie) { + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } else { + WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n", + __FUNCTION__, cookie, cfg->last_roc_id)); + } + + return err; +} + +static void +wl_cfg80211_afx_handler(struct work_struct *work) +{ + struct afx_hdl *afx_instance; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 ret = BCME_OK; + + BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work); + if (afx_instance != NULL && cfg->afx_hdl->is_active) { + if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) { + ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan, + (100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */ + } else { + ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev, + cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan, + NULL); + } + if (unlikely(ret != BCME_OK)) { + WL_ERR(("ERROR occurred! returned value is (%d)\n", ret)); + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) + complete(&cfg->act_frm_scan); + } + } +} + +static s32 +wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev) +{ + u32 max_retry = WL_CHANNEL_SYNC_RETRY; + bool is_p2p_gas = false; + + if (dev == NULL) + return -1; + + WL_DBG((" enter ) \n")); + + wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev); + cfg->afx_hdl->is_active = TRUE; + + if (cfg->afx_hdl->pending_tx_act_frm) { + wl_action_frame_t *action_frame; + action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame); + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) + is_p2p_gas = true; + } + + /* Loop to wait until we find a peer's channel or the + * pending action frame tx is cancelled. 
+ */ + while ((cfg->afx_hdl->retry < max_retry) && + (cfg->afx_hdl->peer_chan == WL_INVALID)) { + cfg->afx_hdl->is_listen = FALSE; + wl_set_drv_status(cfg, SCANNING, dev); + WL_DBG(("Scheduling the action frame for sending.. retry %d\n", + cfg->afx_hdl->retry)); + /* search peer on peer's listen channel */ + schedule_work(&cfg->afx_hdl->work); + wait_for_completion_timeout(&cfg->act_frm_scan, + msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX)); + + if ((cfg->afx_hdl->peer_chan != WL_INVALID) || + !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev))) + break; + + if (is_p2p_gas) + break; + + if (cfg->afx_hdl->my_listen_chan) { + WL_DBG(("Scheduling Listen peer in my listen channel = %d\n", + cfg->afx_hdl->my_listen_chan)); + /* listen on my listen channel */ + cfg->afx_hdl->is_listen = TRUE; + schedule_work(&cfg->afx_hdl->work); + wait_for_completion_timeout(&cfg->act_frm_scan, + msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX)); + } + if ((cfg->afx_hdl->peer_chan != WL_INVALID) || + !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev))) + break; + + cfg->afx_hdl->retry++; + + WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg); + } + + cfg->afx_hdl->is_active = FALSE; + + wl_clr_drv_status(cfg, SCANNING, dev); + wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev); + + return (cfg->afx_hdl->peer_chan); +} + +struct p2p_config_af_params { + s32 max_tx_retry; /* max tx retry count if tx no ack */ + /* To make sure to send successfully action frame, we have to turn off mpc + * 0: off, 1: on, (-1): do nothing + */ + s32 mpc_onoff; +#ifdef WL_CFG80211_SYNC_GON + bool extra_listen; +#endif + bool search_channel; /* 1: search peer's channel to send af */ +}; + +static s32 +wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy, + wl_action_frame_t *action_frame, wl_af_params_t *af_params, + struct p2p_config_af_params *config_af_params) +{ + s32 err = BCME_OK; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + wifi_p2p_pub_act_frame_t *act_frm = + (wifi_p2p_pub_act_frame_t *) (action_frame->data); + + /* initialize default value */ +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = true; +#endif + config_af_params->search_channel = false; + config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY; + config_af_params->mpc_onoff = -1; + cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID; + + switch (act_frm->subtype) { + case P2P_PAF_GON_REQ: { + WL_DBG(("P2P: GO_NEG_PHASE status set \n")); + wl_set_p2p_status(cfg, GO_NEG_PHASE); + + config_af_params->mpc_onoff = 0; + config_af_params->search_channel = true; + cfg->next_af_subtype = act_frm->subtype + 1; + + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = WL_MED_DWELL_TIME; + + break; + } + case P2P_PAF_GON_RSP: { + cfg->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for CONF frame */ + af_params->dwell_time = WL_MED_DWELL_TIME + 100; + break; + } + case P2P_PAF_GON_CONF: { + /* If we reached till GO Neg confirmation reset the filter */ + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + + /* turn on mpc again if go nego is done */ + config_af_params->mpc_onoff = 1; + + /* minimize dwell time */ + af_params->dwell_time = WL_MIN_DWELL_TIME; + +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + } + case P2P_PAF_INVITE_REQ: { + config_af_params->search_channel = true; + cfg->next_af_subtype = act_frm->subtype + 1; + + /* increase dwell time */ + af_params->dwell_time = WL_MED_DWELL_TIME; + break; + } + case 
P2P_PAF_INVITE_RSP: + /* minimize dwell time */ + af_params->dwell_time = WL_MIN_DWELL_TIME; +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + case P2P_PAF_DEVDIS_REQ: { + if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0], + action_frame->len)) { + config_af_params->search_channel = true; + } + + cfg->next_af_subtype = act_frm->subtype + 1; + /* maximize dwell time to wait for RESP frame */ + af_params->dwell_time = WL_LONG_DWELL_TIME; + break; + } + case P2P_PAF_DEVDIS_RSP: + /* minimize dwell time */ + af_params->dwell_time = WL_MIN_DWELL_TIME; +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + case P2P_PAF_PROVDIS_REQ: { + if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0], + action_frame->len)) { + config_af_params->search_channel = true; + } + + config_af_params->mpc_onoff = 0; + cfg->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = WL_MED_DWELL_TIME; + break; + } + case P2P_PAF_PROVDIS_RSP: { + cfg->next_af_subtype = P2P_PAF_GON_REQ; + af_params->dwell_time = WL_MIN_DWELL_TIME; +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + } + default: + WL_DBG(("Unknown p2p pub act frame subtype: %d\n", + act_frm->subtype)); + err = BCME_BADARG; + } + return err; +} + +#ifdef WL11U +static bool +wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params, + void *frame, u16 frame_len) +{ + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; + bool result = false; + s32 i; + chanspec_t chanspec; + + /* If DFS channel is 52~148, check to block it or not */ + if (af_params && + (af_params->channel >= 52 && af_params->channel <= 148)) { + if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) { + bss_list = cfg->bss_list; + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + chanspec = wl_chspec_driver_to_host(bi->chanspec); + if (CHSPEC_IS5G(chanspec) && + ((bi->ctl_ch ? 
bi->ctl_ch : CHSPEC_CHANNEL(chanspec)) + == af_params->channel)) { + result = true; /* do not block the action frame */ + break; + } + } + } + } + else { + result = true; + } + + WL_DBG(("result=%s", result?"true":"false")); + return result; +} +#endif /* WL11U */ +static bool +wl_cfg80211_check_dwell_overflow(int32 requested_dwell, ulong dwell_jiffies) +{ + if ((requested_dwell & CUSTOM_RETRY_MASK) && + (jiffies_to_msecs(jiffies - dwell_jiffies) > + (requested_dwell & ~CUSTOM_RETRY_MASK))) { + WL_ERR(("Action frame TX retry time over dwell time!\n")); + return true; + } + return false; +} + +static bool +wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, + bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params, + wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx) +{ +#ifdef WL11U + struct net_device *ndev = NULL; +#endif /* WL11U */ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + bool ack = false; + u8 category, action; + s32 tx_retry; + struct p2p_config_af_params config_af_params; + struct net_info *netinfo; +#ifdef VSDB + ulong off_chan_started_jiffies = 0; +#endif + ulong dwell_jiffies = 0; + bool dwell_overflow = false; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + int32 requested_dwell = af_params->dwell_time; + + /* Add the default dwell time + * Dwell time to stay off-channel to wait for a response action frame + * after transmitting an GO Negotiation action frame + */ + af_params->dwell_time = WL_DWELL_TIME; + +#ifdef WL11U +#if defined(WL_CFG80211_P2P_DEV_IF) + ndev = dev; +#else + ndev = ndev_to_cfgdev(cfgdev); +#endif /* WL_CFG80211_P2P_DEV_IF */ +#endif /* WL11U */ + + category = action_frame->data[DOT11_ACTION_CAT_OFF]; + action = action_frame->data[DOT11_ACTION_ACT_OFF]; + + /* initialize variables */ + tx_retry = 0; + cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID; + config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY; + config_af_params.mpc_onoff = -1; + config_af_params.search_channel = false; +#ifdef WL_CFG80211_SYNC_GON + config_af_params.extra_listen = false; +#endif + + /* config parameters */ + /* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */ + if (category == DOT11_ACTION_CAT_PUBLIC) { + if ((action == P2P_PUB_AF_ACTION) && + (action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) { + /* p2p public action frame process */ + if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy, + action_frame, af_params, &config_af_params)) { + WL_DBG(("Unknown subtype.\n")); + } + + } else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) { + /* service discovery process */ + if (action == P2PSD_ACTION_ID_GAS_IREQ || + action == P2PSD_ACTION_ID_GAS_CREQ) { + /* configure service discovery query frame */ + + config_af_params.search_channel = true; + + /* save next af suptype to cancel remained dwell time */ + cfg->next_af_subtype = action + 1; + + af_params->dwell_time = WL_MED_DWELL_TIME; + if (requested_dwell & CUSTOM_RETRY_MASK) { + config_af_params.max_tx_retry = + (requested_dwell & CUSTOM_RETRY_MASK) >> 24; + af_params->dwell_time = + (requested_dwell & ~CUSTOM_RETRY_MASK); + WL_DBG(("Custom retry(%d) and dwell time(%d) is set.\n", + config_af_params.max_tx_retry, + af_params->dwell_time)); + } + } else if (action == P2PSD_ACTION_ID_GAS_IRESP || + action == P2PSD_ACTION_ID_GAS_CRESP) { + /* configure service discovery response frame */ + af_params->dwell_time = WL_MIN_DWELL_TIME; + } else { + WL_DBG(("Unknown action type: %d\n", action)); + } + } else { + WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, 
length %d\n", + category, action, action_frame_len)); + } + } else if (category == P2P_AF_CATEGORY) { + /* do not configure anything. it will be sent with a default configuration */ + } else { + WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n", + category, action)); + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev); + return false; + } + } + + /* To make sure to send successfully action frame, we have to turn off mpc */ + if (config_af_params.mpc_onoff == 0) { + wldev_iovar_setint(dev, "mpc", 0); + } + + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + /* validate channel and p2p ies */ + if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) && + netinfo && netinfo->bss.ies.probe_req_ie_len) { + config_af_params.search_channel = true; + } else { + config_af_params.search_channel = false; + } +#ifdef WL11U + if (ndev == bcmcfg_to_prmry_ndev(cfg)) + config_af_params.search_channel = false; +#endif /* WL11U */ + +#ifdef VSDB + /* if connecting on primary iface, sleep for a while before sending af tx for VSDB */ + if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) { + OSL_SLEEP(50); + } +#endif + + /* if scan is ongoing, abort current scan. */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + + /* Abort P2P listen */ + if (discover_cfgdev(cfgdev, cfg)) { + if (cfg->p2p_supported && cfg->p2p) { + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } + } + +#ifdef WL11U + /* handling DFS channel exceptions */ + if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) { + return false; /* the action frame was blocked */ + } +#endif /* WL11U */ + + /* set status and destination address before sending af */ + if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { + /* set this status to cancel the remained dwell time in rx process */ + wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev); + } + wl_set_drv_status(cfg, SENDING_ACT_FRM, dev); + memcpy(cfg->afx_hdl->tx_dst_addr.octet, + af_params->action_frame.da.octet, + sizeof(cfg->afx_hdl->tx_dst_addr.octet)); + + /* save af_params for rx process */ + cfg->afx_hdl->pending_tx_act_frm = af_params; + + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) { + WL_DBG(("Set GAS action frame config.\n")); + config_af_params.search_channel = false; + config_af_params.max_tx_retry = 1; + } + + /* search peer's channel */ + if (config_af_params.search_channel) { + /* initialize afx_hdl */ + if ((cfg->afx_hdl->bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + goto exit; + } + cfg->afx_hdl->dev = dev; + cfg->afx_hdl->retry = 0; + cfg->afx_hdl->peer_chan = WL_INVALID; + + if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) { + WL_ERR(("couldn't find peer's channel.\n")); + wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, + af_params->channel); + goto exit; + } + + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + /* + * Abort scan even for VSDB scenarios. Scan gets aborted in firmware + * but after the check of piggyback algorithm. + * To take care of current piggback algo, lets abort the scan here itself. + */ + wl_notify_escan_complete(cfg, dev, true, true); + /* Suspend P2P discovery's search-listen to prevent it from + * starting a scan or changing the channel. 
+ */ + if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + goto exit; + } + + /* update channel */ + af_params->channel = cfg->afx_hdl->peer_chan; + } + +#ifdef VSDB + off_chan_started_jiffies = jiffies; +#endif /* VSDB */ + + wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel); + + wl_cfgp2p_need_wait_actfrmae(cfg, action_frame->data, action_frame->len, true); + + dwell_jiffies = jiffies; + /* Now send a tx action frame */ + ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true; + dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies); + + /* if failed, retry it. tx_retry_max value is configure by .... */ + while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry) && + !dwell_overflow) { +#ifdef VSDB + if (af_params->channel) { + if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) > + OFF_CHAN_TIME_THRESHOLD_MS) { + WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg); + off_chan_started_jiffies = jiffies; + } else + OSL_SLEEP(AF_RETRY_DELAY_TIME); + } +#endif /* VSDB */ + ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? + false : true; + dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies); + } + + if (ack == false) { + WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry)); + } + WL_DBG(("Complete to send action frame\n")); +exit: + /* Clear SENDING_ACT_FRM after all sending af is done */ + wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev); + +#ifdef WL_CFG80211_SYNC_GON + /* WAR: sometimes dongle does not keep the dwell time of 'actframe'. + * if we coundn't get the next action response frame and dongle does not keep + * the dwell time, go to listen state again to get next action response frame. + */ + if (ack && config_af_params.extra_listen && + wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) && + cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) { + s32 extar_listen_time; + + extar_listen_time = af_params->dwell_time - + jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies); + + if (extar_listen_time > 50) { + wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev); + WL_DBG(("Wait more time! actual af time:%d," + "calculated extar listen:%d\n", + af_params->dwell_time, extar_listen_time)); + if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel, + extar_listen_time + 100) == BCME_OK) { + wait_for_completion_timeout(&cfg->wait_next_af, + msecs_to_jiffies(extar_listen_time + 100 + 300)); + } + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev); + } + } +#endif /* WL_CFG80211_SYNC_GON */ + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev); + + if (cfg->afx_hdl->pending_tx_act_frm) + cfg->afx_hdl->pending_tx_act_frm = NULL; + + WL_INFORM(("-- sending Action Frame is %s, listen chan: %d\n", + (ack) ? 
"Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan)); + + + /* if all done, turn mpc on again */ + if (config_af_params.mpc_onoff == 1) { + wldev_iovar_setint(dev, "mpc", 1); + } + + return ack; +} + +#define MAX_NUM_OF_ASSOCIATED_DEV 64 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +static s32 +wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie) +#else +static s32 +wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, + struct ieee80211_channel *channel, bool offchan, +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)) + enum nl80211_channel_type channel_type, + bool channel_type_valid, +#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */ + unsigned int wait, const u8* buf, size_t len, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + bool no_cck, +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + bool dont_wait_for_ack, +#endif + u64 *cookie) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +{ + wl_action_frame_t *action_frame; + wl_af_params_t *af_params; + scb_val_t scb_val; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + struct ieee80211_channel *channel = params->chan; + const u8 *buf = params->buf; + size_t len = params->len; +#endif + const struct ieee80211_mgmt *mgmt; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *dev = NULL; + s32 err = BCME_OK; + s32 bssidx = 0; + u32 id; + bool ack = false; + s8 eabuf[ETHER_ADDR_STR_LEN]; + + WL_DBG(("Enter \n")); + + dev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (!dev) { + WL_ERR(("dev is NULL\n")); + return -EINVAL; + } + + /* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE) */ + if (discover_cfgdev(cfgdev, cfg)) { + if (!cfg->p2p_supported || !cfg->p2p) { + WL_ERR(("P2P doesn't setup completed yet\n")); + return -EINVAL; + } + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + else { + if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) { + WL_ERR(("Find p2p index failed\n")); + return BCME_ERROR; + } + } + + WL_DBG(("TX target bssidx=%d\n", bssidx)); + + if (p2p_is_on(cfg)) { + /* Suspend P2P discovery search-listen to prevent it from changing the + * channel. 
+ */ + if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + return -EFAULT; + } + } + *cookie = 0; + id = cfg->send_action_id++; + if (id == 0) + id = cfg->send_action_id++; + *cookie = id; + mgmt = (const struct ieee80211_mgmt *)buf; + if (ieee80211_is_mgmt(mgmt->frame_control)) { + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + s32 ie_len = len - ie_offset; + if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) { + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, (const u8 *)(buf + ie_offset), ie_len); + cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL); +#if defined(P2P_IE_MISSING_FIX) + if (!cfg->p2p_prb_noti) { + cfg->p2p_prb_noti = true; + WL_DBG(("%s: TX 802_1X Probe Response first time.\n", + __FUNCTION__)); + } +#endif + goto exit; + } else if (ieee80211_is_disassoc(mgmt->frame_control) || + ieee80211_is_deauth(mgmt->frame_control)) { + char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + int num_associated = 0; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + if (!bcmp((const uint8 *)BSSID_BROADCAST, + (const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) { + assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV; + err = wldev_ioctl(dev, WLC_GET_ASSOCLIST, + assoc_maclist, sizeof(mac_buf), false); + if (err < 0) + WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err)); + else + num_associated = assoc_maclist->count; + } + memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN); + scb_val.val = mgmt->u.disassoc.reason_code; + err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val, + sizeof(scb_val_t), true); + if (err < 0) + WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err)); + WL_ERR(("Disconnect STA : %s scb_val.val %d\n", + bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf), + scb_val.val)); + + if (num_associated > 0 && ETHER_ISBCAST(mgmt->da)) + wl_delay(400); + + cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL); + goto exit; + + } else if (ieee80211_is_action(mgmt->frame_control)) { + /* Abort the dwell time of any previous off-channel + * action frame that may be still in effect. Sending + * off-channel action frames relies on the driver's + * scan engine. If a previous off-channel action frame + * tx is still in progress (including the dwell time), + * then this new action frame will not be sent out. + */ +/* Do not abort scan for VSDB. Scan will be aborted in firmware if necessary. + * And previous off-channel action frame must be ended before new af tx. 
+ */ +#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_notify_escan_complete(cfg, dev, true, true); +#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + } + + } else { + WL_ERR(("Driver only allows MGMT packet type\n")); + goto exit; + } + + af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL); + + if (af_params == NULL) + { + WL_ERR(("unable to allocate frame\n")); + return -ENOMEM; + } + + action_frame = &af_params->action_frame; + + /* Add the packet Id */ + action_frame->packetId = *cookie; + WL_DBG(("action frame %d\n", action_frame->packetId)); + /* Add BSSID */ + memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN); + memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN); + + /* Add the length exepted for 802.11 header */ + action_frame->len = len - DOT11_MGMT_HDR_LEN; + WL_DBG(("action_frame->len: %d\n", action_frame->len)); + + /* Add the channel */ + af_params->channel = + ieee80211_frequency_to_channel(channel->center_freq); + /* Save listen_chan for searching common channel */ + cfg->afx_hdl->peer_listen_chan = af_params->channel; + WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + af_params->dwell_time = params->wait; +#else + af_params->dwell_time = wait; +#endif + + memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len); + + ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params, + action_frame, action_frame->len, bssidx); + cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL); + + kfree(af_params); +exit: + return err; +} + + +static void +wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, + u16 frame_type, bool reg) +{ + + WL_DBG(("frame_type: %x, reg: %d\n", frame_type, reg)); + + if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ)) + return; + + return; +} + + +static s32 +wl_cfg80211_change_bss(struct wiphy *wiphy, + struct net_device *dev, + struct bss_parameters *params) +{ + s32 err = 0; + s32 ap_isolate = 0; + + if (params->use_cts_prot >= 0) { + } + + if (params->use_short_preamble >= 0) { + } + + if (params->use_short_slot_time >= 0) { + } + + if (params->basic_rates) { + } + + if (params->ap_isolate >= 0) { + ap_isolate = params->ap_isolate; + err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate); + if (unlikely(err)) + { + WL_ERR(("set ap_isolate Error (%d)\n", err)); + } + } + + if (params->ht_opmode >= 0) { + } + + + return 0; +} + +static s32 +wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + s32 _chan; + chanspec_t chspec = 0; + chanspec_t fw_chspec = 0; + u32 bw = WL_CHANSPEC_BW_20; +#ifdef WL11ULB + u32 ulb_bw = wl_cfg80211_get_ulb_bw(dev->ieee80211_ptr); +#endif /* WL11ULB */ + + s32 err = BCME_OK; + s32 bw_cap = 0; + struct { + u32 band; + u32 bw_cap; + } param = {0, 0}; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); +#ifdef CUSTOM_SET_CPUCORE + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* CUSTOM_SET_CPUCORE */ + + dev = ndev_to_wlc_ndev(dev, cfg); + _chan = ieee80211_frequency_to_channel(chan->center_freq); + WL_ERR(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n", + dev->ifindex, channel_type, _chan)); + + +#ifdef WL11ULB + if (ulb_bw) { + WL_DBG(("[ULB] setting AP/GO BW to ulb_bw 0x%x \n", ulb_bw)); + bw = wl_cfg80211_ulbbw_to_ulbchspec(ulb_bw); + goto set_channel; + } +#endif /* WL11ULB */ + if (chan->band == 
NL80211_BAND_5GHZ ) { + param.band = WLC_BAND_5G; + err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync); + if (err) { + if (err != BCME_UNSUPPORTED) { + WL_ERR(("bw_cap failed, %d\n", err)); + return err; + } else { + err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); + if (err) { + WL_ERR(("error get mimo_bw_cap (%d)\n", err)); + } + if (bw_cap != WLC_N_BW_20ALL) + bw = WL_CHANSPEC_BW_40; + } + } else { + if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0])) + bw = WL_CHANSPEC_BW_80; + else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0])) + bw = WL_CHANSPEC_BW_40; + else + bw = WL_CHANSPEC_BW_20; + + } + + } else if (chan->band == NL80211_BAND_2GHZ ) + bw = WL_CHANSPEC_BW_20; +set_channel: + chspec = wf_channel2chspec(_chan, bw); + if (wf_chspec_valid(chspec)) { + fw_chspec = wl_chspec_host_to_driver(chspec); + if (fw_chspec != INVCHANSPEC) { + if ((err = wldev_iovar_setint(dev, "chanspec", + fw_chspec)) == BCME_BADCHAN) { + if (bw == WL_CHANSPEC_BW_80) + goto change_bw; + err = wldev_ioctl(dev, WLC_SET_CHANNEL, + &_chan, sizeof(_chan), true); + if (err < 0) { + WL_ERR(("WLC_SET_CHANNEL error %d" + "chip may not be supporting this channel\n", err)); + } + } else if (err) { + WL_ERR(("failed to set chanspec error %d\n", err)); + } + } else { + WL_ERR(("failed to convert host chanspec to fw chanspec\n")); + err = BCME_ERROR; + } + } else { +change_bw: + if (bw == WL_CHANSPEC_BW_80) + bw = WL_CHANSPEC_BW_40; + else if (bw == WL_CHANSPEC_BW_40) + bw = WL_CHANSPEC_BW_20; + else + bw = 0; + if (bw) + goto set_channel; + WL_ERR(("Invalid chanspec 0x%x\n", chspec)); + err = BCME_ERROR; + } +#ifdef CUSTOM_SET_CPUCORE + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) { + WL_DBG(("SoftAP mode do not need to set cpucore\n")); + } else if (chspec & WL_CHANSPEC_BW_80) { + /* SoftAp only mode do not need to set cpucore */ + if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) && + dev != bcmcfg_to_prmry_ndev(cfg)) { + /* Soft AP on virtual Iface (AP+STA case) */ + dhd->chan_isvht80 |= DHD_FLAG_HOSTAP_MODE; + dhd_set_cpucore(dhd, TRUE); + } else if (is_p2p_group_iface(dev->ieee80211_ptr)) { + /* If P2P IF is vht80 */ + dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; + dhd_set_cpucore(dhd, TRUE); + } + } +#endif /* CUSTOM_SET_CPUCORE */ + if (!err && (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) { + /* Update AP/GO operating channel */ + cfg->ap_oper_channel = _chan; + } + return err; +} + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST +struct net_device * +wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { + if (_net_info->ndev && + test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state)) + return _net_info->ndev; + } + return NULL; +} +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +static s32 +wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy) +{ + s32 err = BCME_OK; + u32 wpa_val; + s32 wsec = 0; + + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + + if (privacy) { + /* If privacy bit is set in open mode, then WEP would be enabled */ + wsec = WEP_ENABLED; + WL_DBG(("Setting wsec to %d for WEP \n", wsec)); + } + + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + + /* set upper-layer auth */ + if 
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC) + wpa_val = WPA_AUTH_NONE; + else + wpa_val = WPA_AUTH_DISABLED; + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_val, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } + + return 0; +} + +static s32 +wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) +{ + s32 len = 0; + s32 err = BCME_OK; + u16 auth = 0; /* d11 open authentication */ + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + wpa_suite_mcast_t *mcast; + wpa_suite_ucast_t *ucast; + wpa_suite_auth_key_mgmt_t *mgmt; + wpa_pmkid_list_t *pmkid; + int cnt = 0; + + u16 suite_count; + u8 rsn_cap[2]; + u32 wme_bss_disable; + + if (wpa2ie == NULL) + goto exit; + + WL_DBG(("Enter \n")); + len = wpa2ie->len - WPA2_VERSION_LEN; + /* check the mcast cipher */ + mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN]; + switch (mcast->type) { + case WPA_CIPHER_NONE: + gval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + gval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + gval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + gval = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + break; + } + if ((len -= WPA_SUITE_LEN) <= 0) + return BCME_BADLEN; + + /* check the unicast cipher */ + ucast = (wpa_suite_ucast_t *)&mcast[1]; + suite_count = ltoh16_ua(&ucast->count); + switch (ucast->list[0].type) { + case WPA_CIPHER_NONE: + pval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + pval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + pval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + pval = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0) + return BCME_BADLEN; + + /* FOR WPS , set SEC_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + /* check the AKM */ + mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count]; + suite_count = cnt = ltoh16_ua(&mgmt->count); + while (cnt--) { + switch (mgmt->list[cnt].type) { + case RSN_AKM_NONE: + wpa_auth |= WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + wpa_auth |= WPA2_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + wpa_auth |= WPA2_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + } + + if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) { + rsn_cap[0] = *(u8 *)&mgmt->list[suite_count]; + rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1); + + if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) { + wme_bss_disable = 0; + } else { + wme_bss_disable = 1; + } + + + /* set wme_bss_disable to sync RSN Capabilities */ + err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx); + if (err < 0) { + WL_ERR(("wme_bss_disable error %d\n", err)); + return BCME_ERROR; + } + } else { + WL_DBG(("There is no RSN Capabilities. remained len %d\n", len)); + } + + len -= RSN_CAP_LEN; + if (len >= WPA2_PMKID_COUNT_LEN) { + pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN); + cnt = ltoh16_ua(&pmkid->count); + if (cnt != 0) { + WL_ERR(("AP has non-zero PMKID count. 
Wrong!\n")); + return BCME_ERROR; + } + /* since PMKID cnt is known to be 0 for AP, */ + /* so don't bother to send down this info to firmware */ + } + + + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + + + /* set upper-layer auth */ + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } +exit: + return 0; +} + +static s32 +wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx) +{ + wpa_suite_mcast_t *mcast; + wpa_suite_ucast_t *ucast; + wpa_suite_auth_key_mgmt_t *mgmt; + u16 auth = 0; /* d11 open authentication */ + u16 count; + s32 err = BCME_OK; + s32 len = 0; + u32 i; + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + u32 tmp = 0; + + if (wpaie == NULL) + goto exit; + WL_DBG(("Enter \n")); + len = wpaie->length; /* value length */ + len -= WPA_IE_TAG_FIXED_LEN; + /* check for multicast cipher suite */ + if (len < WPA_SUITE_LEN) { + WL_INFORM(("no multicast cipher suite\n")); + goto exit; + } + + /* pick up multicast cipher */ + mcast = (wpa_suite_mcast_t *)&wpaie[1]; + len -= WPA_SUITE_LEN; + if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_CIPHER(mcast->type)) { + tmp = 0; + switch (mcast->type) { + case WPA_CIPHER_NONE: + tmp = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + tmp = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + tmp = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + tmp = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + gval |= tmp; + } + } + /* Check for unicast suite(s) */ + if (len < WPA_IE_SUITE_COUNT_LEN) { + WL_INFORM(("no unicast suite\n")); + goto exit; + } + /* walk thru unicast cipher list and pick up what we recognize */ + ucast = (wpa_suite_ucast_t *)&mcast[1]; + count = ltoh16_ua(&ucast->count); + len -= WPA_IE_SUITE_COUNT_LEN; + for (i = 0; i < count && len >= WPA_SUITE_LEN; + i++, len -= WPA_SUITE_LEN) { + if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_CIPHER(ucast->list[i].type)) { + tmp = 0; + switch (ucast->list[i].type) { + case WPA_CIPHER_NONE: + tmp = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + tmp = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + tmp = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + tmp = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + pval |= tmp; + } + } + } + len -= (count - i) * WPA_SUITE_LEN; + /* Check for auth key management suite(s) */ + if (len < WPA_IE_SUITE_COUNT_LEN) { + WL_INFORM((" no auth key mgmt suite\n")); + goto exit; + } + /* walk thru auth management suite list and pick up what we recognize */ + mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count]; + count = ltoh16_ua(&mgmt->count); + len -= WPA_IE_SUITE_COUNT_LEN; + for (i = 0; i < count && len >= WPA_SUITE_LEN; + i++, len -= WPA_SUITE_LEN) { + if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_AKM(mgmt->list[i].type)) { + tmp = 0; + switch (mgmt->list[i].type) { + case RSN_AKM_NONE: + tmp = WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + tmp = WPA_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + tmp = WPA_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + wpa_auth |= tmp; + } + } + + } + /* 
FOR WPS , set SEC_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + /* set upper-layer auth */ + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } +exit: + return 0; +} + + +static s32 +wl_cfg80211_bcn_validate_sec( + struct net_device *dev, + struct parsed_ies *ies, + u32 dev_role, + s32 bssidx, + bool privacy) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr); + + if (!bss) { + WL_ERR(("cfgbss is NULL \n")); + return BCME_ERROR; + } + + if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) { + /* For P2P GO, the sec type is WPA2-PSK */ + WL_DBG(("P2P GO: validating wpa2_ie")); + if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0) + return BCME_ERROR; + + } else if (dev_role == NL80211_IFTYPE_AP) { + + WL_DBG(("SoftAP: validating security")); + /* If wpa2_ie or wpa_ie is present validate it */ + + if ((ies->wpa2_ie || ies->wpa_ie) && + ((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 || + wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) { + bss->security_mode = false; + return BCME_ERROR; + } + + bss->security_mode = true; + if (bss->rsn_ie) { + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + } + if (bss->wpa_ie) { + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + } + if (bss->wps_ie) { + kfree(bss->wps_ie); + bss->wps_ie = NULL; + } + if (ies->wpa_ie != NULL) { + /* WPAIE */ + bss->rsn_ie = NULL; + bss->wpa_ie = kmemdup(ies->wpa_ie, + ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else if (ies->wpa2_ie != NULL) { + /* RSNIE */ + bss->wpa_ie = NULL; + bss->rsn_ie = kmemdup(ies->wpa2_ie, + ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } + if (!ies->wpa2_ie && !ies->wpa_ie) { + wl_validate_opensecurity(dev, bssidx, privacy); + bss->security_mode = false; + } + + if (ies->wps_ie) { + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } + } + + return 0; + +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) +static s32 wl_cfg80211_bcn_set_params( + struct cfg80211_ap_settings *info, + struct net_device *dev, + u32 dev_role, s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 err = BCME_OK; + + WL_DBG(("interval (%d) \ndtim_period (%d) \n", + info->beacon_interval, info->dtim_period)); + + if (info->beacon_interval) { + if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD, + &info->beacon_interval, sizeof(s32), true)) < 0) { + WL_ERR(("Beacon Interval Set Error, %d\n", err)); + return err; + } + } + + if (info->dtim_period) { + if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD, + &info->dtim_period, sizeof(s32), true)) < 0) { + WL_ERR(("DTIM Interval Set Error, %d\n", err)); + return err; + } + } + + if ((info->ssid) && (info->ssid_len > 0) && + (info->ssid_len <= 32)) { + WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len)); + if (dev_role == NL80211_IFTYPE_AP) { + /* Store the hostapd SSID */ + memset(cfg->hostapd_ssid.SSID, 0x00, 32); + memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len); + cfg->hostapd_ssid.SSID_len = info->ssid_len; + } else { + /* P2P GO */ + memset(cfg->p2p->ssid.SSID, 0x00, 32); + memcpy(cfg->p2p->ssid.SSID, info->ssid, 
info->ssid_len); + cfg->p2p->ssid.SSID_len = info->ssid_len; + } + } + + if (info->hidden_ssid) { + if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0) + WL_ERR(("failed to set hidden : %d\n", err)); + WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid)); + } + + return err; +} +#endif + +static s32 +wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies) +{ + s32 err = BCME_OK; + + memset(ies, 0, sizeof(struct parsed_ies)); + + /* find the WPSIE */ + if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) { + WL_DBG(("WPSIE in beacon \n")); + ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; + } else { + WL_ERR(("No WPSIE in beacon \n")); + } + + /* find the RSN_IE */ + if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE found\n")); + ies->wpa2_ie_len = ies->wpa2_ie->len; + } + + /* find the WPA_IE */ + if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) { + WL_DBG((" WPA found\n")); + ies->wpa_ie_len = ies->wpa_ie->length; + } + + return err; + +} + +#define MAX_AP_LINK_WAIT_TIME 10000 +static s32 +wl_cfg80211_bcn_bringup_ap( + struct net_device *dev, + struct parsed_ies *ies, + u32 dev_role, s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_join_params join_params; + struct wiphy *wiphy; + bool is_bssup = false; + s32 infra = 1; + s32 join_params_size = 0; + s32 ap = 1; + s32 pm; + s32 wsec; +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ + s32 err = BCME_OK; + s32 is_rsdb_supported = BCME_ERROR; + u32 timeout; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + + is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE); + if (is_rsdb_supported < 0) + return (-ENODEV); + + WL_DBG(("Enter dev_role:%d bssidx:%d\n", dev_role, bssidx)); + + /* Common code for SoftAP and P2P GO */ + wiphy = bcmcfg_to_wiphy(cfg); + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return -EINVAL; + } + wldev_iovar_setint(dev, "mpc", 0); + + wl_clr_drv_status(cfg, AP_CREATED, dev); + + if (dev_role == NL80211_IFTYPE_P2P_GO) { + is_bssup = wl_cfgp2p_bss_isup(dev, bssidx); + if (!is_bssup && (ies->wpa2_ie != NULL)) { + + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + goto exit; + } + + err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid, + sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("GO SSID setting error %d\n", err)); + goto exit; + } + + /* Do abort scan before creating GO */ + wl_cfg80211_scan_abort(cfg); + + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) { + WL_ERR(("GO Bring up error %d\n", err)); + goto exit; + } + } else + WL_DBG(("Bss is already up\n")); + } else if ((dev_role == NL80211_IFTYPE_AP) && + (wl_get_drv_status(cfg, AP_CREATING, dev))) { + + /* Device role SoftAP */ + WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role)); + + /* Clear the status bit after use */ + wl_clr_drv_status(cfg, AP_CREATING, dev); + + /* AP on primary Interface */ + if (bssidx == 0) { + if (is_rsdb_supported) { + if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx, + NL80211_IFTYPE_AP, 0, NULL)) < 0) { + WL_ERR(("wl add_del_bss returned error:%d\n", err)); + goto exit; + } + } else if (is_rsdb_supported == 0) { + /* AP mode switch not 
supported. Try setting up AP explicitly */ + err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true); + if (err < 0) { + WL_ERR(("WLC_DOWN error %d\n", err)); + goto exit; + } + err = wldev_iovar_setint(dev, "apsta", 0); + if (err < 0) { + WL_ERR(("wl apsta 0 error %d\n", err)); + goto exit; + } + + if ((err = wldev_ioctl(dev, + WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + goto exit; + } + + } + + pm = 0; + if ((err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true)) != 0) { + WL_ERR(("wl PM 0 returned error:%d\n", err)); + goto exit; + } + + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + goto exit; + } + } else if (cfg->cfgdev_bssidx && (bssidx == cfg->cfgdev_bssidx)) { + + WL_DBG(("Bringup SoftAP on virtual Interface bssidx:%d \n", bssidx)); + + if ((err = wl_cfg80211_add_del_bss(cfg, dev, + bssidx, NL80211_IFTYPE_AP, 0, NULL)) < 0) { + WL_ERR(("wl bss ap returned error:%d\n", err)); + goto exit; + } + + } + +#ifdef SOFTAP_UAPSD_OFF + err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("failed to disable uapsd, error=%d\n", err)); + } +#endif /* SOFTAP_UAPSD_OFF */ + + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + goto exit; + } + + err = wldev_iovar_getint(dev, "wsec", (s32 *)&wsec); + if (unlikely(err)) { + WL_ERR(("Could not get wsec %d\n", err)); + goto exit; + } + if ((wsec == WEP_ENABLED) && cfg->wep_key.len) { + WL_DBG(("Applying buffered WEP KEY \n")); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key, + sizeof(struct wl_wsec_key), cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + /* clear the key after use */ + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + goto exit; + } + } + + memset(&join_params, 0, sizeof(join_params)); + /* join parameters starts with ssid */ + join_params_size = sizeof(join_params.ssid); + memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID, + cfg->hostapd_ssid.SSID_len); + join_params.ssid.SSID_len = htod32(cfg->hostapd_ssid.SSID_len); + + /* create softap */ + if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, + join_params_size, true)) != 0) { + WL_ERR(("SoftAP/GO set ssid failed! \n")); + goto exit; + } else { + WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID)); + } + + if (bssidx != 0) { + /* AP on Virtual Interface */ + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) { + WL_ERR(("GO Bring up error %d\n", err)); + goto exit; + } + } + + } + /* Wait for Linkup event to mark successful AP/GO bring up */ + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + wl_get_drv_status(cfg, AP_CREATED, dev), msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME)); + if (timeout <= 0 || !wl_get_drv_status(cfg, AP_CREATED, dev)) { + WL_ERR(("Link up didn't come for AP interface. AP/GO creation failed! 
\n")); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + err = -ENODEV; + goto exit; + } + +exit: + if (cfg->wep_key.len) + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + return err; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) +s32 +wl_cfg80211_parse_ap_ies( + struct net_device *dev, + struct cfg80211_beacon_data *info, + struct parsed_ies *ies) +{ + struct parsed_ies prb_ies; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + u8 *vndr = NULL; + u32 vndr_ie_len = 0; + s32 err = BCME_OK; + + /* Parse Beacon IEs */ + if (wl_cfg80211_parse_ies((u8 *)info->tail, + info->tail_len, ies) < 0) { + WL_ERR(("Beacon get IEs failed \n")); + err = -EINVAL; + goto fail; + } + + vndr = (u8 *)info->proberesp_ies; + vndr_ie_len = info->proberesp_ies_len; + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + /* SoftAP mode */ + struct ieee80211_mgmt *mgmt; + mgmt = (struct ieee80211_mgmt *)info->probe_resp; + if (mgmt != NULL) { + vndr = (u8 *)&mgmt->u.probe_resp.variable; + vndr_ie_len = info->probe_resp_len - + offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + } + } + + /* Parse Probe Response IEs */ + if (wl_cfg80211_parse_ies(vndr, vndr_ie_len, &prb_ies) < 0) { + WL_ERR(("PROBE RESP get IEs failed \n")); + err = -EINVAL; + } + +fail: + + return err; +} + +s32 +wl_cfg80211_set_ies( + struct net_device *dev, + struct cfg80211_beacon_data *info, + s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + u8 *vndr = NULL; + u32 vndr_ie_len = 0; + s32 err = BCME_OK; + + /* Set Beacon IEs to FW */ + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_BEACON_FLAG, (const u8 *)info->tail, + info->tail_len)) < 0) { + WL_ERR(("Set Beacon IE Failed \n")); + } else { + WL_DBG(("Applied Vndr IEs for Beacon \n")); + } + + vndr = (u8 *)info->proberesp_ies; + vndr_ie_len = info->proberesp_ies_len; + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + /* SoftAP mode */ + struct ieee80211_mgmt *mgmt; + mgmt = (struct ieee80211_mgmt *)info->probe_resp; + if (mgmt != NULL) { + vndr = (u8 *)&mgmt->u.probe_resp.variable; + vndr_ie_len = info->probe_resp_len - + offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + } + } + + /* Set Probe Response IEs to FW */ + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) { + WL_ERR(("Set Probe Resp IE Failed \n")); + } else { + WL_DBG(("Applied Vndr IEs for Probe Resp \n")); + } + + return err; +} +#endif + +static s32 wl_cfg80211_hostapd_sec( + struct net_device *dev, + struct parsed_ies *ies, + s32 bssidx) +{ + bool update_bss = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr); + + if (!bss) { + WL_ERR(("cfgbss is NULL \n")); + return -EINVAL; + } + + if (ies->wps_ie) { + if (bss->wps_ie && + memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) { + WL_DBG((" WPS IE is changed\n")); + kfree(bss->wps_ie); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } else if (bss->wps_ie == NULL) { + WL_DBG((" WPS IE is added\n")); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } + + if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) { + if (!bss->security_mode) { + /* change from open mode to security 
mode */ + update_bss = true; + if (ies->wpa_ie != NULL) { + bss->wpa_ie = kmemdup(ies->wpa_ie, + ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else { + bss->rsn_ie = kmemdup(ies->wpa2_ie, + ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } + } else if (bss->wpa_ie) { + /* change from WPA2 mode to WPA mode */ + if (ies->wpa_ie != NULL) { + update_bss = true; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + bss->wpa_ie = kmemdup(ies->wpa_ie, + ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else if (memcmp(bss->rsn_ie, + ies->wpa2_ie, ies->wpa2_ie->len + + WPA_RSN_IE_TAG_FIXED_LEN)) { + update_bss = true; + kfree(bss->rsn_ie); + bss->rsn_ie = kmemdup(ies->wpa2_ie, + ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + bss->wpa_ie = NULL; + } + } + if (update_bss) { + bss->security_mode = true; + wl_cfgp2p_bss(cfg, dev, bssidx, 0); + if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 || + wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) { + return BCME_ERROR; + } + wl_cfgp2p_bss(cfg, dev, bssidx, 1); + } + } + } else { + WL_ERR(("No WPSIE in beacon \n")); + } + return 0; +} + +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) +static s32 +wl_cfg80211_del_station( + struct wiphy *wiphy, struct net_device *ndev, + struct station_del_parameters *params) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 +wl_cfg80211_del_station( + struct wiphy *wiphy, + struct net_device *ndev, + const u8* mac_addr) +#else +static s32 +wl_cfg80211_del_station( + struct wiphy *wiphy, + struct net_device *ndev, + u8* mac_addr) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ +{ + struct net_device *dev; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + scb_val_t scb_val; + s8 eabuf[ETHER_ADDR_STR_LEN]; + int err; + char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + int num_associated = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + const u8 *mac_addr = params->mac; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ + + WL_DBG(("Entry\n")); + if (mac_addr == NULL) { + WL_DBG(("mac_addr is NULL ignore it\n")); + return 0; + } + + dev = ndev_to_wlc_ndev(ndev, cfg); + + if (p2p_is_on(cfg)) { + /* Suspend P2P discovery search-listen to prevent it from changing the + * channel. 
+ */ + if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + return -EFAULT; + } + } + + assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV; + err = wldev_ioctl(ndev, WLC_GET_ASSOCLIST, + assoc_maclist, sizeof(mac_buf), false); + if (err < 0) + WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err)); + else + num_associated = assoc_maclist->count; + + memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN); + scb_val.val = DOT11_RC_DEAUTH_LEAVING; + err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val, + sizeof(scb_val_t), true); + if (err < 0) + WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err)); + WL_ERR(("Disconnect STA : %s scb_val.val %d\n", + bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf), + scb_val.val)); + + if (num_associated > 0 && ETHER_ISBCAST(mac_addr)) + wl_delay(400); + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 +wl_cfg80211_change_station( + struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +#else +static s32 +wl_cfg80211_change_station( + struct wiphy *wiphy, + struct net_device *dev, + u8 *mac, + struct station_parameters *params) +#endif +{ + int err; +#ifdef DHD_LOSSLESS_ROAMING + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); +#endif + + WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x " + "sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac), + params->sta_flags_mask, params->sta_flags_set, dev->name)); + + /* Processing only authorize/de-authorize flag for now */ + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) { + WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n")); + return -ENOTSUPP; + } + + if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + err = wldev_ioctl(dev, WLC_SCB_DEAUTHORIZE, (u8 *)mac, ETH_ALEN, true); +#else + err = wldev_ioctl(dev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true); +#endif + if (err) + WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err)); + return err; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + err = wldev_ioctl(dev, WLC_SCB_AUTHORIZE, (u8 *)mac, ETH_ALEN, true); +#else + err = wldev_ioctl(dev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true); +#endif + if (err) + WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err)); +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif + return err; +} +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */ + +static s32 +wl_cfg80211_set_scb_timings( + struct bcm_cfg80211 *cfg, + struct net_device *dev) +{ + int err; + u32 ps_pretend; + wl_scb_probe_t scb_probe; + + bzero(&scb_probe, sizeof(wl_scb_probe_t)); + scb_probe.scb_timeout = WL_SCB_TIMEOUT; + scb_probe.scb_activity_time = WL_SCB_ACTIVITY_TIME; + scb_probe.scb_max_probe = WL_SCB_MAX_PROBE; + err = wldev_iovar_setbuf(dev, "scb_probe", (void *)&scb_probe, + sizeof(wl_scb_probe_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("set 'scb_probe' failed, error = %d\n", err)); + return err; + } + + ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD); + err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend); + if (unlikely(err)) { + if (err == BCME_UNSUPPORTED) { + /* Ignore error if fw doesn't support the iovar */ + WL_DBG(("wl pspretend_threshold %d set error %d\n", + ps_pretend, err)); + } else { + WL_ERR(("wl pspretend_threshold %d set error %d\n", + ps_pretend, err)); + return err; 
+ } + } + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) +static s32 +wl_cfg80211_start_ap( + struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ap_settings *info) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = BCME_OK; + struct parsed_ies ies; + s32 bssidx = 0; + u32 dev_role = 0; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_DBG(("Enter \n")); + +#if defined(SUPPORT_RANDOM_MAC_SCAN) + wl_cfg80211_set_random_mac(dev, FALSE); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + + if ((dev == bcmcfg_to_prmry_ndev(cfg)) || + (dev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) { + WL_DBG(("Start AP req on iface: %s \n", dev->name)); + dev_role = NL80211_IFTYPE_AP; + } +#if defined(WL_ENABLE_P2P_IF) + else if (dev == cfg->p2p_net) { + /* Group Add request on p2p0 */ + WL_DBG(("Start AP req on P2P iface: GO\n")); + dev = bcmcfg_to_prmry_ndev(cfg); + dev_role = NL80211_IFTYPE_P2P_GO; + } +#endif /* WL_ENABLE_P2P_IF */ + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (p2p_is_on(cfg) && (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) { + dev_role = NL80211_IFTYPE_P2P_GO; + } else if (dev_role == NL80211_IFTYPE_AP) { + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + /* + * Enabling Softap is causing issues with STA NDO operations + * as NDO is not interface specific. So disable NDO while + * Softap is enabled + */ + err = dhd_ndo_enable(dhd, FALSE); + WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err)); + if (err) { + /* Non fatal error. */ + WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err)); + } else { + cfg->revert_ndo_disable = true; + } + +#ifdef PKT_FILTER_SUPPORT + /* Disable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Disable pkt_filter\n")); + dhd_enable_packet_filter(0, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ + if ((dhd->op_mode & DHD_FLAG_STA_MODE) && wl_cfg80211_is_roam_offload()) { + WL_ERR(("Cleare roam_offload_bssid_list at STA-SoftAP MODE.\n")); + wl_android_set_roam_offload_bssid_list(dev, "0"); + } + } else { + /* only AP or GO role need to be handled here. */ + err = -EINVAL; + goto fail; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -EINVAL; + goto fail; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + if ((err = wl_cfg80211_set_channel(wiphy, dev, + dev->ieee80211_ptr->preset_chandef.chan, + NL80211_CHAN_HT20) < 0)) { + WL_ERR(("Set channel failed \n")); + goto fail; + } +#endif + + if ((err = wl_cfg80211_bcn_set_params(info, dev, + dev_role, bssidx)) < 0) { + WL_ERR(("Beacon params set failed \n")); + goto fail; + } + + /* Parse IEs */ + if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) { + WL_ERR(("Set IEs failed \n")); + goto fail; + } + + if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies, + dev_role, bssidx, info->privacy)) < 0) + { + WL_ERR(("Beacon set security failed \n")); + goto fail; + } + + if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies, + dev_role, bssidx)) < 0) { + WL_ERR(("Beacon bring up AP/GO failed \n")); + goto fail; + } + + /* Set GC/STA SCB expiry timings. 
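+	 * (scb_probe timeout/activity/max-probe plus the PS-pretend threshold)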
*/ + if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) { + WL_ERR(("scb setting failed \n")); + goto fail; + } + + WL_DBG(("** AP/GO Created **\n")); + +#ifdef WL_CFG80211_ACL + /* Enfoce Admission Control. */ + if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) { + WL_ERR(("Set ACL failed\n")); + } +#endif /* WL_CFG80211_ACL */ + + /* Set IEs to FW */ + if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0) + WL_ERR(("Set IEs failed \n")); + + /* Enable Probe Req filter, WPS-AP certification 4.2.13 */ + if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) { + bool pbc = 0; + wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc); + if (pbc) { + WL_DBG(("set WLC_E_PROBREQ_MSG\n")); + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true); + } + } + +fail: + if (err) { + WL_ERR(("ADD/SET beacon failed\n")); + wldev_iovar_setint(dev, "mpc", 1); + if (dev_role == NL80211_IFTYPE_AP) { + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Enable pkt_filter\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ + } + } + + return err; +} + +static s32 +wl_cfg80211_stop_ap( + struct wiphy *wiphy, + struct net_device *dev) +{ + int err = 0; + u32 dev_role = 0; + int infra = 0; + int ap = 0; + s32 bssidx = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 is_rsdb_supported = BCME_ERROR; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_DBG(("Enter \n")); + + is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE); + if (is_rsdb_supported < 0) + return (-ENODEV); + + wl_clr_drv_status(cfg, AP_CREATING, dev); + wl_clr_drv_status(cfg, AP_CREATED, dev); + cfg->ap_oper_channel = 0; + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) { + dev_role = NL80211_IFTYPE_AP; + WL_DBG(("stopping AP operation\n")); + } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { + dev_role = NL80211_IFTYPE_P2P_GO; + WL_DBG(("stopping P2P GO operation\n")); + } else { + WL_ERR(("no AP/P2P GO interface is operational.\n")); + return -EINVAL; + } + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + WL_ERR(("role integrity check failed \n")); + err = -EINVAL; + goto exit; + } + + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 0)) < 0) { + WL_ERR(("bss down error %d\n", err)); + } + + if (dev_role == NL80211_IFTYPE_AP) { + if (cfg->revert_ndo_disable == true) { + err = dhd_ndo_enable(dhd, TRUE); + WL_DBG(("%s: Enabling back NDO on Softap turn off %d\n", + __FUNCTION__, err)); + if (err) { + WL_ERR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, err)); + } + cfg->revert_ndo_disable = false; + } + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Enable pkt_filter\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is disabled, enable arpoe back for STA mode. */ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, TRUE); +#endif /* ARP_OFFLOAD_SUPPORT */ + /* + * Bring down the AP interface by changing role to STA. 
+ * Don't do a down or "WLC_SET_AP 0" since the shared + * interface may be still running + */ + if (is_rsdb_supported) { + if ((err = wl_cfg80211_add_del_bss(cfg, dev, + bssidx, NL80211_IFTYPE_STATION, 0, NULL)) < 0) { + if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), + true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + err = -ENOTSUPP; + goto exit; + } + } + } else if (is_rsdb_supported == 0) { + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + err = -ENOTSUPP; + goto exit; + } + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + err = -EINVAL; + goto exit; + } + } + + /* Turn on the MPC */ + wldev_iovar_setint(dev, "mpc", 1); + + wl_cfg80211_clear_per_bss_ies(cfg, bssidx); + } else { + WL_DBG(("Stopping P2P GO \n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub), + DHD_EVENT_TIMEOUT_MS*3); + DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub)); + } + +exit: + + if (dev_role == NL80211_IFTYPE_AP) { + /* clear the AP mode */ + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + } + return err; +} + +static s32 +wl_cfg80211_change_beacon( + struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_beacon_data *info) +{ + s32 err = BCME_OK; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct parsed_ies ies; + u32 dev_role = 0; + s32 bssidx = 0; + bool pbc = 0; + + WL_DBG(("Enter \n")); + + if (dev == bcmcfg_to_prmry_ndev(cfg)) { + dev_role = NL80211_IFTYPE_AP; + } +#if defined(WL_ENABLE_P2P_IF) + else if (dev == cfg->p2p_net) { + /* Group Add request on p2p0 */ + dev = bcmcfg_to_prmry_ndev(cfg); + dev_role = NL80211_IFTYPE_P2P_GO; + } +#endif /* WL_ENABLE_P2P_IF */ + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { + dev_role = NL80211_IFTYPE_P2P_GO; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -EINVAL; + goto fail; + } + + if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) { + WL_ERR(("P2P already down status!\n")); + err = BCME_ERROR; + goto fail; + } + + /* Parse IEs */ + if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) { + WL_ERR(("Parse IEs failed \n")); + goto fail; + } + + /* Set IEs to FW */ + if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) { + WL_ERR(("Set IEs failed \n")); + goto fail; + } + + if (dev_role == NL80211_IFTYPE_AP) { + if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) { + WL_ERR(("Hostapd update sec failed \n")); + err = -EINVAL; + goto fail; + } + /* Enable Probe Req filter, WPS-AP certification 4.2.13 */ + if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) { + wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc); + WL_DBG((" WPS AP, wps_ie is exists pbc=%d\n", pbc)); + if (pbc) + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true); + else + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false); + } + } + +fail: + return err; +} +#else +static s32 +wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info) +{ + s32 err = BCME_OK; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 ie_offset = 0; + s32 bssidx = 0; + u32 dev_role = NL80211_IFTYPE_AP; + struct parsed_ies ies; + bcm_tlv_t *ssid_ie; + bool pbc = 0; + bool privacy; + bool is_bss_up = 0; + dhd_pub_t *dhd = (dhd_pub_t 
*)(cfg->pub); + + WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n", + info->interval, info->dtim_period, info->head_len, info->tail_len)); + + if (dev == bcmcfg_to_prmry_ndev(cfg)) { + dev_role = NL80211_IFTYPE_AP; + } +#if defined(WL_ENABLE_P2P_IF) + else if (dev == cfg->p2p_net) { + /* Group Add request on p2p0 */ + dev = bcmcfg_to_prmry_ndev(cfg); + dev_role = NL80211_IFTYPE_P2P_GO; + } +#endif /* WL_ENABLE_P2P_IF */ + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { + dev_role = NL80211_IFTYPE_P2P_GO; + } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) { + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -ENODEV; + goto fail; + } + + if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) { + WL_ERR(("P2P already down status!\n")); + err = BCME_ERROR; + goto fail; + } + + ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + /* find the SSID */ + if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset], + info->head_len - ie_offset, + DOT11_MNG_SSID_ID)) != NULL) { + if (dev_role == NL80211_IFTYPE_AP) { + /* Store the hostapd SSID */ + memset(&cfg->hostapd_ssid.SSID[0], 0x00, 32); + memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len); + cfg->hostapd_ssid.SSID_len = ssid_ie->len; + } else { + /* P2P GO */ + memset(&cfg->p2p->ssid.SSID[0], 0x00, 32); + memcpy(cfg->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len); + cfg->p2p->ssid.SSID_len = ssid_ie->len; + } + } + + if (wl_cfg80211_parse_ies((u8 *)info->tail, + info->tail_len, &ies) < 0) { + WL_ERR(("Beacon get IEs failed \n")); + err = -EINVAL; + goto fail; + } + + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_BEACON_FLAG, (u8 *)info->tail, + info->tail_len)) < 0) { + WL_ERR(("Beacon set IEs failed \n")); + goto fail; + } else { + WL_DBG(("Applied Vndr IEs for Beacon \n")); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies, + info->proberesp_ies_len)) < 0) { + WL_ERR(("ProbeRsp set IEs failed \n")); + goto fail; + } else { + WL_DBG(("Applied Vndr IEs for ProbeRsp \n")); + } +#endif + + is_bss_up = wl_cfgp2p_bss_isup(dev, bssidx); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + privacy = info->privacy; +#else + privacy = 0; +#endif + if (!is_bss_up && + (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0)) + { + WL_ERR(("Beacon set security failed \n")); + err = -EINVAL; + goto fail; + } + + /* Set BI and DTIM period */ + if (info->interval) { + if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD, + &info->interval, sizeof(s32), true)) < 0) { + WL_ERR(("Beacon Interval Set Error, %d\n", err)); + return err; + } + } + if (info->dtim_period) { + if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD, + &info->dtim_period, sizeof(s32), true)) < 0) { + WL_ERR(("DTIM Interval Set Error, %d\n", err)); + return err; + } + } + + /* If bss is already up, skip bring up */ + if (!is_bss_up && + (err = wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx)) < 0) + { + WL_ERR(("Beacon bring up AP/GO failed \n")); + goto fail; + } + + /* Set GC/STA SCB expiry timings. 
*/ + if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) { + WL_ERR(("scb setting failed \n")); + goto fail; + } + + if (wl_get_drv_status(cfg, AP_CREATED, dev)) { + /* Soft AP already running. Update changed params */ + if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) { + WL_ERR(("Hostapd update sec failed \n")); + err = -EINVAL; + goto fail; + } + } + + /* Enable Probe Req filter */ + if (((dev_role == NL80211_IFTYPE_P2P_GO) || + (dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) { + wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc); + if (pbc) + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true); + } + + WL_DBG(("** ADD/SET beacon done **\n")); + +fail: + if (err) { + WL_ERR(("ADD/SET beacon failed\n")); + wldev_iovar_setint(dev, "mpc", 1); + if (dev_role == NL80211_IFTYPE_AP) { + /* clear the AP mode */ + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + } + } + return err; + +} +#endif + +#ifdef WL_SCHED_SCAN +#define PNO_TIME 30 +#define PNO_REPEAT 4 +#define PNO_FREQ_EXPO_MAX 2 +static bool +is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count) +{ + int i; + + if (!ssid || !ssid_list) + return FALSE; + + for (i = 0; i < count; i++) { + if (ssid->ssid_len == ssid_list[i].ssid_len) { + if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0) + return TRUE; + } + } + return FALSE; +} + +static int +wl_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + ushort pno_time = PNO_TIME; + int pno_repeat = PNO_REPEAT; + int pno_freq_expo_max = PNO_FREQ_EXPO_MAX; + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct cfg80211_ssid *ssid = NULL; + struct cfg80211_ssid *hidden_ssid_list = NULL; + int ssid_cnt = 0; + int i; + int ret = 0; + + if (!request) { + WL_ERR(("Sched scan request was NULL\n")); + return -EINVAL; + } + + WL_DBG(("Enter \n")); + WL_PNO((">>> SCHED SCAN START\n")); + WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n", + request->n_match_sets, request->n_ssids)); + WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n", + request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max)); + + + if (!request->n_ssids || !request->n_match_sets) { + WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids)); + return -EINVAL; + } + + memset(&ssids_local, 0, sizeof(ssids_local)); + + if (request->n_ssids > 0) { + hidden_ssid_list = request->ssids; + } + + for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) { + ssid = &request->match_sets[i].ssid; + /* No need to include null ssid */ + if (ssid->ssid_len) { + memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid, ssid->ssid_len); + ssids_local[ssid_cnt].SSID_len = ssid->ssid_len; + if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) { + ssids_local[ssid_cnt].hidden = TRUE; + WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid)); + } else { + ssids_local[ssid_cnt].hidden = FALSE; + WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid)); + } + ssid_cnt++; + } + } + + if (ssid_cnt) { + if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt, + pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) { + WL_ERR(("PNO setup failed!! 
ret=%d \n", ret)); + return -EINVAL; + } + cfg->sched_scan_req = request; + } else { + return -EINVAL; + } + + return 0; +} + +static int +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid) +#else +wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev) +#endif +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + WL_DBG(("Enter \n")); + WL_PNO((">>> SCHED SCAN STOP\n")); + + if (dhd_dev_pno_stop_for_ssid(dev) < 0) + WL_ERR(("PNO Stop for SSID failed")); + + if (cfg->scan_request && cfg->sched_scan_running) { + WL_PNO((">>> Sched scan running. Aborting it..\n")); + wl_notify_escan_complete(cfg, dev, true, true); + } + + cfg->sched_scan_req = NULL; + cfg->sched_scan_running = FALSE; + + return 0; +} +#endif /* WL_SCHED_SCAN */ + +#ifdef WL_SUPPORT_ACS +/* + * Currently the dump_obss IOVAR is returning string as output so we need to + * parse the output buffer in an unoptimized way. Going forward if we get the + * IOVAR output in binary format this method can be optimized + */ +static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey) +{ + int i; + char *token; + char delim[] = " \n"; + + token = strsep(&buf, delim); + while (token != NULL) { + if (!strcmp(token, "OBSS")) { + for (i = 0; i < OBSS_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->obss = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "IBSS")) { + for (i = 0; i < IBSS_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->ibss = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "TXDur")) { + for (i = 0; i < TX_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->tx = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "Category")) { + for (i = 0; i < CTG_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->no_ctg = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "Packet")) { + for (i = 0; i < PKT_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->no_pckt = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "Opp(time):")) { + for (i = 0; i < IDLE_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->idle = simple_strtoul(token, NULL, 10); + } + + token = strsep(&buf, delim); + } + + return 0; +} + +static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req, + struct wl_dump_survey *survey) +{ + cca_stats_n_flags *results; + char *buf; + int retry, err; + + buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!buf)) { + WL_ERR(("%s: buf alloc failed\n", __func__)); + return -ENOMEM; + } + + retry = IOCTL_RETRY_COUNT; + while (retry--) { + err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req), + buf, WLC_IOCTL_MAXLEN, NULL); + if (err >= 0) { + break; + } + WL_DBG(("attempt = %d, err = %d, \n", + (IOCTL_RETRY_COUNT - retry), err)); + } + + if (retry <= 0) { + WL_ERR(("failure, dump_obss IOVAR failed\n")); + err = -EINVAL; + goto exit; + } + + results = (cca_stats_n_flags *)(buf); + wl_parse_dump_obss(results->buf, survey); + kfree(buf); + + return 0; +exit: + kfree(buf); + return err; +} + +static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev, + int idx, struct survey_info *info) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct wl_dump_survey *survey; + struct ieee80211_supported_band *band; + struct ieee80211_channel*chan; + cca_msrmnt_query req; + int val, err, noise, retry; + + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (!(dhd->op_mode & 
DHD_FLAG_HOSTAP_MODE)) { + return -ENOENT; + } + band = wiphy->bands[NL80211_BAND_2GHZ ]; + if (band && idx >= band->n_channels) { + idx -= band->n_channels; + band = NULL; + } + + if (!band || idx >= band->n_channels) { + /* Move to 5G band */ + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (idx >= band->n_channels) { + return -ENOENT; + } + } + + chan = &band->channels[idx]; + /* Setting current channel to the requested channel */ + if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan, + NL80211_CHAN_HT20) < 0)) { + WL_ERR(("Set channel failed \n")); + } + + if (!idx) { + /* Disable mpc */ + val = 0; + err = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", err)); + } + + /* Set interface up, explicitly. */ + val = 1; + err = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true); + if (err < 0) { + WL_ERR(("set interface up failed, error = %d\n", err)); + } + } + + /* Get noise value */ + retry = IOCTL_RETRY_COUNT; + while (retry--) { + err = wldev_ioctl(ndev, WLC_GET_PHY_NOISE, &noise, + sizeof(noise), false); + if (err >= 0) { + break; + } + WL_DBG(("attempt = %d, err = %d, \n", + (IOCTL_RETRY_COUNT - retry), err)); + } + + if (retry <= 0) { + WL_ERR(("Get Phy Noise failed, error = %d\n", err)); + noise = CHAN_NOISE_DUMMY; + } + + survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey), + GFP_KERNEL); + if (unlikely(!survey)) { + WL_ERR(("%s: alloc failed\n", __func__)); + return -ENOMEM; + } + + /* Start Measurement for obss stats on current channel */ + req.msrmnt_query = 0; + req.time_req = ACS_MSRMNT_DELAY; + if ((err = wl_dump_obss(ndev, req, survey)) < 0) { + goto exit; + } + + /* + * Wait for the meaurement to complete, adding a buffer value of 10 to take + * into consideration any delay in IOVAR completion + */ + msleep(ACS_MSRMNT_DELAY + 10); + + /* Issue IOVAR to collect measurement results */ + req.msrmnt_query = 1; + if ((err = wl_dump_obss(ndev, req, survey)) < 0) { + goto exit; + } + + info->channel = chan; + info->noise = noise; + info->channel_time = ACS_MSRMNT_DELAY; + info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle; + info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg + + survey->no_pckt; + info->channel_time_tx = survey->tx; + info->filled = SURVEY_INFO_NOISE_DBM |SURVEY_INFO_CHANNEL_TIME | + SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX | + SURVEY_INFO_CHANNEL_TIME_TX; + kfree(survey); + + return 0; +exit: + kfree(survey); + return err; +} +#endif /* WL_SUPPORT_ACS */ + +static struct cfg80211_ops wl_cfg80211_ops = { + .add_virtual_intf = wl_cfg80211_add_virtual_iface, + .del_virtual_intf = wl_cfg80211_del_virtual_iface, + .change_virtual_intf = wl_cfg80211_change_virtual_iface, +#if defined(WL_CFG80211_P2P_DEV_IF) + .start_p2p_device = wl_cfgp2p_start_p2p_device, + .stop_p2p_device = wl_cfgp2p_stop_p2p_device, +#endif /* WL_CFG80211_P2P_DEV_IF */ + .scan = wl_cfg80211_scan, + .set_wiphy_params = wl_cfg80211_set_wiphy_params, + .join_ibss = wl_cfg80211_join_ibss, + .leave_ibss = wl_cfg80211_leave_ibss, + .get_station = wl_cfg80211_get_station, + .set_tx_power = wl_cfg80211_set_tx_power, + .get_tx_power = wl_cfg80211_get_tx_power, + .add_key = wl_cfg80211_add_key, + .del_key = wl_cfg80211_del_key, + .get_key = wl_cfg80211_get_key, + .set_default_key = wl_cfg80211_config_default_key, + .set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key, + .set_power_mgmt = 
wl_cfg80211_set_power_mgmt, + .connect = wl_cfg80211_connect, + .disconnect = wl_cfg80211_disconnect, + .suspend = wl_cfg80211_suspend, + .resume = wl_cfg80211_resume, + .set_pmksa = wl_cfg80211_set_pmksa, + .del_pmksa = wl_cfg80211_del_pmksa, + .flush_pmksa = wl_cfg80211_flush_pmksa, + .remain_on_channel = wl_cfg80211_remain_on_channel, + .cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel, + .mgmt_tx = wl_cfg80211_mgmt_tx, + .mgmt_frame_register = wl_cfg80211_mgmt_frame_register, + .change_bss = wl_cfg80211_change_bss, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + .set_channel = wl_cfg80211_set_channel, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) + .set_beacon = wl_cfg80211_add_set_beacon, + .add_beacon = wl_cfg80211_add_set_beacon, +#else + .change_beacon = wl_cfg80211_change_beacon, + .start_ap = wl_cfg80211_start_ap, + .stop_ap = wl_cfg80211_stop_ap, +#endif +#ifdef WL_SCHED_SCAN + .sched_scan_start = wl_cfg80211_sched_scan_start, + .sched_scan_stop = wl_cfg80211_sched_scan_stop, +#endif /* WL_SCHED_SCAN */ +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) + .del_station = wl_cfg80211_del_station, + .change_station = wl_cfg80211_change_station, + .mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait, +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) + .tdls_mgmt = wl_cfg80211_tdls_mgmt, + .tdls_oper = wl_cfg80211_tdls_oper, +#endif +#ifdef WL_SUPPORT_ACS + .dump_survey = wl_cfg80211_dump_survey, +#endif /* WL_SUPPORT_ACS */ +#ifdef WL_CFG80211_ACL + .set_mac_acl = wl_cfg80211_set_mac_acl, +#endif /* WL_CFG80211_ACL */ +}; + +s32 wl_mode_to_nl80211_iftype(s32 mode) +{ + s32 err = 0; + + switch (mode) { + case WL_MODE_BSS: + return NL80211_IFTYPE_STATION; + case WL_MODE_IBSS: + return NL80211_IFTYPE_ADHOC; + case WL_MODE_AP: + return NL80211_IFTYPE_AP; + default: + return NL80211_IFTYPE_UNSPECIFIED; + } + + return err; +} + +#ifdef CONFIG_CFG80211_INTERNAL_REGDB +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static int +#else +static void +#endif /* kernel version < 3.9.0 */ +wl_cfg80211_reg_notifier( + struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy); + int ret = 0; + int revinfo = -1; + + if (!request || !cfg) { + WL_ERR(("Invalid arg\n")); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + return -EINVAL; +#else + return; +#endif /* kernel version < 3.9.0 */ + } + + WL_DBG(("ccode: %c%c Initiator: %d\n", + request->alpha2[0], request->alpha2[1], request->initiator)); + + /* We support only REGDOM_SET_BY_USER as of now */ + if ((request->initiator != NL80211_REGDOM_SET_BY_USER) && + (request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) { + WL_ERR(("reg_notifier for intiator:%d not supported : set default\n", + request->initiator)); + /* in case of no supported country by regdb + lets driver setup platform default Locale + */ + } + + WL_ERR(("Set country code %c%c from %s\n", + request->alpha2[0], request->alpha2[1], + ((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User"))); + + if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2, + false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? 
true : false), + revinfo)) < 0) { + WL_ERR(("set country Failed :%d\n", ret)); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + return ret; +#else + return; +#endif /* kernel version < 3.9.0 */ +} +#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ + +#ifdef CONFIG_PM +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +static const struct wiphy_wowlan_support brcm_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL_WOWLAN_MAX_PATTERNS, + .pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN, + .pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + .max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +static struct cfg80211_wowlan brcm_wowlan_config = { + .disconnect = true, + .gtk_rekey_failure = true, + .eap_identity_req = true, + .four_way_handshake = true, +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif /* CONFIG_PM */ + +static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context) +{ + s32 err = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + dhd_pub_t *dhd = (dhd_pub_t *)context; + BCM_REFERENCE(dhd); + + if (!dhd) { + WL_ERR(("DHD is NULL!!")); + err = -ENODEV; + return err; + } +#endif + + wdev->wiphy = + wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211)); + if (unlikely(!wdev->wiphy)) { + WL_ERR(("Couldn not allocate wiphy device\n")); + err = -ENOMEM; + return err; + } + set_wiphy_dev(wdev->wiphy, sdiofunc_dev); + wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX; + /* Report how many SSIDs Driver can support per Scan request */ + wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX; + wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; +#ifdef WL_SCHED_SCAN + wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT; + wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT; + wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; +#endif +#endif /* WL_SCHED_SCAN */ + wdev->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_ADHOC) +#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF) + | BIT(NL80211_IFTYPE_MONITOR) +#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF) + | BIT(NL80211_IFTYPE_P2P_CLIENT) + | BIT(NL80211_IFTYPE_P2P_GO) +#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_CFG80211_P2P_DEV_IF) + | BIT(NL80211_IFTYPE_P2P_DEVICE) +#endif /* WL_CFG80211_P2P_DEV_IF */ + | BIT(NL80211_IFTYPE_AP); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ + (defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)) + WL_DBG(("Setting interface combinations for common mode\n")); + wdev->wiphy->iface_combinations = common_iface_combinations; + wdev->wiphy->n_iface_combinations = + ARRAY_SIZE(common_iface_combinations); +#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */ + + wdev->wiphy->bands[NL80211_BAND_2GHZ ] = &__wl_band_2ghz; + + wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + wdev->wiphy->cipher_suites = __wl_cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); + wdev->wiphy->max_remain_on_channel_duration = 5000; + 
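+	/*
+	 * The remaining wiphy setup advertises optional capabilities to
+	 * cfg80211: default power-save behaviour, netns and 4-address support,
+	 * firmware roaming, off-channel TX / remain-on-channel, AP SME,
+	 * MAC-ACL limits, probe-response offload for SoftAP firmware, TDLS
+	 * and the WoWLAN configuration. Each item is guarded by the kernel
+	 * version or feature macro it depends on.
+	 */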
wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes; +#ifndef WL_POWERSAVE_DISABLED + wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; +#else + wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; +#endif /* !WL_POWERSAVE_DISABLED */ + wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK | + WIPHY_FLAG_4ADDR_AP | +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) + WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS | +#endif + WIPHY_FLAG_4ADDR_STATION; +#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(3, 2, 0))) + /* + * If FW ROAM flag is advertised, upper layer wouldn't provide + * the bssid & freq in the connect command. This will result a + * delay in initial connection time due to firmware doing a full + * channel scan to figure out the channel & bssid. However kernel + * ver >= 3.15, provides bssid_hint & freq_hint and hence kernel + * ver >= 3.15 won't have any issue. So if this flags need to be + * advertised for kernel < 3.15, suggest to use RCC along with it + * to avoid the initial connection delay. + */ + wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; +#endif +#ifdef UNSET_FW_ROAM_WIPHY_FLAG + wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM; +#endif /* UNSET_FW_ROAM_WIPHY_FLAG */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_OFFCHAN_TX; +#endif +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 4, 0)) + /* From 3.4 kernel ownards AP_SME flag can be advertised + * to remove the patch from supplicant + */ + wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; + +#ifdef WL_CFG80211_ACL + /* Configure ACL capabilities. */ + wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + /* Supplicant distinguish between the SoftAP mode and other + * modes (e.g. P2P, WPS, HS2.0) when it builds the probe + * response frame from Supplicant MR1 and Kernel 3.4.0 or + * later version. To add Vendor specific IE into the + * probe response frame in case of SoftAP mode, + * AP_PROBE_RESP_OFFLOAD flag is set to wiphy->flags variable. + */ + if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) { + wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + wdev->wiphy->probe_resp_offload = 0; + } +#endif +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */ + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) + wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; +#endif + +#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF) + /* + * From linux-3.10 kernel, wowlan packet filter is mandated to avoid the + * disconnection of connected network before suspend. So a dummy wowlan + * filter is configured for kernels linux-3.8 and above. + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + wdev->wiphy->wowlan = &brcm_wowlan_support; + /* If this is not provided cfg stack will get disconnect + * during suspend. 
+ */ + wdev->wiphy->wowlan_config = &brcm_wowlan_config; +#else + wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; + wdev->wiphy->wowlan.n_patterns = WL_WOWLAN_MAX_PATTERNS; + wdev->wiphy->wowlan.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN; + wdev->wiphy->wowlan.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + wdev->wiphy->wowlan.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */ + + WL_DBG(("Registering custom regulatory)\n")); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; +#else + wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; +#endif + wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom); +#if defined(WL_VENDOR_EXT_SUPPORT) + WL_ERR(("Registering Vendor80211\n")); + err = wl_cfgvendor_attach(wdev->wiphy); + if (unlikely(err < 0)) { + WL_ERR(("Couldn not attach vendor commands (%d)\n", err)); + } +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + /* Now we can register wiphy with cfg80211 module */ + err = wiphy_register(wdev->wiphy); + if (unlikely(err < 0)) { + WL_ERR(("Couldn not register wiphy device (%d)\n", err)); + wiphy_free(wdev->wiphy); + } + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS) + wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS; +#endif + + return err; +} + +static void wl_free_wdev(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev = cfg->wdev; + struct wiphy *wiphy = NULL; + if (!wdev) { + WL_ERR(("wdev is invalid\n")); + return; + } + if (wdev->wiphy) { + wiphy = wdev->wiphy; + +#if defined(WL_VENDOR_EXT_SUPPORT) + wl_cfgvendor_detach(wdev->wiphy); +#endif /* if defined(WL_VENDOR_EXT_SUPPORT) */ +#if defined(CONFIG_PM) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + /* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */ + WL_DBG(("wl_free_wdev Clearing wowlan Config \n")); + wdev->wiphy->wowlan = NULL; + wdev->wiphy->wowlan_config = NULL; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif + wiphy_unregister(wdev->wiphy); + wdev->wiphy->dev.parent = NULL; + wdev->wiphy = NULL; + } + + wl_delete_all_netinfo(cfg); + if (wiphy) + wiphy_free(wiphy); + + /* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg", + * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!! 
+ */ +} + +static s32 wl_inform_bss(struct bcm_cfg80211 *cfg) +{ + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; /* must be initialized */ + s32 err = 0; + s32 i; + + bss_list = cfg->bss_list; + WL_DBG(("scanned AP count (%d)\n", bss_list->count)); + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + err = wl_inform_single_bss(cfg, bi, false); + if (unlikely(err)) + break; + } + return err; +} + +static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam) +{ + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + struct ieee80211_mgmt *mgmt; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *band; + struct wl_cfg80211_bss_info *notif_bss_info; + struct wl_scan_req *sr = wl_to_sr(cfg); + struct beacon_proberesp *beacon_proberesp; + struct cfg80211_bss *cbss = NULL; + s32 mgmt_type; + s32 signal; + u32 freq; + s32 err = 0; + gfp_t aflags; + + if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) { + WL_DBG(("Beacon is larger than buffer. Discarding\n")); + return err; + } + aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt) + - sizeof(u8) + WL_BSS_INFO_MAX, aflags); + if (unlikely(!notif_bss_info)) { + WL_ERR(("notif_bss_info alloc failed\n")); + return -ENOMEM; + } + mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf; + notif_bss_info->channel = + wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); + + if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[NL80211_BAND_2GHZ ]; + else + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (!band) { + WL_ERR(("No valid band")); + kfree(notif_bss_info); + return -EINVAL; + } + notif_bss_info->rssi = wl_rssi_offset(dtoh16(bi->RSSI)); + memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN); + mgmt_type = cfg->active_scan ? + IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON; + if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) { + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type); + } + beacon_proberesp = cfg->active_scan ? 
+ (struct beacon_proberesp *)&mgmt->u.probe_resp :
+ (struct beacon_proberesp *)&mgmt->u.beacon;
+ beacon_proberesp->timestamp = 0;
+ beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+ beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+ wl_rst_ie(cfg);
+ wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+ wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+ wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+ offsetof(struct wl_cfg80211_bss_info, frame_buf));
+ notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable) + wl_get_ielen(cfg);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+ (void)band->band;
+#else
+ freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+ if (freq == 0) {
+ WL_ERR(("Invalid channel, failed to change channel to freq\n"));
+ kfree(notif_bss_info);
+ return -EINVAL;
+ }
+ channel = ieee80211_get_channel(wiphy, freq);
+ if (unlikely(!channel)) {
+ WL_ERR(("ieee80211_get_channel error\n"));
+ kfree(notif_bss_info);
+ return -EINVAL;
+ }
+ WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x%04x, bssid %pM "
+ "mgmt_type %d frame_len %d\n", bi->SSID,
+ notif_bss_info->rssi, notif_bss_info->channel,
+ mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+ notif_bss_info->frame_len));
+
+ signal = notif_bss_info->rssi * 100;
+ if (!mgmt->u.probe_resp.timestamp) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct timespec ts;
+ get_monotonic_boottime(&ts);
+ mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+ + ts.tv_nsec / 1000;
+#else
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+ + tv.tv_usec;
+#endif
+ }
+
+
+ cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
+ if (unlikely(!cbss)) {
+ WL_ERR(("cfg80211_inform_bss_frame error\n"));
+ kfree(notif_bss_info);
+ return -EINVAL;
+ }
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ cfg80211_put_bss(wiphy, cbss);
+#else
+ cfg80211_put_bss(cbss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+ kfree(notif_bss_info);
+ return err;
+}
+
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev)
+{
+ u32 event = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ u16 flags = ntoh16(e->flags);
+
+ WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+ if (event == WLC_E_SET_SSID) {
+ if (status == WLC_E_STATUS_SUCCESS) {
+ if (!wl_is_ibssmode(cfg, ndev))
+ return true;
+ }
+ } else if (event == WLC_E_LINK) {
+ if (flags & WLC_EVENT_MSG_LINK)
+ return true;
+ }
+
+ WL_DBG(("wl_is_linkup false\n"));
+ return false;
+}
+
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+
+ if (event == WLC_E_DEAUTH_IND ||
+ event == WLC_E_DISASSOC_IND ||
+ event == WLC_E_DISASSOC ||
+ event == WLC_E_DEAUTH) {
+#if (WL_DBG_LEVEL > 0)
+ WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+ return true;
+ } else if (event == WLC_E_LINK) {
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+#if (WL_DBG_LEVEL > 0)
+ WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool wl_is_nonetwork(struct
bcm_cfg80211 *cfg, const wl_event_msg_t *e) +{ + u32 event = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + + if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS) + return true; + if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) + return true; + + return false; +} + +/* The mainline kernel >= 3.2.0 has support for indicating new/del station + * to AP/P2P GO via events. If this change is backported to kernel for which + * this driver is being built, then define WL_CFG80211_STA_EVENT. You + * should use this new/del sta event mechanism for BRCM supplicant >= 22. + */ +static s32 +wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + u32 event = ntoh32(e->event_type); + u32 reason = ntoh32(e->reason); + u32 len = ntoh32(e->datalen); + u32 status = ntoh32(e->status); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) + bool isfree = false; + u8 *mgmt_frame; + u8 bsscfgidx = e->bsscfgidx; + s32 freq; + s32 channel; + u8 *body = NULL; + u16 fc = 0; + + struct ieee80211_supported_band *band; + struct ether_addr da; + struct ether_addr bssid; + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + channel_info_t ci; +#else + struct station_info sinfo; +#endif + + WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason)); + /* if link down, bsscfg is disabled. */ + if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS && + wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) { + wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false); + WL_INFORM(("AP mode link down !! \n")); + complete(&cfg->iface_disable); + return 0; + } + + if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) && + (reason == WLC_E_REASON_INITIAL_ASSOC) && + (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) { + if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) { + /* AP/GO brought up successfull in firmware */ + WL_ERR(("** AP/GO Link up event **\n")); + wl_set_drv_status(cfg, AP_CREATED, ndev); + wake_up_interruptible(&cfg->netif_change_event); + return 0; + } + } + + if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) { + WL_ERR(("event %s(%d) status %d reason %d\n", + bcmevent_get_name(event), event, ntoh32(e->status), reason)); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) + WL_DBG(("Enter \n")); + if (!len && (event == WLC_E_DEAUTH)) { + len = 2; /* reason code field */ + data = &reason; + } + if (len) { + body = kzalloc(len, GFP_KERNEL); + + if (body == NULL) { + WL_ERR(("wl_notify_connect_status: Failed to allocate body\n")); + return WL_INVALID; + } + } + memset(&bssid, 0, ETHER_ADDR_LEN); + WL_DBG(("Enter event %d ndev %p\n", event, ndev)); + if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) { + kfree(body); + return WL_INVALID; + } + if (len) + memcpy(body, data, len); + + wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync); + memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN); + err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + switch (event) { + case WLC_E_ASSOC_IND: + fc = FC_ASSOC_REQ; + break; + case WLC_E_REASSOC_IND: + fc = FC_REASSOC_REQ; + break; + case WLC_E_DISASSOC_IND: + fc = FC_DISASSOC; + break; + case WLC_E_DEAUTH_IND: + fc = FC_DISASSOC; + break; + case WLC_E_DEAUTH: + fc = FC_DISASSOC; + break; + default: + fc = 0; + goto exit; 
+ } + if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false))) { + kfree(body); + return err; + } + + channel = dtoh32(ci.hw_channel); + if (channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[NL80211_BAND_2GHZ ]; + else + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (!band) { + WL_ERR(("No valid band")); + if (body) + kfree(body); + return -EINVAL; + } +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(channel); + (void)band->band; +#else + freq = ieee80211_channel_to_frequency(channel, band->band); +#endif + + err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid, + &mgmt_frame, &len, body); + if (err < 0) + goto exit; + isfree = true; + + if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) { +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) && (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 18, 0))) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); + +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len); +#else + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); +#endif + } else if (event == WLC_E_DISASSOC_IND) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); +#endif + } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); +#endif + } + +exit: + if (isfree) + kfree(mgmt_frame); + if (body) + kfree(body); +#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */ + sinfo.filled = 0; + if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) && + reason == DOT11_SC_SUCCESS) { + /* Linux ver >= 4.0 assoc_req_ies_len is used instead of + * STATION_INFO_ASSOC_REQ_IES flag + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) + sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */ + if (!data) { + WL_ERR(("No IEs present in ASSOC/REASSOC_IND")); + return -EINVAL; + } + sinfo.assoc_req_ies = data; + sinfo.assoc_req_ies_len = len; + cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC); + } else if (event == WLC_E_DISASSOC_IND) { + cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); + } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { + cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); + } +#endif + return err; +} + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define MAX_ASSOC_REJECT_ERR_STATUS 5 +int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e) +{ + u32 status = ntoh32(e->status); + + cfg->assoc_reject_status = 0; + + if (status == WLC_E_STATUS_FAIL) { + WL_ERR(("auth assoc status event=%d e->status %d e->reason %d \n", + ntoh32(cfg->event_auth_assoc.event_type), + (int)ntoh32(cfg->event_auth_assoc.status), + (int)ntoh32(cfg->event_auth_assoc.reason))); + + switch ((int)ntoh32(cfg->event_auth_assoc.status)) { + case WLC_E_STATUS_NO_ACK: + cfg->assoc_reject_status = 1; + break; + case WLC_E_STATUS_FAIL: + cfg->assoc_reject_status = 2; + break; + case WLC_E_STATUS_UNSOLICITED: + cfg->assoc_reject_status = 3; + break; + case WLC_E_STATUS_TIMEOUT: + cfg->assoc_reject_status = 4; + break; + case WLC_E_STATUS_ABORT: + cfg->assoc_reject_status = 5; + break; + default: + break; + } 
+ if (cfg->assoc_reject_status) { + if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) { + cfg->assoc_reject_status += MAX_ASSOC_REJECT_ERR_STATUS; + } + } + } + + WL_ERR(("assoc_reject_status %d \n", cfg->assoc_reject_status)); + + return 0; +} + +s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len) +{ + struct bcm_cfg80211 *cfg = NULL; + int bytes_written = 0; + + cfg = g_bcm_cfg; + + if (cfg == NULL) { + return -1; + } + + memset(cmd, 0, total_len); + bytes_written = snprintf(cmd, 30, "assoc_reject.status %d", cfg->assoc_reject_status); + + WL_ERR(("cmd: %s \n", cmd)); + + return bytes_written; +} +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +static s32 +wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e) +{ + u32 reason = ntoh32(e->reason); + u32 event = ntoh32(e->event_type); + struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); + WL_DBG(("event type : %d, reason : %d\n", event, reason)); + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t)); + WL_ERR(("event=%d status %d reason %d \n", + ntoh32(cfg->event_auth_assoc.event_type), + ntoh32(cfg->event_auth_assoc.status), + ntoh32(cfg->event_auth_assoc.reason))); +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + if (sec) { + switch (event) { + case WLC_E_ASSOC: + case WLC_E_AUTH: + sec->auth_assoc_res_status = reason; + default: + break; + } + } else + WL_ERR(("sec is NULL\n")); + return 0; +} + +static s32 +wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + u32 event = ntoh32(e->event_type); + u16 flags = ntoh16(e->flags); + u32 status = ntoh32(e->status); + bool active; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + struct ieee80211_channel *channel = NULL; + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + u32 chanspec, chan; + u32 freq, band; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */ + + if (event == WLC_E_JOIN) { + WL_DBG(("joined in IBSS network\n")); + } + if (event == WLC_E_START) { + WL_DBG(("started IBSS network\n")); + } + if (event == WLC_E_JOIN || event == WLC_E_START || + (event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec); + if (unlikely(err)) { + WL_ERR(("Could not get chanspec %d\n", err)); + return err; + } + chan = wf_chspec_ctlchan(wl_chspec_driver_to_host(chanspec)); + band = (chan <= CH_MAX_2G_CHANNEL) ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ ; + freq = ieee80211_channel_to_frequency(chan, band); + channel = ieee80211_get_channel(wiphy, freq); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */ + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { + /* ROAM or Redundant */ + u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) { + WL_DBG(("IBSS connected event from same BSSID(" + MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid))); + return err; + } + WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n", + MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr))); + wl_get_assoc_ies(cfg, ndev); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); + wl_update_bss_info(cfg, ndev, false); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL); +#else + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL); +#endif + } + else { + /* New connection */ + WL_INFORM(("IBSS connected to " MACDBG "\n", + MAC2STRDBG((const u8 *)&e->addr))); + wl_link_up(cfg); + wl_get_assoc_ies(cfg, ndev); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); + wl_update_bss_info(cfg, ndev, false); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL); +#else + cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL); +#endif + wl_set_drv_status(cfg, CONNECTED, ndev); + active = true; + wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT); + } + } else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) || + event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) { + wl_clr_drv_status(cfg, CONNECTED, ndev); + wl_link_down(cfg); + wl_init_prof(cfg, ndev); + } + else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) { + WL_DBG(("no action - join fail (IBSS mode)\n")); + } + else { + WL_DBG(("no action (IBSS mode)\n")); +} + return err; +} + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define WiFiALL_OUI "\x50\x6F\x9A" /* Wi-FiAll OUI */ +#define WiFiALL_OUI_LEN 3 +#define WiFiALL_OUI_TYPE 16 + +int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, uint8 *mac) +{ + s32 err = 0; + struct wl_bss_info *bi; + uint8 eabuf[ETHER_ADDR_LEN]; + u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0; + char rate_str[4]; + u8 *ie = NULL; + u32 ie_len; + struct wiphy *wiphy; + struct cfg80211_bss *bss; + bcm_tlv_t *interworking_ie = NULL; + bcm_tlv_t *tlv_ie = NULL; + bcm_tlv_t *vht_ie = NULL; + vndr_ie_t *vndrie; + int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1; + u32 i, remained_len, count = 0; + char roam_count_str[4], akm_str[4]; + s32 val = 0; + + /* get BSS information */ + + strncpy(cfg->bss_info, "x x x x x x x x x x x x x", GET_BSS_INFO_LEN); + + *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX); + + err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX, false); + if (unlikely(err)) { + WL_ERR(("Could not get bss info %d\n", err)); + cfg->roam_count = 0; + return -1; + } + + if (!mac) { + WL_ERR(("mac is null \n")); + cfg->roam_count = 0; + return -1; + } + + memcpy(eabuf, mac, ETHER_ADDR_LEN); + + bi = (struct wl_bss_info *)(cfg->extra_buf + 4); + channel = wf_chspec_ctlchan(bi->chanspec); + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(channel); +#else + if (channel > 14) { + freq = ieee80211_channel_to_frequency(channel, 
NL80211_BAND_5GHZ ); + } else { + freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ ); + } +#endif + + err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false); + if (err) { + WL_ERR(("Could not get rate (%d)\n", err)); + snprintf(rate_str, sizeof(rate_str), "x"); // Unknown + + } else { + rate = dtoh32(rate); + snprintf(rate_str, sizeof(rate_str), "%d", (rate/2)); + } + + //supported maximum rate + supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2; + + if (supported_rate < 12) { + mode_80211 = 0; //11b maximum rate is 11Mbps. 11b mode + } else { + //It's not HT Capable case. + if (channel > 14) { + mode_80211 = 3; // 11a mode + } else { + mode_80211 = 1; // 11g mode + } + } + + if (bi->n_cap) { + /* check Rx MCS Map for HT */ + nss = 0; + mode_80211 = 2; + for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) { + int8 bitmap = 0xFF; + if (i == MAX_STREAMS_SUPPORTED-1) { + bitmap = 0x7F; + } + if (bi->basic_mcs[i] & bitmap) { + nss++; + } + } + } + + if (bi->vht_cap) { + nss = 0; + mode_80211 = 4; + for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) { + mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap)); + if (mcs_map != VHT_CAP_MCS_MAP_NONE) { + nss++; + } + } + } + + if (nss) { + nss = nss - 1; + } + + wiphy = bcmcfg_to_wiphy(cfg); + bss = cfg80211_get_bss(wiphy, NULL, eabuf, + bi->SSID, strlen(bi->SSID), WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + + if (!bss) { + WL_ERR(("Could not find the AP\n")); + } else { +#if defined(WL_CFG80211_P2P_DEV_IF) + ie = (u8 *)bss->ies->data; + ie_len = bss->ies->len; +#else + ie = bss->information_elements; + ie_len = bss->len_information_elements; +#endif /* WL_CFG80211_P2P_DEV_IF */ + } + + if (ie) { + ie_mu_mimo_cap = 0; + ie_11u_rel_num = 0; + + if (bi->vht_cap) { + if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len, + DOT11_MNG_VHT_CAP_ID)) != NULL) { + ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3; + } + } + + if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len, + DOT11_MNG_INTERWORKING_ID)) != NULL) { + if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) { + remained_len = ie_len; + + while (tlv_ie) { + if (count > MAX_VNDR_IE_NUMBER) + break; + + if (tlv_ie->id == DOT11_MNG_VS_ID) { + vndrie = (vndr_ie_t *) tlv_ie; + + if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) { + WL_ERR(("%s: invalid vndr ie." 
+ "length is too small %d\n", + __FUNCTION__, vndrie->len)); + break; + } + + if (!bcmp(vndrie->oui, + (u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) && + (vndrie->data[0] == WiFiALL_OUI_TYPE)) + { + WL_ERR(("Found Wi-FiAll OUI oui.\n")); + ie_11u_rel_num = vndrie->data[1]; + ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4; + ie_11u_rel_num += 1; + + break; + } + } + count++; + tlv_ie = bcm_next_tlv(tlv_ie, &remained_len); + } + } + } + } + + for (i = 0; i < bi->SSID_len; i++) { + if (bi->SSID[i] == ' ') { + bi->SSID[i] = '_'; + } + } + + //0 : None, 1 : OKC, 2 : FT, 3 : CCKM + err = wldev_iovar_getint(dev, "wpa_auth", &val); + if (unlikely(err)) { + WL_ERR(("could not get wpa_auth (%d)\n", err)); + snprintf(akm_str, sizeof(akm_str), "x"); // Unknown + } else { + WL_ERR(("wpa_auth val %d \n", val)); +#if defined(BCMEXTCCX) + if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) { + snprintf(akm_str, sizeof(akm_str), "3"); + } else +#endif + if (val & WPA2_AUTH_FT) { + snprintf(akm_str, sizeof(akm_str), "2"); + } else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) { + snprintf(akm_str, sizeof(akm_str), "1"); + } else { + snprintf(akm_str, sizeof(akm_str), "0"); + } + } + + if (cfg->roam_offload) { + snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown + } else { + snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count); + } + cfg->roam_count = 0; + + WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), bi->SSID)); + WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d," + "MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n" + "akm:%s roam:%s \n", + freq, wf_chspec_to_bw_str(bi->chanspec), + dtoh32(bi->RSSI), (rate / 2), mode_80211, nss, + ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise, + akm_str, roam_count_str)); + + if (ie) { + snprintf(cfg->bss_info, GET_BSS_INFO_LEN, + "%02x:%02x:%02x %d %s %d %s %d %d %d %d %d %d %s %s", + eabuf[0], eabuf[1], eabuf[2], + freq, wf_chspec_to_bw_str(bi->chanspec), + dtoh32(bi->RSSI), rate_str, mode_80211, nss, + ie_mu_mimo_cap, ie_11u_rel_num, + bi->SNR, bi->phy_noise, akm_str, roam_count_str); + } else { + //ie_mu_mimo_cap and ie_11u_rel_num is unknow. 
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN, + "%02x:%02x:%02x %d %s %d %s %d %d x x %d %d %s %s", + eabuf[0], eabuf[1], eabuf[2], + freq, wf_chspec_to_bw_str(bi->chanspec), + dtoh32(bi->RSSI), rate_str, mode_80211, nss, + bi->SNR, bi->phy_noise, akm_str, roam_count_str); + } + + + return 0; +} + +s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len) +{ + struct bcm_cfg80211 *cfg = NULL; + + cfg = g_bcm_cfg; + + if (cfg == NULL) { + return -1; + } + + memset(cmd, 0, total_len); + memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN); + + WL_ERR(("cmd: %s \n", cmd)); + + return GET_BSS_INFO_LEN; +} + +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +static s32 +wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + bool act; + struct net_device *ndev = NULL; + s32 err = 0; + u32 event = ntoh32(e->event_type); + struct wiphy *wiphy = NULL; + struct cfg80211_bss *bss = NULL; + struct wlc_ssid *ssid = NULL; + u8 *bssid = 0; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + err = wl_notify_connect_status_ap(cfg, ndev, e, data); + } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) { + err = wl_notify_connect_status_ibss(cfg, ndev, e, data); + } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) { + WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n", + ntoh32(e->event_type), ntoh32(e->status), ndev)); + if (event == WLC_E_ASSOC || event == WLC_E_AUTH) { + wl_get_auth_assoc_status(cfg, ndev, e); + return 0; + } + DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub); + if (wl_is_linkup(cfg, e, ndev)) { + wl_link_up(cfg); + act = true; + if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) { +#ifdef DHD_LOSSLESS_ROAMING + bool is_connected = wl_get_drv_status(cfg, CONNECTED, ndev); +#endif + + WL_ERR(("wl_bss_connect_done succeeded with " MACDBG "\n", + MAC2STRDBG((const u8*)(&e->addr)))); + wl_bss_connect_done(cfg, ndev, e, data, true); + WL_DBG(("joined in BSS network \"%s\"\n", + ((struct wlc_ssid *) + wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID)); +#ifdef DHD_LOSSLESS_ROAMING + if (event == WLC_E_LINK && is_connected && + !cfg->roam_offload) { + wl_bss_roaming_done(cfg, ndev, e, data); + } +#endif /* DHD_LOSSLESS_ROAMING */ + + } + wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); + + } else if (wl_is_linkdown(cfg, e)) { +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif +#ifdef P2PLISTEN_AP_SAMECHN + if (ndev == bcmcfg_to_prmry_ndev(cfg)) { + wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0); + cfg->p2p_resp_apchn_status = false; + WL_DBG(("p2p_resp_apchn_status Turn OFF \n")); + } +#endif /* P2PLISTEN_AP_SAMECHN */ + wl_cfg80211_cancel_scan(cfg); + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { + wl_get_bss_info(cfg, ndev, (u8*)(&e->addr)); + } +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + /* Explicitly calling unlink to remove BSS in CFG */ + wiphy = bcmcfg_to_wiphy(cfg); + ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID); + bssid = (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID); + if (ssid && bssid) { + bss = cfg80211_get_bss(wiphy, NULL, bssid, + ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + if (bss) { + cfg80211_unlink_bss(wiphy, bss); + } + } + + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { + scb_val_t scbval; + u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + s32 
reason = 0;
+ struct ether_addr bssid_dongle;
+ struct ether_addr bssid_null = {{0, 0, 0, 0, 0, 0}};
+
+ if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
+ reason = ntoh32(e->reason);
+ /* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+ reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+ WL_ERR(("link down if %s may call cfg80211_disconnected. "
+ "event : %d, reason=%d from " MACDBG "\n",
+ ndev->name, event, ntoh32(e->reason),
+ MAC2STRDBG((const u8*)(&e->addr))));
+
+ /* roam offload does not sync BSSID always, get it from dongle */
+ if (cfg->roam_offload) {
+ if (wldev_ioctl(ndev, WLC_GET_BSSID, &bssid_dongle,
+ sizeof(bssid_dongle), false) == BCME_OK) {
+ /* if not roam case, it would return null bssid */
+ if (memcmp(&bssid_dongle, &bssid_null,
+ ETHER_ADDR_LEN) != 0) {
+ curbssid = (u8 *)&bssid_dongle;
+ }
+ }
+ }
+ if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+ bool fw_assoc_state = TRUE;
+ dhd_pub_t *dhd = (dhd_pub_t *)cfg->pub;
+ fw_assoc_state = dhd_is_associated(dhd, e->ifidx, &err);
+ if (!fw_assoc_state) {
+ WL_ERR(("Event sent up with a different BSSID"
+ " cur: " MACDBG " event: " MACDBG"\n",
+ MAC2STRDBG(curbssid),
+ MAC2STRDBG((const u8*)(&e->addr))));
+ } else {
+ WL_ERR(("BSSID of event is not the connected BSSID"
+ "(ignore it) cur: " MACDBG
+ " event: " MACDBG"\n",
+ MAC2STRDBG(curbssid),
+ MAC2STRDBG((const u8*)(&e->addr))));
+ return 0;
+ }
+ }
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ if (! wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ /* To make sure of disconnection, explicitly send a disassoc
+ * for the BSSID 00:00:00:00:00:00 issue
+ */
+ scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ err = wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ if (err < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", err));
+ err = 0;
+ }
+ CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
+ false, GFP_KERNEL);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
+ memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
+ }
+ }
+ else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ WL_ERR(("link down, during connecting\n"));
+#ifdef ESCAN_RESULT_PATCH
+ if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+ /* In case this event comes while associating another AP */
+#endif /* ESCAN_RESULT_PATCH */
+ wl_bss_connect_done(cfg, ndev, e, data, false);
+ }
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+
+ /* if link down, bsscfg is disabled */
+ if (ndev != bcmcfg_to_prmry_ndev(cfg))
+ complete(&cfg->iface_disable);
+
+ } else if (wl_is_nonetwork(cfg, e)) {
+ WL_ERR(("connect failed event=%d e->status %d e->reason %d \n",
+ event, (int)ntoh32(e->status), (int)ntoh32(e->reason)));
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (event == WLC_E_SET_SSID) {
+ wl_get_connect_failed_status(cfg, e);
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ /* Clean up any pending scan request */
+ wl_cfg80211_cancel_scan(cfg);
+ if (wl_get_drv_status(cfg, CONNECTING, ndev))
+ wl_bss_connect_done(cfg, ndev, e, data, false);
+ } else {
+ WL_DBG(("%s nothing\n", __FUNCTION__));
+ }
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+ }
+ else {
+ WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
+ }
+ return err;
+}
+
+void wl_cfg80211_set_rmc_pid(int pid)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ if (pid > 0)
+
cfg->rmc_event_pid = pid; + WL_DBG(("set pid for rmc event : pid=%d\n", pid)); +} + +#ifdef WL_RELMCAST +static s32 +wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + u32 evt = ntoh32(e->event_type); + u32 reason = ntoh32(e->reason); + int ret = -1; + + switch (reason) { + case WLC_E_REASON_RMC_AR_LOST: + case WLC_E_REASON_RMC_AR_NO_ACK: + if (cfg->rmc_event_pid != 0) { + ret = wl_netlink_send_msg(cfg->rmc_event_pid, + RMC_EVENT_LEADER_CHECK_FAIL, + cfg->rmc_event_seq++, NULL, 0); + } + break; + default: + break; + } + WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret)); + return ret; +} +#endif /* WL_RELMCAST */ +static s32 +wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + bool act; + struct net_device *ndev = NULL; + s32 err = 0; + u32 event = be32_to_cpu(e->event_type); + u32 status = be32_to_cpu(e->status); +#ifdef DHD_LOSSLESS_ROAMING + struct wl_security *sec; +#endif + WL_DBG(("Enter \n")); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) { + wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false); + cfg->disable_roam_event = TRUE; + } + + if ((cfg->disable_roam_event) && (event == WLC_E_ROAM)) + return err; + + if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) { + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { +#ifdef DHD_LOSSLESS_ROAMING + if (cfg->roam_offload) { + wl_bss_roaming_done(cfg, ndev, e, data); + wl_del_roam_timeout(cfg); + } + else { + sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); + /* In order to reduce roaming delay, wl_bss_roaming_done is + * early called with WLC_E_LINK event. It is called from + * here only if WLC_E_LINK event is blocked for specific + * security type. + */ + if (IS_AKM_SUITE_FT(sec)) { + wl_bss_roaming_done(cfg, ndev, e, data); + } + /* Roam timer is deleted mostly from wl_cfg80211_change_station + * after roaming is finished successfully. 
We need to delete + * the timer from here only for some security types that aren't + * using wl_cfg80211_change_station to authorize SCB + */ + if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) { + wl_del_roam_timeout(cfg); + } + } +#else + wl_bss_roaming_done(cfg, ndev, e, data); +#endif /* DHD_LOSSLESS_ROAMING */ + } else { + wl_bss_connect_done(cfg, ndev, e, data, true); + } + act = true; + wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT); + wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID); + } +#ifdef DHD_LOSSLESS_ROAMING + else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) { + wl_del_roam_timeout(cfg); + } +#endif + return err; +} + +#ifdef QOS_MAP_SET +/* up range from low to high with up value */ +static bool +up_table_set(uint8 *up_table, uint8 up, uint8 low, uint8 high) +{ + int i; + + if (up > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) { + return FALSE; + } + + for (i = low; i <= high; i++) { + up_table[i] = up; + } + + return TRUE; +} + +/* set user priority table */ +static void +wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie) +{ + uint8 len; + + if (up_table == NULL || qos_map_ie == NULL) { + return; + } + + /* clear table to check table was set or not */ + memset(up_table, 0xff, UP_TABLE_MAX); + + /* length of QoS Map IE must be 16+n*2, n is number of exceptions */ + if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID && + (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH && + (len % 2) == 0) { + uint8 *except_ptr = (uint8 *)qos_map_ie->data; + uint8 except_len = len - QOS_MAP_FIXED_LENGTH; + uint8 *range_ptr = except_ptr + except_len; + int i; + + /* fill in ranges */ + for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) { + uint8 low = range_ptr[i]; + uint8 high = range_ptr[i + 1]; + if (low == 255 && high == 255) { + continue; + } + + if (!up_table_set(up_table, i / 2, low, high)) { + /* clear the table on failure */ + memset(up_table, 0xff, UP_TABLE_MAX); + return; + } + } + + /* update exceptions */ + for (i = 0; i < except_len; i += 2) { + uint8 dscp = except_ptr[i]; + uint8 up = except_ptr[i+1]; + + /* exceptions with invalid dscp/up are ignored */ + up_table_set(up_table, up, dscp, dscp); + } + } + + if (wl_dbg_level & WL_DBG_DBG) { + prhex("UP table", up_table, UP_TABLE_MAX); + } +} + +/* get user priority table */ +uint8 * +wl_get_up_table(void) +{ + return (uint8 *)(g_bcm_cfg->up_table); +} +#endif /* QOS_MAP_SET */ + +#ifdef DHD_LOSSLESS_ROAMING +static s32 +wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + struct wl_security *sec; + struct net_device *ndev; + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); + /* Disable Lossless Roaming for specific AKM suite + * Any other AKM suite can be added below if transition time + * is delayed because of Lossless Roaming + * and it causes any certication failure + */ + if (IS_AKM_SUITE_FT(sec)) { + return err; + } + + dhdp->dequeue_prec_map = 1 << PRIO_8021D_NC; + /* Restore flow control */ + dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF); + + mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS)); + + return err; +} +#endif /* DHD_LOSSLESS_ROAMING */ + +static s32 +wl_notify_idsup_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; +#if defined(WL_VENDOR_EXT_SUPPORT) + 
u32 idsup_status; + u32 reason = ntoh32(e->reason); + struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + + if (cfg->roam_offload) { +#if defined(WL_VENDOR_EXT_SUPPORT) + switch (reason) { + case WLC_E_SUP_WPA_PSK_TMO: + idsup_status = IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT; + break; + case WLC_E_SUP_OTHER: + idsup_status = IDSUP_EVENT_SUCCESS; + break; + default: + WL_ERR(("Other type at IDSUP. " + "event=%d e->status %d e->reason %d \n", + (int)ntoh32(e->event_type), (int)ntoh32(e->status), + (int)ntoh32(e->reason))); + return err; + } + + err = wl_cfgvendor_send_async_event(wiphy, ndev, + BRCM_VENDOR_EVENT_IDSUP_STATUS, &idsup_status, sizeof(u32)); +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + } + return err; +} + +#ifdef CUSTOM_EVENT_PM_WAKE +static s32 +wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + struct net_device *ndev = NULL; + u8 *pbuf = NULL; + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + pbuf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL); + if (pbuf == NULL) { + WL_ERR(("failed to allocate local pbuf\n")); + return -ENOMEM; + } + + err = wldev_iovar_getbuf_bsscfg(ndev, "dump", + "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync); + + if (err) { + WL_ERR(("dump ioctl err = %d", err)); + } else { + WL_ERR(("PM status : %s\n", pbuf)); + } + + if (pbuf) { + kfree(pbuf); + } + return err; +} +#endif /* CUSTOM_EVENT_PM_WAKE */ + +static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + wl_assoc_info_t assoc_info; + struct wl_connect_info *conn_info = wl_to_conn(cfg); + s32 err = 0; +#ifdef QOS_MAP_SET + bcm_tlv_t * qos_map_ie = NULL; +#endif /* QOS_MAP_SET */ + + WL_DBG(("Enter \n")); + err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc info (%d)\n", err)); + return err; + } + memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t)); + assoc_info.req_len = htod32(assoc_info.req_len); + assoc_info.resp_len = htod32(assoc_info.resp_len); + assoc_info.flags = htod32(assoc_info.flags); + if (conn_info->req_ie_len) { + conn_info->req_ie_len = 0; + bzero(conn_info->req_ie, sizeof(conn_info->req_ie)); + } + if (conn_info->resp_ie_len) { + conn_info->resp_ie_len = 0; + bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie)); + } + if (assoc_info.req_len) { + err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc req (%d)\n", err)); + return err; + } + conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req); + if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) { + conn_info->req_ie_len -= ETHER_ADDR_LEN; + } + if (conn_info->req_ie_len <= MAX_REQ_LINE) + memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len); + else { + WL_ERR(("IE size %d above max %d size \n", + conn_info->req_ie_len, MAX_REQ_LINE)); + return err; + } + } else { + conn_info->req_ie_len = 0; + } + if (assoc_info.resp_len) { + err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc resp (%d)\n", err)); + return err; + } + conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp); + if (conn_info->resp_ie_len <= MAX_REQ_LINE) { + memcpy(conn_info->resp_ie, 
cfg->extra_buf, conn_info->resp_ie_len); + } else { + WL_ERR(("IE size %d above max %d size \n", + conn_info->resp_ie_len, MAX_REQ_LINE)); + return err; + } + +#ifdef QOS_MAP_SET + /* find qos map set ie */ + if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len, + DOT11_MNG_QOS_MAP_ID)) != NULL) { + WL_DBG((" QoS map set IE found in assoc response\n")); + if (!cfg->up_table) { + cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL); + } + wl_set_up_table(cfg->up_table, qos_map_ie); + } else { + kfree(cfg->up_table); + cfg->up_table = NULL; + } +#endif /* QOS_MAP_SET */ + } else { + conn_info->resp_ie_len = 0; + } + WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len, + conn_info->resp_ie_len)); + + return err; +} + +static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct wl_join_params *join_params, + size_t *join_params_size) +{ + struct bcm_cfg80211 *cfg; + s32 bssidx = -1; + chanspec_t chanspec = 0, chspec; + + if (ch != 0) { + cfg = (struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy); + if (cfg && cfg->rcc_enabled) { + } else { + join_params->params.chanspec_num = 1; + join_params->params.chanspec_list[0] = ch; + + if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + /* Get the min_bw set for the interface */ + chspec = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx); + if (chspec == INVCHANSPEC) { + WL_ERR(("Invalid chanspec \n")); + return -EINVAL; + } + chanspec |= chspec; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE + + join_params->params.chanspec_num * sizeof(chanspec_t); + + join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + join_params->params.chanspec_list[0] |= chanspec; + join_params->params.chanspec_list[0] = + wl_chspec_host_to_driver(join_params->params.chanspec_list[0]); + + join_params->params.chanspec_num = + htod32(join_params->params.chanspec_num); + } + + WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n", + join_params->params.chanspec_list[0], + join_params->params.chanspec_num)); + } + return 0; +} + +static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam) +{ + struct wl_bss_info *bi; + struct wlc_ssid *ssid; + struct bcm_tlv *tim; + s32 beacon_interval; + s32 dtim_period; + size_t ie_len; + u8 *ie; + u8 *curbssid; + s32 err = 0; + struct wiphy *wiphy; + u32 channel; + + wiphy = bcmcfg_to_wiphy(cfg); + + ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID); + curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + + mutex_lock(&cfg->usr_sync); + + *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX); + err = wldev_ioctl(ndev, WLC_GET_BSS_INFO, + cfg->extra_buf, WL_EXTRA_BUF_MAX, false); + if (unlikely(err)) { + WL_ERR(("Could not get bss info %d\n", err)); + goto update_bss_info_out; + } + bi = (struct wl_bss_info *)(cfg->extra_buf + 4); + channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); + wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN); + + if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) { + WL_ERR(("Bssid doesn't match\n")); + err = -EIO; + goto update_bss_info_out; + } + err = wl_inform_single_bss(cfg, bi, roam); + if (unlikely(err)) + goto update_bss_info_out; + + ie = ((u8 *)bi) + bi->ie_offset; + ie_len = bi->ie_length; + beacon_interval = cpu_to_le16(bi->beacon_period); + tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM); + if (tim) { + dtim_period = 
tim->data[1]; + } else { + /* + * active scan was done so we could not get dtim + * information out of probe response. + * so we speficially query dtim information. + */ + err = wldev_ioctl(ndev, WLC_GET_DTIMPRD, + &dtim_period, sizeof(dtim_period), false); + if (unlikely(err)) { + WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err)); + goto update_bss_info_out; + } + } + + wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT); + wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD); + +update_bss_info_out: + if (unlikely(err)) { + WL_ERR(("Failed with error %d\n", err)); + } + mutex_unlock(&cfg->usr_sync); + return err; +} + +static s32 +wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + struct wl_connect_info *conn_info = wl_to_conn(cfg); + s32 err = 0; + u8 *curbssid; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + struct ieee80211_supported_band *band; + struct ieee80211_channel *notify_channel = NULL; + u32 *channel; + u32 freq; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + struct cfg80211_roam_info roam_info = {}; +#endif +#endif + + + if (memcmp(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN) == 0) { + WL_INFORM(("BSSID already updated\n")); + return err; + } + + /* Skip calling cfg80211_roamed If current bssid and + * roamed bssid are same. Also clear timer roam_timeout. + */ + curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) { + WL_ERR(("BSS already present, Skipping roamed event to upper layer\n")); +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif /* DHD_LOSSLESS_ROAMING */ + return err; + } + + wl_get_assoc_ies(cfg, ndev); + wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID); + curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + wl_update_bss_info(cfg, ndev, true); + wl_update_pmklist(ndev, cfg->pmk_list, err); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) + /* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */ + channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN); + if (*channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[NL80211_BAND_2GHZ ]; + else + band = wiphy->bands[NL80211_BAND_5GHZ ]; + freq = ieee80211_channel_to_frequency(*channel, band->band); + notify_channel = ieee80211_get_channel(wiphy, freq); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + roam_info.channel = notify_channel; + roam_info.bssid = curbssid; + roam_info.req_ie = conn_info->req_ie; + roam_info.req_ie_len = conn_info->req_ie_len; + roam_info.resp_ie = conn_info->resp_ie; + roam_info.resp_ie_len = conn_info->resp_ie_len; +#endif +#endif + WL_ERR(("wl_bss_roaming_done succeeded to " MACDBG "\n", + MAC2STRDBG((const u8*)(&e->addr)))); + + cfg80211_roamed(ndev, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + &roam_info, +#else +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) + notify_channel, +#endif + curbssid, + conn_info->req_ie, conn_info->req_ie_len, + conn_info->resp_ie, conn_info->resp_ie_len, +#endif + GFP_KERNEL); + WL_DBG(("Report roaming result\n")); + + memcpy(&cfg->last_roamed_addr, (void *)&e->addr, ETHER_ADDR_LEN); + wl_set_drv_status(cfg, CONNECTED, ndev); + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + cfg->roam_count++; +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + + return err; +} + +static s32 +wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data, 
bool completed) +{ + struct wl_connect_info *conn_info = wl_to_conn(cfg); + struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC); +#if defined(CUSTOM_SET_CPUCORE) + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif + s32 err = 0; + u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + if (!sec) { + WL_ERR(("sec is NULL\n")); + return -ENODEV; + } + WL_DBG((" enter\n")); +#ifdef ESCAN_RESULT_PATCH + if (wl_get_drv_status(cfg, CONNECTED, ndev)) { + if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) { + WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n", + ntoh32(e->event_type), ntoh32(e->status))); + return err; + } + } + if (memcmp(curbssid, broad_bssid, ETHER_ADDR_LEN) == 0 && + memcmp(broad_bssid, connect_req_bssid, ETHER_ADDR_LEN) != 0) { + WL_DBG(("copy bssid\n")); + memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN); + } + +#else + if (cfg->scan_request) { + wl_notify_escan_complete(cfg, ndev, true, true); + } +#endif /* ESCAN_RESULT_PATCH */ + if (wl_get_drv_status(cfg, CONNECTING, ndev)) { + wl_cfg80211_scan_abort(cfg); + wl_clr_drv_status(cfg, CONNECTING, ndev); + if (completed) { + wl_get_assoc_ies(cfg, ndev); + wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), + WL_PROF_BSSID); + curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID); + wl_update_bss_info(cfg, ndev, false); + wl_update_pmklist(ndev, cfg->pmk_list, err); + wl_set_drv_status(cfg, CONNECTED, ndev); + if (ndev != bcmcfg_to_prmry_ndev(cfg)) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) + init_completion(&cfg->iface_disable); +#else + /* reinitialize completion to clear previous count */ + INIT_COMPLETION(cfg->iface_disable); +#endif + } +#ifdef CUSTOM_SET_CPUCORE + if (wl_get_chan_isvht80(ndev, dhd)) { + if (ndev == bcmcfg_to_prmry_ndev(cfg)) + dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */ + else if (is_p2p_group_iface(ndev->ieee80211_ptr)) + dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */ + dhd_set_cpucore(dhd, TRUE); + } +#endif /* CUSTOM_SET_CPUCORE */ + + } + cfg80211_connect_result(ndev, + curbssid, + conn_info->req_ie, + conn_info->req_ie_len, + conn_info->resp_ie, + conn_info->resp_ie_len, + completed ? WLAN_STATUS_SUCCESS : + (sec->auth_assoc_res_status) ? 
+ sec->auth_assoc_res_status : + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + if (completed) + WL_INFORM(("Report connect result - connection succeeded\n")); + else + WL_ERR(("Report connect result - connection failed\n")); + } +#ifdef CONFIG_TCPACK_FASTTX + if (wl_get_chan_isvht80(ndev, dhd)) + wldev_iovar_setint(ndev, "tcpack_fast_tx", 0); + else + wldev_iovar_setint(ndev, "tcpack_fast_tx", 1); +#endif /* CONFIG_TCPACK_FASTTX */ + + return err; +} + +static s32 +wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + struct net_device *ndev = NULL; + u16 flags = ntoh16(e->flags); + enum nl80211_key_type key_type; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + mutex_lock(&cfg->usr_sync); + if (flags & WLC_EVENT_MSG_GROUP) + key_type = NL80211_KEYTYPE_GROUP; + else + key_type = NL80211_KEYTYPE_PAIRWISE; + + cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1, + NULL, GFP_KERNEL); + mutex_unlock(&cfg->usr_sync); + + return 0; +} + +#ifdef BT_WIFI_HANDOVER +static s32 +wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + struct net_device *ndev = NULL; + u32 event = ntoh32(e->event_type); + u32 datalen = ntoh32(e->datalen); + s32 err; + + WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen)); + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0); + + return err; +} +#endif /* BT_WIFI_HANDOVER */ + +#ifdef PNO_SUPPORT +static s32 +wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + struct net_device *ndev = NULL; + + WL_ERR((">>> PNO Event\n")); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + +#ifndef WL_SCHED_SCAN + mutex_lock(&cfg->usr_sync); + /* TODO: Use cfg80211_sched_scan_results(wiphy); */ + CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL); + mutex_unlock(&cfg->usr_sync); +#else + /* If cfg80211 scheduled scan is supported, report the pno results via sched + * scan results + */ + wl_notify_sched_scan_results(cfg, ndev, e, data); +#endif /* WL_SCHED_SCAN */ + return 0; +} +#endif /* PNO_SUPPORT */ + +#ifdef GSCAN_SUPPORT +static s32 +wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + u32 event = be32_to_cpu(e->event_type); + void *ptr; + int send_evt_bytes = 0; + int batch_event_result_dummy = 0; + struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + u32 len = ntoh32(e->datalen); + + switch (event) { + case WLC_E_PFN_SWC: + ptr = dhd_dev_swc_scan_event(ndev, data, &send_evt_bytes); + if (send_evt_bytes) { + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_GSCAN_SIGNIFICANT_EVENT, ptr, send_evt_bytes); + kfree(ptr); + } + break; + case WLC_E_PFN_BEST_BATCHING: + err = dhd_dev_retrieve_batch_scan(ndev); + if (err < 0) { + WL_ERR(("Batch retrieval already in progress %d\n", err)); + } else { + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_GSCAN_BATCH_SCAN_EVENT, + &batch_event_result_dummy, sizeof(int)); + } + break; + case WLC_E_PFN_SCAN_COMPLETE: + batch_event_result_dummy = WIFI_SCAN_COMPLETE; + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_SCAN_COMPLETE_EVENT, + &batch_event_result_dummy, sizeof(int)); + break; + case WLC_E_PFN_BSSID_NET_FOUND: + ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes, + 
HOTLIST_FOUND); + if (ptr) { + wl_cfgvendor_send_hotlist_event(wiphy, ndev, + ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT); + dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND); + } + break; + case WLC_E_PFN_BSSID_NET_LOST: + /* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE + * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore + */ + if (len) { + ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes, + HOTLIST_LOST); + if (ptr) { + wl_cfgvendor_send_hotlist_event(wiphy, ndev, + ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT); + dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST); + } + } + break; + case WLC_E_PFN_GSCAN_FULL_RESULT: + ptr = dhd_dev_process_full_gscan_result(ndev, data, &send_evt_bytes); + if (ptr) { + wl_cfgvendor_send_async_event(wiphy, ndev, + GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes); + kfree(ptr); + } + break; + default: + WL_ERR(("%s: Unexpected event! - %d\n", __FUNCTION__, event)); + + } + return err; +} +#endif /* GSCAN_SUPPORT */ + +static s32 +wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + struct channel_info channel_inform; + struct wl_scan_results *bss_list; + struct net_device *ndev = NULL; + struct cfg80211_scan_info info = {}; + u32 len = WL_SCAN_BUF_MAX; + s32 err = 0; + unsigned long flags; + + WL_DBG(("Enter \n")); + if (!wl_get_drv_status(cfg, SCANNING, ndev)) { + WL_ERR(("scan is not ready \n")); + return err; + } + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + mutex_lock(&cfg->usr_sync); + wl_clr_drv_status(cfg, SCANNING, ndev); + err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform, + sizeof(channel_inform), false); + if (unlikely(err)) { + WL_ERR(("scan busy (%d)\n", err)); + goto scan_done_out; + } + channel_inform.scan_channel = dtoh32(channel_inform.scan_channel); + if (unlikely(channel_inform.scan_channel)) { + + WL_DBG(("channel_inform.scan_channel (%d)\n", + channel_inform.scan_channel)); + } + cfg->bss_list = cfg->scan_results; + bss_list = cfg->bss_list; + memset(bss_list, 0, len); + bss_list->buflen = htod32(len); + err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false); + if (unlikely(err) && unlikely(!cfg->scan_suppressed)) { + WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err)); + err = -EINVAL; + goto scan_done_out; + } + bss_list->buflen = dtoh32(bss_list->buflen); + bss_list->version = dtoh32(bss_list->version); + bss_list->count = dtoh32(bss_list->count); + + err = wl_inform_bss(cfg); + +scan_done_out: + del_timer_sync(&cfg->scan_timeout); + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + if (cfg->scan_request) { + info.aborted = false; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + WL_DBG(("cfg80211_scan_done\n")); + mutex_unlock(&cfg->usr_sync); + return err; +} + +static s32 +wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, + const struct ether_addr *sa, const struct ether_addr *bssid, + u8 **pheader, u32 *body_len, u8 *pbody) +{ + struct dot11_management_header *hdr; + u32 totlen = 0; + s32 err = 0; + u8 *offset; + u32 prebody_len = *body_len; + switch (fc) { + case FC_ASSOC_REQ: + /* capability , listen interval */ + totlen = DOT11_ASSOC_REQ_FIXED_LEN; + *body_len += DOT11_ASSOC_REQ_FIXED_LEN; + break; + + case FC_REASSOC_REQ: + /* capability, listen inteval, ap address */ + totlen = DOT11_REASSOC_REQ_FIXED_LEN; + *body_len += DOT11_REASSOC_REQ_FIXED_LEN; + 
break; + } + totlen += DOT11_MGMT_HDR_LEN + prebody_len; + *pheader = kzalloc(totlen, GFP_KERNEL); + if (*pheader == NULL) { + WL_ERR(("memory alloc failed \n")); + return -ENOMEM; + } + hdr = (struct dot11_management_header *) (*pheader); + hdr->fc = htol16(fc); + hdr->durid = 0; + hdr->seq = 0; + offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len); + bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN); + bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN); + bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN); + if ((pbody != NULL) && prebody_len) + bcopy((const char*)pbody, offset, prebody_len); + *body_len = totlen; + return err; +} + + +void +wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + if (timer_pending(&cfg->p2p->listen_timer)) { + del_timer_sync(&cfg->p2p->listen_timer); + } + if (cfg->afx_hdl != NULL) { + if (cfg->afx_hdl->dev != NULL) { + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev); + } + cfg->afx_hdl->peer_chan = WL_INVALID; + } + complete(&cfg->act_frm_scan); + WL_DBG(("*** Wake UP ** Working afx searching is cleared\n")); + } else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) { + if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) || + wl_get_p2p_status(cfg, ACTION_TX_NOACK))) + wl_set_p2p_status(cfg, ACTION_TX_COMPLETED); + + WL_DBG(("*** Wake UP ** abort actframe iovar\n")); + /* if channel is not zero, "actfame" uses off channel scan. + * So abort scan for off channel completion. + */ + if (cfg->af_sent_channel) + wl_cfg80211_scan_abort(cfg); + } +#ifdef WL_CFG80211_SYNC_GON + else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) { + WL_DBG(("*** Wake UP ** abort listen for next af frame\n")); + /* So abort scan to cancel listen */ + wl_cfg80211_scan_abort(cfg); + } +#endif /* WL_CFG80211_SYNC_GON */ +} + +#if defined(WLTDLS) +bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len) +{ + unsigned char *data; + + if (frame == NULL) { + WL_ERR(("Invalid frame \n")); + return false; + } + + if (frame_len < 5) { + WL_ERR(("Invalid frame length [%d] \n", frame_len)); + return false; + } + + data = frame; + + if (!memcmp(data, TDLS_TUNNELED_PRB_REQ, 5) || + !memcmp(data, TDLS_TUNNELED_PRB_RESP, 5)) { + WL_DBG(("TDLS Vendor Specific Received type\n")); + return true; + } + + return false; +} +#endif /* WLTDLS */ + + +int wl_cfg80211_get_ioctl_version(void) +{ + return ioctl_version; +} + +static s32 +wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + struct ieee80211_supported_band *band; + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + struct ether_addr da; + struct ether_addr bssid; + bool isfree = false; + s32 err = 0; + s32 freq; + struct net_device *ndev = NULL; + wifi_p2p_pub_act_frame_t *act_frm = NULL; + wifi_p2p_action_frame_t *p2p_act_frm = NULL; + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL; +#if defined(WLTDLS) && defined(TDLS_MSG_ONLY_WFD) + dhd_pub_t *dhdp; +#endif /* WLTDLS && TDLS_MSG_ONLY_WFD */ + wl_event_rx_frame_data_t *rxframe = + (wl_event_rx_frame_data_t*)data; + u32 event = ntoh32(e->event_type); + u8 *mgmt_frame; + u8 bsscfgidx = e->bsscfgidx; + u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t); + u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK)); + + memset(&bssid, 0, ETHER_ADDR_LEN); + + ndev = 
cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[NL80211_BAND_2GHZ ]; + else + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (!band) { + WL_ERR(("No valid band")); + return -EINVAL; + } +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(channel); + (void)band->band; +#else + freq = ieee80211_channel_to_frequency(channel, band->band); +#endif + if (event == WLC_E_ACTION_FRAME_RX) { + wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync); + + err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + if (err < 0) + WL_ERR(("WLC_GET_BSSID error %d\n", err)); + memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN); + err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid, + &mgmt_frame, &mgmt_frame_len, + (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1)); + if (err < 0) { + WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n", + mgmt_frame_len, channel, freq)); + goto exit; + } + isfree = true; + if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { + act_frm = (wifi_p2p_pub_act_frame_t *) + (&mgmt_frame[DOT11_MGMT_HDR_LEN]); + } else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { + p2p_act_frm = (wifi_p2p_action_frame_t *) + (&mgmt_frame[DOT11_MGMT_HDR_LEN]); + (void) p2p_act_frm; + } else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { + + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) + (&mgmt_frame[DOT11_MGMT_HDR_LEN]); + if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) { + if (cfg->next_af_subtype == sd_act_frm->action) { + WL_DBG(("We got a right next frame of SD!(%d)\n", + sd_act_frm->action)); + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev); + + /* Stop waiting for next AF. 
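/*
 * Illustrative sketch (not part of the patch): the RX handler above selects
 * the 2.4 GHz or 5 GHz band from the control channel number and then converts
 * the channel to a center frequency (very old kernels' helper took only the
 * channel, hence the version split).  The standard mapping it relies on:
 * 2.4 GHz: f = 2407 + 5 * ch for ch 1..13, ch 14 = 2484; 5 GHz: f = 5000 + 5 * ch.
 */
#include <stdio.h>

static int chan_to_freq_mhz(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan >= 1 && chan <= 13)
		return 2407 + 5 * chan;		/* 2.4 GHz band */
	if (chan >= 36 && chan <= 177)
		return 5000 + 5 * chan;		/* 5 GHz band */
	return -1;				/* not handled in this sketch */
}

int main(void)
{
	printf("ch 6 -> %d MHz, ch 36 -> %d MHz\n",
	       chan_to_freq_mhz(6), chan_to_freq_mhz(36));	/* 2437, 5180 */
	return 0;
}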
*/ + wl_stop_wait_next_action_frame(cfg, ndev); + } + } + (void) sd_act_frm; +#ifdef WLTDLS + } else if ((mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) || + (wl_cfg80211_is_tdls_tunneled_frame( + &mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN))) { + if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) { + WL_ERR((" TDLS Action Frame Received type = %d \n", + mgmt_frame[DOT11_MGMT_HDR_LEN + 1])); + } +#ifdef TDLS_MSG_ONLY_WFD + dhdp = (dhd_pub_t *)(cfg->pub); + if (!dhdp->tdls_mode) { + WL_DBG((" TDLS Frame filtered \n")); + return 0; + } +#else + if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) { + cfg->tdls_mgmt_frame = mgmt_frame; + cfg->tdls_mgmt_frame_len = mgmt_frame_len; + cfg->tdls_mgmt_freq = freq; + return 0; + } +#endif /* TDLS_MSG_ONLY_WFD */ +#endif /* WLTDLS */ +#ifdef QOS_MAP_SET + } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_QOS) { + /* update QoS map set table */ + bcm_tlv_t * qos_map_ie = NULL; + if ((qos_map_ie = bcm_parse_tlvs(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN, + DOT11_MNG_QOS_MAP_ID)) != NULL) { + WL_DBG((" QoS map set IE found in QoS action frame\n")); + if (!cfg->up_table) { + cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL); + } + wl_set_up_table(cfg->up_table, qos_map_ie); + } else { + kfree(cfg->up_table); + cfg->up_table = NULL; + } +#endif /* QOS_MAP_SET */ + } else { + /* + * if we got normal action frame and ndev is p2p0, + * we have to change ndev from p2p0 to wlan0 + */ + + + if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { + u8 action = 0; + if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) { + WL_DBG(("Recived action is not public action frame\n")); + } else if (cfg->next_af_subtype == action) { + WL_DBG(("Recived action is the waiting action(%d)\n", + action)); + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev); + + /* Stop waiting for next AF. */ + wl_stop_wait_next_action_frame(cfg, ndev); + } + } + } + + if (act_frm) { + + if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) { + if (cfg->next_af_subtype == act_frm->subtype) { + WL_DBG(("We got a right next frame!(%d)\n", + act_frm->subtype)); + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev); + + if (cfg->next_af_subtype == P2P_PAF_GON_CONF) { + OSL_SLEEP(20); + } + + /* Stop waiting for next AF. 
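/*
 * Illustrative sketch (not part of the patch): the RX path above strips the
 * 24-byte management header and then branches on the action frame body, whose
 * first octet is the 802.11 category code.  The category values used here are
 * the standard ones (QoS = 1, Public = 4, TDLS = 12); the driver's P2P/GAS
 * checks additionally match OUI and subtype bytes that follow the category.
 */
#include <stdint.h>
#include <stddef.h>

#define MGMT_HDR_LEN 24

enum action_kind { ACT_QOS, ACT_PUBLIC, ACT_TDLS, ACT_OTHER };

static enum action_kind classify_action(const uint8_t *mgmt_frame, size_t len)
{
	if (len <= MGMT_HDR_LEN)
		return ACT_OTHER;
	switch (mgmt_frame[MGMT_HDR_LEN]) {	/* category code */
	case 1:  return ACT_QOS;
	case 4:  return ACT_PUBLIC;		/* P2P public action and GAS live here */
	case 12: return ACT_TDLS;
	default: return ACT_OTHER;
	}
}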
*/ + wl_stop_wait_next_action_frame(cfg, ndev); + } + } + } + + wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN, channel); + /* + * After complete GO Negotiation, roll back to mpc mode + */ + if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) || + (act_frm->subtype == P2P_PAF_PROVDIS_RSP))) { + wldev_iovar_setint(ndev, "mpc", 1); + } + if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) { + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + } + } else if (event == WLC_E_PROBREQ_MSG) { + + /* Handle probe reqs frame + * WPS-AP certification 4.2.13 + */ + struct parsed_ies prbreq_ies; + u32 prbreq_ie_len = 0; + bool pbc = 0; + + WL_DBG((" Event WLC_E_PROBREQ_MSG received\n")); + mgmt_frame = (u8 *)(data); + mgmt_frame_len = ntoh32(e->datalen); + + prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN; + + /* Parse prob_req IEs */ + if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN], + prbreq_ie_len, &prbreq_ies) < 0) { + WL_ERR(("Prob req get IEs failed\n")); + return 0; + } + if (prbreq_ies.wps_ie != NULL) { + wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc); + WL_DBG((" wps_ie exist pbc = %d\n", pbc)); + /* if pbc method, send prob_req mgmt frame to upper layer */ + if (!pbc) + return 0; + } else + return 0; + } else { + mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1); + + /* wpa supplicant use probe request event for restarting another GON Req. + * but it makes GON Req repetition. + * so if src addr of prb req is same as my target device, + * do not send probe request event during sending action frame. + */ + if (event == WLC_E_P2P_PROBREQ_MSG) { + WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ? 
+ "WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG")); + + + /* Filter any P2P probe reqs arriving during the + * GO-NEG Phase + */ + if (cfg->p2p && +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti && +#endif + wl_get_p2p_status(cfg, GO_NEG_PHASE)) { + WL_DBG(("Filtering P2P probe_req while " + "being in GO-Neg state\n")); + return 0; + } + } + } + + if (discover_cfgdev(cfgdev, cfg)) + WL_DBG(("Rx Managment frame For P2P Discovery Interface \n")); + else + WL_DBG(("Rx Managment frame For Iface (%s) \n", ndev->name)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); +#elif(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ + defined(WL_COMPAT_WIRELESS) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); +#endif /* LINUX_VERSION >= VERSION(3, 14, 0) */ + + WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", + mgmt_frame_len, ntoh32(e->datalen), channel, freq)); +exit: + if (isfree) + kfree(mgmt_frame); + return 0; +} + +#ifdef WL_SCHED_SCAN +/* If target scan is not reliable, set the below define to "1" to do a + * full escan + */ +#define FULL_ESCAN_ON_PFN_NET_FOUND 0 +static s32 +wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + wl_pfn_net_info_t *netinfo, *pnetinfo; + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + int err = 0; + struct cfg80211_scan_request *request = NULL; + struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT]; + struct ieee80211_channel *channel = NULL; + int channel_req = 0; + int band = 0; + struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data; + int n_pfn_results = pfn_result->count; + + WL_DBG(("Enter\n")); + + if (e->event_type == WLC_E_PFN_NET_LOST) { + WL_PNO(("PFN NET LOST event. Do Nothing \n")); + return 0; + } + WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results)); + if (n_pfn_results > 0) { + int i; + + if (n_pfn_results > MAX_PFN_LIST_COUNT) + n_pfn_results = MAX_PFN_LIST_COUNT; + pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t) + - sizeof(wl_pfn_net_info_t)); + + memset(&ssid, 0x00, sizeof(ssid)); + + request = kzalloc(sizeof(*request) + + sizeof(*request->channels) * n_pfn_results, + GFP_KERNEL); + channel = (struct ieee80211_channel *)kzalloc( + (sizeof(struct ieee80211_channel) * n_pfn_results), + GFP_KERNEL); + if (!request || !channel) { + WL_ERR(("No memory")); + err = -ENOMEM; + goto out_err; + } + + request->wiphy = wiphy; + + for (i = 0; i < n_pfn_results; i++) { + netinfo = &pnetinfo[i]; + if (!netinfo) { + WL_ERR(("Invalid netinfo ptr. index:%d", i)); + err = -EINVAL; + goto out_err; + } + WL_PNO((">>> SSID:%s Channel:%d \n", + netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel)); + /* PFN result doesn't have all the info which are required by the supplicant + * (For e.g IEs) Do a target Escan so that sched scan results are reported + * via wl_inform_single_bss in the required format. Escan does require the + * scan request in the form of cfg80211_scan_request. For timebeing, create + * cfg80211_scan_request one out of the received PNO event. 
+ */ + ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN, netinfo->pfnsubnet.SSID_len); + memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID, + ssid[i].ssid_len); + request->n_ssids++; + + channel_req = netinfo->pfnsubnet.channel; + band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; + channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band); + channel[i].band = band; + channel[i].flags |= IEEE80211_CHAN_NO_HT40; + request->channels[i] = &channel[i]; + request->n_channels++; + } + + /* assign parsed ssid array */ + if (request->n_ssids) + request->ssids = &ssid[0]; + + if (wl_get_drv_status_all(cfg, SCANNING)) { + /* Abort any on-going scan */ + wl_notify_escan_complete(cfg, ndev, true, true); + } + + if (wl_get_p2p_status(cfg, DISCOVERY_ON)) { + WL_PNO((">>> P2P discovery was ON. Disabling it\n")); + err = wl_cfgp2p_discover_enable_search(cfg, false); + if (unlikely(err)) { + wl_clr_drv_status(cfg, SCANNING, ndev); + goto out_err; + } + p2p_scan(cfg) = false; + } + + wl_set_drv_status(cfg, SCANNING, ndev); +#if FULL_ESCAN_ON_PFN_NET_FOUND + WL_PNO((">>> Doing Full ESCAN on PNO event\n")); + err = wl_do_escan(cfg, wiphy, ndev, NULL); +#else + WL_PNO((">>> Doing targeted ESCAN on PNO event\n")); + err = wl_do_escan(cfg, wiphy, ndev, request); +#endif + if (err) { + wl_clr_drv_status(cfg, SCANNING, ndev); + goto out_err; + } + cfg->sched_scan_running = TRUE; + } + else { + WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n")); + } +out_err: + if (request) + kfree(request); + if (channel) + kfree(channel); + return err; +} +#endif /* WL_SCHED_SCAN */ + +static void wl_init_conf(struct wl_conf *conf) +{ + WL_DBG(("Enter \n")); + conf->frag_threshold = (u32)-1; + conf->rts_threshold = (u32)-1; + conf->retry_short = (u32)-1; + conf->retry_long = (u32)-1; + conf->tx_power = -1; +} + +static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + unsigned long flags; + struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev); + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + memset(profile, 0, sizeof(struct wl_profile)); + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); +} + +static void wl_init_event_handler(struct bcm_cfg80211 *cfg) +{ + memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler)); + + cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status; + cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status; + cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status; + cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame; + cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; + cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; + cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete; + cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete; + cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete; + cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status; + 
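/*
 * Illustrative sketch (not part of the patch): as the comment in
 * wl_notify_sched_scan_results() explains, a PNO "net found" event only
 * carries SSID + channel, so the driver fabricates a targeted scan request
 * from it and runs an escan to obtain the full IEs.  The construction below
 * mirrors that with local types; MAX_RESULTS and the field names are
 * assumptions of the sketch, not the driver's definitions.
 */
#include <stdint.h>
#include <string.h>

#define MAX_RESULTS	16
#define MAX_SSID_LEN	32

struct pfn_net { uint8_t ssid[MAX_SSID_LEN]; uint8_t ssid_len; uint8_t channel; };

struct scan_req {
	int n_ssids, n_channels;
	struct { uint8_t ssid[MAX_SSID_LEN]; uint8_t len; } ssids[MAX_RESULTS];
	int channels[MAX_RESULTS];
};

static void pno_to_scan_req(const struct pfn_net *nets, int n, struct scan_req *req)
{
	memset(req, 0, sizeof(*req));
	if (n > MAX_RESULTS)
		n = MAX_RESULTS;
	for (int i = 0; i < n; i++) {
		uint8_t len = nets[i].ssid_len;

		if (len > MAX_SSID_LEN)
			len = MAX_SSID_LEN;
		memcpy(req->ssids[i].ssid, nets[i].ssid, len);
		req->ssids[i].len = len;
		req->channels[i] = nets[i].channel;	/* one channel per PNO hit */
		req->n_ssids++;
		req->n_channels++;
	}
}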
cfg->evt_handler[WLC_E_START] = wl_notify_connect_status; +#ifdef PNO_SUPPORT + cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status; +#endif /* PNO_SUPPORT */ +#ifdef GSCAN_SUPPORT + cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_SWC] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event; +#endif /* GSCAN_SUPPORT */ +#ifdef WLTDLS + cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler; +#endif /* WLTDLS */ + cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status; +#ifdef WL_RELMCAST + cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status; +#endif +#ifdef BT_WIFI_HANDOVER + cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req; +#endif +#ifdef WL_NAN + cfg->evt_handler[WLC_E_NAN] = wl_cfgnan_notify_nan_status; + cfg->evt_handler[WLC_E_PROXD] = wl_cfgnan_notify_proxd_status; +#endif /* WL_NAN */ + cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind; +#ifdef DHD_LOSSLESS_ROAMING + cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status; +#endif + cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind; +#ifdef CUSTOM_EVENT_PM_WAKE + cfg->evt_handler[WLC_E_EXCESS_PM_WAKE_EVENT] = wl_check_pmstatus; +#endif /* CUSTOM_EVENT_PM_WAKE */ + cfg->evt_handler[WLC_E_PSK_SUP] = wl_notify_idsup_status; +} + +#if defined(STATIC_WL_PRIV_STRUCT) +static void +wl_init_escan_result_buf(struct bcm_cfg80211 *cfg) +{ + cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub, + DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE); + bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE); +} + +static void +wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg) +{ + cfg->escan_info.escan_buf = NULL; + +} +#endif /* STATIC_WL_PRIV_STRUCT */ + +static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg) +{ + WL_DBG(("Enter \n")); + + cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); + if (unlikely(!cfg->scan_results)) { + WL_ERR(("Scan results alloc failed\n")); + goto init_priv_mem_out; + } + cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL); + if (unlikely(!cfg->conf)) { + WL_ERR(("wl_conf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->scan_req_int = + (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL); + if (unlikely(!cfg->scan_req_int)) { + WL_ERR(("Scan req alloc failed\n")); + goto init_priv_mem_out; + } + cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!cfg->ioctl_buf)) { + WL_ERR(("Ioctl buf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!cfg->escan_ioctl_buf)) { + WL_ERR(("Ioctl buf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (unlikely(!cfg->extra_buf)) { + WL_ERR(("Extra buf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL); + if (unlikely(!cfg->pmk_list)) { + WL_ERR(("pmk list alloc failed\n")); + goto init_priv_mem_out; + } +#if defined(STATIC_WL_PRIV_STRUCT) + cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL); + if (unlikely(!cfg->conn_info)) { + WL_ERR(("cfg->conn_info alloc failed\n")); + goto init_priv_mem_out; + } + cfg->ie = (void 
*)kzalloc(sizeof(*cfg->ie), GFP_KERNEL); + if (unlikely(!cfg->ie)) { + WL_ERR(("cfg->ie alloc failed\n")); + goto init_priv_mem_out; + } + wl_init_escan_result_buf(cfg); +#endif /* STATIC_WL_PRIV_STRUCT */ + cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL); + if (unlikely(!cfg->afx_hdl)) { + WL_ERR(("afx hdl alloc failed\n")); + goto init_priv_mem_out; + } else { + init_completion(&cfg->act_frm_scan); + init_completion(&cfg->wait_next_af); + + INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler); + } +#ifdef WLTDLS + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + } +#endif /* WLTDLS */ + return 0; + +init_priv_mem_out: + wl_deinit_priv_mem(cfg); + + return -ENOMEM; +} + +static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg) +{ + kfree(cfg->scan_results); + cfg->scan_results = NULL; + kfree(cfg->conf); + cfg->conf = NULL; + kfree(cfg->scan_req_int); + cfg->scan_req_int = NULL; + kfree(cfg->ioctl_buf); + cfg->ioctl_buf = NULL; + kfree(cfg->escan_ioctl_buf); + cfg->escan_ioctl_buf = NULL; + kfree(cfg->extra_buf); + cfg->extra_buf = NULL; + kfree(cfg->pmk_list); + cfg->pmk_list = NULL; +#if defined(STATIC_WL_PRIV_STRUCT) + kfree(cfg->conn_info); + cfg->conn_info = NULL; + kfree(cfg->ie); + cfg->ie = NULL; + wl_deinit_escan_result_buf(cfg); +#endif /* STATIC_WL_PRIV_STRUCT */ + if (cfg->afx_hdl) { + cancel_work_sync(&cfg->afx_hdl->work); + kfree(cfg->afx_hdl); + cfg->afx_hdl = NULL; + } + +} + +static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg) +{ + int ret = 0; + WL_DBG(("Enter \n")); + + /* Do not use DHD in cfg driver */ + cfg->event_tsk.thr_pid = -1; + + PROC_START(wl_event_handler, cfg, &cfg->event_tsk, 0, "wl_event_handler"); + if (cfg->event_tsk.thr_pid < 0) + ret = -ENOMEM; + return ret; +} + +static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg) +{ + if (cfg->event_tsk.thr_pid >= 0) + PROC_STOP(&cfg->event_tsk); +} + +void wl_terminate_event_handler(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (cfg) { + wl_destroy_event_handler(cfg); + wl_flush_eq(cfg); + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_scan_timeout(struct timer_list *t) +#else +static void wl_scan_timeout(unsigned long data) +#endif +{ + wl_event_msg_t msg; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct bcm_cfg80211 *cfg = from_timer(cfg, t, scan_timeout); +#else + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; +#endif + struct wireless_dev *wdev = NULL; + struct net_device *ndev = NULL; + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; + s32 i; + u32 channel; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + uint32 prev_memdump_mode = dhdp->memdump_enabled; +#endif /* DHD_DEBUG && BCMPCIE */ + + if (!(cfg->scan_request)) { + WL_ERR(("timer expired but no scan request\n")); + return; + } + + bss_list = wl_escan_get_buf(cfg, FALSE); + if (!bss_list) { + WL_ERR(("bss_list is null. 
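/*
 * Illustrative sketch (not part of the patch): wl_init_priv_mem() relies on
 * the common kernel idiom of allocating every private buffer up front and
 * jumping to a single error label on the first failure.  The teardown routine
 * can then free unconditionally, because kfree(NULL)/free(NULL) is a no-op
 * and each pointer is cleared after it is freed.
 */
#include <stdlib.h>

struct priv { void *scan_buf; void *ioctl_buf; void *extra_buf; };

static void deinit_priv(struct priv *p)
{
	free(p->scan_buf);  p->scan_buf  = NULL;
	free(p->ioctl_buf); p->ioctl_buf = NULL;
	free(p->extra_buf); p->extra_buf = NULL;
}

static int init_priv(struct priv *p)
{
	if (!(p->scan_buf  = calloc(1, 65536)))
		goto out_err;
	if (!(p->ioctl_buf = calloc(1, 8192)))
		goto out_err;
	if (!(p->extra_buf = calloc(1, 2048)))
		goto out_err;
	return 0;

out_err:
	deinit_priv(p);		/* frees whatever subset was allocated */
	return -1;		/* -ENOMEM in the driver */
}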
Didn't receive any partial scan results\n")); + } else { + WL_ERR(("scanned AP count (%d)\n", bss_list->count)); + + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); + WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel)); + } + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + if (cfg->scan_request->dev) + wdev = cfg->scan_request->dev->ieee80211_ptr; +#else + wdev = cfg->scan_request->wdev; +#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */ + if (!wdev) { + WL_ERR(("No wireless_dev present\n")); + return; + } + ndev = wdev_to_wlc_ndev(wdev, cfg); + + bzero(&msg, sizeof(wl_event_msg_t)); + WL_ERR(("timer expired\n")); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_enabled = DUMP_MEMFILE; + dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT; + dhd_bus_mem_dump(dhdp); + dhdp->memdump_enabled = prev_memdump_mode; + } +#endif /* DHD_DEBUG && BCMPCIE */ + msg.event_type = hton32(WLC_E_ESCAN_RESULT); + msg.status = hton32(WLC_E_STATUS_TIMEOUT); + msg.reason = 0xFFFFFFFF; + wl_cfg80211_event(ndev, &msg, NULL); +#ifdef CUSTOMER_HW4_DEBUG + if (!wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_set(); +#endif /* CUSTOMER_HW4_DEBUG */ +} + +#ifdef DHD_LOSSLESS_ROAMING +static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + /* restore prec_map to ALLPRIO */ + dhdp->dequeue_prec_map = ALLPRIO; + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } + +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_roam_timeout(struct timer_list *t) +#else +static void wl_roam_timeout(unsigned long data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct bcm_cfg80211 *cfg = from_timer(cfg, t, roam_timeout); +#else + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; +#endif + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("roam timer expired\n")); + + /* restore prec_map to ALLPRIO */ + dhdp->dequeue_prec_map = ALLPRIO; +} + +#endif /* DHD_LOSSLESS_ROAMING */ + +static s32 +wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, + unsigned long state, void *ptr) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) + struct net_device *dev = ptr; +#else + struct net_device *dev = netdev_notifier_info_to_dev(ptr); +#endif /* LINUX_VERSION < VERSION(3, 11, 0) */ + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct bcm_cfg80211 *cfg = g_bcm_cfg; + +#ifdef DHD_IFDEBUG + WL_ERR(("Enter \n")); +#endif + + if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg)) + return NOTIFY_DONE; + + switch (state) { + case NETDEV_DOWN: + { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) + int max_wait_timeout = 2; + int max_wait_count = 100; + int refcnt = 0; + unsigned long limit = jiffies + max_wait_timeout * HZ; +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_DOWN(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif + while (work_pending(&wdev->cleanup_work)) { + if (refcnt%5 == 0) { + WL_ERR(("[NETDEV_DOWN] wait for " + "complete of cleanup_work" + " (%d th)\n", refcnt)); + } + if (!time_before(jiffies, limit)) { + WL_ERR(("[NETDEV_DOWN] cleanup_work" + " of CFG80211 is not" + " completed in %d sec\n", + max_wait_timeout)); + break; + } + if (refcnt >= max_wait_count) { + WL_ERR(("[NETDEV_DOWN] cleanup_work" + " of CFG80211 is not" + " completed in %d loop\n", + max_wait_count)); + break; + } + 
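/*
 * Illustrative sketch (not part of the patch): on kernels >= 4.15 the timer
 * callback receives a pointer to the struct timer_list itself, and the driver
 * recovers its bcm_cfg80211 with from_timer(), which is container_of(): it
 * subtracts the member's offset to get back to the enclosing structure.
 * Older kernels instead stash the pointer in timer->data, hence the #if split
 * in wl_scan_timeout()/wl_roam_timeout() above.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_timer { void (*fn)(struct my_timer *t); };

struct my_cfg {
	int scan_pending;
	struct my_timer scan_timeout;	/* embedded member, like cfg->scan_timeout */
};

static void scan_timeout_cb(struct my_timer *t)
{
	struct my_cfg *cfg = container_of_sketch(t, struct my_cfg, scan_timeout);

	cfg->scan_pending = 0;
}

int main(void)
{
	struct my_cfg cfg = { .scan_pending = 1 };

	scan_timeout_cb(&cfg.scan_timeout);	/* callback sees only the timer */
	printf("scan_pending=%d\n", cfg.scan_pending);	/* prints 0 */
	return 0;
}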
set_current_state(TASK_INTERRUPTIBLE); + (void)schedule_timeout(100); + set_current_state(TASK_RUNNING); + refcnt++; + } +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_DOWN(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif +#endif /* LINUX_VERSION < VERSION(3, 14, 0) */ + break; + } + case NETDEV_UNREGISTER: +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_UNREGISTER(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif + /* after calling list_del_rcu(&wdev->list) */ + wl_cfg80211_clear_per_bss_ies(cfg, + wl_get_bssidx_by_wdev(cfg, wdev)); + wl_dealloc_netinfo_by_wdev(cfg, wdev); +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_UNREGISTER(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif + break; + case NETDEV_GOING_DOWN: + /* + * At NETDEV_DOWN state, wdev_cleanup_work work will be called. + * In front of door, the function checks whether current scan + * is working or not. If the scanning is still working, + * wdev_cleanup_work call WARN_ON and make the scan done forcibly. + */ +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_GOING_DOWN wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif + if (wl_get_drv_status(cfg, SCANNING, dev)) + wl_notify_escan_complete(cfg, dev, true, true); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block wl_cfg80211_netdev_notifier = { + .notifier_call = wl_cfg80211_netdev_notifier_call, +}; + +/* + * to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool wl_cfg80211_netdev_notifier_registered = FALSE; + +static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev = NULL; + struct net_device *ndev = NULL; + + if (!cfg->scan_request) + return; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + if (cfg->scan_request->dev) + wdev = cfg->scan_request->dev->ieee80211_ptr; +#else + wdev = cfg->scan_request->wdev; +#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */ + + if (!wdev) { + WL_ERR(("No wireless_dev present\n")); + return; + } + + ndev = wdev_to_wlc_ndev(wdev, cfg); + wl_notify_escan_complete(cfg, ndev, true, true); + WL_ERR(("Scan aborted! 
\n")); +} + +static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg) +{ + wl_scan_params_t *params = NULL; + s32 params_size = 0; + s32 err = BCME_OK; + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + if (!in_atomic()) { + /* Our scan params only need space for 1 channel and 0 ssids */ + params = wl_cfg80211_scan_alloc_params(-1, 0, ¶ms_size); + if (params == NULL) { + WL_ERR(("scan params allocation failed \n")); + err = -ENOMEM; + } else { + /* Do a scan abort to stop the driver's scan engine */ + err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true); + if (err < 0) { + WL_ERR(("scan abort failed \n")); + } + kfree(params); + } + } +#ifdef WLTDLS + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + } +#endif /* WLTDLS */ +} + +static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, + struct net_device *ndev, + bool aborted, bool fw_abort) +{ + s32 err = BCME_OK; + unsigned long flags; + struct net_device *dev; + struct cfg80211_scan_info info = {}; + WL_DBG(("Enter \n")); + + mutex_lock(&cfg->scan_complete); + + if (!ndev) { + WL_ERR(("ndev is null\n")); + err = BCME_ERROR; + goto out; + } + + if (cfg->escan_info.ndev != ndev) { + WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev)); + err = BCME_ERROR; + goto out; + } + + if (cfg->scan_request) { + dev = bcmcfg_to_prmry_ndev(cfg); +#if defined(WL_ENABLE_P2P_IF) + if (cfg->scan_request->dev != cfg->p2p_net) + dev = cfg->scan_request->dev; +#elif defined(WL_CFG80211_P2P_DEV_IF) + if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) { +#ifdef DHD_IFDEBUG + WL_ERR(("%s: dev: %p\n", __FUNCTION__, cfg->scan_request->wdev->netdev)); +#endif + dev = cfg->scan_request->wdev->netdev; + } +#endif /* WL_ENABLE_P2P_IF */ + } + else { + WL_DBG(("cfg->scan_request is NULL may be internal scan." 
+ "doing scan_abort for ndev %p primary %p", + ndev, bcmcfg_to_prmry_ndev(cfg))); + dev = ndev; + } + if (fw_abort && !in_atomic()) + wl_cfg80211_scan_abort(cfg); + if (timer_pending(&cfg->scan_timeout)) + del_timer_sync(&cfg->scan_timeout); +#if defined(ESCAN_RESULT_PATCH) + if (likely(cfg->scan_request)) { + cfg->bss_list = wl_escan_get_buf(cfg, aborted); + wl_inform_bss(cfg); + } +#endif /* ESCAN_RESULT_PATCH */ + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); +#ifdef WL_SCHED_SCAN + if (cfg->sched_scan_req && !cfg->scan_request) { + WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n")); + if (!aborted) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy, 0); +#else + cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy); +#endif + + cfg->sched_scan_running = FALSE; + cfg->sched_scan_req = NULL; + } +#endif /* WL_SCHED_SCAN */ + if (likely(cfg->scan_request)) { + info.aborted = aborted; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); + DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub)); + } + if (p2p_is_on(cfg)) + wl_clr_p2p_status(cfg, SCANNING); + wl_clr_drv_status(cfg, SCANNING, dev); + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + +out: + mutex_unlock(&cfg->scan_complete); + return err; +} + +#ifdef ESCAN_BUF_OVERFLOW_MGMT +static void +wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate) +{ + int idx; + for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) { + int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1; + if (bss->RSSI < candidate[idx].RSSI) { + if (len) + memcpy(&candidate[idx + 1], &candidate[idx], + sizeof(removal_element_t) * len); + candidate[idx].RSSI = bss->RSSI; + candidate[idx].length = bss->length; + memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN); + return; + } + } +} + +static void +wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate, + wl_bss_info_t *bi) +{ + int idx1, idx2; + int total_delete_len = 0; + for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) { + int cur_len = WL_SCAN_RESULTS_FIXED_SIZE; + wl_bss_info_t *bss = NULL; + if (candidate[idx1].RSSI >= bi->RSSI) + continue; + for (idx2 = 0; idx2 < list->count; idx2++) { + bss = bss ? 
(wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) : + list->bss_info; + if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + candidate[idx1].RSSI == bss->RSSI && + candidate[idx1].length == dtoh32(bss->length)) { + u32 delete_len = dtoh32(bss->length); + WL_DBG(("delete scan info of " MACDBG " to add new AP\n", + MAC2STRDBG(bss->BSSID.octet))); + if (idx2 < list->count -1) { + memmove((u8 *)bss, (u8 *)bss + delete_len, + list->buflen - cur_len - delete_len); + } + list->buflen -= delete_len; + list->count--; + total_delete_len += delete_len; + /* if delete_len is greater than or equal to result length */ + if (total_delete_len >= bi->length) { + return; + } + break; + } + cur_len += dtoh32(bss->length); + } + } +} +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + +static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = BCME_OK; + s32 status = ntoh32(e->status); + wl_bss_info_t *bi; + wl_escan_result_t *escan_result; + wl_bss_info_t *bss = NULL; + wl_scan_results_t *list; + wifi_p2p_ie_t * p2p_ie; + struct net_device *ndev = NULL; + u32 bi_length; + u32 i; + u8 *p2p_dev_addr = NULL; + + WL_DBG((" enter event type : %d, status : %d \n", + ntoh32(e->event_type), ntoh32(e->status))); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + mutex_lock(&cfg->usr_sync); + /* P2P SCAN is coming from primary interface */ + if (wl_get_p2p_status(cfg, SCANNING)) { + if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) + ndev = cfg->afx_hdl->dev; + else + ndev = cfg->escan_info.ndev; + + } + if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) { + WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n", + ndev, wl_get_drv_status(cfg, SCANNING, ndev), + ntoh32(e->event_type), ntoh32(e->status))); + goto exit; + } + escan_result = (wl_escan_result_t *)data; + + if (status == WLC_E_STATUS_PARTIAL) { + WL_INFORM(("WLC_E_STATUS_PARTIAL \n")); + if (!escan_result) { + WL_ERR(("Invalid escan result (NULL pointer)\n")); + goto exit; + } + if (dtoh16(escan_result->bss_count) != 1) { + WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count)); + goto exit; + } + bi = escan_result->bss_info; + if (!bi) { + WL_ERR(("Invalid escan bss info (NULL pointer)\n")); + goto exit; + } + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { + WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + if (wl_escan_check_sync_id(status, escan_result->sync_id, + cfg->escan_info.cur_sync_id) < 0) + goto exit; + + if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) { + if (dtoh16(bi->capability) & DOT11_CAP_IBSS) { + WL_DBG(("Ignoring IBSS result\n")); + goto exit; + } + } + + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length); + if (p2p_dev_addr && !memcmp(p2p_dev_addr, + cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) { + s32 channel = wf_chspec_ctlchan( + wl_chspec_driver_to_host(bi->chanspec)); + + if ((channel > MAXCHANNEL) || (channel <= 0)) + channel = WL_INVALID; + else + WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found," + " channel : %d\n", + MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet), + channel)); + + wl_clr_p2p_status(cfg, SCANNING); + cfg->afx_hdl->peer_chan = channel; + complete(&cfg->act_frm_scan); + goto exit; + } + + } else { + int cur_len = WL_SCAN_RESULTS_FIXED_SIZE; +#ifdef 
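/*
 * Illustrative sketch (not part of the patch): when the escan buffer is about
 * to overflow, the driver first walks the existing results and keeps a small
 * array, ascending by RSSI, of the weakest entries (insert-and-shift, with
 * anything pushed past the end dropped), then evicts entries weaker than the
 * newly arrived AP.  The array size stands in for BUF_OVERFLOW_MGMT_COUNT.
 * Slots start zeroed; real RSSI values are negative, so they always insert
 * ahead of unused slots.
 */
#include <string.h>

#define CANDIDATES 3

struct candidate { int rssi; int index; };	/* index of the entry in the result list */

static void consider_candidate(struct candidate cand[CANDIDATES], int rssi, int index)
{
	for (int i = 0; i < CANDIDATES; i++) {
		if (rssi < cand[i].rssi) {
			/* shift weaker slots down, dropping the last one */
			memmove(&cand[i + 1], &cand[i],
				(CANDIDATES - i - 1) * sizeof(cand[0]));
			cand[i].rssi = rssi;
			cand[i].index = index;
			return;
		}
	}
}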
ESCAN_BUF_OVERFLOW_MGMT + removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT]; + int remove_lower_rssi = FALSE; + + bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT); +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + list = wl_escan_get_buf(cfg, FALSE); + if (scan_req_match(cfg)) { + /* p2p scan && allow only probe response */ + if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) && + (bi->flags & WL_BSS_FLAGS_FROM_BEACON)) + goto exit; + if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, + bi->ie_length)) == NULL) { + WL_ERR(("Couldn't find P2PIE in probe" + " response/beacon\n")); + goto exit; + } + } +#ifdef ESCAN_BUF_OVERFLOW_MGMT + if (bi_length > ESCAN_BUF_SIZE - list->buflen) + remove_lower_rssi = TRUE; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + for (i = 0; i < list->count; i++) { + bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) + : list->bss_info; +#ifdef ESCAN_BUF_OVERFLOW_MGMT + WL_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n", + bss->SSID, MAC2STRDBG(bss->BSSID.octet), + i, bss->RSSI, list->count)); + + if (remove_lower_rssi) + wl_cfg80211_find_removal_candidate(bss, candidate); +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec)) + == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) && + bi->SSID_len == bss->SSID_len && + !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) { + + /* do not allow beacon data to update + *the data recd from a probe response + */ + if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) && + (bi->flags & WL_BSS_FLAGS_FROM_BEACON)) + goto exit; + + WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d" + " flags 0x%x, new: RSSI %d flags 0x%x\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), i, + bss->RSSI, bss->flags, bi->RSSI, bi->flags)); + + if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == + (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) { + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + WL_SCAN(("%s("MACDBG"), same onchan" + ", RSSI: prev %d new %d\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + bss->RSSI, bi->RSSI)); + bi->RSSI = MAX(bss->RSSI, bi->RSSI); + } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) && + (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + WL_SCAN(("%s("MACDBG"), prev onchan" + ", RSSI: prev %d new %d\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + bss->RSSI, bi->RSSI)); + bi->RSSI = bss->RSSI; + bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL; + } + if (dtoh32(bss->length) != bi_length) { + u32 prev_len = dtoh32(bss->length); + + WL_SCAN(("bss info replacement" + " is occured(bcast:%d->probresp%d)\n", + bss->ie_length, bi->ie_length)); + WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + prev_len, bi_length)); + + if (list->buflen - prev_len + bi_length + > ESCAN_BUF_SIZE) { + WL_ERR(("Buffer is too small: keep the" + " previous result of this AP\n")); + /* Only update RSSI */ + bss->RSSI = bi->RSSI; + bss->flags |= (bi->flags + & WL_BSS_FLAGS_RSSI_ONCHANNEL); + goto exit; + } + + if (i < list->count - 1) { + /* memory copy required by this case only */ + memmove((u8 *)bss + bi_length, + (u8 *)bss + prev_len, + list->buflen - cur_len - prev_len); + } + list->buflen -= prev_len; + list->buflen += bi_length; + } + list->version = dtoh32(bi->version); + memcpy((u8 *)bss, (u8 *)bi, bi_length); + goto exit; + } + cur_len += 
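/*
 * Illustrative sketch (not part of the patch): escan results accumulate in
 * one packed buffer of variable-length records.  When a BSS is seen again
 * with a different record length, the tail of the buffer is shifted with
 * memmove so the new record can overwrite the old slot in place; otherwise
 * the record is simply appended.  Record layout and sizes are simplified
 * stand-ins for wl_bss_info_t and the escan buffer.
 */
#include <stdint.h>
#include <string.h>

struct result_buf { uint32_t buflen; uint32_t count; uint8_t data[4096]; };

/* replace the record at 'off' (old_len bytes) with 'new_rec' (new_len bytes) */
static int replace_record(struct result_buf *b, uint32_t off, uint32_t old_len,
			  const void *new_rec, uint32_t new_len)
{
	if (b->buflen - old_len + new_len > sizeof(b->data))
		return -1;			/* too small: keep the previous result */
	memmove(b->data + off + new_len,	/* shift the tail up or down */
		b->data + off + old_len,
		b->buflen - off - old_len);
	memcpy(b->data + off, new_rec, new_len);
	b->buflen = b->buflen - old_len + new_len;
	return 0;
}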
dtoh32(bss->length); + } + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { +#ifdef ESCAN_BUF_OVERFLOW_MGMT + wl_cfg80211_remove_lowRSSI_info(list, candidate, bi); + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { + WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n", + MAC2STRDBG(bi->BSSID.octet), bi->RSSI)); + goto exit; + } +#else + WL_ERR(("Buffer is too small: ignoring\n")); + goto exit; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + } + + memcpy(&(((char *)list)[list->buflen]), bi, bi_length); + list->version = dtoh32(bi->version); + list->buflen += bi_length; + list->count++; + + /* + * !Broadcast && number of ssid = 1 && number of channels =1 + * means specific scan to association + */ + if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) { + WL_ERR(("P2P assoc scan fast aborted.\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true); + goto exit; + } + } + } + else if (status == WLC_E_STATUS_SUCCESS) { + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id, + escan_result->sync_id); + + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_INFORM(("ACTION FRAME SCAN DONE\n")); + wl_clr_p2p_status(cfg, SCANNING); + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + if (cfg->afx_hdl->peer_chan == WL_INVALID) + complete(&cfg->act_frm_scan); + } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) { + WL_INFORM(("ESCAN COMPLETED\n")); + cfg->bss_list = wl_escan_get_buf(cfg, FALSE); + if (!scan_req_match(cfg)) { + WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n", + cfg->bss_list->count)); + } + wl_inform_bss(cfg); + wl_notify_escan_complete(cfg, ndev, false, false); + } + wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT); +#ifdef CUSTOMER_HW4_DEBUG + if (wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_clear(); +#endif /* CUSTOMER_HW4_DEBUG */ + } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) || + (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) || + (status == WLC_E_STATUS_NEWASSOC)) { + /* Handle all cases of scan abort */ + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_print_sync_id(status, escan_result->sync_id, + cfg->escan_info.cur_sync_id); + WL_DBG(("ESCAN ABORT reason: %d\n", status)); + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_INFORM(("ACTION FRAME SCAN DONE\n")); + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + wl_clr_p2p_status(cfg, SCANNING); + if (cfg->afx_hdl->peer_chan == WL_INVALID) + complete(&cfg->act_frm_scan); + } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) { + WL_INFORM(("ESCAN ABORTED\n")); + cfg->bss_list = wl_escan_get_buf(cfg, TRUE); + if (!scan_req_match(cfg)) { + WL_TRACE_HW4(("scan_req_match=0: scanned AP count=%d\n", + cfg->bss_list->count)); + } + wl_inform_bss(cfg); + wl_notify_escan_complete(cfg, ndev, true, false); + } else { + /* If there is no pending host initiated scan, do nothing */ + WL_DBG(("ESCAN ABORT: No pending scans. 
Ignoring event.\n")); + } + wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT); + } else if (status == WLC_E_STATUS_TIMEOUT) { + WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request)); + WL_ERR(("reason[0x%x]\n", e->reason)); + if (e->reason == 0xFFFFFFFF) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + } else { + WL_ERR(("unexpected Escan Event %d : abort\n", status)); + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_print_sync_id(status, escan_result->sync_id, + cfg->escan_info.cur_sync_id); + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_INFORM(("ACTION FRAME SCAN DONE\n")); + wl_clr_p2p_status(cfg, SCANNING); + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + if (cfg->afx_hdl->peer_chan == WL_INVALID) + complete(&cfg->act_frm_scan); + } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) { + cfg->bss_list = wl_escan_get_buf(cfg, TRUE); + if (!scan_req_match(cfg)) { + WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): " + "scanned AP count=%d\n", + cfg->bss_list->count)); + } + wl_inform_bss(cfg); + wl_notify_escan_complete(cfg, ndev, true, false); + } + wl_escan_increment_sync_id(cfg, 2); + } +exit: + mutex_unlock(&cfg->usr_sync); + return err; +} + +static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable) +{ + u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED); + bool p2p_connected = wl_cfgp2p_vif_created(cfg); + struct net_info *iter, *next; + + if (!cfg->roamoff_on_concurrent) + return; + if (enable && (p2p_connected||(connected_cnt > 1))) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev && iter->wdev && + iter->wdev->iftype == NL80211_IFTYPE_STATION) { + if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE) + == BCME_OK) { + iter->roam_off = TRUE; + } + else { + WL_ERR(("error to enable roam_off\n")); + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + } + else if (!enable) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev && iter->wdev && + iter->wdev->iftype == NL80211_IFTYPE_STATION) { + if (iter->roam_off != WL_INVALID) { + if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE) + == BCME_OK) { + iter->roam_off = FALSE; + } + else { + WL_ERR(("error to disable roam_off\n")); + } + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + } + return; +} + +static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg) +{ + struct net_info *iter, *next; + u32 ctl_chan = 0; + u32 chanspec = 0; + u32 pre_ctl_chan = 0; + u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED); + cfg->vsdb_mode = false; + + if (connected_cnt <= 1) { + return; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev 
could be null */ + if (iter->ndev) { + chanspec = 0; + ctl_chan = 0; + if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) { + if (wldev_iovar_getint(iter->ndev, "chanspec", + (s32 *)&chanspec) == BCME_OK) { + chanspec = wl_chspec_driver_to_host(chanspec); + ctl_chan = wf_chspec_ctlchan(chanspec); + wl_update_prof(cfg, iter->ndev, NULL, + &ctl_chan, WL_PROF_CHAN); + } + if (!cfg->vsdb_mode) { + if (!pre_ctl_chan && ctl_chan) + pre_ctl_chan = ctl_chan; + else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) { + cfg->vsdb_mode = true; + } + } + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + WL_ERR(("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel")); + return; +} + +#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) +extern int g_frameburst; +#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ + +static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info, + enum wl_status state, bool set) +{ + s32 pm = PM_FAST; + s32 err = BCME_OK; + u32 mode; + u32 chan = 0; + struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__)); + return 0; + } + WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n", + state, set, _net_info->pm_restore, _net_info->ndev->name)); + + if (state != WL_STATUS_CONNECTED) + return 0; + mode = wl_get_mode_by_netdev(cfg, _net_info->ndev); + if (set) { + wl_cfg80211_concurrent_roam(cfg, 1); + wl_cfg80211_determine_vsdb_mode(cfg); + if (mode == WL_MODE_AP) { + if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false)) + WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n")); + } + + pm = PM_OFF; + if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm, + sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + _net_info->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + _net_info->ndev->name, err)); + + wl_cfg80211_update_power_mode(_net_info->ndev); + } + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT); +#if defined(WLTDLS) + if (wl_cfg80211_is_concurrent_mode()) { + err = wldev_iovar_setint(primary_dev, "tdls_enable", 0); + } +#endif /* defined(WLTDLS) */ + +#ifdef DISABLE_FRAMEBURST_VSDB +#ifdef USE_WFA_CERT_CONF + if (g_frameburst) +#endif /* USE_WFA_CERT_CONF */ + { + if (wl_cfg80211_is_concurrent_mode()) { + int frameburst = 0; + if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst, + sizeof(frameburst), true) != 0) { + WL_DBG(("frameburst set error\n")); + } + WL_DBG(("Frameburst Disabled\n")); + } + } +#endif /* DISABLE_FRAMEBURST_VSDB */ + } else { /* clear */ + chan = 0; + /* clear chan information when the net device is disconnected */ + wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN); + wl_cfg80211_determine_vsdb_mode(cfg); + if (primary_dev == _net_info->ndev) { + pm = PM_FAST; + if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm, + sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + _net_info->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + _net_info->ndev->name, err)); + + wl_cfg80211_update_power_mode(_net_info->ndev); + } + } + + wl_cfg80211_concurrent_roam(cfg, 0); +#if defined(WLTDLS) + if (!wl_cfg80211_is_concurrent_mode()) { + err = wldev_iovar_setint(primary_dev, "tdls_enable", 1); + } 
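/*
 * Illustrative sketch (not part of the patch):
 * wl_cfg80211_determine_vsdb_mode() flags multi-channel ("VSDB") operation by
 * reading the control channel of every connected interface and comparing it
 * against the first one seen; any mismatch means the single radio must
 * time-slice between channels.  The interface abstraction is local to the sketch.
 */
#include <stdbool.h>

struct iface { bool connected; int ctl_chan; };

static bool vsdb_mode(const struct iface *ifs, int n)
{
	int first_chan = 0;

	for (int i = 0; i < n; i++) {
		if (!ifs[i].connected || ifs[i].ctl_chan == 0)
			continue;
		if (first_chan == 0)
			first_chan = ifs[i].ctl_chan;	/* remember the first channel */
		else if (ifs[i].ctl_chan != first_chan)
			return true;			/* two different channels in use */
	}
	return false;
}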
+#endif /* defined(WLTDLS) */ + +#ifdef DISABLE_FRAMEBURST_VSDB +#ifdef USE_WFA_CERT_CONF + if (g_frameburst) +#endif /* USE_WFA_CERT_CONF */ + { + int frameburst = 1; + if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst, + sizeof(frameburst), true) != 0) { + WL_DBG(("frameburst set error\n")); + } + WL_DBG(("Frameburst Enabled\n")); + } +#endif /* DISABLE_FRAMEBURST_VSDB */ + } + return err; +} +static s32 wl_init_scan(struct bcm_cfg80211 *cfg) +{ + int err = 0; + + cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler; + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_init_sync_id(cfg); + + /* Init scan_timeout timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&cfg->scan_timeout, wl_scan_timeout, 0); +#else + init_timer(&cfg->scan_timeout); + cfg->scan_timeout.data = (unsigned long) cfg; + cfg->scan_timeout.function = wl_scan_timeout; +#endif + + return err; +} + +#ifdef DHD_LOSSLESS_ROAMING +static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg) +{ + int err = 0; + + /* Init roam timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&cfg->roam_timeout, wl_roam_timeout, 0); +#else + init_timer(&cfg->roam_timeout); + cfg->roam_timeout.data = (unsigned long) cfg; + cfg->roam_timeout.function = wl_roam_timeout; + +#endif + return err; +} +#endif /* DHD_LOSSLESS_ROAMING */ + +static s32 wl_init_priv(struct bcm_cfg80211 *cfg) +{ + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; + + cfg->scan_request = NULL; + cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT); + cfg->roam_on = false; + cfg->active_scan = true; + cfg->rf_blocked = false; + cfg->vsdb_mode = false; +#if defined(BCMSDIO) + cfg->wlfc_on = false; +#endif + cfg->roamoff_on_concurrent = true; + cfg->disable_roam_event = false; + cfg->cfgdev_bssidx = -1; + /* register interested state */ + set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state); + spin_lock_init(&cfg->cfgdrv_lock); + mutex_init(&cfg->ioctl_buf_sync); + init_waitqueue_head(&cfg->netif_change_event); + init_completion(&cfg->send_af_done); + init_completion(&cfg->iface_disable); + wl_init_eq(cfg); + err = wl_init_priv_mem(cfg); + if (err) + return err; + if (wl_create_event_handler(cfg)) + return -ENOMEM; + wl_init_event_handler(cfg); + mutex_init(&cfg->usr_sync); + mutex_init(&cfg->event_sync); + mutex_init(&cfg->scan_complete); + err = wl_init_scan(cfg); + if (err) + return err; +#ifdef DHD_LOSSLESS_ROAMING + err = wl_init_roam_timeout(cfg); + if (err) { + return err; + } +#endif /* DHD_LOSSLESS_ROAMING */ + wl_init_conf(cfg->conf); + wl_init_prof(cfg, ndev); + wl_link_down(cfg); + DNGL_FUNC(dhd_cfg80211_init, (cfg)); + + return err; +} + +static void wl_deinit_priv(struct bcm_cfg80211 *cfg) +{ + DNGL_FUNC(dhd_cfg80211_deinit, (cfg)); + wl_destroy_event_handler(cfg); + wl_flush_eq(cfg); + wl_link_down(cfg); + del_timer_sync(&cfg->scan_timeout); +#ifdef DHD_LOSSLESS_ROAMING + del_timer_sync(&cfg->roam_timeout); +#endif + wl_deinit_priv_mem(cfg); + if (wl_cfg80211_netdev_notifier_registered) { + wl_cfg80211_netdev_notifier_registered = FALSE; + unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier); + } +} + +#if defined(WL_ENABLE_P2P_IF) +static s32 wl_cfg80211_attach_p2p(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + WL_TRACE(("Enter \n")); + + if (wl_cfgp2p_register_ndev(cfg) < 0) { + WL_ERR(("P2P attach failed. 
\n")); + return -ENODEV; + } + + return 0; +} + +static s32 wl_cfg80211_detach_p2p(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev; + + WL_DBG(("Enter \n")); + if (!cfg) { + WL_ERR(("Invalid Ptr\n")); + return -EINVAL; + } else + wdev = cfg->p2p_wdev; + + if (!wdev) { + WL_ERR(("Invalid Ptr\n")); + return -EINVAL; + } + + wl_cfgp2p_unregister_ndev(cfg); + + cfg->p2p_wdev = NULL; + cfg->p2p_net = NULL; + WL_DBG(("Freeing 0x%p \n", wdev)); + kfree(wdev); + + return 0; +} +#endif + +s32 wl_cfg80211_attach_post(struct net_device *ndev) +{ + struct bcm_cfg80211 * cfg = NULL; + s32 err = 0; + s32 ret = 0; + WL_TRACE(("In\n")); + if (unlikely(!ndev)) { + WL_ERR(("ndev is invaild\n")); + return -ENODEV; + } + cfg = g_bcm_cfg; + if (unlikely(!cfg)) { + WL_ERR(("cfg is invaild\n")); + return -EINVAL; + } + if (!wl_get_drv_status(cfg, READY, ndev)) { + if (cfg->wdev) { + ret = wl_cfgp2p_supported(cfg, ndev); + if (ret > 0) { +#if !defined(WL_ENABLE_P2P_IF) + cfg->wdev->wiphy->interface_modes |= + (BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO)); +#endif /* !WL_ENABLE_P2P_IF */ + if ((err = wl_cfgp2p_init_priv(cfg)) != 0) + goto fail; + +#if defined(WL_ENABLE_P2P_IF) + if (cfg->p2p_net) { + /* Update MAC addr for p2p0 interface here. */ + memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN); + cfg->p2p_net->dev_addr[0] |= 0x02; + WL_ERR(("%s: p2p_dev_addr="MACDBG "\n", + cfg->p2p_net->name, + MAC2STRDBG(cfg->p2p_net->dev_addr))); + } else { + WL_ERR(("p2p_net not yet populated." + " Couldn't update the MAC Address for p2p0 \n")); + return -ENODEV; + } +#endif /* WL_ENABLE_P2P_IF */ + cfg->p2p_supported = true; + } else if (ret == 0) { + if ((err = wl_cfgp2p_init_priv(cfg)) != 0) + goto fail; + } else { + /* SDIO bus timeout */ + err = -ENODEV; + goto fail; + } + } + } + wl_set_drv_status(cfg, READY, ndev); +fail: + return err; +} + +s32 wl_cfg80211_attach(struct net_device *ndev, void *context) +{ + struct wireless_dev *wdev; + struct bcm_cfg80211 *cfg; + s32 err = 0; + struct device *dev; + + WL_TRACE(("In\n")); + if (!ndev) { + WL_ERR(("ndev is invaild\n")); + return -ENODEV; + } + WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev())); + dev = wl_cfg80211_get_parent_dev(); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return -ENOMEM; + } + err = wl_setup_wiphy(wdev, dev, context); + if (unlikely(err)) { + kfree(wdev); + return -ENOMEM; + } + wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS); + cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy); + cfg->wdev = wdev; + cfg->pub = context; + INIT_LIST_HEAD(&cfg->net_list); + spin_lock_init(&cfg->net_list_sync); + ndev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); + wdev->netdev = ndev; + cfg->state_notifier = wl_notifier_change_state; + err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE, 0); + if (err) { + WL_ERR(("Failed to alloc net_info (%d)\n", err)); + goto cfg80211_attach_out; + } + err = wl_init_priv(cfg); + if (err) { + WL_ERR(("Failed to init iwm_priv (%d)\n", err)); + goto cfg80211_attach_out; + } + + err = wl_setup_rfkill(cfg, TRUE); + if (err) { + WL_ERR(("Failed to setup rfkill %d\n", err)); + goto cfg80211_attach_out; + } +#ifdef DEBUGFS_CFG80211 + err = wl_setup_debugfs(cfg); + if (err) { + WL_ERR(("Failed to setup debugfs %d\n", err)); + goto cfg80211_attach_out; + } +#endif + if (!wl_cfg80211_netdev_notifier_registered) { + wl_cfg80211_netdev_notifier_registered = TRUE; + 
err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier); + if (err) { + wl_cfg80211_netdev_notifier_registered = FALSE; + WL_ERR(("Failed to register notifierl %d\n", err)); + goto cfg80211_attach_out; + } + } +#if defined(COEX_DHCP) + cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev); + if (!cfg->btcoex_info) + goto cfg80211_attach_out; +#endif +#if defined(SUPPORT_RANDOM_MAC_SCAN) + cfg->random_mac_enabled = FALSE; +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + g_bcm_cfg = cfg; + +#ifdef CONFIG_CFG80211_INTERNAL_REGDB + wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier; +#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ + +#if defined(WL_ENABLE_P2P_IF) + err = wl_cfg80211_attach_p2p(); + if (err) + goto cfg80211_attach_out; +#endif + + INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler); + mutex_init(&cfg->pm_sync); + + return err; + +cfg80211_attach_out: + wl_setup_rfkill(cfg, FALSE); + wl_free_wdev(cfg); + return err; +} + +void wl_cfg80211_detach(void *para) +{ + struct bcm_cfg80211 *cfg; + + (void)para; + cfg = g_bcm_cfg; + + WL_TRACE(("In\n")); + + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); + +#if defined(COEX_DHCP) + wl_cfg80211_btcoex_deinit(); + cfg->btcoex_info = NULL; +#endif + + wl_setup_rfkill(cfg, FALSE); +#ifdef DEBUGFS_CFG80211 + wl_free_debugfs(cfg); +#endif + if (cfg->p2p_supported) { + if (timer_pending(&cfg->p2p->listen_timer)) + del_timer_sync(&cfg->p2p->listen_timer); + wl_cfgp2p_deinit_priv(cfg); + } + + if (timer_pending(&cfg->scan_timeout)) + del_timer_sync(&cfg->scan_timeout); +#ifdef DHD_LOSSLESS_ROAMING + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } +#endif /* DHD_LOSSLESS_ROAMING */ + +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfg->p2p_wdev) + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); +#endif /* WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_ENABLE_P2P_IF) + wl_cfg80211_detach_p2p(); +#endif + + wl_cfg80211_ibss_vsie_free(cfg); + wl_cfg80211_clear_mgmt_vndr_ies(cfg); + wl_deinit_priv(cfg); + g_bcm_cfg = NULL; + wl_cfg80211_clear_parent_dev(); + wl_free_wdev(cfg); + /* PLEASE do NOT call any function after wl_free_wdev, the driver's private + * structure "cfg", which is the private part of wiphy, has been freed in + * wl_free_wdev !!!!!!!!!!! + */ +} + +static void wl_wakeup_event(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + if (dhd->up && (cfg->event_tsk.thr_pid >= 0)) { + up(&cfg->event_tsk.sema); + } +} + +static s32 wl_event_handler(void *data) +{ + struct bcm_cfg80211 *cfg = NULL; + struct wl_event_q *e; + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + struct wireless_dev *wdev = NULL; + + cfg = (struct bcm_cfg80211 *)tsk->parent; + + WL_ERR(("tsk Enter, tsk = 0x%p\n", tsk)); + + while (down_interruptible (&tsk->sema) == 0) { + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + while ((e = wl_deq_event(cfg))) { + WL_DBG(("event type (%d), ifidx: %d bssidx: %d \n", + e->etype, e->emsg.ifidx, e->emsg.bsscfgidx)); + + if (e->emsg.ifidx > WL_MAX_IFS) { + WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx)); + goto fail; + } + + if (!(wdev = wl_get_wdev_by_bssidx(cfg, e->emsg.bsscfgidx))) { + /* For WLC_E_IF would be handled by wl_host_event */ + if (e->etype != WLC_E_IF) + WL_ERR(("No wdev corresponding to bssidx: 0x%x found!" 
+ " Ignoring event.\n", e->emsg.bsscfgidx)); + } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) { + dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + WL_ERR((": BUS is DOWN.\n")); + } else { +#ifdef DHD_IFDEBUG + if (cfg->iface_cnt == 0) { + wl_dump_ifinfo(cfg); + } +#endif + cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev), + &e->emsg, e->edata); + } + } else { + WL_DBG(("Unknown Event (%d): ignoring\n", e->etype)); + } +fail: + wl_put_event(e); + DHD_EVENT_WAKE_UNLOCK(cfg->pub); + } + } + WL_ERR(("was terminated\n")); + complete_and_exit(&tsk->completed, 0); + return 0; +} + +void +wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) +{ + u32 event_type = ntoh32(e->event_type); + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *netinfo; + +#if (WL_DBG_LEVEL > 0) + s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ? + wl_dbg_estr[event_type] : (s8 *) "Unknown"; + WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr)); +#endif /* (WL_DBG_LEVEL > 0) */ + + if (cfg->event_tsk.thr_pid == -1) { + WL_ERR(("Event handler is not created\n")); + return; + } + + if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) { + WL_ERR(("Stale event ignored\n")); + return; + } + + if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) { + WL_ERR(("during IF change, ignore event %d\n", event_type)); + return; + } + +#ifdef DHD_IFDEBUG + if (event_type != WLC_E_ESCAN_RESULT) { + WL_ERR(("Event_type %d , status : %d, reason : %d, bssidx:%d \n", + event_type, ntoh32(e->status), ntoh32(e->reason), e->bsscfgidx)); + } +#endif + netinfo = wl_get_netinfo_by_bssidx(cfg, e->bsscfgidx); + if (!netinfo) { + /* Since the netinfo entry is not there, the netdev entry is not + * created via cfg80211 interface. so the event is not of interest + * to the cfg80211 layer. 
+ */ + WL_ERR(("ignore event %d, not interested\n", event_type)); + return; + } + + if (event_type == WLC_E_PFN_NET_FOUND) { + WL_DBG((" PNOEVENT: PNO_NET_FOUND\n")); + } + else if (event_type == WLC_E_PFN_NET_LOST) { + WL_DBG((" PNOEVENT: PNO_NET_LOST\n")); + } + + DHD_EVENT_WAKE_LOCK(cfg->pub); + if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) { + wl_wakeup_event(cfg); + } else { + DHD_EVENT_WAKE_UNLOCK(cfg->pub); + } +} + +static void wl_init_eq(struct bcm_cfg80211 *cfg) +{ + wl_init_eq_lock(cfg); + INIT_LIST_HEAD(&cfg->eq_list); +} + +static void wl_flush_eq(struct bcm_cfg80211 *cfg) +{ + struct wl_event_q *e; + unsigned long flags; + + flags = wl_lock_eq(cfg); + while (!list_empty_careful(&cfg->eq_list)) { + BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list); + list_del(&e->eq_list); + kfree(e); + } + wl_unlock_eq(cfg, flags); +} + +/* +* retrieve first queued event from head +*/ + +static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg) +{ + struct wl_event_q *e = NULL; + unsigned long flags; + + flags = wl_lock_eq(cfg); + if (likely(!list_empty(&cfg->eq_list))) { + BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list); + list_del(&e->eq_list); + } + wl_unlock_eq(cfg, flags); + + return e; +} + +/* + * push event to tail of the queue + */ + +static s32 +wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event, + const wl_event_msg_t *msg, void *data) +{ + struct wl_event_q *e; + s32 err = 0; + uint32 evtq_size; + uint32 data_len; + unsigned long flags; + gfp_t aflags; + + data_len = 0; + if (data) + data_len = ntoh32(msg->datalen); + evtq_size = sizeof(struct wl_event_q) + data_len; + aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + e = kzalloc(evtq_size, aflags); + if (unlikely(!e)) { + WL_ERR(("event alloc failed\n")); + return -ENOMEM; + } + e->etype = event; + memcpy(&e->emsg, msg, sizeof(wl_event_msg_t)); + if (data) + memcpy(e->edata, data, data_len); + flags = wl_lock_eq(cfg); + list_add_tail(&e->eq_list, &cfg->eq_list); + wl_unlock_eq(cfg, flags); + + return err; +} + +static void wl_put_event(struct wl_event_q *e) +{ + kfree(e); +} + +static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype) +{ + s32 infra = 0; + s32 err = 0; + s32 mode = 0; + switch (iftype) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_WDS: + WL_ERR(("type (%d) : currently we do not support this mode\n", + iftype)); + err = -EINVAL; + return err; + case NL80211_IFTYPE_ADHOC: + mode = WL_MODE_IBSS; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + mode = WL_MODE_BSS; + infra = 1; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + mode = WL_MODE_AP; + infra = 1; + break; + default: + err = -EINVAL; + WL_ERR(("invalid type (%d)\n", iftype)); + return err; + } + infra = htod32(infra); + err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true); + if (unlikely(err)) { + WL_ERR(("WLC_SET_INFRA error (%d)\n", err)); + return err; + } + + wl_set_mode_by_netdev(cfg, ndev, mode); + + return 0; +} + +void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set) +{ + if (!ev || (event > WLC_E_LAST)) + return; + + if (ev->num < MAX_EVENT_BUF_NUM) { + ev->event[ev->num].type = event; + ev->event[ev->num].set = set; + ev->num++; + } else { + WL_ERR(("evenbuffer doesn't support > %u events. 
Update" + " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM)); + ASSERT(0); + } +} + +s32 wl_cfg80211_apply_eventbuffer( + struct net_device *ndev, + struct bcm_cfg80211 *cfg, + wl_eventmsg_buf_t *ev) +{ + char eventmask[WL_EVENTING_MASK_LEN]; + int i, ret = 0; + s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; + + if (!ev || (!ev->num)) + return -EINVAL; + + mutex_lock(&cfg->event_sync); + + /* Read event_msgs mask */ + bcm_mkiovar("event_msgs", NULL, 0, iovbuf, + sizeof(iovbuf)); + ret = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false); + if (unlikely(ret)) { + WL_ERR(("Get event_msgs error (%d)\n", ret)); + goto exit; + } + memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN); + + /* apply the set bits */ + for (i = 0; i < ev->num; i++) { + if (ev->event[i].set) + setbit(eventmask, ev->event[i].type); + else + clrbit(eventmask, ev->event[i].type); + } + + /* Write updated Event mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf)); + ret = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); + if (unlikely(ret)) { + WL_ERR(("Set event_msgs error (%d)\n", ret)); + } + +exit: + mutex_unlock(&cfg->event_sync); + return ret; +} + +s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add) +{ + s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; + s8 eventmask[WL_EVENTING_MASK_LEN]; + s32 err = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (!ndev || !cfg) + return -ENODEV; + + mutex_lock(&cfg->event_sync); + + /* Setup event_msgs */ + bcm_mkiovar("event_msgs", NULL, 0, iovbuf, + sizeof(iovbuf)); + err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false); + if (unlikely(err)) { + WL_ERR(("Get event_msgs error (%d)\n", err)); + goto eventmsg_out; + } + memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN); + if (add) { + setbit(eventmask, event); + } else { + clrbit(eventmask, event); + } + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf)); + err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); + if (unlikely(err)) { + WL_ERR(("Set event_msgs error (%d)\n", err)); + goto eventmsg_out; + } + +eventmsg_out: + mutex_unlock(&cfg->event_sync); + return err; +} + +static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap) +{ + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + struct ieee80211_channel *band_chan_arr = NULL; + wl_uint32_list_t *list; + u32 i, j, index, n_2g, n_5g, band, channel, array_size; + u32 *n_cnt = NULL; + chanspec_t c = 0; + s32 err = BCME_OK; + bool update; + bool ht40_allowed; + u8 *pbuf = NULL; + bool dfs_radar_disabled = FALSE; + +#define LOCAL_BUF_LEN 1024 + pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL); + + if (pbuf == NULL) { + WL_ERR(("failed to allocate local buf\n")); + return -ENOMEM; + } + list = (wl_uint32_list_t *)(void *)pbuf; + list->count = htod32(WL_NUMCHANSPECS); + + + err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL, + 0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync); + if (err != 0) { + WL_ERR(("get chanspecs failed with %d\n", err)); + kfree(pbuf); + return err; + } +#undef LOCAL_BUF_LEN + + list = (wl_uint32_list_t *)(void *)pbuf; + band = array_size = n_2g = n_5g = 0; + for (i = 0; i < dtoh32(list->count); i++) { + index = 0; + update = false; + ht40_allowed = false; + c = (chanspec_t)dtoh32(list->element[i]); + c = wl_chspec_driver_to_host(c); + channel = wf_chspec_ctlchan(c); + + if (!CHSPEC_IS40(c) && ! 
CHSPEC_IS20(c)) { + WL_DBG(("HT80/160/80p80 center channel : %d\n", channel)); + continue; + } + if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) && + (channel <= CH_MAX_2G_CHANNEL)) { + band_chan_arr = __wl_2ghz_channels; + array_size = ARRAYSIZE(__wl_2ghz_channels); + n_cnt = &n_2g; + band = NL80211_BAND_2GHZ ; + ht40_allowed = (bw_cap == WLC_N_BW_40ALL)? true : false; + } else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) { + band_chan_arr = __wl_5ghz_a_channels; + array_size = ARRAYSIZE(__wl_5ghz_a_channels); + n_cnt = &n_5g; + band = NL80211_BAND_5GHZ ; + ht40_allowed = (bw_cap == WLC_N_BW_20ALL)? false : true; + } else { + WL_ERR(("Invalid channel Sepc. 0x%x.\n", c)); + continue; + } + if (!ht40_allowed && CHSPEC_IS40(c)) + continue; + for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { + if (band_chan_arr[j].hw_value == channel) { + update = true; + break; + } + } + if (update) + index = j; + else + index = *n_cnt; + if (index < array_size) { +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + band_chan_arr[index].center_freq = + ieee80211_channel_to_frequency(channel); +#else + band_chan_arr[index].center_freq = + ieee80211_channel_to_frequency(channel, band); +#endif + band_chan_arr[index].hw_value = channel; + + if (CHSPEC_IS40(c) && ht40_allowed) { + /* assuming the order is HT20, HT40 Upper, + * HT40 lower from chanspecs + */ + u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40; + if (CHSPEC_SB_UPPER(c)) { + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + band_chan_arr[index].flags &= + ~IEEE80211_CHAN_NO_HT40; + band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS; + } else { + /* It should be one of + * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS + */ + band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40; + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + band_chan_arr[index].flags |= + IEEE80211_CHAN_NO_HT40MINUS; + } + } else { + band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40; + if (!dfs_radar_disabled) { + if (band == NL80211_BAND_2GHZ ) + channel |= WL_CHANSPEC_BAND_2G; + else + channel |= WL_CHANSPEC_BAND_5G; + channel |= WL_CHANSPEC_BW_20; + channel = wl_chspec_host_to_driver(channel); + err = wldev_iovar_getint(dev, "per_chan_info", &channel); + if (!err) { + if (channel & WL_CHAN_RADAR) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + band_chan_arr[index].flags |= + (IEEE80211_CHAN_RADAR + | IEEE80211_CHAN_NO_IBSS); +#else + band_chan_arr[index].flags |= + IEEE80211_CHAN_RADAR; +#endif + } + + if (channel & WL_CHAN_PASSIVE) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + band_chan_arr[index].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; +#else + band_chan_arr[index].flags |= + IEEE80211_CHAN_NO_IR; +#endif + } else if (err == BCME_UNSUPPORTED) { + dfs_radar_disabled = TRUE; + WL_ERR(("does not support per_chan_info\n")); + } + } + } + if (!update) + (*n_cnt)++; + } + + } + __wl_band_2ghz.n_channels = n_2g; + __wl_band_5ghz_a.n_channels = n_5g; + kfree(pbuf); + return err; +} + +s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify) +{ + struct wiphy *wiphy; + struct net_device *dev; + u32 bandlist[3]; + u32 nband = 0; + u32 i = 0; + s32 err = 0; + s32 index = 0; + s32 nmode = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + u32 j = 0; + s32 vhtmode = 0; + s32 txstreams = 0; + s32 rxstreams = 0; + s32 ldpc_cap = 0; + s32 stbc_rx = 0; + s32 stbc_tx = 0; + s32 txbf_bfe_cap = 0; + s32 txbf_bfr_cap = 0; +#endif + bool rollback_lock = false; + s32 bw_cap = 0; + s32 cur_band = -1; + struct 
ieee80211_supported_band *bands[NUM_NL80211_BANDS] = {NULL, }; + + if (cfg == NULL) { + cfg = g_bcm_cfg; + mutex_lock(&cfg->usr_sync); + rollback_lock = true; + } + dev = bcmcfg_to_prmry_ndev(cfg); + + memset(bandlist, 0, sizeof(bandlist)); + err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist, + sizeof(bandlist), false); + if (unlikely(err)) { + WL_ERR(("error read bandlist (%d)\n", err)); + goto end_bands; + } + err = wldev_ioctl(dev, WLC_GET_BAND, &cur_band, + sizeof(s32), false); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + goto end_bands; + } + + err = wldev_iovar_getint(dev, "nmode", &nmode); + if (unlikely(err)) { + WL_ERR(("error reading nmode (%d)\n", err)); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + err = wldev_iovar_getint(dev, "vhtmode", &vhtmode); + if (unlikely(err)) { + WL_ERR(("error reading vhtmode (%d)\n", err)); + } + + if (vhtmode) { + err = wldev_iovar_getint(dev, "txstreams", &txstreams); + if (unlikely(err)) { + WL_ERR(("error reading txstreams (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "rxstreams", &rxstreams); + if (unlikely(err)) { + WL_ERR(("error reading rxstreams (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap); + if (unlikely(err)) { + WL_ERR(("error reading ldpc_cap (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx); + if (unlikely(err)) { + WL_ERR(("error reading stbc_rx (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx); + if (unlikely(err)) { + WL_ERR(("error reading stbc_tx (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap); + if (unlikely(err)) { + WL_ERR(("error reading txbf_bfe_cap (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap); + if (unlikely(err)) { + WL_ERR(("error reading txbf_bfr_cap (%d)\n", err)); + } + } +#endif + + /* For nmode and vhtmode check bw cap */ + if (nmode || +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + vhtmode || +#endif + 0) { + err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); + if (unlikely(err)) { + WL_ERR(("error get mimo_bw_cap (%d)\n", err)); + } + } + + err = wl_construct_reginfo(cfg, bw_cap); + if (err) { + WL_ERR(("wl_construct_reginfo() fails err=%d\n", err)); + if (err != BCME_UNSUPPORTED) + goto end_bands; + err = 0; + } + wiphy = bcmcfg_to_wiphy(cfg); + nband = bandlist[0]; + + for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) { + index = -1; + if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) { + bands[NL80211_BAND_5GHZ ] = + &__wl_band_5ghz_a; + index = NL80211_BAND_5GHZ ; + if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G)) + bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + /* VHT capabilities. */ + if (vhtmode) { + /* Supported */ + bands[index]->vht_cap.vht_supported = TRUE; + + for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) { + /* TX stream rates. */ + if (j <= txstreams) { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9, + bands[index]->vht_cap.vht_mcs.tx_mcs_map); + } else { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE, + bands[index]->vht_cap.vht_mcs.tx_mcs_map); + } + + /* RX stream rates. 
*/ + if (j <= rxstreams) { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9, + bands[index]->vht_cap.vht_mcs.rx_mcs_map); + } else { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE, + bands[index]->vht_cap.vht_mcs.rx_mcs_map); + } + } + + + /* Capabilities */ + /* 80 MHz is mandatory */ + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_80; + + if (WL_BW_CAP_160MHZ(bw_cap)) { + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_160; + } + + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + + if (ldpc_cap) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_RXLDPC; + + if (stbc_tx) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_TXSTBC; + + if (stbc_rx) + bands[index]->vht_cap.cap |= + (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT); + + if (txbf_bfe_cap) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + + if (txbf_bfr_cap) { + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + } + + if (txbf_bfe_cap || txbf_bfr_cap) { + bands[index]->vht_cap.cap |= + (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT); + bands[index]->vht_cap.cap |= + ((txstreams - 1) << + VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT); + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB; + } + + /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */ + bands[index]->vht_cap.cap |= + (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT); + WL_INFORM(("%s band[%d] vht_enab=%d vht_cap=%08x " + "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n", + __FUNCTION__, index, + bands[index]->vht_cap.vht_supported, + bands[index]->vht_cap.cap, + bands[index]->vht_cap.vht_mcs.rx_mcs_map, + bands[index]->vht_cap.vht_mcs.tx_mcs_map)); + } +#endif + } + else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) { + bands[NL80211_BAND_2GHZ ] = + &__wl_band_2ghz; + index = NL80211_BAND_2GHZ ; + if (bw_cap == WLC_N_BW_40ALL) + bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + } + + if ((index >= 0) && nmode) { + bands[index]->ht_cap.cap |= + (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40); + bands[index]->ht_cap.ht_supported = TRUE; + bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + /* An HT shall support all EQM rates for one spatial stream */ + bands[index]->ht_cap.mcs.rx_mask[0] = 0xff; + } + + } + + wiphy->bands[NL80211_BAND_2GHZ ] = bands[NL80211_BAND_2GHZ ]; + wiphy->bands[NL80211_BAND_5GHZ ] = bands[NL80211_BAND_5GHZ ]; + + /* check if any bands populated otherwise makes 2Ghz as default */ + if (wiphy->bands[NL80211_BAND_2GHZ ] == NULL && + wiphy->bands[NL80211_BAND_5GHZ ] == NULL) { + /* Setup 2Ghz band as default */ + wiphy->bands[NL80211_BAND_2GHZ ] = &__wl_band_2ghz; + } + + if (notify) + wiphy_apply_custom_regulatory(wiphy, &brcm_regdom); + + end_bands: + if (rollback_lock) + mutex_unlock(&cfg->usr_sync); + return err; +} + +static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct wireless_dev *wdev = ndev->ieee80211_ptr; + + WL_DBG(("In\n")); + + err = dhd_config_dongle(cfg); + if (unlikely(err)) + return err; + + err = wl_config_ifmode(cfg, ndev, wdev->iftype); + if (unlikely(err && err != -EINPROGRESS)) { + WL_ERR(("wl_config_ifmode failed\n")); + if (err == -1) { + WL_ERR(("return error %d\n", err)); + return err; + } + } + err = wl_update_wiphybands(cfg, true); + if (unlikely(err)) { + 
WL_ERR(("wl_update_wiphybands failed\n")); + if (err == -1) { + WL_ERR(("return error %d\n", err)); + return err; + } + } + + err = wl_create_event_handler(cfg); + if (err) { + WL_ERR(("wl_create_event_handler failed\n")); + return err; + } + wl_init_event_handler(cfg); + + err = wl_init_scan(cfg); + if (err) { + WL_ERR(("wl_init_scan failed\n")); + return err; + } +#ifdef DHD_LOSSLESS_ROAMING + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } +#endif /* DHD_LOSSLESS_ROAMING */ + + err = dhd_monitor_init(cfg->pub); + + wl_set_drv_status(cfg, READY, ndev); + return err; +} + +static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + unsigned long flags; + struct net_info *iter, *next; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct cfg80211_scan_info info = {}; +#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF) + struct net_device *p2p_net = cfg->p2p_net; +#endif +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif +#endif /* PROP_TXSTATUS_VSDB */ + WL_DBG(("In\n")); + /* Delete pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); + +#ifdef WL_NAN + wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, 0, NULL); +#endif /* WL_NAN */ + + if (cfg->p2p_supported) { + wl_clr_p2p_status(cfg, GO_NEG_PHASE); +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (wl_cfgp2p_vif_created(cfg)) { + bool enabled = false; + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + } + + + /* If primary BSS is operational (for e.g SoftAP), bring it down */ + if (wl_cfgp2p_bss_isup(ndev, 0)) { + if (wl_cfgp2p_bss(cfg, ndev, 0, 0) < 0) + WL_ERR(("BSS down failed \n")); + } + + /* Check if cfg80211 interface is already down */ + if (!wl_get_drv_status(cfg, READY, ndev)) + return err; /* it is even not ready */ + + /* clear all the security setting on primary Interface */ + wl_cfg80211_clear_security(cfg); + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev) /* p2p discovery iface is null */ + wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + +#ifdef P2P_LISTEN_OFFLOADING + wl_cfg80211_p2plo_deinit(cfg); +#endif /* P2P_LISTEN_OFFLOADING */ + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + if (cfg->scan_request) { + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev ptr could be null */ + if (iter->ndev == NULL) + continue; + wl_clr_drv_status(cfg, READY, iter->ndev); + wl_clr_drv_status(cfg, SCANNING, iter->ndev); + wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); + wl_clr_drv_status(cfg, CONNECTING, iter->ndev); + wl_clr_drv_status(cfg, CONNECTED, 
iter->ndev); + wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev); + wl_clr_drv_status(cfg, AP_CREATED, iter->ndev); + wl_clr_drv_status(cfg, AP_CREATING, iter->ndev); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype = + NL80211_IFTYPE_STATION; +#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF) + if (p2p_net) + dev_close(p2p_net); +#endif + + /* Avoid deadlock from wl_cfg80211_down */ + mutex_unlock(&cfg->usr_sync); + wl_destroy_event_handler(cfg); + mutex_lock(&cfg->usr_sync); + wl_flush_eq(cfg); + wl_link_down(cfg); + if (cfg->p2p_supported) { + if (timer_pending(&cfg->p2p->listen_timer)) + del_timer_sync(&cfg->p2p->listen_timer); + wl_cfgp2p_down(cfg); + } + + if (timer_pending(&cfg->scan_timeout)) { + del_timer_sync(&cfg->scan_timeout); + } + + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); + + dhd_monitor_uninit(); +#ifdef WLAIBSS_MCHAN + bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev); +#endif /* WLAIBSS_MCHAN */ + +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) + /* Clean up if not removed already */ + if (cfg->bss_cfgdev) + wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev); +#endif /* defined (WL_VIRTUAL_APSTA) || defined (DUAL_STA_STATIC_IF) */ + +#ifdef WL11U + /* Clear interworking element. */ + if (cfg->wl11u) { + cfg->wl11u = FALSE; + cfg->iw_ie_len = 0; + memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN); + } +#endif /* WL11U */ + +#ifdef CUSTOMER_HW4_DEBUG + if (wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_clear(); +#endif /* CUSTOMER_HW4_DEBUG */ + + cfg->disable_roam_event = false; + + DNGL_FUNC(dhd_cfg80211_down, (cfg)); + +#ifdef DHD_IFDEBUG + /* Printout all netinfo entries */ + wl_probe_wdev_all(cfg); +#endif /* DHD_IFDEBUG */ + + return err; +} + +s32 wl_cfg80211_up(void *para) +{ + struct bcm_cfg80211 *cfg; + s32 err = 0; + int val = 1; + dhd_pub_t *dhd; +#ifdef DISABLE_PM_BCNRX + s32 interr = 0; + uint param = 0; + s8 iovbuf[WLC_IOCTL_SMLEN]; +#endif /* DISABLE_PM_BCNRX */ + + (void)para; + WL_DBG(("In\n")); + cfg = g_bcm_cfg; + + if ((err = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val, + sizeof(int), false) < 0)) { + WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err)); + return err; + } + val = dtoh32(val); + if (val != WLC_IOCTL_VERSION && val != 1) { + WL_ERR(("Version mismatch, please upgrade. 
Got %d, expected %d or 1\n", + val, WLC_IOCTL_VERSION)); + return BCME_VERSION; + } + ioctl_version = val; + WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version)); + + mutex_lock(&cfg->usr_sync); + dhd = (dhd_pub_t *)(cfg->pub); + if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) { + err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg)); + if (unlikely(err)) { + mutex_unlock(&cfg->usr_sync); + return err; + } + } + err = __wl_cfg80211_up(cfg); + if (unlikely(err)) + WL_ERR(("__wl_cfg80211_up failed\n")); + + + + /* IOVAR configurations with 'up' condition */ +#ifdef DISABLE_PM_BCNRX + bcm_mkiovar("pm_bcnrx", (char *)¶m, 4, iovbuf, sizeof(iovbuf)); + interr = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); + if (unlikely(interr)) + WL_ERR(("Set pm_bcnrx returned (%d)\n", interr)); +#endif /* DISABLE_PM_BCNRX */ + + mutex_unlock(&cfg->usr_sync); + +#ifdef WLAIBSS_MCHAN + bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME); +#endif /* WLAIBSS_MCHAN */ + +#ifdef DUAL_STA_STATIC_IF +#ifdef WL_VIRTUAL_APSTA +#error "Both DUAL STA and DUAL_STA_STATIC_IF can't be enabled together" +#endif + /* Static Interface support is currently supported only for STA only builds (without P2P) */ + wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d"); +#endif /* DUAL_STA_STATIC_IF */ + + return err; +} + +/* Private Event to Supplicant with indication that chip hangs */ +int wl_cfg80211_hang(struct net_device *dev, u16 reason) +{ + struct bcm_cfg80211 *cfg; + dhd_pub_t *dhd; +#if defined(SOFTAP_SEND_HANGEVT) + /* specifc mac address used for hang event */ + uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11}; +#endif /* SOFTAP_SEND_HANGEVT */ + if (!g_bcm_cfg) { + return BCME_ERROR; + } + + cfg = g_bcm_cfg; + dhd = (dhd_pub_t *)(cfg->pub); + +#ifdef DHD_USE_EXTENDED_HANG_REASON + if (dhd->hang_reason != 0) { + reason = dhd->hang_reason; + } +#endif /* DHD_USE_EXTENDED_HANG_REASON */ + + WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason))); + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); +#if defined(SOFTAP_SEND_HANGEVT) + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC); + } else +#endif /* SOFTAP_SEND_HANGEVT */ + { + CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL); + } + if (cfg != NULL) { + wl_link_down(cfg); + } + return 0; +} + +s32 wl_cfg80211_down(void *para) +{ + struct bcm_cfg80211 *cfg; + s32 err = 0; + + (void)para; + WL_DBG(("In\n")); + cfg = g_bcm_cfg; + mutex_lock(&cfg->usr_sync); + err = __wl_cfg80211_down(cfg); + mutex_unlock(&cfg->usr_sync); + + return err; +} + +static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item) +{ + unsigned long flags; + void *rptr = NULL; + struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev); + + if (!profile) + return NULL; + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + switch (item) { + case WL_PROF_SEC: + rptr = &profile->sec; + break; + case WL_PROF_ACT: + rptr = &profile->active; + break; + case WL_PROF_BSSID: + rptr = profile->bssid; + break; + case WL_PROF_SSID: + rptr = &profile->ssid; + break; + case WL_PROF_CHAN: + rptr = &profile->channel; + break; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + if (!rptr) + WL_ERR(("invalid item (%d)\n", item)); + return rptr; +} + +static s32 +wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, const void *data, s32 item) +{ + s32 err = 0; + const struct wlc_ssid 
*ssid; + unsigned long flags; + struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev); + + if (!profile) + return WL_INVALID; + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + switch (item) { + case WL_PROF_SSID: + ssid = (const wlc_ssid_t *) data; + memset(profile->ssid.SSID, 0, + sizeof(profile->ssid.SSID)); + memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len); + profile->ssid.SSID_len = ssid->SSID_len; + break; + case WL_PROF_BSSID: + if (data) + memcpy(profile->bssid, data, ETHER_ADDR_LEN); + else + memset(profile->bssid, 0, ETHER_ADDR_LEN); + break; + case WL_PROF_SEC: + memcpy(&profile->sec, data, sizeof(profile->sec)); + break; + case WL_PROF_ACT: + profile->active = *(const bool *)data; + break; + case WL_PROF_BEACONINT: + profile->beacon_interval = *(const u16 *)data; + break; + case WL_PROF_DTIMPERIOD: + profile->dtim_period = *(const u8 *)data; + break; + case WL_PROF_CHAN: + profile->channel = *(const u32*)data; + break; + default: + err = -EOPNOTSUPP; + break; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + + if (err == -EOPNOTSUPP) + WL_ERR(("unsupported item (%d)\n", item)); + + return err; +} + +void wl_cfg80211_dbg_level(u32 level) +{ + /* + * prohibit to change debug level + * by insmod parameter. + * eventually debug level will be configured + * in compile time by using CONFIG_XXX + */ + /* wl_dbg_level = level; */ +} + +static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS; +} + +static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg) +{ + return cfg->ibss_starter; +} + +static void wl_rst_ie(struct bcm_cfg80211 *cfg) +{ + struct wl_ie *ie = wl_to_ie(cfg); + + ie->offset = 0; +} + +static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v) +{ + struct wl_ie *ie = wl_to_ie(cfg); + s32 err = 0; + + if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) { + WL_ERR(("ei crosses buffer boundary\n")); + return -ENOSPC; + } + ie->buf[ie->offset] = t; + ie->buf[ie->offset + 1] = l; + memcpy(&ie->buf[ie->offset + 2], v, l); + ie->offset += l + 2; + + return err; +} + +static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size, + bool roam) +{ + u8 *ssidie; + /* cfg80211_find_ie defined in kernel returning const u8 */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + if (!ssidie) + return; + if (ssidie[1] != bi->SSID_len) { + if (ssidie[1]) { + WL_ERR(("%s: Wrong SSID len: %d != %d\n", + __FUNCTION__, ssidie[1], bi->SSID_len)); + } + if (roam) { + WL_ERR(("Changing the SSID Info.\n")); + memmove(ssidie + bi->SSID_len + 2, + (ssidie + 2) + ssidie[1], + *ie_size - (ssidie + 2 + ssidie[1] - ie_stream)); + memcpy(ssidie + 2, bi->SSID, bi->SSID_len); + *ie_size = *ie_size + bi->SSID_len - ssidie[1]; + ssidie[1] = bi->SSID_len; + } + return; + } + if (*(ssidie + 2) == '\0') + memcpy(ssidie + 2, bi->SSID, bi->SSID_len); + return; +} + +static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size) +{ + struct wl_ie *ie = wl_to_ie(cfg); + s32 err = 0; + + if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) { + WL_ERR(("ei_stream 
crosses buffer boundary\n")); + return -ENOSPC; + } + memcpy(&ie->buf[ie->offset], ie_stream, ie_size); + ie->offset += ie_size; + + return err; +} + +static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size) +{ + struct wl_ie *ie = wl_to_ie(cfg); + s32 err = 0; + + if (unlikely(ie->offset > dst_size)) { + WL_ERR(("dst_size is not enough\n")); + return -ENOSPC; + } + memcpy(dst, &ie->buf[0], ie->offset); + + return err; +} + +static u32 wl_get_ielen(struct bcm_cfg80211 *cfg) +{ + struct wl_ie *ie = wl_to_ie(cfg); + + return ie->offset; +} + +static void wl_link_up(struct bcm_cfg80211 *cfg) +{ + cfg->link_up = true; +} + +static void wl_link_down(struct bcm_cfg80211 *cfg) +{ + struct wl_connect_info *conn_info = wl_to_conn(cfg); + + WL_DBG(("In\n")); + cfg->link_up = false; + conn_info->req_ie_len = 0; + conn_info->resp_ie_len = 0; +} + +static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg) +{ + unsigned long flags; + + spin_lock_irqsave(&cfg->eq_lock, flags); + return flags; +} + +static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags) +{ + spin_unlock_irqrestore(&cfg->eq_lock, flags); +} + +static void wl_init_eq_lock(struct bcm_cfg80211 *cfg) +{ + spin_lock_init(&cfg->eq_lock); +} + +static void wl_delay(u32 ms) +{ + if (in_atomic() || (ms < jiffies_to_msecs(1))) { + OSL_DELAY(ms*1000); + } else { + OSL_SLEEP(ms); + } +} + +s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct ether_addr primary_mac; + if (!cfg->p2p) + return -1; + if (!p2p_is_on(cfg)) { + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + } else { + memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet, + ETHER_ADDR_LEN); + } + + return 0; +} +s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len); +} + +s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len); +} + +s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len); +} + +s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len); +} + +#ifdef P2PLISTEN_AP_SAMECHN +s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable) +{ + s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable); + + if ((ret == 0) && enable) { + /* disable PM for p2p responding on infra AP channel */ + s32 pm = PM_OFF; + + ret = wldev_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), true); + } + + return ret; +} +#endif /* P2PLISTEN_AP_SAMECHN */ + +s32 wl_cfg80211_channel_to_freq(u32 channel) +{ + int freq = 0; + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(channel); +#else + { + u16 band = 0; + if (channel <= CH_MAX_2G_CHANNEL) + band = NL80211_BAND_2GHZ ; + else + band = NL80211_BAND_5GHZ ; + freq = ieee80211_channel_to_frequency(channel, band); + } +#endif + return freq; +} + + +#ifdef WLTDLS +static s32 +wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) { + + struct net_device *ndev = NULL; + u32 reason = ntoh32(e->reason); 
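+	/* The switch below maps firmware TDLS reason codes to log strings; on
+	 * WLC_E_TDLS_PEER_CONNECTED a previously cached management frame
+	 * (cfg->tdls_mgmt_frame, if any) is handed to cfg80211 through the
+	 * kernel-version-specific cfg80211_rx_mgmt() variants, and the cached
+	 * frame is freed again on WLC_E_TDLS_PEER_DISCONNECTED.
+	 */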
+ s8 *msg = NULL; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + switch (reason) { + case WLC_E_TDLS_PEER_DISCOVERED : + msg = " TDLS PEER DISCOVERD "; + break; + case WLC_E_TDLS_PEER_CONNECTED : +#ifdef PCIE_FULL_DONGLE + dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]); +#endif /* PCIE_FULL_DONGLE */ + if (cfg->tdls_mgmt_frame) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + 0); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + 0, GFP_ATOMIC); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ + defined(WL_COMPAT_WIRELESS) + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + GFP_ATOMIC); +#else + cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, + cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, + GFP_ATOMIC); +#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */ + } + msg = " TDLS PEER CONNECTED "; + break; + case WLC_E_TDLS_PEER_DISCONNECTED : +#ifdef PCIE_FULL_DONGLE + dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]); +#endif /* PCIE_FULL_DONGLE */ + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + cfg->tdls_mgmt_freq = 0; + } + msg = "TDLS PEER DISCONNECTED "; + break; + } + if (msg) { + WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)), + (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary")); + } + return 0; + +} +#endif /* WLTDLS */ + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) +static s32 +#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, const u8 *data, size_t len) +#else +wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data, + size_t len) +#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ +{ + s32 ret = 0; +#ifdef WLTDLS + struct bcm_cfg80211 *cfg; + tdls_wfd_ie_iovar_t info; + memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t)); + cfg = g_bcm_cfg; + +#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2) + /* Some customer platform back ported this feature from kernel 3.15 to kernel 3.10 + * and that cuases build error + */ + BCM_REFERENCE(peer_capability); +#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ + + switch (action_code) { + /* We need to set TDLS Wifi Display IE to firmware + * using tdls_wfd_ie iovar + */ + case WLAN_TDLS_SET_PROBE_WFD_IE: + WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__)); + info.mode = TDLS_WFD_PROBE_IE_TX; + memcpy(&info.data, data, len); + info.length 
= len; + break; + case WLAN_TDLS_SET_SETUP_WFD_IE: + WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__)); + info.mode = TDLS_WFD_IE_TX; + memcpy(&info.data, data, len); + info.length = len; + break; + case WLAN_TDLS_SET_WFD_ENABLED: + WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__)); + dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true); + goto out; + case WLAN_TDLS_SET_WFD_DISABLED: + WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__)); + dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false); + goto out; + default: + WL_ERR(("Unsupported action code : %d\n", action_code)); + goto out; + } + + ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + + if (ret) { + WL_ERR(("tdls_wfd_ie error %d\n", ret)); + } +out: +#endif /* WLTDLS */ + return ret; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 +wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper) +#else +static s32 +wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, enum nl80211_tdls_operation oper) +#endif +{ + s32 ret = 0; +#ifdef WLTDLS + struct bcm_cfg80211 *cfg; + tdls_iovar_t info; + dhd_pub_t *dhdp; + bool tdls_auto_mode = false; + cfg = g_bcm_cfg; + dhdp = (dhd_pub_t *)(cfg->pub); + memset(&info, 0, sizeof(tdls_iovar_t)); + if (peer) { + memcpy(&info.ea, peer, ETHER_ADDR_LEN); + } else { + return -1; + } + switch (oper) { + case NL80211_TDLS_DISCOVERY_REQ: + /* If the discovery request is broadcast then we need to set + * info.mode to Tunneled Probe Request + */ + if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) { + info.mode = TDLS_MANUAL_EP_WFD_TPQ; + WL_ERR(("%s TDLS TUNNELED PRBOBE REQUEST\n", __FUNCTION__)); + } else { + info.mode = TDLS_MANUAL_EP_DISCOVERY; + } + break; + case NL80211_TDLS_SETUP: + if (dhdp->tdls_mode == true) { + info.mode = TDLS_MANUAL_EP_CREATE; + tdls_auto_mode = false; + ret = dhd_tdls_enable(dev, false, tdls_auto_mode, NULL); + if (ret < 0) { + return ret; + } + } else { + tdls_auto_mode = true; + } + break; + case NL80211_TDLS_TEARDOWN: + info.mode = TDLS_MANUAL_EP_DELETE; + break; + default: + WL_ERR(("Unsupported operation : %d\n", oper)); + goto out; + } + /* turn on TDLS */ + ret = dhd_tdls_enable(dev, true, tdls_auto_mode, NULL); + if (ret < 0) { + return ret; + } + if (info.mode) { + ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret) { + WL_ERR(("tdls_endpoint error %d\n", ret)); + } + } +out: +#endif /* WLTDLS */ + return ret; +} +#endif + +s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len, + enum wl_management_type type) +{ + struct bcm_cfg80211 *cfg; + s32 ret = 0; + struct ether_addr primary_mac; + s32 bssidx = 0; + s32 pktflag = 0; + cfg = g_bcm_cfg; + + if (wl_get_drv_status(cfg, AP_CREATING, ndev)) { + /* Vendor IEs should be set to FW + * after SoftAP interface is brought up + */ + WL_DBG(("Skipping set IE since AP is not up \n")); + goto exit; + } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) { + /* Either stand alone AP case or P2P discovery */ + if (wl_get_drv_status(cfg, AP_CREATED, ndev)) { + /* Stand alone AP case on primary interface */ + WL_DBG(("Apply IEs for Primary AP Interface \n")); + bssidx = 0; + } else { + /* P2P Discovery case (p2p listen) */ + if (!cfg->p2p->on) { + /* Turn on Discovery interface */ + get_primary_mac(cfg, 
&primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + p2p_on(cfg) = true; + ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0); + if (unlikely(ret)) { + WL_ERR(("Enable discovery failed \n")); + goto exit; + } + } + WL_DBG(("Apply IEs for P2P Discovery Iface \n")); + ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + } else { + /* Virtual AP/ P2P Group Interface */ + WL_DBG(("Apply IEs for iface:%s\n", ndev->name)); + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + } + + if (ndev != NULL) { + switch (type) { + case WL_BEACON: + pktflag = VNDR_IE_BEACON_FLAG; + break; + case WL_PROBE_RESP: + pktflag = VNDR_IE_PRBRSP_FLAG; + break; + case WL_ASSOC_RESP: + pktflag = VNDR_IE_ASSOCRSP_FLAG; + break; + } + if (pktflag) { + ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, + ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len); + } + } +exit: + return ret; +} + +#ifdef WL_SUPPORT_AUTO_CHANNEL +static s32 +wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev) +{ + u32 val = 0; + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wiphy *wiphy; + /* Disable mpc, to avoid automatic interface down. */ + val = 0; + + wiphy = bcmcfg_to_wiphy(cfg); + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return ret; + } + ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + goto done; + } + + /* Set interface up, explicitly. */ + val = 1; + + ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true); + if (ret < 0) { + WL_ERR(("set interface up failed, error = %d\n", ret)); + goto done; + } + + /* Stop all scan explicitly, till auto channel selection complete. */ + wl_set_drv_status(cfg, SCANNING, ndev); + if (cfg->escan_info.ndev == NULL) { + ret = BCME_OK; + goto done; + } + ret = wl_notify_escan_complete(cfg, ndev, true, true); + if (ret < 0) { + WL_ERR(("set scan abort failed, error = %d\n", ret)); + goto done; + } + +done: + return ret; +} + +static bool +wl_cfg80211_valid_channel_p2p(int channel) +{ + bool valid = false; + + /* channel 1 to 14 */ + if ((channel >= 1) && (channel <= 14)) { + valid = true; + } + /* channel 36 to 48 */ + else if ((channel >= 36) && (channel <= 48)) { + valid = true; + } + /* channel 149 to 161 */ + else if ((channel >= 149) && (channel <= 161)) { + valid = true; + } + else { + valid = false; + WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel)); + } + + return valid; +} + +s32 +wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen) +{ + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = NULL; + wl_uint32_list_t *list = NULL; + chanspec_t chanspec = 0; + + memset(buf, 0, buflen); + + cfg = g_bcm_cfg; + list = (wl_uint32_list_t *)buf; + list->count = htod32(WL_NUMCHANSPECS); + + /* Restrict channels to 2.4GHz, 20MHz BW, no SB. 
*/ + chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 | + WL_CHANSPEC_CTL_SB_NONE); + chanspec = wl_chspec_host_to_driver(chanspec); + + ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec, + sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("get 'chanspecs' failed, error = %d\n", ret)); + } + + return ret; +} + +s32 +wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen) +{ + u32 channel = 0; + s32 ret = BCME_ERROR; + s32 i = 0; + s32 j = 0; + struct bcm_cfg80211 *cfg = NULL; + wl_uint32_list_t *list = NULL; + chanspec_t chanspec = 0; + + memset(buf, 0, buflen); + + cfg = g_bcm_cfg; + list = (wl_uint32_list_t *)buf; + list->count = htod32(WL_NUMCHANSPECS); + + /* Restrict channels to 5GHz, 20MHz BW, no SB. */ + chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 | + WL_CHANSPEC_CTL_SB_NONE); + chanspec = wl_chspec_host_to_driver(chanspec); + + ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec, + sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("get 'chanspecs' failed, error = %d\n", ret)); + goto done; + } + + /* Skip DFS and inavlid P2P channel. */ + for (i = 0, j = 0; i < dtoh32(list->count); i++) { + chanspec = (chanspec_t) dtoh32(list->element[i]); + channel = CHSPEC_CHANNEL(chanspec); + + ret = wldev_iovar_getint(ndev, "per_chan_info", &channel); + if (ret < 0) { + WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret)); + goto done; + } + + if (CHANNEL_IS_RADAR(channel) || + !(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) { + continue; + } else { + list->element[j] = list->element[i]; + } + + j++; + } + + list->count = j; + +done: + return ret; +} + +static s32 +wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen, + int *channel) +{ + s32 ret = BCME_ERROR; + int chosen = 0; + int retry = 0; + + /* Start auto channel selection scan. */ + ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true); + if (ret < 0) { + WL_ERR(("can't start auto channel scan, error = %d\n", ret)); + *channel = 0; + goto done; + } + + /* Wait for auto channel selection, worst case possible delay is 5250ms. */ + retry = CHAN_SEL_RETRY_COUNT; + + while (retry--) { + OSL_SLEEP(CHAN_SEL_IOCTL_DELAY); + + ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen), + false); + if ((ret == 0) && (dtoh32(chosen) != 0)) { + *channel = (u16)(chosen & 0x00FF); + WL_INFORM(("selected channel = %d\n", *channel)); + break; + } + WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n", + (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen))); + } + + if (retry <= 0) { + WL_ERR(("failure, auto channel selection timed out\n")); + *channel = 0; + ret = BCME_ERROR; + } + +done: + return ret; +} + +static s32 +wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev) +{ + u32 val = 0; + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + /* Clear scan stop driver status. */ + wl_clr_drv_status(cfg, SCANNING, ndev); + + /* Enable mpc back to 1, irrespective of initial state. 
*/ + val = 1; + + ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + } + + return ret; +} + +s32 +wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len) +{ + int channel = 0; + s32 ret = BCME_ERROR; + u8 *buf = NULL; + char *pos = cmd; + struct bcm_cfg80211 *cfg = NULL; + struct net_device *ndev = NULL; + + memset(cmd, 0, total_len); + + buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL); + if (buf == NULL) { + WL_ERR(("failed to allocate chanspec buffer\n")); + return -ENOMEM; + } + + /* + * Always use primary interface, irrespective of interface on which + * command came. + */ + cfg = g_bcm_cfg; + ndev = bcmcfg_to_prmry_ndev(cfg); + + /* + * Make sure that FW and driver are in right state to do auto channel + * selection scan. + */ + ret = wl_cfg80211_set_auto_channel_scan_state(ndev); + if (ret < 0) { + WL_ERR(("can't set auto channel scan state, error = %d\n", ret)); + goto done; + } + + /* Best channel selection in 2.4GHz band. */ + ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); + if (ret < 0) { + WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret)); + goto done; + } + + ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, + &channel); + if (ret < 0) { + WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret)); + goto done; + } + + if (CHANNEL_IS_2G(channel)) { + channel = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ ); + } else { + WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel)); + channel = 0; + } + + pos += snprintf(pos, total_len, "%04d ", channel); + + /* Best channel selection in 5GHz band. */ + ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); + if (ret < 0) { + WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret)); + goto done; + } + + ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, + &channel); + if (ret < 0) { + WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret)); + goto done; + } + + if (CHANNEL_IS_5G(channel)) { + channel = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ ); + } else { + WL_ERR(("invalid 5GHz channel, channel = %d\n", channel)); + channel = 0; + } + + pos += snprintf(pos, total_len, "%04d ", channel); + + /* Set overall best channel same as 5GHz best channel. */ + pos += snprintf(pos, total_len, "%04d ", channel); + +done: + if (NULL != buf) { + kfree(buf); + } + + /* Restore FW and driver back to normal state. */ + ret = wl_cfg80211_restore_auto_channel_scan_state(ndev); + if (ret < 0) { + WL_ERR(("can't restore auto channel scan state, error = %d\n", ret)); + } + + return (pos - cmd); +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +static const struct rfkill_ops wl_rfkill_ops = { + .set_block = wl_rfkill_set +}; + +static int wl_rfkill_set(void *data, bool blocked) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; + + WL_DBG(("Enter \n")); + WL_DBG(("RF %s\n", blocked ? 
"blocked" : "unblocked")); + + if (!cfg) + return -EINVAL; + + cfg->rf_blocked = blocked; + + return 0; +} + +static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup) +{ + s32 err = 0; + + WL_DBG(("Enter \n")); + if (!cfg) + return -EINVAL; + if (setup) { + cfg->rfkill = rfkill_alloc("brcmfmac-wifi", + wl_cfg80211_get_parent_dev(), + RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg); + + if (!cfg->rfkill) { + err = -ENOMEM; + goto err_out; + } + + err = rfkill_register(cfg->rfkill); + + if (err) + rfkill_destroy(cfg->rfkill); + } else { + if (!cfg->rfkill) { + err = -ENOMEM; + goto err_out; + } + + rfkill_unregister(cfg->rfkill); + rfkill_destroy(cfg->rfkill); + } + +err_out: + return err; +} + +#ifdef DEBUGFS_CFG80211 +/** +* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level +* to turn on SCAN and DBG log. +* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level +* To see current setting of debug level, +* cat /sys/kernel/debug/dhd/debug_level +*/ +static ssize_t +wl_debuglevel_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL]; + char *params, *token, *colon; + uint i, tokens, log_on = 0; + memset(tbuf, 0, sizeof(tbuf)); + memset(sublog, 0, sizeof(sublog)); + if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count))) + return -EFAULT; + + params = &tbuf[0]; + colon = strchr(params, '\n'); + if (colon != NULL) + *colon = '\0'; + while ((token = strsep(¶ms, " ")) != NULL) { + memset(sublog, 0, sizeof(sublog)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + colon = strchr(token, ':'); + if (colon != NULL) { + *colon = ' '; + } + tokens = sscanf(token, "%s %u", sublog, &log_on); + if (colon != NULL) + *colon = ':'; + + if (tokens == 2) { + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + if (!strncmp(sublog, sublogname_map[i].sublogname, + strlen(sublogname_map[i].sublogname))) { + if (log_on) + wl_dbg_level |= + (sublogname_map[i].log_level); + else + wl_dbg_level &= + ~(sublogname_map[i].log_level); + } + } + } else + WL_ERR(("%s: can't parse '%s' as a " + "SUBMODULE:LEVEL (%d tokens)\n", + tbuf, token, tokens)); + + + } + return count; +} + +static ssize_t +wl_debuglevel_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *param; + char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)]; + uint i; + memset(tbuf, 0, sizeof(tbuf)); + param = &tbuf[0]; + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ", + sublogname_map[i].sublogname, + (wl_dbg_level & sublogname_map[i].log_level) ? 
1 : 0); + } + *param = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0])); + +} +static const struct file_operations fops_debuglevel = { + .open = NULL, + .write = wl_debuglevel_write, + .read = wl_debuglevel_read, + .owner = THIS_MODULE, + .llseek = NULL, +}; + +static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + struct dentry *_dentry; + if (!cfg) + return -EINVAL; + cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cfg->debugfs || IS_ERR(cfg->debugfs)) { + if (cfg->debugfs == ERR_PTR(-ENODEV)) + WL_ERR(("Debugfs is not enabled on this kernel\n")); + else + WL_ERR(("Can not create debugfs directory\n")); + cfg->debugfs = NULL; + goto exit; + + } + _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR, + cfg->debugfs, cfg, &fops_debuglevel); + if (!_dentry || IS_ERR(_dentry)) { + WL_ERR(("failed to create debug_level debug file\n")); + wl_free_debugfs(cfg); + } +exit: + return err; +} +static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg) +{ + if (!cfg) + return -EINVAL; + if (cfg->debugfs) + debugfs_remove_recursive(cfg->debugfs); + cfg->debugfs = NULL; + return 0; +} +#endif /* DEBUGFS_CFG80211 */ + +struct device *wl_cfg80211_get_parent_dev(void) +{ + return cfg80211_parent_dev; +} + +void wl_cfg80211_set_parent_dev(void *dev) +{ + cfg80211_parent_dev = dev; +} + +static void wl_cfg80211_clear_parent_dev(void) +{ + cfg80211_parent_dev = NULL; +} + +void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN); +} +static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (((dev_role == NL80211_IFTYPE_AP) && + !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) || + ((dev_role == NL80211_IFTYPE_P2P_GO) && + !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE))) + { + WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode)); + return false; + } + return true; +} + +int wl_cfg80211_do_driver_init(struct net_device *net) +{ + struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net); + + if (!cfg || !cfg->wdev) + return -EINVAL; + + if (dhd_do_driver_init(cfg->wdev->netdev) < 0) + return -1; + + return 0; +} + +void wl_cfg80211_enable_trace(bool set, u32 level) +{ + if (set) + wl_dbg_level = level & WL_DBG_LEVEL; + else + wl_dbg_level |= (WL_DBG_LEVEL & level); +} +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) +static s32 +wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + bcm_struct_cfgdev *cfgdev, u64 cookie) +{ + /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION + * is passed with CMD_FRAME. This callback is supposed to cancel + * the OFFCHANNEL Wait. Since we are already taking care of that + * with the tx_mgmt logic, do nothing here. 
+ */ + + return 0; +} +#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */ + +#ifdef WL11U +bcm_tlv_t * +wl_cfg80211_find_interworking_ie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) { + return (bcm_tlv_t *)ie; + } + return NULL; +} + + +static s32 +wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag, + uint8 ie_id, uint8 *data, uint8 data_len) +{ + s32 err = BCME_OK; + s32 buf_len; + s32 iecount; + ie_setbuf_t *ie_setbuf; + + if (ie_id != DOT11_MNG_INTERWORKING_ID) + return BCME_UNSUPPORTED; + + /* Validate the pktflag parameter */ + if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | + VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | + VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG| + VNDR_IE_CUSTOM_FLAG))) { + WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag)); + return -1; + } + + /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */ + pktflag = htod32(pktflag); + + buf_len = sizeof(ie_setbuf_t) + data_len - 1; + ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL); + + if (!ie_setbuf) { + WL_ERR(("Error allocating buffer for IE\n")); + return -ENOMEM; + } + + if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) { + WL_ERR(("Previous IW IE is equals to current IE\n")); + err = BCME_OK; + goto exit; + } + + strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1); + ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int)); + memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32)); + + /* Now, add the IE to the buffer */ + ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id; + + /* if already set with previous values, delete it first */ + if (cfg->iw_ie_len != 0) { + WL_DBG(("Different IW_IE was already set. 
clear first\n")); + + ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0; + + err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) + goto exit; + } + + ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len; + memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len); + + err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + + if (err == BCME_OK) { + memcpy(cfg->iw_ie, data, data_len); + cfg->iw_ie_len = data_len; + cfg->wl11u = TRUE; + + err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx); + } + +exit: + if (ie_setbuf) + kfree(ie_setbuf); + return err; +} +#endif /* WL11U */ + +s32 +wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + char ioctl_buf[50]; + int err = 0; + uint32 val = 0; + chanspec_t chanspec = 0; + int abort; + int bytes_written = 0; + wl_dfs_ap_move_status_t *status; + char chanbuf[CHANSPEC_STR_LEN]; + const char *dfs_state_str[DFS_SCAN_S_MAX] = { + "Radar Free On Channel", + "Radar Found On Channel", + "Radar Scan In Progress", + "Radar Scan Aborted", + "RSDB Mode switch in Progress For Scan" + }; + if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + bytes_written = snprintf(command, total_len, "AP is not UP\n"); + return bytes_written; + } + if (!*data) { + if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("setting dfs_ap_move failed with err=%d \n", err)); + return err; + } + status = (wl_dfs_ap_move_status_t *)cfg->ioctl_buf; + + if (status->version != WL_DFS_AP_MOVE_VERSION) { + err = BCME_UNSUPPORTED; + WL_ERR(("err=%d version=%d\n", err, status->version)); + return err; + } + + if (status->move_status != (int8) DFS_SCAN_S_IDLE) { + chanspec = wl_chspec_driver_to_host(status->chanspec); + if (chanspec != 0 && chanspec != INVCHANSPEC) { + wf_chspec_ntoa(chanspec, chanbuf); + bytes_written = snprintf(command, total_len, + "AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec); + } + bytes_written += snprintf(command + bytes_written, total_len, + "%s\n", dfs_state_str[status->move_status]); + return bytes_written; + } else { + bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n"); + return bytes_written; + } + + } + + abort = bcm_atoi(data); + if (abort == -1) { + if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort, + sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) { + WL_ERR(("seting dfs_ap_move failed with err %d\n", err)); + return err; + } + } else { + chanspec = wf_chspec_aton(data); + if (chanspec != 0) { + val = wl_chspec_host_to_driver(chanspec); + if (val != INVCHANSPEC) { + if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val, + sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) { + WL_ERR(("seting dfs_ap_move failed with err %d\n", err)); + return err; + } + WL_DBG((" set dfs_ap_move successfull")); + } else { + err = BCME_USAGE_ERROR; + } + } + } + return err; +} + +s32 +wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len) +{ + uint i = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_roam_prof_band_t *rp; + int err = -EINVAL, bytes_written = 0; + size_t len = strlen(data); + int rp_len = 0; + data[len] = '\0'; + rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp) + * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL); 
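+	/*
+	 * Command layout (values below are illustrative, not from firmware):
+	 *   "b"                              - dump the current 2.4 GHz roam_prof
+	 *                                      table, one bracket per line, e.g.
+	 *                                      "RSSI[-60,-75] CU(trigger:70%: duration:10s)"
+	 *   "b -60 -75 70 10 -70 -85 70 10"  - program exactly two brackets, each given as
+	 *                                      "<trigger> <rssi_lower> <channel_usage> <cu_avg_calc_dur>"
+	 * The leading character selects the band: 'b' = 2.4 GHz, 'a' = 5 GHz.
+	 */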
+ if (unlikely(!rp)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + + rp->ver = WL_MAX_ROAM_PROF_VER; + if (*data && (!strncmp(data, "b", 1))) { + rp->band = WLC_BAND_2G; + } else if (*data && (!strncmp(data, "a", 1))) { + rp->band = WLC_BAND_5G; + } else { + err = snprintf(command, total_len, "Missing band\n"); + goto exit; + } + data++; + rp->len = 0; + /* Getting roam profile from fw */ + if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting roam_profile failed with err=%d \n", err)); + goto exit; + } + memcpy(rp, cfg->ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS); + /* roam_prof version get */ + if (rp->ver != WL_MAX_ROAM_PROF_VER) { + WL_ERR(("bad version (=%d) in return data\n", rp->ver)); + err = -EINVAL; + goto exit; + } + if ((rp->len % sizeof(wl_roam_prof_t)) != 0) { + WL_ERR(("bad length (=%d) in return data\n", rp->len)); + err = -EINVAL; + goto exit; + } + + if (!*data) { + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* printing contents of roam profile data from fw and exits + * if code hits any of one of the below condtion. If remaining + * length of buffer is less than roam profile size or + * if there is no valid entry. + */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + bytes_written += snprintf(command+bytes_written, + total_len, "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n", + rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower, + rp->roam_prof[i].channel_usage, + rp->roam_prof[i].cu_avg_calc_dur); + } + err = bytes_written; + goto exit; + } else { + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* reading contents of roam profile data from fw and exits + * if code hits any of one of the below condtion, If remaining + * length of buffer is less than roam profile size or if there + * is no valid entry. 
+ */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + } + /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */ + if (i != 2) { + WL_ERR(("FW must have 2 rows to fill roam_prof\n")); + err = -EINVAL; + goto exit; + } + /* setting roam profile to fw */ + data++; + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + rp->roam_prof[i].roam_trigger = simple_strtol(data, &data, 10); + data++; + rp->roam_prof[i].rssi_lower = simple_strtol(data, &data, 10); + data++; + rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10); + data++; + rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10); + + rp_len += sizeof(wl_roam_prof_t); + if (*data == '\0') { + break; + } + data++; + } + if (i != 1) { + WL_ERR(("Only two roam_prof rows supported.\n")); + err = -EINVAL; + goto exit; + } + rp->len = rp_len; + if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp, + sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting roam_profile failed with err %d\n", err)); + } + } +exit: + if (rp) { + kfree(rp); + } + return err; +} + +int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data, + char *command, int total_len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int bytes_written = 0, err = -EINVAL, argc = 0; + char rssi[5], band[5], weight[5]; + char *endptr = NULL; + wnm_bss_select_weight_cfg_t *bwcfg; + + bwcfg = kzalloc(sizeof(*bwcfg), GFP_KERNEL); + if (unlikely(!bwcfg)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION; + bwcfg->type = 0; + bwcfg->weight = 0; + + argc = sscanf(data, "%s %s %s", rssi, band, weight); + + if (!strcasecmp(rssi, "rssi")) + bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI; + else if (!strcasecmp(rssi, "cu")) + bwcfg->type = WNM_BSS_SELECT_TYPE_CU; + else { + /* Usage DRIVER WBTEXT_WEIGHT_CONFIG */ + WL_ERR(("%s: Command usage error\n", __func__)); + goto exit; + } + + if (!strcasecmp(band, "a")) + bwcfg->band = WLC_BAND_5G; + else if (!strcasecmp(band, "b")) + bwcfg->band = WLC_BAND_2G; + else if (!strcasecmp(band, "all")) + bwcfg->band = WLC_BAND_ALL; + else { + WL_ERR(("%s: Command usage error\n", __func__)); + goto exit; + } + + if (argc == 2) { + /* If there is no data after band, getting wnm_bss_select_weight from fw */ + if (bwcfg->band == WLC_BAND_ALL) { + WL_ERR(("band option \"all\" is for set only, not get\n")); + goto exit; + } + if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg, + sizeof(*bwcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err)); + goto exit; + } + memcpy(bwcfg, cfg->ioctl_buf, sizeof(*bwcfg)); + bytes_written = snprintf(command, total_len, "%s %s weight = %d\n", + (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU", + (bwcfg->band == WLC_BAND_2G) ? 
"2G" : "5G", bwcfg->weight); + err = bytes_written; + goto exit; + } else { + /* if weight is non integer returns command usage error */ + bwcfg->weight = simple_strtol(weight, &endptr, 0); + if (*endptr != '\0') { + WL_ERR(("%s: Command usage error", __func__)); + goto exit; + } + /* setting weight for iovar wnm_bss_select_weight to fw */ + if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg, + sizeof(*bwcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_weight failed with err=%d\n", err)); + } + } +exit: + if (bwcfg) { + kfree(bwcfg); + } + return err; +} + +/* WBTEXT_TUPLE_MIN_LEN_CHECK :strlen(low)+" "+strlen(high)+" "+strlen(factor) */ +#define WBTEXT_TUPLE_MIN_LEN_CHECK 5 + +int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data, + char *command, int total_len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int bytes_written = 0, err = -EINVAL; + char rssi[5], band[5]; + int btcfg_len = 0, i = 0, parsed_len = 0; + wnm_bss_select_factor_cfg_t *btcfg; + size_t slen = strlen(data); + char *start_addr = NULL; + data[slen] = '\0'; + + btcfg = kzalloc((sizeof(*btcfg) + sizeof(*btcfg) * + WL_FACTOR_TABLE_MAX_LIMIT), GFP_KERNEL); + if (unlikely(!btcfg)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + + btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION; + btcfg->band = WLC_BAND_AUTO; + btcfg->type = 0; + btcfg->count = 0; + + sscanf(data, "%s %s", rssi, band); + + if (!strcasecmp(rssi, "rssi")) { + btcfg->type = WNM_BSS_SELECT_TYPE_RSSI; + } + else if (!strcasecmp(rssi, "cu")) { + btcfg->type = WNM_BSS_SELECT_TYPE_CU; + } + else { + WL_ERR(("%s: Command usage error\n", __func__)); + goto exit; + } + + if (!strcasecmp(band, "a")) { + btcfg->band = WLC_BAND_5G; + } + else if (!strcasecmp(band, "b")) { + btcfg->band = WLC_BAND_2G; + } + else if (!strcasecmp(band, "all")) { + btcfg->band = WLC_BAND_ALL; + } + else { + WL_ERR(("%s: Command usage, Wrong band\n", __func__)); + goto exit; + } + + if ((slen - 1) == (strlen(rssi) + strlen(band))) { + /* Getting factor table using iovar 'wnm_bss_select_table' from fw */ + if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg, + sizeof(*btcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err)); + goto exit; + } + memcpy(btcfg, cfg->ioctl_buf, sizeof(*btcfg)); + memcpy(btcfg, cfg->ioctl_buf, (btcfg->count+1) * sizeof(*btcfg)); + + bytes_written += snprintf(command + bytes_written, total_len, + "No of entries in table: %d\n", btcfg->count); + bytes_written += snprintf(command + bytes_written, total_len, "%s factor table\n", + (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? 
"RSSI" : "CU"); + bytes_written += snprintf(command + bytes_written, total_len, + "low\thigh\tfactor\n"); + for (i = 0; i <= btcfg->count-1; i++) { + bytes_written += snprintf(command + bytes_written, total_len, + "%d\t%d\t%d\n", btcfg->params[i].low, btcfg->params[i].high, + btcfg->params[i].factor); + } + err = bytes_written; + goto exit; + } else { + memset(btcfg->params, 0, sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT); + data += (strlen(rssi) + strlen(band) + 2); + start_addr = data; + slen = slen - (strlen(rssi) + strlen(band) + 2); + for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) { + if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) { + btcfg->params[i].low = simple_strtol(data, &data, 10); + data++; + btcfg->params[i].high = simple_strtol(data, &data, 10); + data++; + btcfg->params[i].factor = simple_strtol(data, &data, 10); + btcfg->count++; + if (*data == '\0') { + break; + } + data++; + parsed_len = data - start_addr; + } else { + WL_ERR(("%s:Command usage:less no of args\n", __func__)); + goto exit; + } + } + btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg)); + if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len, + cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err)); + goto exit; + } + } +exit: + if (btcfg) { + kfree(btcfg); + } + return err; +} + +s32 +wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len) +{ + uint i = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0; + char delta[5], band[5], *endptr = NULL; + wl_roam_prof_band_t *rp; + + rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp) + * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL); + if (unlikely(!rp)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + + argc = sscanf(data, "%s %s", band, delta); + if (!strcasecmp(band, "a")) + rp->band = WLC_BAND_5G; + else if (!strcasecmp(band, "b")) + rp->band = WLC_BAND_2G; + else { + WL_ERR(("%s: Missing band\n", __func__)); + goto exit; + } + /* Getting roam profile from fw */ + if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting roam_profile failed with err=%d \n", err)); + goto exit; + } + memcpy(rp, cfg->ioctl_buf, sizeof(wl_roam_prof_band_t)); + if (rp->ver != WL_MAX_ROAM_PROF_VER) { + WL_ERR(("bad version (=%d) in return data\n", rp->ver)); + err = -EINVAL; + goto exit; + } + if ((rp->len % sizeof(wl_roam_prof_t)) != 0) { + WL_ERR(("bad length (=%d) in return data\n", rp->len)); + err = -EINVAL; + goto exit; + } + + if (argc == 2) { + /* if delta is non integer returns command usage error */ + val = simple_strtol(delta, &endptr, 0); + if (*endptr != '\0') { + WL_ERR(("%s: Command usage error", __func__)); + goto exit; + } + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* + * Checking contents of roam profile data from fw and exits + * if code hits below condtion. If remaining length of buffer is + * less than roam profile size or if there is no valid entry. + */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + if (rp->roam_prof[i].channel_usage != 0) { + rp->roam_prof[i].roam_delta = val; + } + len += sizeof(wl_roam_prof_t); + } + } + else { + if (rp->roam_prof[i].channel_usage != 0) { + bytes_written = snprintf(command, total_len, + "%s Delta %d\n", (rp->band == WLC_BAND_2G) ? 
"2G" : "5G", + rp->roam_prof[0].roam_delta); + } + err = bytes_written; + goto exit; + } + rp->len = len; + if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp, + sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting roam_profile failed with err %d\n", err)); + } +exit : + if (rp) { + kfree(rp); + } + return err; +} + + +int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev) +{ + struct bcm_cfg80211 *cfg = NULL; + struct net_device *ndev = NULL; + struct cfg80211_scan_info info = {}; + unsigned long flags; + int clear_flag = 0; + int ret = 0; + + WL_TRACE(("Enter\n")); + + cfg = g_bcm_cfg; + if (!cfg) + return -EINVAL; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); +#ifdef WL_CFG80211_P2P_DEV_IF + if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) { +#else + if (cfg->scan_request && cfg->scan_request->dev == cfgdev) { +#endif + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + clear_flag = 1; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + + if (clear_flag) + wl_clr_drv_status(cfg, SCANNING, ndev); + + return ret; +} + +bool wl_cfg80211_is_concurrent_mode(void) +{ + if ((g_bcm_cfg) && (wl_get_drv_status_all(g_bcm_cfg, CONNECTED) > 1)) { + return true; + } else { + return false; + } +} + +void* wl_cfg80211_get_dhdp() +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + return cfg->pub; +} + +bool wl_cfg80211_is_p2p_active(void) +{ + return (g_bcm_cfg && g_bcm_cfg->p2p); +} + +bool wl_cfg80211_is_roam_offload(void) +{ + return (g_bcm_cfg && g_bcm_cfg->roam_offload); +} + +bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx) +{ + dhd_pub_t *dhd = NULL; + struct net_device *ndev = NULL; + u8 *curbssid = NULL; + + dhd = (dhd_pub_t *)(g_bcm_cfg->pub); + + if (dhd) { + ndev = dhd_idx2net(dhd, ifidx); + } + + if (!dhd || !ndev) { + return false; + } + + curbssid = wl_read_prof(g_bcm_cfg, ndev, WL_PROF_BSSID); + + return memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0; +} + +static void wl_cfg80211_work_handler(struct work_struct * work) +{ + struct bcm_cfg80211 *cfg = NULL; + struct net_info *iter, *next; + s32 err = BCME_OK; + s32 pm = PM_FAST; + BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work); + WL_DBG(("Enter \n")); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev could be null */ + if (iter->ndev) { + if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) || + (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS && + wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS)) + continue; + if (iter->ndev) { + if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, + &pm, sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + iter->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + iter->ndev->name, err)); + } else + wl_cfg80211_update_power_mode(iter->ndev); + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) + _Pragma("GCC diagnostic pop") +#endif + DHD_OS_WAKE_UNLOCK(cfg->pub); +} + +u8 +wl_get_action_category(void *frame, u32 frame_len) +{ + u8 category; + u8 *ptr = (u8 *)frame; + if (frame == NULL) + return DOT11_ACTION_CAT_ERR_MASK; + if (frame_len < DOT11_ACTION_HDR_LEN) + return 
DOT11_ACTION_CAT_ERR_MASK; + category = ptr[DOT11_ACTION_CAT_OFF]; + WL_INFORM(("Action Category: %d\n", category)); + return category; +} + +int +wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action) +{ + u8 *ptr = (u8 *)frame; + if (frame == NULL || ret_action == NULL) + return BCME_ERROR; + if (frame_len < DOT11_ACTION_HDR_LEN) + return BCME_ERROR; + if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len)) + return BCME_ERROR; + *ret_action = ptr[DOT11_ACTION_ACT_OFF]; + WL_INFORM(("Public Action : %d\n", *ret_action)); + return BCME_OK; +} + + +static int +wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const struct ether_addr *bssid) +{ + s32 err; + wl_event_msg_t e; + + bzero(&e, sizeof(e)); + e.event_type = cpu_to_be32(WLC_E_BSSID); + memcpy(&e.addr, bssid, ETHER_ADDR_LEN); + /* trigger the roam event handler */ + WL_INFORM(("Delayed roam to " MACDBG "\n", MAC2STRDBG((u8*)(bssid)))); + err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL); + + return err; +} + +static s32 +wl_cfg80211_parse_vndr_ies(u8 *parse, u32 len, + struct parsed_vndr_ies *vndr_ies) +{ + s32 err = BCME_OK; + vndr_ie_t *vndrie; + bcm_tlv_t *ie; + struct parsed_vndr_ie_info *parsed_info; + u32 count = 0; + s32 remained_len; + + remained_len = (s32)len; + memset(vndr_ies, 0, sizeof(*vndr_ies)); + + WL_INFORM(("---> len %d\n", len)); + ie = (bcm_tlv_t *) parse; + if (!bcm_valid_tlv(ie, remained_len)) + ie = NULL; + while (ie) { + if (count >= MAX_VNDR_IE_NUMBER) + break; + if (ie->id == DOT11_MNG_VS_ID) { + vndrie = (vndr_ie_t *) ie; + /* len should be bigger than OUI length + one data length at least */ + if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) { + WL_ERR(("%s: invalid vndr ie. length is too small %d\n", + __FUNCTION__, vndrie->len)); + goto end; + } + /* if wpa or wme ie, do not add ie */ + if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) && + ((vndrie->data[0] == WPA_OUI_TYPE) || + (vndrie->data[0] == WME_OUI_TYPE))) { + CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n")); + goto end; + } + + parsed_info = &vndr_ies->ie_info[count++]; + + /* save vndr ie information */ + parsed_info->ie_ptr = (char *)vndrie; + parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN); + memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t)); + vndr_ies->count = count; + + WL_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x len:%d\n", + parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1], + parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0], + parsed_info->ie_len)); + } +end: + ie = bcm_next_tlv(ie, &remained_len); + } + return err; +} + +s32 +wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + s32 index; + struct net_info *netinfo; + s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, + VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG}; + + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + if (!netinfo || !netinfo->wdev) { + WL_ERR(("netinfo or netinfo->wdev is NULL\n")); + return -1; + } + + WL_DBG(("clear management vendor IEs for bssidx:%d \n", bssidx)); + /* Clear the IEs set in the firmware so that host is in sync with firmware */ + for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) { + if (wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev), + bssidx, vndrie_flag[index], NULL, 0) < 0) + WL_ERR(("vndr_ies clear failed. Ignoring.. 
\n")); + } + + return 0; +} + +s32 +wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg) +{ + struct net_info *iter, *next; + + WL_DBG(("clear management vendor IEs \n")); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + wl_cfg80211_clear_per_bss_ies(cfg, iter->bssidx); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + return 0; +} + +#define WL_VNDR_IE_MAXLEN 2048 +static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN]; +int +wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len) +{ + struct net_device *ndev = NULL; + s32 ret = BCME_OK; + u8 *curr_ie_buf = NULL; + u8 *mgmt_ie_buf = NULL; + u32 mgmt_ie_buf_len = 0; + u32 *mgmt_ie_len = 0; + u32 del_add_ie_buf_len = 0; + u32 total_ie_buf_len = 0; + u32 parsed_ie_buf_len = 0; + struct parsed_vndr_ies old_vndr_ies; + struct parsed_vndr_ies new_vndr_ies; + s32 i; + u8 *ptr; + s32 remained_buf_len; + wl_bss_vndr_ies_t *ies = NULL; + struct net_info *netinfo; + + WL_DBG(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d \n", + pktflag, bssidx, vndr_ie_len)); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (bssidx > WL_MAX_IFS) { + WL_ERR(("bssidx > supported concurrent Ifaces \n")); + return -EINVAL; + } + + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + if (!netinfo) { + WL_ERR(("net_info ptr is NULL \n")); + return -EINVAL; + } + + /* Clear the global buffer */ + memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf)); + curr_ie_buf = g_mgmt_ie_buf; + ies = &netinfo->bss.ies; + + switch (pktflag) { + case VNDR_IE_PRBRSP_FLAG : + mgmt_ie_buf = ies->probe_res_ie; + mgmt_ie_len = &ies->probe_res_ie_len; + mgmt_ie_buf_len = sizeof(ies->probe_res_ie); + break; + case VNDR_IE_ASSOCRSP_FLAG : + mgmt_ie_buf = ies->assoc_res_ie; + mgmt_ie_len = &ies->assoc_res_ie_len; + mgmt_ie_buf_len = sizeof(ies->assoc_res_ie); + break; + case VNDR_IE_BEACON_FLAG : + mgmt_ie_buf = ies->beacon_ie; + mgmt_ie_len = &ies->beacon_ie_len; + mgmt_ie_buf_len = sizeof(ies->beacon_ie); + break; + case VNDR_IE_PRBREQ_FLAG : + mgmt_ie_buf = ies->probe_req_ie; + mgmt_ie_len = &ies->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(ies->probe_req_ie); + break; + case VNDR_IE_ASSOCREQ_FLAG : + mgmt_ie_buf = ies->assoc_req_ie; + mgmt_ie_len = &ies->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(ies->assoc_req_ie); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + WL_ERR(("not suitable packet type (%d)\n", pktflag)); + return BCME_ERROR; + } + + if (vndr_ie_len > mgmt_ie_buf_len) { + WL_ERR(("extra IE size too big\n")); + ret = -ENOMEM; + } else { + /* parse and save new vndr_ie in curr_ie_buff before comparing it */ + if (vndr_ie && vndr_ie_len && curr_ie_buf) { + ptr = curr_ie_buf; +/* must discard vndr_ie constness, attempt to change vndr_ie arg to non-const + * causes cascade of errors in other places, fix involves const casts there + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + if ((ret = wl_cfg80211_parse_vndr_ies((u8 *)vndr_ie, + vndr_ie_len, &new_vndr_ies)) < 0) { + WL_ERR(("parse vndr ie failed \n")); + goto exit; + 
} +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + for (i = 0; i < new_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &new_vndr_ies.ie_info[i]; + + if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) { + WL_ERR(("IE size is too big (%d > %d)\n", + parsed_ie_buf_len, WL_VNDR_IE_MAXLEN)); + ret = -EINVAL; + goto exit; + } + + memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr, + vndrie_info->ie_len); + parsed_ie_buf_len += vndrie_info->ie_len; + } + } + + if (mgmt_ie_buf != NULL) { + if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) && + (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) { + WL_INFORM(("Previous mgmt IE is equals to current IE")); + goto exit; + } + + /* parse old vndr_ie */ + if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, + &old_vndr_ies)) < 0) { + WL_ERR(("parse vndr ie failed \n")); + goto exit; + } + /* make a command to delete old ie */ + for (i = 0; i < old_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &old_vndr_ies.ie_info[i]; + + WL_INFORM(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n", + vndrie_info->vndrie.id, vndrie_info->vndrie.len, + vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], + vndrie_info->vndrie.oui[2])); + + del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, + pktflag, vndrie_info->vndrie.oui, + vndrie_info->vndrie.id, + vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, + vndrie_info->ie_len - VNDR_IE_FIXED_LEN, + "del"); + + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + + *mgmt_ie_len = 0; + /* Add if there is any extra IE */ + if (mgmt_ie_buf && parsed_ie_buf_len) { + ptr = mgmt_ie_buf; + + remained_buf_len = mgmt_ie_buf_len; + + /* make a command to add new ie */ + for (i = 0; i < new_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &new_vndr_ies.ie_info[i]; + + WL_INFORM(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n", + vndrie_info->vndrie.id, vndrie_info->vndrie.len, + vndrie_info->ie_len - 2, + vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], + vndrie_info->vndrie.oui[2])); + + del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, + pktflag, vndrie_info->vndrie.oui, + vndrie_info->vndrie.id, + vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, + vndrie_info->ie_len - VNDR_IE_FIXED_LEN, + "add"); + + /* verify remained buf size before copy data */ + if (remained_buf_len >= vndrie_info->ie_len) { + remained_buf_len -= vndrie_info->ie_len; + } else { + WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, " + "found vndr ies # = %d(cur %d), remained len %d, " + "cur mgmt_ie_len %d, new ie len = %d\n", + pktflag, new_vndr_ies.count, i, remained_buf_len, + *mgmt_ie_len, vndrie_info->ie_len)); + break; + } + + /* save the parsed IE in cfg struct */ + memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr, + vndrie_info->ie_len); + *mgmt_ie_len += vndrie_info->ie_len; + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + + if (total_ie_buf_len && cfg->ioctl_buf != NULL) { + ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf, + total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &cfg->ioctl_buf_sync); + if (ret) + WL_ERR(("vndr ie set error : %d\n", ret)); + } + } +exit: + +return ret; +} + +#ifdef WL_CFG80211_ACL +static int +wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev, + const struct cfg80211_acl_data 
*acl) +{ + int i; + int ret = 0; + int macnum = 0; + int macmode = MACLIST_MODE_DISABLED; + struct maclist *list; + + /* get the MAC filter mode */ + if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) { + macmode = MACLIST_MODE_ALLOW; + } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED && + acl->n_acl_entries) { + macmode = MACLIST_MODE_DENY; + } + + /* if acl == NULL, macmode is still disabled.. */ + if (macmode == MACLIST_MODE_DISABLED) { + if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0) + WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); + + return ret; + } + + macnum = acl->n_acl_entries; + if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { + WL_ERR(("%s : invalid number of MAC address entries %d\n", + __FUNCTION__, macnum)); + return -1; + } + + /* allocate memory for the MAC list */ + list = (struct maclist*)kmalloc(sizeof(int) + + sizeof(struct ether_addr) * macnum, GFP_KERNEL); + if (!list) { + WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__)); + return -1; + } + + /* prepare the MAC list */ + list->count = htod32(macnum); + for (i = 0; i < macnum; i++) { + memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN); + } + /* set the list */ + if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0) + WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); + + kfree(list); + + return ret; +} +#endif /* WL_CFG80211_ACL */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) +int wl_chspec_chandef(chanspec_t chanspec, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + struct cfg80211_chan_def *chandef, +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + struct chan_info *chaninfo, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) */ +struct wiphy *wiphy) + +{ + uint16 freq = 0; + int chan_type = 0; + int channel = 0; + struct ieee80211_channel *chan; + + if (!chandef) { + return -1; + } + channel = CHSPEC_CHANNEL(chanspec); + + switch (CHSPEC_BW(chanspec)) { + case WL_CHANSPEC_BW_20: + chan_type = NL80211_CHAN_HT20; + break; + case WL_CHANSPEC_BW_40: + { + if (CHSPEC_SB_UPPER(chanspec)) { + channel += CH_10MHZ_APART; + } else { + channel -= CH_10MHZ_APART; + } + } + chan_type = NL80211_CHAN_HT40PLUS; + break; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + case WL_CHANSPEC_BW_80: + case WL_CHANSPEC_BW_8080: + { + uint16 sb = CHSPEC_CTL_SB(chanspec); + + if (sb == WL_CHANSPEC_CTL_SB_LL) { + channel -= (CH_10MHZ_APART + CH_20MHZ_APART); + } else if (sb == WL_CHANSPEC_CTL_SB_LU) { + channel -= CH_10MHZ_APART; + } else if (sb == WL_CHANSPEC_CTL_SB_UL) { + channel += CH_10MHZ_APART; + } else { + /* WL_CHANSPEC_CTL_SB_UU */ + channel += (CH_10MHZ_APART + CH_20MHZ_APART); + } + + if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU) + chan_type = NL80211_CHAN_HT40MINUS; + else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU) + chan_type = NL80211_CHAN_HT40PLUS; + } + break; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + default: + chan_type = NL80211_CHAN_HT20; + break; + + } + + if (CHSPEC_IS5G(chanspec)) + freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ); + else + freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); + + chan = ieee80211_get_channel(wiphy, freq); + WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n", + channel, freq, chan_type, chan)); + + if (unlikely(!chan)) { + /* fw and cfg80211 channel lists are 
not in sync */ + WL_ERR(("Couldn't find matching channel in wiphy channel list \n")); + ASSERT(0); + return -EINVAL; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + cfg80211_chandef_create(chandef, chan, chan_type); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + chaninfo->freq = freq; + chaninfo->chan_type = chan_type; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + return 0; +} + +void +wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy) +{ + u32 freq; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + struct cfg80211_chan_def chandef; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + struct chan_info chaninfo; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + + if (!wiphy) { + WL_ERR(("wiphy is null\n")); + return; + } +#ifndef ALLOW_CHSW_EVT + /* Channel switch support is only for AP/GO/ADHOC/MESH */ + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION || + dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) { + WL_ERR(("No channel switch notify support for STA/GC\n")); + return; + } +#endif /* !ALLOW_CHSW_EVT */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + if (wl_chspec_chandef(chanspec, &chandef, wiphy)) { +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + if (wl_chspec_chandef(chanspec, &chaninfo, wiphy)) { +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + + WL_ERR(("chspec_chandef failed\n")); + return; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) + freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1; + cfg80211_ch_switch_notify(dev, &chandef); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \ + \ + \ + \ + 0))) + freq = chan_info.freq; + cfg80211_ch_switch_notify(dev, chan_info.freq, chan_info.chan_type); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */ + + WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec)); + return; +} +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + +#ifdef WL11ULB +s32 +wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode) +{ + int ret; + int cur_mode; + + ret = wldev_iovar_getint(dev, "ulb_mode", &cur_mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode get failed. ret:%d \n", ret)); + return ret; + } + + if (cur_mode == mode) { + /* If request mode is same as that of the current mode, then + * do nothing (Avoid unnecessary wl down and up). + */ + WL_INFORM(("[ULB] No change in ulb_mode. Do nothing.\n")); + return 0; + } + + /* setting of ulb_mode requires wl to be down */ + ret = wldev_ioctl(dev, WLC_DOWN, NULL, 0, true); + if (unlikely(ret)) { + WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret)); + return ret; + } + + if (mode >= MAX_SUPP_ULB_MODES) { + WL_ERR(("[ULB] unsupported ulb_mode :[%d]\n", mode)); + return -EINVAL; + } + + ret = wldev_iovar_setint(dev, "ulb_mode", mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode set failed. 
ret:%d \n", ret)); + return ret; + } + + ret = wldev_ioctl(dev, WLC_UP, NULL, 0, true); + if (unlikely(ret)) { + WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret)); + return ret; + } + + WL_DBG(("[ULB] ulb_mode set to %d successfully \n", mode)); + + return ret; +} + +static s32 +wl_cfg80211_ulbbw_to_ulbchspec(u32 bw) +{ + if (bw == ULB_BW_DISABLED) { + return WL_CHANSPEC_BW_20; + } else if (bw == ULB_BW_10MHZ) { + return WL_CHANSPEC_BW_10; + } else if (bw == ULB_BW_5MHZ) { + return WL_CHANSPEC_BW_5; + } else if (bw == ULB_BW_2P5MHZ) { + return WL_CHANSPEC_BW_2P5; + } else { + WL_ERR(("[ULB] unsupported value for ulb_bw \n")); + return -EINVAL; + } +} + +static chanspec_t +wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *_netinfo; + + /* + * Return the chspec value corresponding to the + * BW setting for a particular interface + */ + if (wdev) { + /* if wdev is provided, use it */ + _netinfo = wl_get_netinfo_by_wdev(cfg, wdev); + } else if (bssidx >= 0) { + /* if wdev is not provided, use it */ + _netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + } else { + WL_ERR(("[ULB] wdev/bssidx not provided\n")); + return INVCHANSPEC; + } + + if (unlikely(!_netinfo)) { + WL_ERR(("[ULB] net_info is null \n")); + return INVCHANSPEC; + } + + if (_netinfo->ulb_bw) { + WL_DBG(("[ULB] wdev_ptr:%p ulb_bw:0x%x \n", _netinfo->wdev, _netinfo->ulb_bw)); + return wl_cfg80211_ulbbw_to_ulbchspec(_netinfo->ulb_bw); + } else { + return WL_CHANSPEC_BW_20; + } +} + +static s32 +wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *_netinfo = wl_get_netinfo_by_wdev(cfg, wdev); + + /* + * Return the ulb_bw setting for a + * particular interface + */ + if (unlikely(!_netinfo)) { + WL_ERR(("[ULB] net_info is null \n")); + return -1; + } + + return _netinfo->ulb_bw; +} + +s32 +wl_cfg80211_set_ulb_bw(struct net_device *dev, + u32 ulb_bw, char *ifname) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int ret; + int mode; + struct net_info *_netinfo = NULL, *iter, *next; + u32 bssidx; + enum nl80211_iftype iftype; + + if (!ifname) + return -EINVAL; + + WL_DBG(("[ULB] Enter. 
bw_type:%d \n", ulb_bw)); + + ret = wldev_iovar_getint(dev, "ulb_mode", &mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode not supported \n")); + return ret; + } + + if (mode != ULB_MODE_STD_ALONE_MODE) { + WL_ERR(("[ULB] ulb bw modification allowed only in stand-alone mode\n")); + return -EINVAL; + } + + if (ulb_bw >= MAX_SUPP_ULB_BW) { + WL_ERR(("[ULB] unsupported value (%d) for ulb_bw \n", ulb_bw)); + return -EINVAL; + } + +#ifdef WL_CFG80211_P2P_DEV_IF + if (strcmp(ifname, "p2p-dev-wlan0") == 0) { + iftype = NL80211_IFTYPE_P2P_DEVICE; + /* Use wdev corresponding to the dedicated p2p discovery interface */ + if (likely(cfg->p2p_wdev)) { + _netinfo = wl_get_netinfo_by_wdev(cfg, cfg->p2p_wdev); + } else { + return -ENODEV; + } + } +#endif /* WL_CFG80211_P2P_DEV_IF */ + if (!_netinfo) { + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (strncmp(iter->ndev->name, ifname, strlen(ifname)) == 0) { + _netinfo = wl_get_netinfo_by_netdev(cfg, iter->ndev); + iftype = NL80211_IFTYPE_STATION; + } + } + } + } + + if (!_netinfo) + return -ENODEV; + bssidx = _netinfo->bssidx; + _netinfo->ulb_bw = ulb_bw; + + + WL_DBG(("[ULB] Applying ulb_bw:%d for bssidx:%d \n", ulb_bw, bssidx)); + ret = wldev_iovar_setbuf_bsscfg(dev, "ulb_bw", (void *)&ulb_bw, 4, + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, + &cfg->ioctl_buf_sync); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_bw set failed. ret:%d \n", ret)); + return ret; + } + + return ret; +} +#endif /* WL11ULB */ + +static void +wl_ap_channel_ind(struct bcm_cfg80211 *cfg, + struct net_device *ndev, + chanspec_t chanspec) +{ + u32 channel = LCHSPEC_CHANNEL(chanspec); + + WL_DBG(("(%s) AP channel:%d chspec:0x%x \n", + ndev->name, channel, chanspec)); + if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) { + /* + * If cached channel is different from the channel indicated + * by the event, notify user space about the channel switch. 
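+		 * The notification helper below is compiled only for 3.5+ kernels
+		 * (cfg80211_ch_switch_notify() is not available earlier), hence the
+		 * LINUX_VERSION_CODE guard around the call.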
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg)); +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + cfg->ap_oper_channel = channel; + } +} + +static s32 +wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +const wl_event_msg_t *e, void *data) +{ + struct net_device *ndev = NULL; + chanspec_t chanspec; + u32 channel; + + WL_DBG(("Enter\n")); + if (unlikely(e->status)) { + WL_ERR(("status:0x%x \n", e->status)); + return -1; + } + + if (!data) { + return -EINVAL; + } + + if (likely(cfgdev)) { + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + chanspec = *((chanspec_t *)data); + channel = LCHSPEC_CHANNEL(chanspec); + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + /* For AP/GO role */ + wl_ap_channel_ind(cfg, ndev, chanspec); + } + } + + return 0; +} + +static s32 +wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +const wl_event_msg_t *e, void *data) +{ + int error = 0; + u32 chanspec = 0; + struct net_device *ndev = NULL; + struct wiphy *wiphy = NULL; + + WL_DBG(("Enter\n")); + if (unlikely(e->status)) { + WL_ERR(("status:0x%x \n", e->status)); + return -1; + } + + if (likely(cfgdev)) { + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + wiphy = bcmcfg_to_wiphy(cfg); + error = wldev_iovar_getint(ndev, "chanspec", &chanspec); + if (unlikely(error)) { + WL_ERR(("Get chanspec error: %d \n", error)); + return -1; + } + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + /* For AP/GO role */ + wl_ap_channel_ind(cfg, ndev, chanspec); + } else { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + wl_cfg80211_ch_switch_notify(ndev, chanspec, wiphy); +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + } + + } + + return 0; +} + +#ifdef WL_NAN +int +wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len) +{ + return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len); +} +#endif /* WL_NAN */ + +void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg) +{ + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + int err; + + /* Clear the security settings on the primary Interface */ + err = wldev_iovar_setint(dev, "wsec", 0); + if (unlikely(err)) { + WL_ERR(("wsec clear failed \n")); + } + err = wldev_iovar_setint(dev, "auth", 0); + if (unlikely(err)) { + WL_ERR(("auth clear failed \n")); + } + err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED); + if (unlikely(err)) { + WL_ERR(("wpa_auth clear failed \n")); + } +} + +#ifdef WL_CFG80211_P2P_DEV_IF +void wl_cfg80211_del_p2p_wdev(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev = NULL; + + WL_DBG(("Enter \n")); + if (!cfg) { + WL_ERR(("Invalid Ptr\n")); + return; + } else { + wdev = cfg->p2p_wdev; + } + + if (wdev && cfg->down_disc_if) { + wl_cfgp2p_del_p2p_disc_if(wdev, cfg); + cfg->down_disc_if = FALSE; + } +} +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +int +wl_cfg80211_set_spect(struct net_device *dev, int spect) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int down = 1; + int up = 1; + int err = BCME_OK; + + if (!wl_get_drv_status_all(cfg, CONNECTED)) { + err = wldev_ioctl(dev, WLC_DOWN, &down, sizeof(down), true); + if (err) { + WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err)); + return err; + } + + err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect), true); + if (err) { + WL_ERR(("%s: error setting spect: code: %d\n", __func__, err)); + return err; + } + + err = wldev_ioctl(dev, WLC_UP, &up, sizeof(up), true); + if 
(err) { + WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err)); + return err; + } + } + return err; +} + +int +wl_cfg80211_get_sta_channel(void) +{ + struct net_device *ndev = bcmcfg_to_prmry_ndev(g_bcm_cfg); + int channel = 0; + + if (wl_get_drv_status(g_bcm_cfg, CONNECTED, ndev)) { + channel = g_bcm_cfg->channel; + } + return channel; +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef P2P_LISTEN_OFFLOADING +s32 +wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg) +{ + s32 bssidx; + int ret = 0; + int p2plo_pause = 0; + if (!cfg || !cfg->p2p) { + WL_ERR(("Wl %p or cfg->p2p %p is null\n", + cfg, cfg ? cfg->p2p : 0)); + return 0; + } + + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), + "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL); + if (ret < 0) { + WL_ERR(("p2po_stop Failed :%d\n", ret)); + } + + return ret; +} +s32 +wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + wl_p2plo_listen_t p2plo_listen; + int ret = -EAGAIN; + int channel = 0; + int period = 0; + int interval = 0; + int count = 0; + + if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) { + WL_ERR(("Sending Action Frames. Try it again.\n")); + goto exit; + } + + if (wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Scanning already\n")); + goto exit; + } + + if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) { + WL_ERR(("Scanning being aborted\n")); + goto exit; + } + + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("p2p listen offloading already running\n")); + goto exit; + } + + /* Just in case if it is not enabled */ + if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { + WL_ERR(("cfgp2p_enable discovery failed")); + goto exit; + } + + bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t)); + + if (len) { + sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count); + if ((channel == 0) || (period == 0) || + (interval == 0) || (count == 0)) { + WL_ERR(("Wrong argument %d/%d/%d/%d \n", + channel, period, interval, count)); + ret = -EAGAIN; + goto exit; + } + p2plo_listen.period = period; + p2plo_listen.interval = interval; + p2plo_listen.count = count; + + WL_ERR(("channel:%d period:%d, interval:%d count:%d\n", + channel, period, interval, count)); + } else { + WL_ERR(("Argument len is wrong.\n")); + ret = -EAGAIN; + goto exit; + } + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel, + sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_listen_channel Failed :%d\n", ret)); + goto exit; + } + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen, + sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_listen Failed :%d\n", ret)); + goto exit; + } + + wl_set_p2p_status(cfg, DISC_IN_PROGRESS); + cfg->last_roc_id = P2PO_COOKIE; +exit : + return ret; +} +s32 +wl_cfg80211_p2plo_listen_stop(struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + int ret = -EAGAIN; + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_stop Failed :%d\n", ret)); + goto exit; + } + +exit: + return ret; +} +#endif /* 
P2P_LISTEN_OFFLOADING */ +u64 +wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg) +{ + u64 id = 0; + id = ++cfg->last_roc_id; +#ifdef P2P_LISTEN_OFFLOADING + if (id == P2PO_COOKIE) { + id = ++cfg->last_roc_id; + } +#endif /* P2P_LISTEN_OFFLOADING */ + if (id == 0) + id = ++cfg->last_roc_id; + return id; +} + +#if defined(SUPPORT_RANDOM_MAC_SCAN) +int +wl_cfg80211_set_random_mac(struct net_device *dev, bool enable) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int ret; + + if (cfg->random_mac_enabled == enable) { + WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled")); + return BCME_OK; + } + + if (enable) { + ret = wl_cfg80211_random_mac_enable(dev); + } else { + ret = wl_cfg80211_random_mac_disable(dev); + } + + if (!ret) { + cfg->random_mac_enabled = enable; + } + + return ret; +} + +int +wl_cfg80211_random_mac_enable(struct net_device *dev) +{ + u8 current_mac[ETH_ALEN] = {0, }; + s32 err = BCME_ERROR; + uint8 buffer[20] = {0, }; + wl_scanmac_t *sm = NULL; + int len = 0; + wl_scanmac_enable_t *sm_enable = NULL; + wl_scanmac_config_t *sm_config = NULL; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) || + wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) { + WL_ERR(("Fail to Set random mac, current state is wrong\n")); + return err; + } + + /* Read current mac address */ + err = wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to get current dongle mac address\n")); + return err; + } + + memcpy(current_mac, cfg->ioctl_buf, ETH_ALEN); + + /* Enable scan mac */ + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t *)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = 1; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to enable scanmac, err=%d\n", err)); + return err; + } + + /* Configure scanmac */ + memset(buffer, 0x0, sizeof(buffer)); + sm_config = (wl_scanmac_config_t *)sm->data; + sm->len = sizeof(*sm_config); + sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG; + sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC; + + /* Set current mac address */ + memcpy(&sm_config->mac.octet, current_mac, ETH_ALEN); + sm_config->mac.octet[3] = 0x0; + sm_config->mac.octet[4] = 0x0; + sm_config->mac.octet[5] = 0x0; + + /* Set randomize mac address(last 3bytes) */ + memset(&sm_config->random_mask.octet, 0x0, ETH_ALEN); + sm_config->random_mask.octet[3] = 0xff; + sm_config->random_mask.octet[4] = 0xff; + sm_config->random_mask.octet[5] = 0xff; + + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed scanmac configuration\n")); + + /* Disable scan mac for clean-up */ + wl_cfg80211_random_mac_disable(dev); + return err; + } + + WL_ERR(("random MAC enable done")); + return err; +} + +int +wl_cfg80211_random_mac_disable(struct net_device *dev) +{ + s32 err = BCME_ERROR; + uint8 buffer[20] = {0, }; + wl_scanmac_t *sm = NULL; + int len = 0; + wl_scanmac_enable_t *sm_enable = NULL; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t 
*)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = 0; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to disable scanmac, err=%d\n", err)); + return err; + } + + WL_ERR(("random MAC disable done\n")); + return err; +} +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + +int +wl_cfg80211_iface_count(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *iter, *next; + int iface_count = 0; + + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + iface_count++; + } + } + return iface_count; +} + +#ifdef DHD_LOG_DUMP +struct bcm_cfg80211* +wl_get_bcm_cfg80211_ptr(void) +{ + return g_bcm_cfg; +} +#endif /* DHD_LOG_DUMP */ + +#define CHECK_DONGLE_IDLE_TIME 50 +#define CHECK_DONGLE_IDLE_CNT 100 +int +wl_check_dongle_idle(struct wiphy *wiphy) +{ + int error = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev; + int retry = 0; + struct channel_info ci; + if (!cfg) + return FALSE; + /* Use primary I/F for sending cmds down to firmware */ + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + while (retry++ < CHECK_DONGLE_IDLE_CNT) { + error = wldev_ioctl(primary_ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false); + if (error != BCME_OK || ci.scan_channel != 0) { + WL_ERR(("Firmware is busy(err:%d scan channel:%d). wait %dms\n", + error, ci.scan_channel, CHECK_DONGLE_IDLE_TIME)); + } else { + break; + } + wl_delay(CHECK_DONGLE_IDLE_TIME); + } + if (retry >= CHECK_DONGLE_IDLE_CNT) { + WL_ERR(("DONGLE is BUSY too long\n")); + return FALSE; + } + WL_DBG(("DONGLE is idle\n")); + return TRUE; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h new file mode 100644 index 000000000000..945ab5801b6b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h @@ -0,0 +1,1442 @@ +/* + * Linux cfg80211 driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfg80211.h 608788 2015-12-29 10:59:33Z $ + */ + +/** + * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface. 
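+ * This driver uses the cfg80211 interface; this header collects the debug
+ * macros, status flags and per-interface structures shared with wl_cfg80211.c.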
+ */ + +#ifndef _wl_cfg80211_h_ +#define _wl_cfg80211_h_ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct wl_conf; +struct wl_iface; +struct bcm_cfg80211; +struct wl_security; +struct wl_ibss; + + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh64(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#define WL_DBG_NONE 0 +#define WL_DBG_P2P_ACTION (1 << 5) +#define WL_DBG_TRACE (1 << 4) +#define WL_DBG_SCAN (1 << 3) +#define WL_DBG_DBG (1 << 2) +#define WL_DBG_INFO (1 << 1) +#define WL_DBG_ERR (1 << 0) + +#ifdef DHD_LOG_DUMP +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +struct bcm_cfg80211 *wl_get_bcm_cfg80211_ptr(void); +#endif /* DHD_LOG_DUMP */ + +/* 0 invalidates all debug messages. default is 1 */ +#define WL_DBG_LEVEL 0xFF + +#ifdef CUSTOMER_HW4_DEBUG +#define CFG80211_ERROR_TEXT "CFG80211-INFO2) " +#else +#define CFG80211_ERROR_TEXT "CFG80211-ERROR) " +#endif /* CUSTOMER_HW4_DEBUG */ + +#if defined(DHD_DEBUG) +#ifdef DHD_LOG_DUMP +#define WL_ERR(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define WL_ERR(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ +} while (0) +#endif /* DHD_LOG_DUMP */ +#else /* defined(DHD_DEBUG) */ +#define WL_ERR(args) \ +do { \ + if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ +} while (0) +#endif /* defined(DHD_DEBUG) */ + +#ifdef WL_INFORM +#undef WL_INFORM +#endif + +#define WL_INFORM(args) \ +do { \ + if (wl_dbg_level & WL_DBG_INFO) { \ + printk(KERN_INFO "CFG80211-INFO) %s : ", __func__); \ + printk args; \ + } \ +} while (0) + + +#ifdef WL_SCAN +#undef WL_SCAN +#endif +#define WL_SCAN(args) \ +do { \ + if (wl_dbg_level & WL_DBG_SCAN) { \ + printk(KERN_INFO "CFG80211-SCAN) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#ifdef WL_TRACE +#undef WL_TRACE +#endif +#define WL_TRACE(args) \ +do { \ + if (wl_dbg_level & WL_DBG_TRACE) { \ + printk(KERN_INFO "CFG80211-TRACE) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#ifdef WL_TRACE_HW4 +#undef WL_TRACE_HW4 +#endif +#ifdef CUSTOMER_HW4_DEBUG +#define WL_TRACE_HW4(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#else +#define WL_TRACE_HW4 WL_TRACE +#endif /* CUSTOMER_HW4_DEBUG */ +#if (WL_DBG_LEVEL > 0) +#define WL_DBG(args) \ +do { \ + if (wl_dbg_level & WL_DBG_DBG) { \ + printk(KERN_DEBUG "CFG80211-DEBUG) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#else /* !(WL_DBG_LEVEL > 0) */ +#define WL_DBG(args) +#endif /* (WL_DBG_LEVEL > 0) */ +#define WL_PNO(x) +#define WL_SD(x) + + +#define WL_SCAN_RETRY_MAX 3 +#define WL_NUM_PMKIDS_MAX MAXPMKID +#define WL_SCAN_BUF_MAX (1024 * 8) +#define WL_TLV_INFO_MAX 1500 +#define WL_SCAN_IE_LEN_MAX 2048 +#define WL_BSS_INFO_MAX 2048 +#define WL_ASSOC_INFO_MAX 512 +#define WL_IOCTL_LEN_MAX 2048 +#define WL_EXTRA_BUF_MAX 2048 +#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1) +#define WL_AP_MAX 256 +#define WL_FILE_NAME_MAX 256 +#define WL_DWELL_TIME 200 +#define 
WL_MED_DWELL_TIME 400 +#define WL_MIN_DWELL_TIME 100 +#define WL_LONG_DWELL_TIME 1000 +#define IFACE_MAX_CNT 4 +#define WL_SCAN_CONNECT_DWELL_TIME_MS 200 +#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20 +#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 +#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 +#define WL_AF_TX_MAX_RETRY 5 + +#define WL_AF_SEARCH_TIME_MAX 450 +#define WL_AF_TX_EXTRA_TIME_MAX 200 + +#define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */ +#define WL_CHANNEL_SYNC_RETRY 5 +#define WL_INVALID -1 + +#ifdef DHD_LOSSLESS_ROAMING +#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */ +#endif +/* Bring down SCB Timeout to 20secs from 60secs default */ +#ifndef WL_SCB_TIMEOUT +#define WL_SCB_TIMEOUT 20 +#endif + +#ifndef WL_SCB_ACTIVITY_TIME +#define WL_SCB_ACTIVITY_TIME 5 +#endif + +#ifndef WL_SCB_MAX_PROBE +#define WL_SCB_MAX_PROBE 3 +#endif + +#ifndef WL_MIN_PSPRETEND_THRESHOLD +#define WL_MIN_PSPRETEND_THRESHOLD 2 +#endif + +/* SCAN_SUPPRESS timer values in ms */ +#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framwork DHCP timeout is 30 sec */ +#define WL_SCAN_SUPPRESS_RETRY 3000 + +#define WL_PM_ENABLE_TIMEOUT 10000 + +/* cfg80211 wowlan definitions */ +#define WL_WOWLAN_MAX_PATTERNS 8 +#define WL_WOWLAN_MIN_PATTERN_LEN 1 +#define WL_WOWLAN_MAX_PATTERN_LEN 255 +#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201 +#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \ + WL_WOWLAN_MAX_PATTERNS - 1) + +#ifdef WLTDLS +#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04" +#define TDLS_TUNNELED_PRB_RESP "\x7f\x50\x6f\x9a\05" +#endif /* WLTDLS */ + + +/* driver status */ +enum wl_status { + WL_STATUS_READY = 0, + WL_STATUS_SCANNING, + WL_STATUS_SCAN_ABORTING, + WL_STATUS_CONNECTING, + WL_STATUS_CONNECTED, + WL_STATUS_DISCONNECTING, + WL_STATUS_AP_CREATING, + WL_STATUS_AP_CREATED, + /* whole sending action frame procedure: + * includes a) 'finding common channel' for public action request frame + * and b) 'sending af via 'actframe' iovar' + */ + WL_STATUS_SENDING_ACT_FRM, + /* find a peer to go to a common channel before sending public action req frame */ + WL_STATUS_FINDING_COMMON_CHANNEL, + /* waiting for next af to sync time of supplicant. + * it includes SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN + */ + WL_STATUS_WAITING_NEXT_ACT_FRM, +#ifdef WL_CFG80211_SYNC_GON + /* go to listen state to wait for next af after SENDING_ACT_FRM */ + WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN, +#endif /* WL_CFG80211_SYNC_GON */ + /* it will be set when upper layer requests listen and succeed in setting listen mode. + * if set, other scan request can abort current listen state + */ + WL_STATUS_REMAINING_ON_CHANNEL, +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + /* it's fake listen state to keep current scan state. + * it will be set when upper layer requests listen but scan is running. then just run + * a expire timer without actual listen state. + * if set, other scan request does not need to abort scan. 
+ */ + WL_STATUS_FAKE_REMAINING_ON_CHANNEL +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ +}; + +/* wi-fi mode */ +enum wl_mode { + WL_MODE_BSS, + WL_MODE_IBSS, + WL_MODE_AP +}; + +/* driver profile list */ +enum wl_prof_list { + WL_PROF_MODE, + WL_PROF_SSID, + WL_PROF_SEC, + WL_PROF_IBSS, + WL_PROF_BAND, + WL_PROF_CHAN, + WL_PROF_BSSID, + WL_PROF_ACT, + WL_PROF_BEACONINT, + WL_PROF_DTIMPERIOD +}; + +/* donlge escan state */ +enum wl_escan_state { + WL_ESCAN_STATE_IDLE, + WL_ESCAN_STATE_SCANING +}; +/* fw downloading status */ +enum wl_fw_status { + WL_FW_LOADING_DONE, + WL_NVRAM_LOADING_DONE +}; + +enum wl_management_type { + WL_BEACON = 0x1, + WL_PROBE_RESP = 0x2, + WL_ASSOC_RESP = 0x4 +}; + +enum wl_pm_workq_act_type { + WL_PM_WORKQ_SHORT, + WL_PM_WORKQ_LONG, + WL_PM_WORKQ_DEL +}; + +/* beacon / probe_response */ +struct beacon_proberesp { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + u8 variable[0]; +} __attribute__ ((packed)); + +/* driver configuration */ +struct wl_conf { + u32 frag_threshold; + u32 rts_threshold; + u32 retry_short; + u32 retry_long; + s32 tx_power; + struct ieee80211_channel channel; +}; + +typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); + +/* bss inform structure for cfg80211 interface */ +struct wl_cfg80211_bss_info { + u16 band; + u16 channel; + s16 rssi; + u16 frame_len; + u8 frame_buf[1]; +}; + +/* basic structure of scan request */ +struct wl_scan_req { + struct wlc_ssid ssid; +}; + +/* basic structure of information element */ +struct wl_ie { + u16 offset; + u8 buf[WL_TLV_INFO_MAX]; +}; + +/* event queue for cfg80211 main event */ +struct wl_event_q { + struct list_head eq_list; + u32 etype; + wl_event_msg_t emsg; + s8 edata[1]; +}; + +/* security information with currently associated ap */ +struct wl_security { + u32 wpa_versions; + u32 auth_type; + u32 cipher_pairwise; + u32 cipher_group; + u32 wpa_auth; + u32 auth_assoc_res_status; +}; + +/* ibss information for currently joined ibss network */ +struct wl_ibss { + u8 beacon_interval; /* in millisecond */ + u8 atim; /* in millisecond */ + s8 join_only; + u8 band; + u8 channel; +}; + +typedef struct wl_bss_vndr_ies { + u8 probe_req_ie[VNDR_IES_BUF_LEN]; + u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN]; + u8 assoc_req_ie[VNDR_IES_BUF_LEN]; + u8 assoc_res_ie[VNDR_IES_BUF_LEN]; + u8 beacon_ie[VNDR_IES_MAX_BUF_LEN]; + u32 probe_req_ie_len; + u32 probe_res_ie_len; + u32 assoc_req_ie_len; + u32 assoc_res_ie_len; + u32 beacon_ie_len; +} wl_bss_vndr_ies_t; + +typedef struct wl_cfgbss { + u8 *wpa_ie; + u8 *rsn_ie; + u8 *wps_ie; + bool security_mode; + struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */ +} wl_cfgbss_t; + +/* cfg driver profile */ +struct wl_profile { + u32 mode; + s32 band; + u32 channel; + struct wlc_ssid ssid; + struct wl_security sec; + struct wl_ibss ibss; + u8 bssid[ETHER_ADDR_LEN]; + u16 beacon_interval; + u8 dtim_period; + bool active; +}; + +struct net_info { + struct net_device *ndev; + struct wireless_dev *wdev; + struct wl_profile profile; + s32 mode; + s32 roam_off; + unsigned long sme_state; + bool pm_restore; + bool pm_block; + s32 pm; + s32 bssidx; + wl_cfgbss_t bss; + u32 ulb_bw; + struct list_head list; /* list of all net_info structure */ +}; + +/* association inform */ +#define MAX_REQ_LINE 1024 +struct wl_connect_info { + u8 req_ie[MAX_REQ_LINE]; + s32 req_ie_len; + u8 resp_ie[MAX_REQ_LINE]; + s32 resp_ie_len; +}; + +/* firmware /nvram downloading controller 
*/ +struct wl_fw_ctrl { + const struct firmware *fw_entry; + unsigned long status; + u32 ptr; + s8 fw_name[WL_FILE_NAME_MAX]; + s8 nvram_name[WL_FILE_NAME_MAX]; +}; + +/* assoc ie length */ +struct wl_assoc_ielen { + u32 req_len; + u32 resp_len; +}; + +/* wpa2 pmk list */ +struct wl_pmk_list { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID - 1]; +}; + +#ifdef DHD_MAX_IFS +#define WL_MAX_IFS DHD_MAX_IFS +#else +#define WL_MAX_IFS 16 +#endif + +#define ESCAN_BUF_SIZE (64 * 1024) + +struct escan_info { + u32 escan_state; +#if defined(STATIC_WL_PRIV_STRUCT) +#ifndef CONFIG_DHD_USE_STATIC_BUF +#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + u8 *escan_buf; +#else + u8 escan_buf[ESCAN_BUF_SIZE]; +#endif /* STATIC_WL_PRIV_STRUCT */ + struct wiphy *wiphy; + struct net_device *ndev; +}; + +#ifdef ESCAN_BUF_OVERFLOW_MGMT +#define BUF_OVERFLOW_MGMT_COUNT 3 +typedef struct { + int RSSI; + int length; + struct ether_addr BSSID; +} removal_element_t; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + +struct afx_hdl { + wl_af_params_t *pending_tx_act_frm; + struct ether_addr tx_dst_addr; + struct net_device *dev; + struct work_struct work; + s32 bssidx; + u32 retry; + s32 peer_chan; + s32 peer_listen_chan; /* search channel: configured by upper layer */ + s32 my_listen_chan; /* listen chanel: extract it from prb req or gon req */ + bool is_listen; + bool ack_recv; + bool is_active; +}; + +struct parsed_ies { + wpa_ie_fixed_t *wps_ie; + u32 wps_ie_len; + wpa_ie_fixed_t *wpa_ie; + u32 wpa_ie_len; + bcm_tlv_t *wpa2_ie; + u32 wpa2_ie_len; +}; + + +#ifdef P2P_LISTEN_OFFLOADING +typedef struct { + uint16 period; /* listen offload period */ + uint16 interval; /* listen offload interval */ + uint16 count; /* listen offload count */ + uint16 pad; /* pad for 32bit align */ +} wl_p2plo_listen_t; +#endif /* P2P_LISTEN_OFFLOADING */ + +#ifdef WL11U +/* Max length of Interworking element */ +#define IW_IES_MAX_BUF_LEN 9 +#endif +#define MAX_EVENT_BUF_NUM 16 +typedef struct wl_eventmsg_buf { + u16 num; + struct { + u16 type; + bool set; + } event [MAX_EVENT_BUF_NUM]; +} wl_eventmsg_buf_t; + +typedef struct wl_if_event_info { + bool valid; + int ifidx; + int bssidx; + uint8 mac[ETHER_ADDR_LEN]; + char name[IFNAMSIZ+1]; +} wl_if_event_info; + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define GET_BSS_INFO_LEN 90 +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +/* private data of cfg80211 interface */ +struct bcm_cfg80211 { + struct wireless_dev *wdev; /* representing cfg cfg80211 device */ + + struct wireless_dev *p2p_wdev; /* representing cfg cfg80211 device for P2P */ + struct net_device *p2p_net; /* reference to p2p0 interface */ + + struct wl_conf *conf; + struct cfg80211_scan_request *scan_request; /* scan request object */ + EVENT_HANDLER evt_handler[WLC_E_LAST]; + struct list_head eq_list; /* used for event queue */ + struct list_head net_list; /* used for struct net_info */ + spinlock_t net_list_sync; /* to protect scan status (and others if needed) */ + spinlock_t eq_lock; /* for event queue synchronization */ + spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */ + struct completion act_frm_scan; + struct completion iface_disable; + struct completion wait_next_af; + struct mutex usr_sync; /* maily for up/down synchronization */ + struct mutex scan_complete; /* serialize scan_complete call */ + struct wl_scan_results *bss_list; + struct wl_scan_results *scan_results; + + /* scan request object for internal purpose */ + struct wl_scan_req 
*scan_req_int; + /* information element object for internal purpose */ +#if defined(STATIC_WL_PRIV_STRUCT) + struct wl_ie *ie; +#else + struct wl_ie ie; +#endif + + /* association information container */ +#if defined(STATIC_WL_PRIV_STRUCT) + struct wl_connect_info *conn_info; +#else + struct wl_connect_info conn_info; +#endif +#ifdef DEBUGFS_CFG80211 + struct dentry *debugfs; +#endif /* DEBUGFS_CFG80211 */ + struct wl_pmk_list *pmk_list; /* wpa2 pmk list */ + tsk_ctl_t event_tsk; /* task of main event handler thread */ + void *pub; + u32 iface_cnt; + u32 channel; /* current channel */ + u32 af_sent_channel; /* channel action frame is sent */ + /* next af subtype to cancel the remained dwell time in rx process */ + u8 next_af_subtype; +#ifdef WL_CFG80211_SYNC_GON + ulong af_tx_sent_jiffies; +#endif /* WL_CFG80211_SYNC_GON */ + struct escan_info escan_info; /* escan information */ + bool active_scan; /* current scan mode */ + bool ibss_starter; /* indicates this sta is ibss starter */ + bool link_up; /* link/connection up flag */ + + /* indicate whether chip to support power save mode */ + bool pwr_save; + bool roam_on; /* on/off switch for self-roaming */ + bool scan_tried; /* indicates if first scan attempted */ +#if defined(BCMSDIO) || defined(BCMPCIE) + bool wlfc_on; +#endif + bool vsdb_mode; + bool roamoff_on_concurrent; + u8 *ioctl_buf; /* ioctl buffer */ + struct mutex ioctl_buf_sync; + u8 *escan_ioctl_buf; + u8 *extra_buf; /* maily to grab assoc information */ + struct dentry *debugfsdir; + struct rfkill *rfkill; + bool rf_blocked; + struct ieee80211_channel remain_on_chan; + enum nl80211_channel_type remain_on_chan_type; + u64 send_action_id; + u64 last_roc_id; + wait_queue_head_t netif_change_event; + wl_if_event_info if_event_info; + struct completion send_af_done; + struct afx_hdl *afx_hdl; + struct p2p_info *p2p; + bool p2p_supported; + void *btcoex_info; + struct timer_list scan_timeout; /* Timer for catch scan event timeout */ +#if defined(P2P_IE_MISSING_FIX) + bool p2p_prb_noti; +#endif + s32(*state_notifier) (struct bcm_cfg80211 *cfg, + struct net_info *_net_info, enum wl_status state, bool set); + unsigned long interrested_state; + wlc_ssid_t hostapd_ssid; +#ifdef WL11U + bool wl11u; + u8 iw_ie[IW_IES_MAX_BUF_LEN]; + u32 iw_ie_len; +#endif /* WL11U */ + bool sched_scan_running; /* scheduled scan req status */ +#ifdef WL_SCHED_SCAN + struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */ +#endif /* WL_SCHED_SCAN */ + bool scan_suppressed; + struct timer_list scan_supp_timer; + struct work_struct wlan_work; + struct mutex event_sync; /* maily for up/down synchronization */ + bool disable_roam_event; + struct delayed_work pm_enable_work; + struct mutex pm_sync; /* mainly for pm work synchronization */ + + vndr_ie_setbuf_t *ibss_vsie; /* keep the VSIE for IBSS */ + int ibss_vsie_len; + u32 rmc_event_pid; + u32 rmc_event_seq; +#ifdef WLAIBSS_MCHAN + struct ether_addr ibss_if_addr; + bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */ +#endif /* WLAIBSS_MCHAN */ + bcm_struct_cfgdev *bss_cfgdev; /* For DUAL STA/STA+AP */ + s32 cfgdev_bssidx; + bool bss_pending_op; /* indicate where there is a pending IF operation */ + int roam_offload; +#ifdef WL_NAN + bool nan_enable; + bool nan_running; +#endif /* WL_NAN */ +#ifdef WL_CFG80211_P2P_DEV_IF + bool down_disc_if; +#endif /* WL_CFG80211_P2P_DEV_IF */ +#ifdef P2PLISTEN_AP_SAMECHN + bool p2p_resp_apchn_status; +#endif /* P2PLISTEN_AP_SAMECHN */ + struct wl_wsec_key wep_key; +#ifdef WLTDLS + u8 *tdls_mgmt_frame; + u32 
tdls_mgmt_frame_len; + s32 tdls_mgmt_freq; +#endif /* WLTDLS */ + bool need_wait_afrx; +#ifdef QOS_MAP_SET + uint8 *up_table; /* user priority table, size is UP_TABLE_MAX */ +#endif /* QOS_MAP_SET */ + struct ether_addr last_roamed_addr; +#ifdef DHD_LOSSLESS_ROAMING + struct timer_list roam_timeout; /* Timer for catch roam timeout */ +#endif + bool rcc_enabled; /* flag for Roam channel cache feature */ +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + char bss_info[GET_BSS_INFO_LEN]; + wl_event_msg_t event_auth_assoc; + u32 assoc_reject_status; + u32 roam_count; +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + u16 ap_oper_channel; + bool revert_ndo_disable; +#if defined(SUPPORT_RANDOM_MAC_SCAN) + bool random_mac_enabled; +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +}; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) + +#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +list_for_each_entry_safe((pos), (next), (head), member) \ +_Pragma("GCC diagnostic pop") \ + +#else +#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \ +list_for_each_entry_safe((pos), (next), (head), member) \ + +#endif /* STRICT_GCC_WARNINGS */ + +static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss) +{ + return bss = bss ? + (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info; +} + +static inline void +wl_probe_wdev_all(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + int idx = 0; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, + &cfg->net_list, list) { + WL_ERR(("%s: net_list[%d] bssidx: %d, " + "ndev: %p, wdev: %p \n", __FUNCTION__, + idx++, _net_info->bssidx, + _net_info->ndev, _net_info->wdev)); + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return; +} + +static inline struct net_info * +wl_get_netinfo_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if ((bssidx >= 0) && (_net_info->bssidx == bssidx)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +static inline void +wl_dealloc_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_netinfo enter wdev=%p \n", wdev)); +#endif + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + wl_cfgbss_t *bss = &_net_info->bss; + + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + kfree(bss->wps_ie); + bss->wps_ie = NULL; + list_del(&_net_info->list); + cfg->iface_cnt--; + kfree(_net_info); + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt)); +#endif +} + +static inline s32 +wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct wireless_dev * wdev, s32 mode, bool pm_block, u8 bssidx) +{ + struct net_info *_net_info; + s32 err = 0; + unsigned long int flags; +#ifdef DHD_IFDEBUG + 
WL_ERR(("alloc_netinfo enter bssidx=%d wdev=%p ndev=%p\n", bssidx, wdev, ndev)); +#endif + /* Check whether there is any duplicate entry for the + * same bssidx * + */ + if ((_net_info = wl_get_netinfo_by_bssidx(cfg, bssidx))) { + /* We have a duplicate entry for the same bssidx + * already present which shouldn't have been the case. + * Attempt recovery. + */ + WL_ERR(("Duplicate entry for bssidx=%d present\n", bssidx)); + wl_probe_wdev_all(cfg); +#ifdef DHD_DEBUG + ASSERT(0); +#endif /* DHD_DEBUG */ + WL_ERR(("Removing the Dup entry for bssidx=%d \n", bssidx)); + wl_dealloc_netinfo_by_wdev(cfg, _net_info->wdev); + } + if (cfg->iface_cnt == IFACE_MAX_CNT) + return -ENOMEM; + _net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL); + if (!_net_info) + err = -ENOMEM; + else { + _net_info->mode = mode; + _net_info->ndev = ndev; + _net_info->wdev = wdev; + _net_info->pm_restore = 0; + _net_info->pm = 0; + _net_info->pm_block = pm_block; + _net_info->roam_off = WL_INVALID; + _net_info->bssidx = bssidx; + spin_lock_irqsave(&cfg->net_list_sync, flags); + cfg->iface_cnt++; + list_add(&_net_info->list, &cfg->net_list); + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + } +#ifdef DHD_IFDEBUG + WL_ERR(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt)); +#endif + return err; +} + +static inline void +wl_delete_all_netinfo(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + wl_cfgbss_t *bss = &_net_info->bss; + + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + kfree(bss->wps_ie); + bss->wps_ie = NULL; + list_del(&_net_info->list); + if (_net_info->wdev) + kfree(_net_info->wdev); + kfree(_net_info); + } + cfg->iface_cnt = 0; + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} +static inline u32 +wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status) + +{ + struct net_info *_net_info, *next; + u32 cnt = 0; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->ndev && + test_bit(status, &_net_info->sme_state)) + cnt++; + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return cnt; +} +static inline void +wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + switch (op) { + case 1: + break; /* set all status is not allowed */ + case 2: + /* + * Release the spinlock before calling notifier. 
Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + clear_bit(status, &_net_info->sme_state); + if (cfg->state_notifier && + test_bit(status, &(cfg->interrested_state))) + cfg->state_notifier(cfg, _net_info, status, false); + return; + case 4: + break; /* change all status is not allowed */ + default: + break; /* unknown operation */ + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} +static inline void +wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, + struct net_device *ndev, u32 op) +{ + + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + switch (op) { + case 1: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + set_bit(status, &_net_info->sme_state); + if (cfg->state_notifier && + test_bit(status, &(cfg->interrested_state))) + cfg->state_notifier(cfg, _net_info, status, true); + return; + case 2: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + clear_bit(status, &_net_info->sme_state); + if (cfg->state_notifier && + test_bit(status, &(cfg->interrested_state))) + cfg->state_notifier(cfg, _net_info, status, false); + return; + case 4: + change_bit(status, &_net_info->sme_state); + break; + } + } + + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + +} + +static inline wl_cfgbss_t * +wl_get_cfgbss_by_wdev(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + wl_cfgbss_t *bss = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + bss = &_net_info->bss; + break; + } + } + + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return bss; +} + +static inline u32 +wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, + struct net_device *ndev) +{ + struct net_info *_net_info, *next; + u32 stat = 0; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + stat = test_bit(status, &_net_info->sme_state); + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return stat; +} + +static inline s32 +wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + s32 mode = -1; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + mode = _net_info->mode; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return mode; +} + +static inline void +wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, + s32 mode) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + _net_info->mode = mode; + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} + +static inline s32 
+wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + s32 bssidx = -1; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->wdev && (_net_info->wdev == wdev)) { + bssidx = _net_info->bssidx; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return bssidx; +} + +static inline struct wireless_dev * +wl_get_wdev_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + struct net_info *_net_info, *next; + struct wireless_dev *wdev = NULL; + unsigned long int flags; + + if (bssidx < 0) + return NULL; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->bssidx == bssidx) { + wdev = _net_info->wdev; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return wdev; +} + +static inline struct wl_profile * +wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + struct wl_profile *prof = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + prof = &_net_info->profile; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return prof; +} +static inline struct net_info * +wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +static inline struct net_info * +wl_get_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \ + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0) +#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy) +#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev) +#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev) +#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev) +#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr)) +#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr) +#define wdev_to_ndev(wdev) (wdev->netdev) + +#if defined(WL_ENABLE_P2P_IF) +#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \ + bcmcfg_to_prmry_ndev(cfg) : ndev) +#else +#define ndev_to_wlc_ndev(ndev, cfg) (ndev) +#endif /* WL_ENABLE_P2P_IF */ + +#define wdev_to_wlc_ndev(wdev, cfg) \ + (wdev_to_ndev(wdev) ? 
\ + wdev_to_ndev(wdev) : bcmcfg_to_prmry_ndev(cfg)) +#if defined(WL_CFG80211_P2P_DEV_IF) +#define cfgdev_to_wlc_ndev(cfgdev, cfg) wdev_to_wlc_ndev(cfgdev, cfg) +#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg) +#elif defined(WL_ENABLE_P2P_IF) +#define cfgdev_to_wlc_ndev(cfgdev, cfg) ndev_to_wlc_ndev(cfgdev, cfg) +#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg) +#else +#define cfgdev_to_wlc_ndev(cfgdev, cfg) (cfgdev) +#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev) +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +#define cfgdev_to_wdev(cfgdev) (cfgdev) +#define ndev_to_cfgdev(ndev) ndev_to_wdev(ndev) +#define cfgdev_to_ndev(cfgdev) (cfgdev ? (cfgdev->netdev) : NULL) +#define wdev_to_cfgdev(cfgdev) (cfgdev) +#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) +#else +#define cfgdev_to_wdev(cfgdev) (cfgdev->ieee80211_ptr) +#define wdev_to_cfgdev(cfgdev) cfgdev ? (cfgdev->netdev) : NULL +#define ndev_to_cfgdev(ndev) (ndev) +#define cfgdev_to_ndev(cfgdev) (cfgdev) +#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net) +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \ + (cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false) +#elif defined(WL_ENABLE_P2P_IF) +#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \ + (cfg->scan_request->dev == cfg->p2p_net)) ? true : false) +#else +#define scan_req_match(cfg) (((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \ + true : false) +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define scan_req_iftype(req) (req->dev->ieee80211_ptr->iftype) +#else +#define scan_req_iftype(req) (req->wdev->iftype) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */ + +#define wl_to_sr(w) (w->scan_req_int) +#if defined(STATIC_WL_PRIV_STRUCT) +#define wl_to_ie(w) (w->ie) +#define wl_to_conn(w) (w->conn_info) +#else +#define wl_to_ie(w) (&w->ie) +#define wl_to_conn(w) (&w->conn_info) +#endif +#define wiphy_from_scan(w) (w->escan_info.wiphy) +#define wl_get_drv_status_all(cfg, stat) \ + (wl_get_status_all(cfg, WL_STATUS_ ## stat)) +#define wl_get_drv_status(cfg, stat, ndev) \ + (wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev)) +#define wl_set_drv_status(cfg, stat, ndev) \ + (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1)) +#define wl_clr_drv_status(cfg, stat, ndev) \ + (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2)) +#define wl_clr_drv_status_all(cfg, stat) \ + (wl_set_status_all(cfg, WL_STATUS_ ## stat, 2)) +#define wl_chg_drv_status(cfg, stat, ndev) \ + (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4)) + +#define for_each_bss(list, bss, __i) \ + for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss)) + +#define for_each_ndev(cfg, iter, next) \ + list_for_each_entry_safe(iter, next, &cfg->net_list, list) + +/* In case of WPS from wpa_supplicant, pairwise siute and group suite is 0. 
+ * In addtion to that, wpa_version is WPA_VERSION_1 + */ +#define is_wps_conn(_sme) \ + ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \ + (!_sme->crypto.n_ciphers_pairwise) && \ + (!_sme->crypto.cipher_group)) + +#define IS_AKM_SUITE_FT(sec) false + +#define IS_AKM_SUITE_CCKM(sec) false + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) +#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info) +#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len)) +#else +#define STA_INFO_BIT(info) (STATION_ ## info) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */ + +extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context); +extern s32 wl_cfg80211_attach_post(struct net_device *ndev); +extern void wl_cfg80211_detach(void *para); + +extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e, + void *data); +void wl_cfg80211_set_parent_dev(void *dev); +struct device *wl_cfg80211_get_parent_dev(void); + +/* clear IEs */ +extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg); +extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx); + +extern s32 wl_cfg80211_up(void *para); +extern s32 wl_cfg80211_down(void *para); +extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx); +extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx); +extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx); +extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, + uint8 *mac, uint8 bssidx, char *dngl_name); +extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev); +extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev); +extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev); +extern bool wl_cfg80211_is_concurrent_mode(void); +extern void* wl_cfg80211_get_dhdp(void); +extern bool wl_cfg80211_is_p2p_active(void); +extern bool wl_cfg80211_is_roam_offload(void); +extern bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx); +extern void wl_cfg80211_dbg_level(u32 level); +extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, + enum wl_management_type type); +extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len); +#ifdef WL11ULB +extern s32 wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode); +extern s32 wl_cfg80211_set_ulb_bw(struct net_device *dev, + u32 ulb_bw, char *ifname); +#endif /* WL11ULB */ +#ifdef P2PLISTEN_AP_SAMECHN +extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable); +#endif /* P2PLISTEN_AP_SAMECHN */ + +/* btcoex functions */ +void* wl_cfg80211_btcoex_init(struct net_device *ndev); +void wl_cfg80211_btcoex_deinit(void); + +#ifdef WL_SUPPORT_AUTO_CHANNEL +#define CHANSPEC_BUF_SIZE 1024 +#define CHAN_SEL_IOCTL_DELAY 300 +#define CHAN_SEL_RETRY_COUNT 15 +#define CHANNEL_IS_RADAR(channel) (((channel & WL_CHAN_RADAR) || \ + (channel & WL_CHAN_PASSIVE)) ? true : false) +#define CHANNEL_IS_2G(channel) (((channel >= 1) && (channel <= 14)) ? 
\ + true : false) +#define CHANNEL_IS_5G(channel) (((channel >= 36) && (channel <= 165)) ? \ + true : false) +extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command, + int total_len); +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n); +extern int wl_cfg80211_hang(struct net_device *dev, u16 reason); +extern s32 wl_mode_to_nl80211_iftype(s32 mode); +int wl_cfg80211_do_driver_init(struct net_device *net); +void wl_cfg80211_enable_trace(bool set, u32 level); +extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify); +extern s32 wl_cfg80211_if_is_group_owner(void); +extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); +extern chanspec_t wl_ch_host_to_driver(s32 bssidx, u16 channel); +extern s32 wl_set_tx_power(struct net_device *dev, + enum nl80211_tx_power_setting type, s32 dbm); +extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm); +extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add); +extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev); +extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set); +extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev, + struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev); +extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac); +extern void wl_cfg80211_update_power_mode(struct net_device *dev); +extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command); +extern void wl_terminate_event_handler(void); +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len); +extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len); +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +#define SCAN_BUF_CNT 2 +#define SCAN_BUF_NEXT 1 +#define WL_SCANTYPE_LEGACY 0x1 +#define WL_SCANTYPE_P2P 0x2 +#define wl_escan_set_sync_id(a, b) ((a) = htod16(0x1234)) +#define wl_escan_set_type(a, b) +#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf) +#define wl_escan_check_sync_id(a, b, c) 0 +#define wl_escan_print_sync_id(a, b, c) +#define wl_escan_increment_sync_id(a, b) +#define wl_escan_init_sync_id(a) +extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len); +extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev); +extern void wl_cfg80211_set_rmc_pid(int pid); +extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag, + const u8 *vndr_ie, u32 vndr_ie_len); + + +/* Action frame specific functions */ +extern u8 wl_get_action_category(void *frame, u32 frame_len); +extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action); + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST +struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +#ifdef WL_SUPPORT_ACS +#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */ +#define IOCTL_RETRY_COUNT 5 +#define CHAN_NOISE_DUMMY -80 +#define OBSS_TOKEN_IDX 15 +#define IBSS_TOKEN_IDX 15 +#define TX_TOKEN_IDX 14 +#define CTG_TOKEN_IDX 13 +#define PKT_TOKEN_IDX 15 +#define IDLE_TOKEN_IDX 12 +#endif /* WL_SUPPORT_ACS */ + +extern int wl_cfg80211_get_ioctl_version(void); +extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable); 
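Editor's aside, not part of the patch: the CHANNEL_IS_2G()/CHANNEL_IS_5G() helpers above (guarded by WL_SUPPORT_AUTO_CHANNEL) simply bucket a primary channel number by band. A minimal, hypothetical usage sketch; the helper name and the plain channel-number list are illustrative only:

/* Illustration only -- not in the driver. */
static int example_count_5g_channels(const u8 *chan_list, int n)
{
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		/* CHANNEL_IS_5G() is true for primary channels 36..165 */
		if (CHANNEL_IS_5G(chan_list[i]))
			cnt++;
	}
	return cnt;
}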
+extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, + void *buf, s32 buflen); +extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, + void *buf, s32 buflen); +#if defined(WL_VIRTUAL_APSTA) +extern int wl_cfg80211_interface_create(struct net_device *dev, char *name); +extern int wl_cfg80211_interface_delete(struct net_device *dev, char *name); +#endif /* defined (WL_VIRTUAL_APSTA) */ + +#ifdef WL_NAN +extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, + int cmd_len); +#endif /* WL_NAN */ + +#ifdef WL_CFG80211_P2P_DEV_IF +extern void wl_cfg80211_del_p2p_wdev(void); +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +extern int wl_cfg80211_set_spect(struct net_device *dev, int spect); +extern int wl_cfg80211_get_sta_channel(void); +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#ifdef P2P_LISTEN_OFFLOADING +extern s32 wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len); +extern s32 wl_cfg80211_p2plo_listen_stop(struct net_device *dev); +#endif /* P2P_LISTEN_OFFLOADING */ + +#define RETURN_EIO_IF_NOT_UP(wlpriv) \ +do { \ + struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \ + if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \ + WL_INFORM(("device is not ready\n")); \ + return -EIO; \ + } \ +} while (0) + +#ifdef QOS_MAP_SET +extern uint8 *wl_get_up_table(void); +#endif /* QOS_MAP_SET */ + +#define P2PO_COOKIE 65535 +u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg); +#if defined(SUPPORT_RANDOM_MAC_SCAN) +int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable); +int wl_cfg80211_random_mac_enable(struct net_device *dev); +int wl_cfg80211_random_mac_disable(struct net_device *dev); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +int wl_cfg80211_iface_count(void); +int wl_check_dongle_idle(struct wiphy *wiphy); +#endif /* _wl_cfg80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c new file mode 100644 index 000000000000..1aaa8fe4bfe6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c @@ -0,0 +1,564 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfg_btcoex.c 514727 2014-11-12 03:02:48Z $ + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef PKT_FILTER_SUPPORT +extern uint dhd_pkt_filter_enable; +extern uint dhd_master_mode; +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif + +struct btcoex_info { + struct timer_list timer; + u32 timer_ms; + u32 timer_on; + u32 ts_dhcp_start; /* ms ts ecord time stats */ + u32 ts_dhcp_ok; /* ms ts ecord time stats */ + bool dhcp_done; /* flag, indicates that host done with + * dhcp before t1/t2 expiration + */ + s32 bt_state; + struct work_struct work; + struct net_device *dev; +}; + +static struct btcoex_info *btcoex_info_loc = NULL; + +/* TODO: clean up the BT-Coex code, it still have some legacy ioctl/iovar functions */ + +/* use New SCO/eSCO smart YG suppression */ +#define BT_DHCP_eSCO_FIX +/* this flag boost wifi pkt priority to max, caution: -not fair to sco */ +#define BT_DHCP_USE_FLAGS +/* T1 start SCO/ESCo priority suppression */ +#define BT_DHCP_OPPR_WIN_TIME 2500 +/* T2 turn off SCO/SCO supperesion is (timeout) */ +#define BT_DHCP_FLAG_FORCE_TIME 5500 + +enum wl_cfg80211_btcoex_status { + BT_DHCP_IDLE, + BT_DHCP_START, + BT_DHCP_OPPR_WIN, + BT_DHCP_FLAG_FORCE_TIMEOUT +}; + +/* + * get named driver variable to uint register value and return error indication + * calling example: dev_wlc_intvar_get_reg(dev, "btc_params",66, ®_value) + */ +static int +dev_wlc_intvar_get_reg(struct net_device *dev, char *name, + uint reg, int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + bcm_mkiovar(name, (char *)(®), sizeof(reg), + (char *)(&var), sizeof(var.buf)); + error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false); + + *retval = dtoh32(var.val); + return (error); +} + +static int +dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + char ioctlbuf_local[1024]; +#else + static char ioctlbuf_local[1024]; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + + bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local)); + + return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true)); +} +/* +get named driver variable to uint register value and return error indication +calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value) +*/ +static int +dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val) +{ + char reg_addr[8]; + + memset(reg_addr, 0, sizeof(reg_addr)); + memcpy((char *)®_addr[0], (char *)addr, 4); + memcpy((char *)®_addr[4], (char *)val, 4); + + return (dev_wlc_bufvar_set(dev, name, (char *)®_addr[0], sizeof(reg_addr))); +} + +static bool btcoex_is_sco_active(struct net_device *dev) +{ + int ioc_res = 0; + bool res = FALSE; + int sco_id_cnt = 0; + int param27; + int i; + + for (i = 0; i < 12; i++) { + + ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); + + WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27)); + + if (ioc_res < 0) { + 
WL_ERR(("ioc read btc params error\n")); + break; + } + + if ((param27 & 0x6) == 2) { /* count both sco & esco */ + sco_id_cnt++; + } + + if (sco_id_cnt > 2) { + WL_TRACE(("sco/esco detected, pkt id_cnt:%d samples:%d\n", + sco_id_cnt, i)); + res = TRUE; + break; + } + + OSL_SLEEP(5); + } + + return res; +} + +#if defined(BT_DHCP_eSCO_FIX) +/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */ +static int set_btc_esco_params(struct net_device *dev, bool trump_sco) +{ + static bool saved_status = FALSE; + + char buf_reg50va_dhcp_on[8] = + { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 }; + char buf_reg51va_dhcp_on[8] = + { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg64va_dhcp_on[8] = + { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg65va_dhcp_on[8] = + { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg71va_dhcp_on[8] = + { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + uint32 regaddr; + static uint32 saved_reg50; + static uint32 saved_reg51; + static uint32 saved_reg64; + static uint32 saved_reg65; + static uint32 saved_reg71; + + if (trump_sco) { + /* this should reduce eSCO agressive retransmit + * w/o breaking it + */ + + /* 1st save current */ + WL_TRACE(("Do new SCO/eSCO coex algo {save &" + "override}\n")); + if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) { + saved_status = TRUE; + WL_TRACE(("saved bt_params[50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + saved_reg50, saved_reg51, + saved_reg64, saved_reg65, saved_reg71)); + } else { + WL_ERR((":%s: save btc_params failed\n", + __FUNCTION__)); + saved_status = FALSE; + return -1; + } + + WL_TRACE(("override with [50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + *(u32 *)(buf_reg50va_dhcp_on+4), + *(u32 *)(buf_reg51va_dhcp_on+4), + *(u32 *)(buf_reg64va_dhcp_on+4), + *(u32 *)(buf_reg65va_dhcp_on+4), + *(u32 *)(buf_reg71va_dhcp_on+4))); + + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg50va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg51va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg64va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg65va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg71va_dhcp_on[0], 8); + + saved_status = TRUE; + } else if (saved_status) { + /* restore previously saved bt params */ + WL_TRACE(("Do new SCO/eSCO coex algo {save &" + "override}\n")); + + regaddr = 50; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg50); + regaddr = 51; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg51); + regaddr = 64; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg64); + regaddr = 65; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg65); + regaddr = 71; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg71); + + WL_TRACE(("restore bt_params[50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + saved_reg50, saved_reg51, saved_reg64, + saved_reg65, saved_reg71)); + + saved_status = FALSE; + } else { + WL_ERR((":%s att to restore not saved BTCOEX params\n", + __FUNCTION__)); + return -1; + } + return 0; +} +#endif /* BT_DHCP_eSCO_FIX */ + 
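Editor's aside, not part of the patch: set_btc_esco_params() above follows a save/override/restore pattern on the firmware's btc_params registers, using the dev_wlc_* helpers defined earlier in this file. A reduced, hypothetical sketch of that pattern for a single register; register 50 and its override value are taken from the function, and error handling is trimmed:

/* Illustration only -- not part of the driver. */
static int example_override_btc_reg50(struct net_device *dev, bool override)
{
	static u32 saved_val;	/* firmware value remembered across calls */
	char dhcp_on[8] = { 50, 0, 0, 0, 0x22, 0x80, 0x00, 0x00 };
	u32 reg = 50;

	if (override) {
		/* remember what the firmware currently has ... */
		if (dev_wlc_intvar_get_reg(dev, "btc_params", 50, (int *)&saved_val))
			return -1;
		/* ... then force the DHCP-friendly value */
		return dev_wlc_bufvar_set(dev, "btc_params", dhcp_on, sizeof(dhcp_on));
	}

	/* restore the remembered value via the addr/value pair helper */
	return dev_wlc_intvar_set_reg(dev, "btc_params", (char *)&reg, (char *)&saved_val);
}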
+static void +wl_cfg80211_bt_setflag(struct net_device *dev, bool set) +{ +#if defined(BT_DHCP_USE_FLAGS) + char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 }; + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; +#endif + + +#if defined(BT_DHCP_eSCO_FIX) + /* set = 1, save & turn on 0 - off & restore prev settings */ + set_btc_esco_params(dev, set); +#endif + +#if defined(BT_DHCP_USE_FLAGS) + WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set)); + if (set == TRUE) + /* Forcing bt_flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_dhcp_on[0], + sizeof(buf_flag7_dhcp_on)); + else + /* Restoring default bt flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], + sizeof(buf_flag7_default)); +#endif +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_cfg80211_bt_timerfunc(struct timer_list *t) +#else +static void wl_cfg80211_bt_timerfunc(ulong data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct btcoex_info *bt_local = from_timer(bt_local, t, timer); +#else + struct btcoex_info *bt_local = (struct btcoex_info *)data; +#endif + WL_TRACE(("Enter\n")); + bt_local->timer_on = 0; + schedule_work(&bt_local->work); +} + +static void wl_cfg80211_bt_handler(struct work_struct *work) +{ + struct btcoex_info *btcx_inf; + + btcx_inf = container_of(work, struct btcoex_info, work); + + if (btcx_inf->timer_on) { + btcx_inf->timer_on = 0; + del_timer_sync(&btcx_inf->timer); + } + + switch (btcx_inf->bt_state) { + case BT_DHCP_START: + /* DHCP started + * provide OPPORTUNITY window to get DHCP address + */ + WL_TRACE(("bt_dhcp stm: started \n")); + + btcx_inf->bt_state = BT_DHCP_OPPR_WIN; + mod_timer(&btcx_inf->timer, + jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME)); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_OPPR_WIN: + if (btcx_inf->dhcp_done) { + WL_TRACE(("DHCP Done before T1 expiration\n")); + goto btc_coex_idle; + } + + /* DHCP is not over yet, start lowering BT priority + * enforce btc_params + flags if necessary + */ + WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE); + btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&btcx_inf->timer, + jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME)); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_FLAG_FORCE_TIMEOUT: + if (btcx_inf->dhcp_done) { + WL_TRACE(("DHCP Done before T2 expiration\n")); + } else { + /* Noo dhcp during T1+T2, restore BT priority */ + WL_TRACE(("DHCP wait interval T2:%d msec expired\n", + BT_DHCP_FLAG_FORCE_TIME)); + } + + /* Restoring default bt priority */ + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); +btc_coex_idle: + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + + default: + WL_ERR(("error g_status=%d !!!\n", btcx_inf->bt_state)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + } + + net_os_wake_unlock(btcx_inf->dev); +} + +void* wl_cfg80211_btcoex_init(struct net_device *ndev) +{ + struct btcoex_info *btco_inf = NULL; + + btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL); + if (!btco_inf) + return NULL; + + btco_inf->bt_state = BT_DHCP_IDLE; + btco_inf->ts_dhcp_start = 0; + btco_inf->ts_dhcp_ok = 0; + /* Set up timer for BT */ + btco_inf->timer_ms = 10; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&btco_inf->timer, 
wl_cfg80211_bt_timerfunc, btco_inf->timer_ms); +#else + init_timer(&btco_inf->timer); + btco_inf->timer.data = (ulong)btco_inf; + btco_inf->timer.function = wl_cfg80211_bt_timerfunc; +#endif + + btco_inf->dev = ndev; + + INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler); + + btcoex_info_loc = btco_inf; + return btco_inf; +} + +void wl_cfg80211_btcoex_deinit() +{ + if (!btcoex_info_loc) + return; + + if (btcoex_info_loc->timer_on) { + btcoex_info_loc->timer_on = 0; + del_timer_sync(&btcoex_info_loc->timer); + } + + cancel_work_sync(&btcoex_info_loc->work); + + kfree(btcoex_info_loc); +} + +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command) +{ + + struct btcoex_info *btco_inf = btcoex_info_loc; + char powermode_val = 0; + char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 }; + char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 }; + char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 }; + + uint32 regaddr; + static uint32 saved_reg66; + static uint32 saved_reg41; + static uint32 saved_reg68; + static bool saved_status = FALSE; + + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; + + /* Figure out powermode 1 or o command */ + strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1); + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + WL_TRACE_HW4(("DHCP session starts\n")); + + +#ifdef PKT_FILTER_SUPPORT + dhd->dhcp_in_progress = 1; + + if (dhd->early_suspended) { + WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n")); + dhd_enable_packet_filter(0, dhd); + } +#endif + + /* Retrieve and saved orig regs value */ + if ((saved_status == FALSE) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) { + saved_status = TRUE; + WL_TRACE(("Saved 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + + /* Disable PM mode during dhpc session */ + + /* Disable PM mode during dhpc session */ + /* Start BT timer only for SCO connection */ + if (btcoex_is_sco_active(dev)) { + /* btc_params 66 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg66va_dhcp_on[0], + sizeof(buf_reg66va_dhcp_on)); + /* btc_params 41 0x33 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg41va_dhcp_on[0], + sizeof(buf_reg41va_dhcp_on)); + /* btc_params 68 0x190 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg68va_dhcp_on[0], + sizeof(buf_reg68va_dhcp_on)); + saved_status = TRUE; + + btco_inf->bt_state = BT_DHCP_START; + btco_inf->timer_on = 1; + mod_timer(&btco_inf->timer, btco_inf->timer.expires); + WL_TRACE(("enable BT DHCP Timer\n")); + } + } + else if (saved_status == TRUE) { + WL_ERR(("was called w/o DHCP OFF. 
Continue\n")); + } + } + else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) { + + + +#ifdef PKT_FILTER_SUPPORT + dhd->dhcp_in_progress = 0; + WL_TRACE_HW4(("DHCP is complete \n")); + + /* Enable packet filtering */ + if (dhd->early_suspended) { + WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ + + /* Restoring PM mode */ + + /* Stop any bt timer because DHCP session is done */ + WL_TRACE(("disable BT DHCP Timer\n")); + if (btco_inf->timer_on) { + btco_inf->timer_on = 0; + del_timer_sync(&btco_inf->timer); + + if (btco_inf->bt_state != BT_DHCP_IDLE) { + /* need to restore original btc flags & extra btc params */ + WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state)); + /* wake up btcoex thread to restore btlags+params */ + schedule_work(&btco_inf->work); + } + } + + /* Restoring btc_flag paramter anyway */ + if (saved_status == TRUE) + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], sizeof(buf_flag7_default)); + + /* Restore original values */ + if (saved_status == TRUE) { + regaddr = 66; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg66); + regaddr = 41; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg41); + regaddr = 68; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg68); + + WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + } + saved_status = FALSE; + + } + else { + WL_ERR(("Unkwown yet power setting, ignored\n")); + } + + snprintf(command, 3, "OK"); + + return (strlen("OK")); +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c new file mode 100644 index 000000000000..35901ab1b835 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c @@ -0,0 +1,2591 @@ +/* + * Linux cfgp2p driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: wl_cfgp2p.c 604795 2015-12-08 13:45:42Z $ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static s8 scanparambuf[WLC_IOCTL_SMLEN]; +static bool +wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type); + +static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct wireless_dev *wdev, bool notify); + +#if defined(WL_ENABLE_P2P_IF) +static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev); +static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd); +static int wl_cfgp2p_if_open(struct net_device *net); +static int wl_cfgp2p_if_stop(struct net_device *net); + +static const struct net_device_ops wl_cfgp2p_if_ops = { + .ndo_open = wl_cfgp2p_if_open, + .ndo_stop = wl_cfgp2p_if_stop, + .ndo_do_ioctl = wl_cfgp2p_do_ioctl, + .ndo_start_xmit = wl_cfgp2p_start_xmit, +}; +#endif /* WL_ENABLE_P2P_IF */ + + +bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + + if (frame == NULL) + return false; + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1) + return false; + + if (pact_frm->category == P2P_PUB_AF_CATEGORY && + pact_frm->action == P2P_PUB_AF_ACTION && + pact_frm->oui_type == P2P_VER && + memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) { + return true; + } + + return false; +} + +bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len) +{ + wifi_p2p_action_frame_t *act_frm; + + if (frame == NULL) + return false; + act_frm = (wifi_p2p_action_frame_t *)frame; + if (frame_len < sizeof(wifi_p2p_action_frame_t) -1) + return false; + + if (act_frm->category == P2P_AF_CATEGORY && + act_frm->type == P2P_VER && + memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) { + return true; + } + + return false; +} + +#define GAS_RESP_LEN 2 +#define DOUBLE_TLV_BODY_OFF 4 +#define GAS_RESP_OFFSET 4 +#define GAS_CRESP_OFFSET 5 + +bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len) +{ + bcm_tlv_t *ie = (bcm_tlv_t *)data; + u8 *frame = NULL; + u16 id, flen; + + /* Skipped first ANQP Element, if frame has anqp elemnt */ + ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID); + + if (ie == NULL) + return false; + + frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN; + id = ((u16) (((frame)[1] << 8) | (frame)[0])); + flen = ((u16) (((frame)[3] << 8) | (frame)[2])); + + /* If the contents match the OUI and the type */ + if (flen >= WFA_OUI_LEN + 1 && + id == P2PSD_GAS_NQP_INFOID && + !bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) && + subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) { + return true; + } + + return false; +} + +bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len) +{ + + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + + if (frame == NULL) + return false; + + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1)) + return false; + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + +#ifdef WL11U + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET, + frame_len); + + else if 
(sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET, + frame_len); + else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ) + return true; + else + return false; +#else + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) + return true; + else + return false; +#endif /* WL11U */ +} + +bool wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len) +{ + + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + + if (frame == NULL) + return false; + + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1)) + return false; + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data, + frame_len); + else + return false; +} + +void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + wifi_p2p_action_frame_t *act_frm; + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + if (!frame || frame_len <= 2) + return; + + if (wl_cfgp2p_is_pub_action(frame, frame_len)) { + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + switch (pact_frm->subtype) { + case P2P_PAF_GON_REQ: + CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_GON_RSP: + CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_GON_CONF: + CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_INVITE_REQ: + CFGP2P_ACTION(("%s P2P Invitation Request Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_INVITE_RSP: + CFGP2P_ACTION(("%s P2P Invitation Response Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_DEVDIS_REQ: + CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_DEVDIS_RSP: + CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_PROVDIS_REQ: + CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_PAF_PROVDIS_RSP: + CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + default: + CFGP2P_ACTION(("%s Unknown Public Action Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + + } + + } else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) { + act_frm = (wifi_p2p_action_frame_t *)frame; + switch (act_frm->subtype) { + case P2P_AF_NOTICE_OF_ABSENCE: + CFGP2P_ACTION(("%s P2P Notice of Absence Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_AF_PRESENCE_REQ: + CFGP2P_ACTION(("%s P2P Presence Request Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + case P2P_AF_PRESENCE_RSP: + CFGP2P_ACTION(("%s P2P Presence Response Frame," + " channel=%d\n", (tx)? 
"TX": "RX", channel)); + break; + case P2P_AF_GO_DISC_REQ: + CFGP2P_ACTION(("%s P2P Discoverability Request Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + break; + default: + CFGP2P_ACTION(("%s Unknown P2P Action Frame," + " channel=%d\n", (tx)? "TX": "RX", channel)); + } + + } else if (wl_cfgp2p_is_gas_action(frame, frame_len)) { + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + switch (sd_act_frm->action) { + case P2PSD_ACTION_ID_GAS_IREQ: + CFGP2P_ACTION(("%s GAS Initial Request," + " channel=%d\n", (tx)? "TX" : "RX", channel)); + break; + case P2PSD_ACTION_ID_GAS_IRESP: + CFGP2P_ACTION(("%s GAS Initial Response," + " channel=%d\n", (tx)? "TX" : "RX", channel)); + break; + case P2PSD_ACTION_ID_GAS_CREQ: + CFGP2P_ACTION(("%s GAS Comback Request," + " channel=%d\n", (tx)? "TX" : "RX", channel)); + break; + case P2PSD_ACTION_ID_GAS_CRESP: + CFGP2P_ACTION(("%s GAS Comback Response," + " channel=%d\n", (tx)? "TX" : "RX", channel)); + break; + default: + CFGP2P_ACTION(("%s Unknown GAS Frame," + " channel=%d\n", (tx)? "TX" : "RX", channel)); + } + + + } +} + +/* + * Initialize variables related to P2P + * + */ +s32 +wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg) +{ + if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) { + CFGP2P_ERR(("struct p2p_info allocation failed\n")); + return -ENOMEM; + } + + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg); + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0; + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL; + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0; + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1) = NULL; + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) = -1; + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2) = NULL; + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) = -1; + return BCME_OK; + +} +/* + * Deinitialize variables related to P2P + * + */ +void +wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg) +{ + CFGP2P_ERR(("In\n")); + if (cfg->p2p) { + kfree(cfg->p2p); + cfg->p2p = NULL; + } + cfg->p2p_supported = 0; +} +/* + * Set P2P functions into firmware + */ +s32 +wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg) +{ + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } }; + s32 ret = BCME_OK; + s32 val = 0; + /* Do we have to check whether APSTA is enabled or not ? */ + ret = wldev_iovar_getint(ndev, "apsta", &val); + if (ret < 0) { + CFGP2P_ERR(("get apsta error %d\n", ret)); + return ret; + } + if (val == 0) { + val = 1; + ret = wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true); + if (ret < 0) { + CFGP2P_ERR(("WLC_DOWN error %d\n", ret)); + return ret; + } + + ret = wldev_iovar_setint(ndev, "apsta", val); + if (ret < 0) { + /* return error and fail the initialization */ + CFGP2P_ERR(("wl apsta %d set error. 
ret: %d\n", val, ret)); + return ret; + } + + ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true); + if (ret < 0) { + CFGP2P_ERR(("WLC_UP error %d\n", ret)); + return ret; + } + } + + /* In case of COB type, firmware has default mac address + * After Initializing firmware, we have to set current mac address to + * firmware for P2P device address + */ + ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr, + sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync); + if (ret && ret != BCME_UNSUPPORTED) { + CFGP2P_ERR(("failed to update device address ret %d\n", ret)); + } + return ret; +} + +int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg) +{ + if (!cfg->p2p) { + CFGP2P_DBG(("p2p not enabled! \n")); + return false; + } + + if ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) && + (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1)) + return true; + else + return false; +} + +/* Create a new P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to create + * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT + * @chspec : chspec to use if creating a GO BSS. + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec) +{ + wl_p2p_if_t ifreq; + s32 err; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + + ifreq.type = if_type; + ifreq.chspec = chspec; + memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); + + CFGP2P_DBG(("---cfg p2p_ifadd "MACDBG" %s %u\n", + MAC2STRDBG(ifreq.addr.octet), + (if_type == WL_P2P_IF_GO) ? "go" : "client", + (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT)); + + err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err < 0)) { + printk("'cfg p2p_ifadd' error %d\n", err); + return err; + } + + return err; +} + +/* Disable a P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to disable + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + s32 ret; + struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n", + netdev->ifindex, MAC2STRDBG(mac->octet))); + ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(ret < 0)) { + printk("'cfg p2p_ifdis' error %d\n", ret); + } + return ret; +} + +/* Delete a P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to delete + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + s32 ret; + struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n", + netdev->ifindex, MAC2STRDBG(mac->octet))); + ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(ret < 0)) { + printk("'cfg p2p_ifdel' error %d\n", ret); + } + return ret; +} + +/* Change a P2P Role. + * Parameters: + * @mac : MAC address of the BSS to change a role + * Returns 0 if success. 
+ */ +s32 +wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec, s32 conn_idx) +{ + wl_p2p_if_t ifreq; + s32 err; + + struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, conn_idx); + + ifreq.type = if_type; + ifreq.chspec = chspec; + memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); + + CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u" + " chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet), + (if_type == WL_P2P_IF_GO) ? "go" : "client", + (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT, + ifreq.chspec)); + + err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err < 0)) { + printk("'cfg p2p_ifupd' error %d\n", err); + } else if (if_type == WL_P2P_IF_GO) { + cfg->p2p->p2p_go_count++; + } + return err; +} + + +/* Get the index of a created P2P BSS. + * Parameters: + * @mac : MAC address of the created BSS + * @index : output: index of created BSS + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index) +{ + s32 ret; + u8 getbuf[64]; + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet))); + + ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf, + sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL); + + if (ret == 0) { + memcpy(index, getbuf, sizeof(s32)); + CFGP2P_INFO(("---cfg p2p_if ==> %d\n", *index)); + } + + return ret; +} + +static s32 +wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on) +{ + s32 ret = BCME_OK; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + CFGP2P_DBG(("enter\n")); + + ret = wldev_iovar_setint(ndev, "p2p_disc", on); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret)); + } + + return ret; +} + +/* Set the WL driver's P2P mode. + * Parameters : + * @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}. 
+ * @channel : the channel to listen + * @listen_ms : the time (milli seconds) to wait + * @bssidx : bss index for BSSCFG + * Returns 0 if success + */ + +s32 +wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx) +{ + wl_p2p_disc_st_t discovery_mode; + s32 ret; + struct net_device *dev; + CFGP2P_DBG(("enter\n")); + + if (unlikely(bssidx == WL_INVALID)) { + CFGP2P_ERR((" %d index out of range\n", bssidx)); + return -1; + } + + dev = wl_cfgp2p_find_ndev(cfg, bssidx); + if (unlikely(dev == NULL)) { + CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx)); + return BCME_NOTFOUND; + } + +#ifdef P2PLISTEN_AP_SAMECHN + CFGP2P_DBG(("p2p0 listen channel %d AP connection chan %d \n", + channel, cfg->channel)); + if ((mode == WL_P2P_DISC_ST_LISTEN) && (cfg->channel == channel)) { + struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + if (cfg->p2p_resp_apchn_status) { + CFGP2P_DBG(("p2p_resp_apchn_status already ON \n")); + return BCME_OK; + } + + if (wl_get_drv_status(cfg, CONNECTED, primary_ndev)) { + ret = wl_cfg80211_set_p2p_resp_ap_chn(primary_ndev, 1); + cfg->p2p_resp_apchn_status = true; + CFGP2P_DBG(("p2p_resp_apchn_status ON \n")); + return ret; + } + } +#endif /* P2PLISTEN_AP_SAMECHN */ + + /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */ + discovery_mode.state = mode; + discovery_mode.chspec = wl_ch_host_to_driver(bssidx, channel); + discovery_mode.dwell = listen_ms; + ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode, + sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &cfg->ioctl_buf_sync); + + return ret; +} + +/* Get the index of the P2P Discovery BSS */ +static s32 +wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index) +{ + s32 ret; + struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); + + ret = wldev_iovar_getint(dev, "p2p_dev", index); + CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret)); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("'p2p_dev' error %d\n", ret)); + return ret; + } + return ret; +} + +int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg) +{ + int i; + s32 connected_cnt; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (!dhd) + return (-ENODEV); + for (i = P2PAPI_BSSCFG_CONNECTION1; i < P2PAPI_BSSCFG_MAX; i++) { + if (wl_to_p2p_bss_bssidx(cfg, i) == -1) { + if (i == P2PAPI_BSSCFG_CONNECTION2) { + if (!(dhd->op_mode & DHD_FLAG_MP2P_MODE)) { + CFGP2P_ERR(("Multi p2p not supported")); + return BCME_ERROR; + } + if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) { + CFGP2P_ERR(("Failed to create second p2p interface" + "Already one connection exists")); + return BCME_ERROR; + } + } + return i; + } + } + return BCME_ERROR; +} + +s32 +wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg) +{ + + s32 bssidx = 0; + s32 ret = BCME_OK; + + CFGP2P_DBG(("enter\n")); + + if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) { + CFGP2P_ERR(("do nothing, already initialized\n")); + return ret; + } + + ret = wl_cfgp2p_set_discovery(cfg, 1); + if (ret < 0) { + CFGP2P_ERR(("set discover error\n")); + return ret; + } + /* Enable P2P Discovery in the WL Driver */ + ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx); + + if (ret < 0) { + return ret; + } + /* In case of CFG80211 case, check if p2p_discovery interface has allocated p2p_wdev */ + if (!cfg->p2p_wdev) { + CFGP2P_ERR(("p2p_wdev is NULL.\n")); + return BCME_NODEVICE; + } + /* Make an entry in the netinfo */ + wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx); + + 
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx; + + /* Set the initial discovery state to SCAN */ + ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + + if (unlikely(ret != 0)) { + CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); + wl_cfgp2p_set_discovery(cfg, 0); + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0; + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL; + return 0; + } + return ret; +} + +/* Deinitialize P2P Discovery + * Parameters : + * @cfg : wl_private data + * Returns 0 if succes + */ +static s32 +wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg) +{ + s32 ret = BCME_OK; + s32 bssidx; + + CFGP2P_DBG(("enter\n")); + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + if (bssidx <= 0) { + CFGP2P_ERR(("do nothing, not initialized\n")); + return -1; + } + + /* Clear our saved WPS and P2P IEs for the discovery BSS */ + wl_cfg80211_clear_per_bss_ies(cfg, bssidx); + + /* Set the discovery state to SCAN */ + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + bssidx); + /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */ + ret = wl_cfgp2p_set_discovery(cfg, 0); + + /* Remove the p2p disc entry in the netinfo */ +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_net_info by wdev=%p\n", cfg->p2p_wdev)); +#endif + wl_dealloc_netinfo_by_wdev(cfg, cfg->p2p_wdev); + + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID; + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL; + + return ret; + +} +/* Enable P2P Discovery + * Parameters: + * @cfg : wl_private data + * @ie : probe request ie (WPS IE + P2P IE) + * @ie_len : probe request ie length + * Returns 0 if success. + */ +s32 +wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, + const u8 *ie, u32 ie_len) +{ + s32 ret = BCME_OK; + s32 bssidx; + + CFGP2P_DBG(("enter\n")); + if (wl_get_p2p_status(cfg, DISCOVERY_ON)) { + CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n")); + goto set_ie; + } + + ret = wl_cfgp2p_init_discovery(cfg); + if (unlikely(ret < 0)) { + CFGP2P_ERR((" init discovery error %d\n", ret)); + goto exit; + } + + wl_set_p2p_status(cfg, DISCOVERY_ON); + /* Set wsec to any non-zero value in the discovery bsscfg to ensure our + * P2P probe responses have the privacy bit set in the 802.11 WPA IE. + * Some peer devices may not initiate WPS with us if this bit is not set. + */ + ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), + "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + if (unlikely(ret < 0)) { + CFGP2P_ERR((" wsec error %d\n", ret)); + } +set_ie: + if (ie_len) { + + if (bcmcfg_to_prmry_ndev(cfg) == dev) { + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev)); + return BCME_ERROR; + } + + ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), + bssidx, + VNDR_IE_PRBREQ_FLAG, ie, ie_len); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("set probreq ie occurs error %d\n", ret)); + goto exit; + } + } +exit: + return ret; +} + +/* Disable P2P Discovery + * Parameters: + * @cfg : wl_private_data + * Returns 0 if success. 
+ */ +s32 +wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg) +{ + s32 ret = BCME_OK; + s32 bssidx; + + CFGP2P_DBG((" enter\n")); + wl_clr_p2p_status(cfg, DISCOVERY_ON); + +#ifdef DHD_IFDEBUG + WL_ERR(("%s: (cfg)->p2p->bss[type].bssidx: %d\n", + __FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx)); +#endif + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + if (bssidx <= 0) { + CFGP2P_ERR((" do nothing, not initialized\n")); + return 0; + } + + ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + bssidx); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); + } + /* Do a scan abort to stop the driver's scan engine in case it is still + * waiting out an action frame tx dwell time. + */ + wl_clr_p2p_status(cfg, DISCOVERY_ON); + ret = wl_cfgp2p_deinit_discovery(cfg); + + return ret; +} + +s32 +wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, + u32 num_chans, u16 *channels, + s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr, + p2p_scan_purpose_t p2p_scan_purpose) +{ + s32 ret = BCME_OK; + s32 memsize; + s32 eparams_size; + u32 i; + s8 *memblk; + wl_p2p_scan_t *p2p_params; + wl_escan_params_t *eparams; + wlc_ssid_t ssid; + /* Scan parameters */ +#define P2PAPI_SCAN_NPROBES 1 +#define P2PAPI_SCAN_DWELL_TIME_MS 80 +#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40 +#define P2PAPI_SCAN_HOME_TIME_MS 60 +#define P2PAPI_SCAN_NPROBS_TIME_MS 30 +#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100 + + struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); + /* Allocate scan params which need space for 3 channels and 0 ssids */ + eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE + + OFFSETOF(wl_escan_params_t, params)) + + num_chans * sizeof(eparams->params.channel_list[0]); + + memsize = sizeof(wl_p2p_scan_t) + eparams_size; + memblk = scanparambuf; + if (memsize > sizeof(scanparambuf)) { + CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n", + memsize, sizeof(scanparambuf))); + return -1; + } + memset(memblk, 0, memsize); + memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN); + if (search_state == WL_P2P_DISC_ST_SEARCH) { + /* + * If we in SEARCH STATE, we don't need to set SSID explictly + * because dongle use P2P WILDCARD internally by default + */ + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx); + /* use null ssid */ + ssid.SSID_len = 0; + memset(&ssid.SSID, 0, sizeof(ssid.SSID)); + } else if (search_state == WL_P2P_DISC_ST_SCAN) { + /* SCAN STATE 802.11 SCAN + * WFD Supplicant has p2p_find command with (type=progressive, type= full) + * So if P2P_find command with type=progressive, + * we have to set ssid to P2P WILDCARD because + * we just do broadcast scan unless setting SSID + */ + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx); + /* use wild card ssid */ + ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN; + memset(&ssid.SSID, 0, sizeof(ssid.SSID)); + memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN); + } else { + CFGP2P_ERR((" invalid search state %d\n", search_state)); + return -1; + } + + + /* Fill in the P2P scan structure at the start of the iovar param block */ + p2p_params = (wl_p2p_scan_t*) memblk; + p2p_params->type = 'E'; + /* Fill in the Scan structure that follows the P2P scan structure */ + eparams = (wl_escan_params_t*) (p2p_params + 1); + eparams->params.bss_type = DOT11_BSSTYPE_ANY; + if (active) + eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE; + else + eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE; + + if (tx_dst_addr == 
NULL) + memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN); + else + memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN); + + if (ssid.SSID_len) + memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t)); + + eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS); + + switch (p2p_scan_purpose) { + case P2P_SCAN_SOCIAL_CHANNEL: + eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS); + break; + case P2P_SCAN_AFX_PEER_NORMAL: + case P2P_SCAN_AFX_PEER_REDUCED: + eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS); + break; + case P2P_SCAN_CONNECT_TRY: + eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS); + break; + default: + if (wl_get_drv_status_all(cfg, CONNECTED)) + eparams->params.active_time = -1; + else + eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS); + break; + } + + if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY) + eparams->params.nprobes = htod32(eparams->params.active_time / + WL_SCAN_JOIN_PROBE_INTERVAL_MS); + else + eparams->params.nprobes = htod32((eparams->params.active_time / + P2PAPI_SCAN_NPROBS_TIME_MS)); + + + if (eparams->params.nprobes <= 0) + eparams->params.nprobes = 1; + CFGP2P_DBG(("nprobes # %d, active_time %d\n", + eparams->params.nprobes, eparams->params.active_time)); + eparams->params.passive_time = htod32(-1); + eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) | + (num_chans & WL_SCAN_PARAMS_COUNT_MASK)); + + for (i = 0; i < num_chans; i++) { + eparams->params.channel_list[i] = wl_ch_host_to_driver(bssidx, channels[i]); + } + eparams->version = htod32(ESCAN_REQ_VERSION); + eparams->action = htod16(action); + wl_escan_set_sync_id(eparams->sync_id, cfg); + wl_escan_set_type(cfg, WL_SCANTYPE_P2P); + CFGP2P_INFO(("SCAN CHANNELS : ")); + + for (i = 0; i < num_chans; i++) { + if (i == 0) CFGP2P_INFO(("%d", channels[i])); + else CFGP2P_INFO((",%d", channels[i])); + } + + CFGP2P_INFO(("\n")); + + ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan", + memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + WL_SCAN(("P2P_SEARCH sync ID: %d, bssidx: %d\n", eparams->sync_id, bssidx)); + if (ret == BCME_OK) + wl_set_p2p_status(cfg, SCANNING); + return ret; +} + +/* Search function to reach a common channel to send an action frame + * Parameters: + * @cfg : wl_private data + * @ndev : net device for bssidx + * @bssidx : bssidx for BSS + * Returns 0 if success. 
+ */ +s32 +wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, + s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr) +{ + s32 ret = 0; + u32 chan_cnt = 0; + u16 *default_chan_list = NULL; + p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL; + if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID) + return -EINVAL; + WL_TRACE_HW4((" Enter\n")); + if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY)) + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + if (channel) + chan_cnt = AF_PEER_SEARCH_CNT; + else + chan_cnt = SOCIAL_CHAN_CNT; + + if (cfg->afx_hdl->pending_tx_act_frm && cfg->afx_hdl->is_active) { + wl_action_frame_t *action_frame; + action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame); + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) { + chan_cnt = 1; + p2p_scan_purpose = P2P_SCAN_AFX_PEER_REDUCED; + } + } + + default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL); + if (default_chan_list == NULL) { + CFGP2P_ERR(("channel list allocation failed \n")); + ret = -ENOMEM; + goto exit; + } + if (channel) { + u32 i; + /* insert same channel to the chan_list */ + for (i = 0; i < chan_cnt; i++) { + default_chan_list[i] = channel; + } + } else { + default_chan_list[0] = SOCIAL_CHAN_1; + default_chan_list[1] = SOCIAL_CHAN_2; + default_chan_list[2] = SOCIAL_CHAN_3; + } + ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt, + default_chan_list, WL_P2P_DISC_ST_SEARCH, + WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose); + kfree(default_chan_list); +exit: + return ret; +} + +/* Check whether pointed-to IE looks like WPA. */ +#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE) +/* Check whether pointed-to IE looks like WPS. */ +#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE) +/* Check whether the given IE looks like WFA P2P IE. */ +#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P) +/* Check whether the given IE looks like WFA WFDisplay IE. */ +#ifndef WFA_OUI_TYPE_WFD +#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */ +#endif +#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD) + + +/* Is any of the tlvs the expected entry? If + * not update the tlvs buffer pointer/length. 
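+ * Returns TRUE when the vendor IE's OUI and type match; otherwise, when + * tlvs is provided, it advances *tlvs past this IE and shrinks *tlvs_len + * so the caller can continue scanning from the next TLV.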
+ */ +static bool +wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type) +{ + /* If the contents match the OUI and the type */ + if (ie[TLV_LEN_OFF] >= oui_len + 1 && + !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) && + type == ie[TLV_BODY_OFF + oui_len]) { + return TRUE; + } + + if (tlvs == NULL) + return FALSE; + /* point to the next ie */ + ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + + return FALSE; +} + +wpa_ie_fixed_t * +wl_cfgp2p_find_wpaie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) { + return (wpa_ie_fixed_t *)ie; + } + } + return NULL; +} + +wpa_ie_fixed_t * +wl_cfgp2p_find_wpsie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) { + return (wpa_ie_fixed_t *)ie; + } + } + return NULL; +} + +wifi_p2p_ie_t * +wl_cfgp2p_find_p2pie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) { + return (wifi_p2p_ie_t *)ie; + } + } + return NULL; +} + +wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) { + return (wifi_wfd_ie_t *)ie; + } + } + return NULL; +} +u32 +wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd) +{ + vndr_ie_setbuf_t hdr; /* aligned temporary vndr_ie buffer header */ + s32 iecount; + u32 data_offset; + + /* Validate the pktflag parameter */ + if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | + VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | + VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) { + CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag)); + return -1; + } + + /* Copy the vndr_ie SET command ("add"/"del") to the buffer */ + strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1); + hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Set the IE count - the buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32)); + + /* Copy packet flags that indicate which packets will contain this IE */ + pktflag = htod32(pktflag); + memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, + sizeof(u32)); + + /* Add the IE ID to the buffer */ + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id; + + /* Add the IE length to the buffer */ + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = + (uint8) VNDR_IE_MIN_LEN + datalen; + + /* Add the IE OUI to the buffer */ + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0]; + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1]; + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2]; + + /* Copy the aligned temporary vndr_ie buffer header to the IE buffer */ + memcpy(iebuf, &hdr, sizeof(hdr) - 1); + + /* Copy the IE data to the IE buffer */ + data_offset = + (u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] - + (u8*)&hdr; + memcpy(iebuf + data_offset, data, datalen); + return data_offset + datalen; + +} + +struct net_device * +wl_cfgp2p_find_ndev(struct 
bcm_cfg80211 *cfg, s32 bssidx) +{ + u32 i; + struct net_device *ndev = NULL; + if (bssidx < 0) { + CFGP2P_ERR((" bsscfg idx is invalid\n")); + goto exit; + } + + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) { + ndev = wl_to_p2p_bss_ndev(cfg, i); + break; + } + } + +exit: + return ndev; +} +/* + * Search the driver array idx based on bssidx argument + * Parameters: Note that this idx is applicable only + * for primary and P2P interfaces. The virtual AP/STA is not + * covered here. + * @cfg : wl_private data + * @bssidx : bssidx which indicate bsscfg->idx of firmware. + * @type : output arg to store array idx of p2p->bss. + * Returns error + */ + +s32 +wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type) +{ + u32 i; + if (bssidx < 0 || type == NULL) { + CFGP2P_ERR((" argument is invalid\n")); + goto exit; + } + + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) { + *type = i; + return BCME_OK; + } + } + +exit: + return BCME_BADARG; +} + +/* + * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE + */ +s32 +wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 ret = BCME_OK; + struct net_device *ndev = NULL; + + if (!cfg || !cfg->p2p || !cfgdev) + return BCME_ERROR; + + CFGP2P_DBG((" Enter\n")); + +#ifdef DHD_IFDEBUG + WL_ERR(("%s: cfg: %p, cfgdev: %p, cfg->wdev: %p, cfg->p2p_wdev: %p\n", + __FUNCTION__, cfg, cfgdev, cfg->wdev, cfg->p2p_wdev)); +#endif + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + wl_clr_p2p_status(cfg, DISC_IN_PROGRESS); + CFGP2P_ERR(("DISC_IN_PROGRESS cleared\n")); + if (ndev && (ndev->ieee80211_ptr != NULL)) { +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) { + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); + } else { + CFGP2P_ERR(("Invalid cfgdev. 
Dropping the" + "remain_on_channel_expired event.\n")); + } +#else + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ + } + } +#endif /* P2P_LISTEN_OFFLOADING */ + + if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) { + wl_set_p2p_status(cfg, LISTEN_EXPIRED); + if (timer_pending(&cfg->p2p->listen_timer)) { + del_timer_sync(&cfg->p2p->listen_timer); + } + + if (cfg->afx_hdl->is_listen == TRUE && + wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_DBG(("Listen DONE for action frame\n")); + complete(&cfg->act_frm_scan); + } +#ifdef WL_CFG80211_SYNC_GON + else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) { + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev); + WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n", + jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies))); + + if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) + wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev); + + complete(&cfg->wait_next_af); + } +#endif /* WL_CFG80211_SYNC_GON */ + +#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL)) { +#else + if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) || + wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL)) { +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + WL_DBG(("Listen DONE for remain on channel expired\n")); + wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + if (ndev && (ndev->ieee80211_ptr != NULL)) { +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) { + /* + * To prevent kernel panic, + * if cfgdev->wiphy may be invalid, adding explicit check + */ + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); + } else { + CFGP2P_ERR(("Invalid cfgdev. Dropping the" + "remain_on_channel_expired event.\n")); + } +#else + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ + } + } + if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg), + WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) { + CFGP2P_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n")); + } + } else + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); + + return ret; + +} + +/* + * Timer expire callback function for LISTEN + * We can't report cfg80211_remain_on_channel_expired from Timer ISR context, + * so lets do it from thread context. + */ +void +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +wl_cfgp2p_listen_expired(struct timer_list *t) +#else +wl_cfgp2p_listen_expired(unsigned long data) +#endif +{ + wl_event_msg_t msg; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct p2p_info *ip2p; + struct bcm_cfg80211 *cfg; + + ip2p = from_timer(ip2p, t, listen_timer); + cfg = ip2p->bcm_cfg; +#else + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data; +#endif + CFGP2P_DBG((" Enter\n")); + bzero(&msg, sizeof(wl_event_msg_t)); + msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE); + msg.bsscfgidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); +#if defined(WL_ENABLE_P2P_IF) + wl_cfg80211_event(cfg->p2p_net ? 
cfg->p2p_net : + wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL); +#else + wl_cfg80211_event(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, + NULL); +#endif /* WL_ENABLE_P2P_IF */ +} +/* + * Routine for cancelling the P2P LISTEN + */ +static s32 +wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct wireless_dev *wdev, bool notify) +{ + WL_DBG(("Enter \n")); + /* Irrespective of whether timer is running or not, reset + * the LISTEN state. + */ + if (timer_pending(&cfg->p2p->listen_timer)) { + del_timer_sync(&cfg->p2p->listen_timer); + if (notify) { +#if defined(WL_CFG80211_P2P_DEV_IF) + if (wdev) + cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); +#else + if (ndev && ndev->ieee80211_ptr) + cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id, + &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ + } + } + return 0; +} +/* + * Do a P2P Listen on the given channel for the given duration. + * A listen consists of sitting idle and responding to P2P probe requests + * with a P2P probe response. + * + * This fn assumes dongle p2p device discovery is already enabled. + * Parameters : + * @cfg : wl_private data + * @channel : channel to listen + * @duration_ms : the time (milli seconds) to wait + */ +s32 +wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms) +{ +#define EXTRA_DELAY_TIME 100 + s32 ret = BCME_OK; + struct timer_list *_timer; + s32 extra_delay; + struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms)); + if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) { + + CFGP2P_ERR((" Discovery is not set, so we have noting to do\n")); + + ret = BCME_NOTREADY; + goto exit; + } + if (timer_pending(&cfg->p2p->listen_timer)) { + CFGP2P_DBG(("previous LISTEN is not completed yet\n")); + goto exit; + + } +#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + else + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); +#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) { + CFGP2P_ERR((" failed to set WLC_E_P2P_PROPREQ_MSG\n")); + } + + ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + _timer = &cfg->p2p->listen_timer; + + /* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle , + * otherwise we will wait up to duration_ms + 100ms + duration / 10 + */ + if (ret == BCME_OK) { + extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10); + } else { + /* if failed to set listen, it doesn't need to wait whole duration. 
*/ + duration_ms = 100 + duration_ms / 20; + extra_delay = 0; + } + + cfg->p2p->bcm_cfg = cfg; + INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay); +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +#undef EXTRA_DELAY_TIME +exit: + return ret; +} + + +s32 +wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable) +{ + s32 ret = BCME_OK; + CFGP2P_DBG((" Enter\n")); + if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) { + + CFGP2P_DBG((" do nothing, discovery is off\n")); + return ret; + } + if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) { + CFGP2P_DBG(("already : %d\n", enable)); + return ret; + } + + wl_chg_p2p_status(cfg, SEARCH_ENABLED); + /* When disabling Search, reset the WL driver's p2p discovery state to + * WL_P2P_DISC_ST_SCAN. + */ + if (!enable) { + wl_clr_p2p_status(cfg, SCANNING); + ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } + + return ret; +} + +/* + * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE + */ +s32 +wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 ret = BCME_OK; + u32 event_type = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + struct net_device *ndev = NULL; + CFGP2P_DBG((" Enter\n")); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) { + if (event_type == WLC_E_ACTION_FRAME_COMPLETE) { + + CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status)); + if (status == WLC_E_STATUS_SUCCESS) { + wl_set_p2p_status(cfg, ACTION_TX_COMPLETED); + CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n")); + if (!cfg->need_wait_afrx && cfg->af_sent_channel) { + CFGP2P_DBG(("no need to wait next AF.\n")); + wl_stop_wait_next_action_frame(cfg, ndev); + } + } + else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) { + wl_set_p2p_status(cfg, ACTION_TX_NOACK); + CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n")); + wl_stop_wait_next_action_frame(cfg, ndev); + } + } else { + CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received," + "status : %d\n", status)); + + if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) + complete(&cfg->send_af_done); + } + } + return ret; +} +/* Send an action frame immediately without doing channel synchronization. + * + * This function does not wait for a completion event before returning. + * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action + * frame is transmitted. + * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an + * 802.11 ack has been received for the sent action frame. 
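+ * The event handler above (wl_cfgp2p_action_tx_complete) records the result + * in the ACTION_TX_COMPLETED / ACTION_TX_NOACK status bits, which this + * function checks after waiting on the send_af_done completion.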
+ */ +s32 +wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev, + wl_af_params_t *af_params, s32 bssidx) +{ + s32 ret = BCME_OK; + s32 evt_ret = BCME_OK; + s32 timeout = 0; + wl_eventmsg_buf_t buf; + + + CFGP2P_INFO(("\n")); + CFGP2P_INFO(("channel : %u , dwell time : %u\n", + af_params->channel, af_params->dwell_time)); + + wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED); + wl_clr_p2p_status(cfg, ACTION_TX_NOACK); + + bzero(&buf, sizeof(wl_eventmsg_buf_t)); + wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true); + wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true); + if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) + return evt_ret; + + cfg->af_sent_channel = af_params->channel; +#ifdef WL_CFG80211_SYNC_GON + cfg->af_tx_sent_jiffies = jiffies; +#endif /* WL_CFG80211_SYNC_GON */ + + ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + + if (ret < 0) { + CFGP2P_ERR((" sending action frame is failed\n")); + goto exit; + } + + timeout = wait_for_completion_timeout(&cfg->send_af_done, + msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX)); + + if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) { + CFGP2P_INFO(("tx action frame operation is completed\n")); + ret = BCME_OK; + } else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) { + CFGP2P_INFO(("bcast tx action frame operation is completed\n")); + ret = BCME_OK; + } else { + ret = BCME_ERROR; + CFGP2P_INFO(("tx action frame operation is failed\n")); + } + /* clear status bit for action tx */ + wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED); + wl_clr_p2p_status(cfg, ACTION_TX_NOACK); + +exit: + CFGP2P_INFO((" via act frame iovar : status = %d\n", ret)); + + bzero(&buf, sizeof(wl_eventmsg_buf_t)); + wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false); + wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false); + if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) { + WL_ERR(("TX frame events revert back failed \n")); + return evt_ret; + } + + return ret; +} + +/* Generate our P2P Device Address and P2P Interface Address from our primary + * MAC address. + */ +void +wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr) +{ + struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE); + struct ether_addr *int_addr; + + memcpy(mac_addr, primary_addr, sizeof(struct ether_addr)); + mac_addr->octet[0] |= 0x02; + WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet))); + + int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1); + memcpy(int_addr, mac_addr, sizeof(struct ether_addr)); + int_addr->octet[4] ^= 0x80; + WL_DBG(("Primary P2P Interface address:"MACDBG "\n", MAC2STRDBG(int_addr->octet))); + + int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION2); + memcpy(int_addr, mac_addr, sizeof(struct ether_addr)); + int_addr->octet[4] ^= 0x90; +} + +/* P2P IF Address change to Virtual Interface MAC Address */ +void +wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id) +{ + wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf; + u16 len = ie->len; + u8 *subel; + u8 subelt_id; + u16 subelt_len; + CFGP2P_DBG((" Enter\n")); + + /* Point subel to the P2P IE's subelt field. + * Subtract the preceding fields (id, len, OUI, oui_type) from the length. 
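+ * Each subelement that follows is a TLV: a 1-byte attribute ID, a 2-byte + * little-endian length, then the attribute value bytes; the loop below + * walks the buffer using exactly that layout.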
+ */ + subel = ie->subelts; + len -= 4; /* exclude OUI + OUI_TYPE */ + + while (len >= 3) { + /* attribute id */ + subelt_id = *subel; + subel += 1; + len -= 1; + + /* 2-byte little endian */ + subelt_len = *subel++; + subelt_len |= *subel++ << 8; + + len -= 2; + len -= subelt_len; /* for the remaining subelt fields */ + + if (subelt_id == element_id) { + if (subelt_id == P2P_SEID_INTINTADDR) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n")); + } else if (subelt_id == P2P_SEID_DEV_ID) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("Device ID ATTR FOUND\n")); + } else if (subelt_id == P2P_SEID_DEV_INFO) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("Device INFO ATTR FOUND\n")); + } else if (subelt_id == P2P_SEID_GROUP_ID) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("GROUP ID ATTR FOUND\n")); + } return; + } else { + CFGP2P_DBG(("OTHER id : %d\n", subelt_id)); + } + subel += subelt_len; + } +} +/* + * Check if a BSS is up. + * This is a common implementation called by most OSL implementations of + * p2posl_bss_isup(). DO NOT call this function directly from the + * common code -- call p2posl_bss_isup() instead to allow the OSL to + * override the common implementation if necessary. + */ +bool +wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx) +{ + s32 result, val; + bool isup = false; + s8 getbuf[64]; + + /* Check if the BSS is up */ + *(int*)getbuf = -1; + result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx, + sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL); + if (result != 0) { + CFGP2P_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result)); + CFGP2P_ERR(("NOTE: this ioctl error is normal " + "when the BSS has not been created yet.\n")); + } else { + val = *(int*)getbuf; + val = dtoh32(val); + CFGP2P_INFO(("---cfg bss -C %d ==> %d\n", bsscfg_idx, val)); + isup = (val ? TRUE : FALSE); + } + return isup; +} + + +/* Bring up or down a BSS */ +s32 +wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 is_up) +{ + s32 ret = BCME_OK; + s32 val = is_up ? 1 : 0; + + struct { + s32 cfg; + s32 val; + } bss_setbuf; + + bss_setbuf.cfg = htod32(bsscfg_idx); + bss_setbuf.val = htod32(val); + CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, is_up ? "up" : "down")); + ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + + if (ret != 0) { + CFGP2P_ERR(("'bss %d' failed with %d\n", is_up, ret)); + } + + return ret; +} + +/* Check if 'p2p' is supported in the driver */ +s32 +wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + s32 ret = BCME_OK; + s32 p2p_supported = 0; + ret = wldev_iovar_getint(ndev, "p2p", + &p2p_supported); + if (ret < 0) { + if (ret == BCME_UNSUPPORTED) { + CFGP2P_INFO(("p2p is unsupported\n")); + return 0; + } else { + CFGP2P_ERR(("cfg p2p error %d\n", ret)); + return ret; + } + } + if (p2p_supported == 1) { + CFGP2P_INFO(("p2p is supported\n")); + } else { + CFGP2P_INFO(("p2p is unsupported\n")); + p2p_supported = 0; + } + return p2p_supported; +} +/* Cleanup P2P resources */ +s32 +wl_cfgp2p_down(struct bcm_cfg80211 *cfg) +{ + struct net_device *ndev = NULL; + struct wireless_dev *wdev = NULL; + s32 i = 0, index = -1; + +#if defined(WL_CFG80211_P2P_DEV_IF) + ndev = bcmcfg_to_prmry_ndev(cfg); + wdev = bcmcfg_to_p2p_wdev(cfg); +#elif defined(WL_ENABLE_P2P_IF) + ndev = cfg->p2p_net ? 
cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg); + wdev = ndev_to_wdev(ndev); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE); + wl_cfgp2p_disable_discovery(cfg); + +#if defined(WL_CFG80211_P2P_DEV_IF) && !defined(KEEP_WIFION_OPTION) + if (cfg->p2p_wdev) { + /* If p2p wdev is left out, clean it up */ + WL_ERR(("Clean up the p2p discovery IF\n")); + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); + } +#endif /* WL_CFG80211_P2P_DEV_IF !defined(KEEP_WIFION_OPTION) */ + + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + index = wl_to_p2p_bss_bssidx(cfg, i); + if (index != WL_INVALID) + wl_cfg80211_clear_per_bss_ies(cfg, index); + } + wl_cfgp2p_deinit_priv(cfg); + return 0; +} + +int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg) +{ + if (cfg->p2p && ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) || + (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1))) + return true; + else + return false; + +} + +s32 +wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len) +{ + s32 ret = -1; + int count, start, duration; + wl_p2p_sched_t dongle_noa; + s32 bssidx, type; + int iovar_len = sizeof(dongle_noa); + CFGP2P_DBG((" Enter\n")); + + memset(&dongle_noa, 0, sizeof(dongle_noa)); + + if (wl_cfgp2p_vif_created(cfg)) { + cfg->p2p->noa.desc[0].start = 0; + + sscanf(buf, "%10d %10d %10d", &count, &start, &duration); + CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n", + count, start, duration)); + if (count != -1) + cfg->p2p->noa.desc[0].count = count; + + /* supplicant gives interval as start */ + if (start != -1) + cfg->p2p->noa.desc[0].interval = start; + + if (duration != -1) + cfg->p2p->noa.desc[0].duration = duration; + + if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) { + cfg->p2p->noa.desc[0].start = 200; + dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS; + dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF; + dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS; + } + else if (cfg->p2p->noa.desc[0].count == 0) { + cfg->p2p->noa.desc[0].start = 0; + dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; + dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL; + dongle_noa.action = WL_P2P_SCHED_ACTION_RESET; + } + else { + /* Continuous NoA interval. 
*/ + dongle_noa.action = WL_P2P_SCHED_ACTION_DOZE; + dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; + if ((cfg->p2p->noa.desc[0].interval == 102) || + (cfg->p2p->noa.desc[0].interval == 100)) { + cfg->p2p->noa.desc[0].start = 100 - + cfg->p2p->noa.desc[0].duration; + dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT; + } + else { + dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL; + } + } + /* Put the noa descriptor in dongle format for dongle */ + dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count); + if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) { + dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start); + dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration); + } + else { + dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000); + dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000); + } + dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000); + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) + return BCME_ERROR; + + if (dongle_noa.action == WL_P2P_SCHED_ACTION_RESET) { + iovar_len -= sizeof(wl_p2p_sched_desc_t); + } + + ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, type), + "p2p_noa", &dongle_noa, iovar_len, cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + + if (ret < 0) { + CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret)); + } + } + else { + CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n")); + } + return ret; +} +s32 +wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len) +{ + + wifi_p2p_noa_desc_t *noa_desc; + int len = 0, i; + char _buf[200]; + + CFGP2P_DBG((" Enter\n")); + buf[0] = '\0'; + if (wl_cfgp2p_vif_created(cfg)) { + if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) { + _buf[0] = 1; /* noa index */ + _buf[1] = (cfg->p2p->ops.ops ? 
0x80: 0) | + (cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */ + len += 2; + if (cfg->p2p->noa.desc[0].count) { + noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len]; + noa_desc->cnt_type = cfg->p2p->noa.desc[0].count; + noa_desc->duration = cfg->p2p->noa.desc[0].duration; + noa_desc->interval = cfg->p2p->noa.desc[0].interval; + noa_desc->start = cfg->p2p->noa.desc[0].start; + len += sizeof(wifi_p2p_noa_desc_t); + } + if (buf_len <= len * 2) { + CFGP2P_ERR(("ERROR: buf_len %d in not enough for" + "returning noa in string format\n", buf_len)); + return -1; + } + /* We have to convert the buffer data into ASCII strings */ + for (i = 0; i < len; i++) { + snprintf(buf, 3, "%02x", _buf[i]); + buf += 2; + } + buf[i*2] = '\0'; + } + } + else { + CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n")); + return -1; + } + return len * 2; +} +s32 +wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len) +{ + int ps, ctw; + int ret = -1; + s32 legacy_ps; + s32 conn_idx; + s32 bssidx; + struct net_device *dev; + + CFGP2P_DBG((" Enter\n")); + if (wl_cfgp2p_vif_created(cfg)) { + sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw); + CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw)); + + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) + return BCME_ERROR; + dev = wl_to_p2p_bss_ndev(cfg, conn_idx); + if (ctw != -1) { + cfg->p2p->ops.ctw = ctw; + ret = 0; + } + if (ps != -1) { + cfg->p2p->ops.ops = ps; + ret = wldev_iovar_setbuf(dev, + "p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret < 0) { + CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret)); + } + } + + if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) { + ret = wldev_ioctl(dev, + WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true); + if (unlikely(ret)) + CFGP2P_ERR(("error (%d)\n", ret)); + wl_cfg80211_update_power_mode(dev); + } + else + CFGP2P_ERR(("ilegal setting\n")); + } + else { + CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n")); + ret = -1; + } + return ret; +} + +s32 +wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len) +{ + int ch, bw; + s32 conn_idx; + s32 bssidx; + struct net_device *dev; + char smbuf[WLC_IOCTL_SMLEN]; + wl_chan_switch_t csa_arg; + u32 chnsp = 0; + int err = 0; + + CFGP2P_DBG((" Enter\n")); + if (wl_cfgp2p_vif_created(cfg)) { + sscanf(buf, "%10d %10d", &ch, &bw); + CFGP2P_DBG(("Enter ch %d bw %d\n", ch, bw)); + + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) { + return BCME_ERROR; + } + dev = wl_to_p2p_bss_ndev(cfg, conn_idx); + if (ch <= 0 || bw <= 0) { + CFGP2P_ERR(("Negative value not permitted!\n")); + return BCME_ERROR; + } + + csa_arg.mode = DOT11_CSA_MODE_ADVISORY; + csa_arg.count = P2P_ECSA_CNT; + csa_arg.reg = 0; + + sprintf(buf, "%d/%d", ch, bw); + chnsp = wf_chspec_aton(buf); + if (chnsp == 0) { + CFGP2P_ERR(("%s:chsp is not correct\n", __FUNCTION__)); + return BCME_ERROR; + } + chnsp = wl_chspec_host_to_driver(chnsp); + csa_arg.chspec = chnsp; + + err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), + smbuf, sizeof(smbuf), NULL); + if (err) { + CFGP2P_ERR(("%s:set p2p_ecsa failed:%d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + } else { + CFGP2P_ERR(("ERROR: set_p2p_ecsa in non-p2p mode\n")); + return BCME_ERROR; + } + return BCME_OK; +} + +u8 * +wl_cfgp2p_retreive_p2pattrib(void *buf, u8 
element_id) +{ + wifi_p2p_ie_t *ie = NULL; + u16 len = 0; + u8 *subel; + u8 subelt_id; + u16 subelt_len; + + if (!buf) { + WL_ERR(("P2P IE not present")); + return 0; + } + + ie = (wifi_p2p_ie_t*) buf; + len = ie->len; + + /* Point subel to the P2P IE's subelt field. + * Subtract the preceding fields (id, len, OUI, oui_type) from the length. + */ + subel = ie->subelts; + len -= 4; /* exclude OUI + OUI_TYPE */ + + while (len >= 3) { + /* attribute id */ + subelt_id = *subel; + subel += 1; + len -= 1; + + /* 2-byte little endian */ + subelt_len = *subel++; + subelt_len |= *subel++ << 8; + + len -= 2; + len -= subelt_len; /* for the remaining subelt fields */ + + if (subelt_id == element_id) { + /* This will point to start of subelement attrib after + * attribute id & len + */ + return subel; + } + + /* Go to next subelement */ + subel += subelt_len; + } + + /* Not Found */ + return NULL; +} + +#define P2P_GROUP_CAPAB_GO_BIT 0x01 + +u8* +wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib) +{ + bcm_tlv_t *ie; + u8* pAttrib; + + CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len)); + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) { + /* Have the P2p ie. Now check for attribute */ + if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) { + CFGP2P_INFO(("P2P attribute %d was found at parse %p", + attrib, parse)); + return pAttrib; + } + else { + parse += (ie->len + TLV_HDR_LEN); + len -= (ie->len + TLV_HDR_LEN); + CFGP2P_INFO(("P2P Attribute %d not found Moving parse" + " to %p len to %d", attrib, parse, len)); + } + } + else { + /* It was not p2p IE. parse will get updated automatically to next TLV */ + CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len)); + } + } + CFGP2P_ERR(("P2P attribute %d was NOT found", attrib)); + return NULL; +} + +u8 * +wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length) +{ + u8 *capability = NULL; + bool p2p_go = 0; + u8 *ptr = NULL; + + if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset, + bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) { + WL_ERR(("P2P Capability attribute not found")); + return NULL; + } + + /* Check Group capability for Group Owner bit */ + p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT; + if (!p2p_go) { + return bi->BSSID.octet; + } + + /* In probe responses, DEVICE INFO attribute will be present */ + if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset, + bi->ie_length, P2P_SEID_DEV_INFO))) { + /* If DEVICE_INFO is not found, this might be a beacon frame. + * check for DEVICE_ID in the beacon frame. 
+ */ + ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset, + bi->ie_length, P2P_SEID_DEV_ID); + } + + if (!ptr) + WL_ERR((" Both DEVICE_ID & DEVICE_INFO attribute not present in P2P IE ")); + + return ptr; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) +static void +wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + snprintf(info->driver, sizeof(info->driver), "p2p"); + snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0)); +} + +struct ethtool_ops cfgp2p_ethtool_ops = { + .get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if defined(WL_ENABLE_P2P_IF) +s32 +wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg) +{ + int ret = 0; + struct net_device* net = NULL; + struct wireless_dev *wdev = NULL; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 }; + + if (cfg->p2p_net) { + CFGP2P_ERR(("p2p_net defined already.\n")); + return -EINVAL; + } + + /* Allocate etherdev, including space for private structure */ + if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) { + CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); + return -ENODEV; + } + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + free_netdev(net); + return -ENOMEM; + } + + strncpy(net->name, "p2p%d", sizeof(net->name) - 1); + net->name[IFNAMSIZ - 1] = '\0'; + + /* Copy the reference to bcm_cfg80211 */ + memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *)); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->do_ioctl = wl_cfgp2p_do_ioctl; + net->hard_start_xmit = wl_cfgp2p_start_xmit; + net->open = wl_cfgp2p_if_open; + net->stop = wl_cfgp2p_if_stop; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &wl_cfgp2p_if_ops; +#endif + + /* Register with a dummy MAC addr */ + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + wdev->wiphy = cfg->wdev->wiphy; + + wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS); + + net->ieee80211_ptr = wdev; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + net->ethtool_ops = &cfgp2p_ethtool_ops; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + + SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy)); + + /* Associate p2p0 network interface with new wdev */ + wdev->netdev = net; + + ret = register_netdev(net); + if (ret) { + CFGP2P_ERR((" register_netdevice failed (%d)\n", ret)); + free_netdev(net); + kfree(wdev); + return -ENODEV; + } + + /* store p2p net ptr for further reference. Note that iflist won't have this + * entry as there corresponding firmware interface is a "Hidden" interface. 
+ */ + cfg->p2p_wdev = wdev; + cfg->p2p_net = net; + + printk("%s: P2P Interface Registered\n", net->name); + + return ret; +} + +s32 +wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg) +{ + + if (!cfg || !cfg->p2p_net) { + CFGP2P_ERR(("Invalid Ptr\n")); + return -EINVAL; + } + + unregister_netdev(cfg->p2p_net); + free_netdev(cfg->p2p_net); + + return 0; +} +static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + + if (skb) + { + CFGP2P_DBG(("(%s) is not used for data operations.Droping the packet.\n", + ndev->name)); + dev_kfree_skb_any(skb); + } + + return 0; +} + +static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd) +{ + int ret = 0; + struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + + /* There is no ifidx corresponding to p2p0 in our firmware. So we should + * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs. + * For Android PRIV CMD handling map it to primary I/F + */ + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(ndev, ifr, cmd); + + } else { + CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n", + __FUNCTION__, cmd)); + return -1; + } + + return ret; +} +#endif + +#if defined(WL_ENABLE_P2P_IF) +static int wl_cfgp2p_if_open(struct net_device *net) +{ + struct wireless_dev *wdev = net->ieee80211_ptr; + + if (!wdev || !wl_cfg80211_is_p2p_active()) + return -EINVAL; + WL_TRACE(("Enter\n")); +#if !defined(WL_IFACE_COMB_NUM_CHANNELS) + /* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now, + * do it here. This will make sure that in concurrent mode, supplicant + * is not dependent on a particular order of interface initialization. + * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N + * -iwlan0. + */ + wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT) + | BIT(NL80211_IFTYPE_P2P_GO)); +#endif /* !WL_IFACE_COMB_NUM_CHANNELS */ + wl_cfg80211_do_driver_init(net); + + return 0; +} + +static int wl_cfgp2p_if_stop(struct net_device *net) +{ + struct wireless_dev *wdev = net->ieee80211_ptr; + + if (!wdev) + return -EINVAL; + + wl_cfg80211_scan_stop(net); + +#if !defined(WL_IFACE_COMB_NUM_CHANNELS) + wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes) + & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO))); +#endif /* !WL_IFACE_COMB_NUM_CHANNELS */ + return 0; +} + +bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops) +{ + return (if_ops == &wl_cfgp2p_if_ops); +} +#endif /* WL_ENABLE_P2P_IF */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +struct wireless_dev * +wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev = NULL; + struct ether_addr primary_mac; + + if (!cfg || !cfg->p2p_supported) + return ERR_PTR(-EINVAL); + + WL_TRACE(("Enter\n")); + + if (cfg->p2p_wdev) { +#ifndef EXPLICIT_DISCIF_CLEANUP + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* EXPLICIT_DISCIF_CLEANUP */ + /* + * This is not expected. This can happen due to + * supplicant crash/unclean de-initialization which + * didn't free the p2p discovery interface. Indicate + * driver hang to user space so that the framework + * can rei-init the Wi-Fi. + */ + CFGP2P_ERR(("p2p_wdev defined already.\n")); + wl_probe_wdev_all(cfg); +#ifdef EXPLICIT_DISCIF_CLEANUP + /* + * CUSTOMER_HW4 design doesn't delete the p2p discovery + * interface on ifconfig wlan0 down context which comes + * without a preceeding NL80211_CMD_DEL_INTERFACE for p2p + * discovery. 
But during supplicant crash the DEL_IFACE + * command will not happen and will cause a left over iface + * even after ifconfig wlan0 down. So delete the iface + * first and then indicate the HANG event + */ + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); +#else + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg)); + return ERR_PTR(-ENODEV); +#endif /* EXPLICIT_DISCIF_CLEANUP */ + } + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return ERR_PTR(-ENOMEM); + } + + memset(&primary_mac, 0, sizeof(primary_mac)); + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + + wdev->wiphy = cfg->wdev->wiphy; + wdev->iftype = NL80211_IFTYPE_P2P_DEVICE; + memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN); + + + /* store p2p wdev ptr for further reference. */ + cfg->p2p_wdev = wdev; + + CFGP2P_ERR(("P2P interface registered\n")); +#ifdef DHD_IFDEBUG + WL_ERR(("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev)); +#endif + return wdev; +} + +int +wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + int ret = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + if (!cfg) + return -EINVAL; + + WL_TRACE(("Enter\n")); + + ret = wl_cfgp2p_set_firm_p2p(cfg); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret)); + goto exit; + } + + ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret)); + goto exit; + } + + p2p_on(cfg) = true; +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti = false; +#endif + + CFGP2P_DBG(("P2P interface started\n")); + +exit: + return ret; +} + +void +wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + int ret = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + if (!cfg) + return; + + CFGP2P_DBG(("Enter\n")); + + ret = wl_cfg80211_scan_stop(wdev); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret)); + } + + if (!cfg->p2p) + return; + + ret = wl_cfgp2p_disable_discovery(cfg); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret)); + } + + p2p_on(cfg) = false; + + CFGP2P_DBG(("Exit. 
P2P interface stopped\n")); + + return; +} + +int +wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg) +{ + bool rollback_lock = false; + + if (!wdev) + return -EINVAL; + + WL_TRACE(("Enter\n")); +#ifdef DHD_IFDEBUG + WL_ERR(("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev)); +#endif + + if (!rtnl_is_locked()) { + rtnl_lock(); + rollback_lock = true; + } + + cfg80211_unregister_wdev(wdev); + + if (rollback_lock) + rtnl_unlock(); + + synchronize_rcu(); + + kfree(wdev); + + if (cfg) + cfg->p2p_wdev = NULL; + + CFGP2P_ERR(("P2P interface unregistered\n")); + + return 0; +} +#endif /* WL_CFG80211_P2P_DEV_IF */ + +void +wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + int status = 0; + + if (!frame || (frame_len < (sizeof(*pact_frm) + WL_P2P_AF_STATUS_OFFSET - 1))) { + return; + } + + if (wl_cfgp2p_is_pub_action(frame, frame_len)) { + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + if (pact_frm->subtype == P2P_PAF_GON_RSP && tx) { + CFGP2P_ACTION(("Check TX P2P Group Owner Negotiation Rsp Frame status\n")); + status = pact_frm->elts[WL_P2P_AF_STATUS_OFFSET]; + if (status) { + cfg->need_wait_afrx = false; + return; + } + } + } + + cfg->need_wait_afrx = true; + return; +} + +int +wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request) +{ + if (request && (request->n_ssids == 1) && + (request->n_channels == 1) && + IS_P2P_SSID(request->ssids[0].ssid, WL_P2P_WILDCARD_SSID_LEN) && + (request->ssids[0].ssid_len > WL_P2P_WILDCARD_SSID_LEN)) { + return true; + } + return false; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h new file mode 100644 index 000000000000..aab1061b43a7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h @@ -0,0 +1,458 @@ +/* + * Linux cfgp2p driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgp2p.h 608203 2015-12-24 05:30:44Z $ + */ +#ifndef _wl_cfgp2p_h_ +#define _wl_cfgp2p_h_ +#include +#include + +struct bcm_cfg80211; +extern u32 wl_dbg_level; + +typedef struct wifi_p2p_ie wifi_wfd_ie_t; +/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not + * confuse this with a bsscfg index. This value is an index into the + * saved_ie[] array of structures which in turn contains a bsscfg index field. 
+ */ +typedef enum { + P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */ + P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */ + P2PAPI_BSSCFG_CONNECTION1, /* maps to driver's P2P connection bsscfg */ + P2PAPI_BSSCFG_CONNECTION2, + P2PAPI_BSSCFG_MAX +} p2p_bsscfg_type_t; + +typedef enum { + P2P_SCAN_PURPOSE_MIN, + P2P_SCAN_SOCIAL_CHANNEL, /* scan for social channel */ + P2P_SCAN_AFX_PEER_NORMAL, /* scan for action frame search */ + P2P_SCAN_AFX_PEER_REDUCED, /* scan for action frame search with short time */ + P2P_SCAN_DURING_CONNECTED, /* scan during connected status */ + P2P_SCAN_CONNECT_TRY, /* scan for connecting */ + P2P_SCAN_NORMAL, /* scan during not-connected status */ + P2P_SCAN_PURPOSE_MAX +} p2p_scan_purpose_t; + +/* vendor ies max buffer length for probe response or beacon */ +#define VNDR_IES_MAX_BUF_LEN 1400 +/* normal vendor ies buffer length */ +#define VNDR_IES_BUF_LEN 512 + +struct p2p_bss { + s32 bssidx; + struct net_device *dev; + void *private_data; + struct ether_addr mac_addr; +}; + +struct p2p_info { + bool on; /* p2p on/off switch */ + bool scan; + int16 search_state; + s8 vir_ifname[IFNAMSIZ]; + unsigned long status; + struct p2p_bss bss[P2PAPI_BSSCFG_MAX]; + struct timer_list listen_timer; + struct bcm_cfg80211 *bcm_cfg; + wl_p2p_sched_t noa; + wl_p2p_ops_t ops; + wlc_ssid_t ssid; + s8 p2p_go_count; +}; + +#define MAX_VNDR_IE_NUMBER 10 + +struct parsed_vndr_ie_info { + char *ie_ptr; + u32 ie_len; /* total length including id & length field */ + vndr_ie_t vndrie; +}; + +struct parsed_vndr_ies { + u32 count; + struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER]; +}; + +/* dongle status */ +enum wl_cfgp2p_status { + WLP2P_STATUS_DISCOVERY_ON = 0, + WLP2P_STATUS_SEARCH_ENABLED, + WLP2P_STATUS_IF_ADDING, + WLP2P_STATUS_IF_DELETING, + WLP2P_STATUS_IF_CHANGING, + WLP2P_STATUS_IF_CHANGED, + WLP2P_STATUS_LISTEN_EXPIRED, + WLP2P_STATUS_ACTION_TX_COMPLETED, + WLP2P_STATUS_ACTION_TX_NOACK, + WLP2P_STATUS_SCANNING, + WLP2P_STATUS_GO_NEG_PHASE, + WLP2P_STATUS_DISC_IN_PROGRESS +}; + + +#define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev) +#define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx) +#define wl_to_p2p_bss_macaddr(cfg, type) &((cfg)->p2p->bss[type].mac_addr) +#define wl_to_p2p_bss_saved_ie(cfg, type) ((cfg)->p2p->bss[type].saved_ie) +#define wl_to_p2p_bss_private(cfg, type) ((cfg)->p2p->bss[type].private_data) +#define wl_to_p2p_bss(cfg, type) ((cfg)->p2p->bss[type]) +#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \ + test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \ + set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \ + clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 
0 : \ + change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define p2p_on(cfg) ((cfg)->p2p->on) +#define p2p_scan(cfg) ((cfg)->p2p->scan) +#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on) + +/* dword align allocation */ +#define WLC_IOCTL_MAXLEN 8192 + +#ifdef CUSTOMER_HW4_DEBUG +#define CFGP2P_ERROR_TEXT "CFGP2P-INFO2) " +#else +#define CFGP2P_ERROR_TEXT "CFGP2P-ERROR) " +#endif /* CUSTOMER_HW4_DEBUG */ + +#ifdef DHD_LOG_DUMP +#define CFGP2P_ERR(args) \ + do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + dhd_log_dump_print("[%s] %s: ", \ + dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ + } while (0) +#else +#define CFGP2P_ERR(args) \ + do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ + } while (0) +#endif /* DHD_LOG_DUMP */ +#define CFGP2P_INFO(args) \ + do { \ + if (wl_dbg_level & WL_DBG_INFO) { \ + printk(KERN_INFO "CFGP2P-INFO) %s : ", __func__); \ + printk args; \ + } \ + } while (0) +#define CFGP2P_DBG(args) \ + do { \ + if (wl_dbg_level & WL_DBG_DBG) { \ + printk(KERN_DEBUG "CFGP2P-DEBUG) %s :", __func__); \ + printk args; \ + } \ + } while (0) + +#define CFGP2P_ACTION(args) \ + do { \ + if (wl_dbg_level & WL_DBG_P2P_ACTION) { \ + printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__); \ + printk args; \ + } \ + } while (0) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +#define INIT_TIMER(timer, func, duration, extra_delay) \ + do { \ + timer_setup(timer, func, duration + extra_delay); \ + add_timer(timer); \ + } while (0); +#else +#define INIT_TIMER(timer, func, duration, extra_delay) \ + do { \ + init_timer(timer); \ + timer->function = func; \ + timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \ + timer->data = (unsigned long) cfg; \ + add_timer(timer); \ + } while (0); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF) +#define WL_CFG80211_P2P_DEV_IF + +#ifdef WL_ENABLE_P2P_IF +#undef WL_ENABLE_P2P_IF +#endif + +#ifdef WL_SUPPORT_BACKPORTED_KPATCHES +#undef WL_SUPPORT_BACKPORTED_KPATCHES +#endif +#else +#ifdef WLP2P +#ifndef WL_ENABLE_P2P_IF +/* Enable P2P network Interface if P2P support is enabled */ +#define WL_ENABLE_P2P_IF +#endif /* WL_ENABLE_P2P_IF */ +#endif /* WLP2P */ +#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */ + +#ifndef WL_CFG80211_P2P_DEV_IF +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))) +#error Disable 'WL_ENABLE_P2P_IF', if 'WL_CFG80211_P2P_DEV_IF' is enabled \ + or kernel version is 3.8.0 or above +#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */ + +#if !defined(WLP2P) && (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)) +#error WLP2P not defined +#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +#define bcm_struct_cfgdev struct wireless_dev +#else +#define bcm_struct_cfgdev struct net_device +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#define P2P_ECSA_CNT 50 + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +extern void +wl_cfgp2p_listen_expired(struct timer_list *t); +#else +extern void +wl_cfgp2p_listen_expired(unsigned long data); +#endif + +extern bool +wl_cfgp2p_is_pub_action(void *frame, u32 frame_len); +extern bool +wl_cfgp2p_is_p2p_action(void *frame, u32 
frame_len); +extern bool +wl_cfgp2p_is_gas_action(void *frame, u32 frame_len); +extern bool +wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len); +extern bool +wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len); +extern void +wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel); +extern s32 +wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg); +extern void +wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, + u32 channel, u16 listen_ms, int bssidx); +extern s32 +wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec); +extern s32 +wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac); +extern s32 +wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac); +extern s32 +wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec, s32 conn_idx); + +extern s32 +wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index); + +extern s32 +wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie, + u32 ie_len); +extern s32 +wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans, + u16 *channels, + s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr, + p2p_scan_purpose_t p2p_scan_purpose); + +extern s32 +wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, + s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr); + +extern wpa_ie_fixed_t * +wl_cfgp2p_find_wpaie(u8 *parse, u32 len); + +extern wpa_ie_fixed_t * +wl_cfgp2p_find_wpsie(u8 *parse, u32 len); + +extern wifi_p2p_ie_t * +wl_cfgp2p_find_p2pie(u8 *parse, u32 len); + +extern wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len); +extern s32 +wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, + s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len); +extern s32 +wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx); + +extern struct net_device * +wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx); +extern s32 +wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type); + + +extern s32 +wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +extern s32 +wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms); + +extern s32 +wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable); + +extern s32 +wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); + +extern s32 +wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev, + wl_af_params_t *af_params, s32 bssidx); + +extern void +wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr); + +extern void +wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id); +extern bool +wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx); + +extern s32 +wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up); + + +extern s32 +wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev); + +extern s32 +wl_cfgp2p_down(struct 
bcm_cfg80211 *cfg); + +extern s32 +wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern u8 * +wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id); + +extern u8* +wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib); + +extern u8 * +wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length); + +extern s32 +wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg); + +extern s32 +wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg); + +extern bool +wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops); + +extern u32 +wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd); + +extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg); + +extern +int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg); + +extern +int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg); + +#if defined(WL_CFG80211_P2P_DEV_IF) +extern struct wireless_dev * +wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg); + +extern int +wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev); + +extern void +wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev); + +extern int +wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg); + +#endif /* WL_CFG80211_P2P_DEV_IF */ + +extern void +wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx); + +extern int +wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request); + +/* WiFi Direct */ +#define SOCIAL_CHAN_1 1 +#define SOCIAL_CHAN_2 6 +#define SOCIAL_CHAN_3 11 +#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \ + (channel == SOCIAL_CHAN_2) || \ + (channel == SOCIAL_CHAN_3)) +#define SOCIAL_CHAN_CNT 3 +#define AF_PEER_SEARCH_CNT 2 +#define WL_P2P_WILDCARD_SSID "DIRECT-" +#define WL_P2P_WILDCARD_SSID_LEN 7 +#define WL_P2P_INTERFACE_PREFIX "p2p" +#define WL_P2P_TEMP_CHAN 11 +#define WL_P2P_AF_STATUS_OFFSET 9 + +/* If the provision discovery is for JOIN operations, + * or the device discoverablity frame is destined to GO + * then we need not do an internal scan to find GO. 
+ */ +#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \ + (wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL) + +#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \ + ((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \ + (frame->action == P2PSD_ACTION_ID_GAS_CREQ))) + +#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \ + ((subtype == P2P_PAF_GON_CONF) || \ + (subtype == P2P_PAF_INVITE_RSP) || \ + (subtype == P2P_PAF_PROVDIS_RSP))) +#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3)) +#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \ + (len == WL_P2P_WILDCARD_SSID_LEN)) +#endif /* _wl_cfgp2p_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.c b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c new file mode 100644 index 000000000000..a8176a2a53ba --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c @@ -0,0 +1,1470 @@ +/* + * Linux cfg80211 Vendor Extension Code + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgvendor.c 605796 2015-12-11 13:45:36Z $ + */ + +/* + * New vendor interface additon to nl80211/cfg80211 to allow vendors + * to implement proprietary features over the cfg80211 stack. +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif /* PNO_SUPPORT */ +#ifdef RTT_SUPPORT +#include +#endif /* RTT_SUPPORT */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif +#include + +#if defined(WL_VENDOR_EXT_SUPPORT) +/* + * This API is to be used for asynchronous vendor events. This + * shouldn't be used in response to a vendor command from its + * do_it handler context (instead wl_cfgvendor_send_cmd_reply should + * be used). + */ +int wl_cfgvendor_send_async_event(struct wiphy *wiphy, + struct net_device *dev, int event_id, const void *data, int len) +{ + u16 kflags; + struct sk_buff *skb; + + kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; + + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, len, event_id, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + + cfg80211_vendor_event(skb, kflags); + + return 0; +} + +static int +wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy, + struct net_device *dev, const void *data, int len) +{ + struct sk_buff *skb; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + + return cfg80211_vendor_cmd_reply(skb); +} + +static int +wl_cfgvendor_get_feature_set(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int reply; + + reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg)); + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + &reply, sizeof(int)); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + + return err; +} + +static int +wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct sk_buff *skb; + int *reply; + int num, mem_needed, i; + + reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), &num); + + if (!reply) { + WL_ERR(("Could not get feature list matrix\n")); + err = -EINVAL; + return err; + } + mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * num) + + ATTRIBUTE_U32_LEN; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + err = -ENOMEM; + goto exit; + } + + nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, num); + for (i = 0; i < num; i++) { + nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply[i]); + } + + err = cfg80211_vendor_cmd_reply(skb); + + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + +exit: + kfree(reply); + return err; +} + +static int +wl_cfgvendor_set_pno_mac_oui(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + uint8 pno_random_mac_oui[DOT11_OUI_LEN]; + + type = nla_type(data); + + if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) { + memcpy(pno_random_mac_oui, nla_data(data), DOT11_OUI_LEN); + + err = dhd_dev_pno_set_mac_oui(bcmcfg_to_prmry_ndev(cfg), pno_random_mac_oui); + + if (unlikely(err)) + WL_ERR(("Bad OUI, could not set:%d \n", err)); + + + } else { + err = -1; + } + + return err; +} + +#ifdef CUSTOM_FORCE_NODFS_FLAG +static int +wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + u32 nodfs; + + type = nla_type(data); + if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) { + nodfs = nla_get_u32(data); + err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs); + } else { + err = -1; + } + return err; +} +#endif /* 
CUSTOM_FORCE_NODFS_FLAG */ + +#ifdef GSCAN_SUPPORT +int +wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy, + struct net_device *dev, void *data, int len, wl_vendor_event_t event) +{ + u16 kflags; + const void *ptr; + struct sk_buff *skb; + int malloc_len, total, iter_cnt_to_send, cnt; + gscan_results_cache_t *cache = (gscan_results_cache_t *)data; + total = len/sizeof(wifi_gscan_result_t); + while (total > 0) { + malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD; + if (malloc_len > NLMSG_DEFAULT_SIZE) { + malloc_len = NLMSG_DEFAULT_SIZE; + } + iter_cnt_to_send = + (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t); + total = total - iter_cnt_to_send; + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, malloc_len, event, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + while (cache && iter_cnt_to_send) { + ptr = (const void *) &cache->results[cache->tot_consumed]; + + if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed)) { + cnt = iter_cnt_to_send; + } else { + cnt = (cache->tot_count - cache->tot_consumed); + } + + iter_cnt_to_send -= cnt; + cache->tot_consumed += cnt; + /* Push the data to the skb */ + nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr); + if (cache->tot_consumed == cache->tot_count) { + cache = cache->next; + } + + } + + cfg80211_vendor_event(skb, kflags); + } + + return 0; +} + + +static int +wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + dhd_pno_gscan_capabilities_t *reply = NULL; + uint32 reply_len = 0; + + + reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_CAPABILITIES, NULL, &reply_len); + if (!reply) { + WL_ERR(("Could not get capabilities\n")); + err = -EINVAL; + return err; + } + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + reply, reply_len); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } + + kfree(reply); + return err; +} + +static int +wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0, type, band; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + uint16 *reply = NULL; + uint32 reply_len = 0, num_channels, mem_needed; + struct sk_buff *skb; + + type = nla_type(data); + + if (type == GSCAN_ATTRIBUTE_BAND) { + band = nla_get_u32(data); + } else { + return -EINVAL; + } + + reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len); + + if (!reply) { + WL_ERR(("Could not get channel list\n")); + err = -EINVAL; + return err; + } + num_channels = reply_len/ sizeof(uint32); + mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2); + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + err = -ENOMEM; + goto exit; + } + + nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels); + nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply); + + err = cfg80211_vendor_cmd_reply(skb); + + if 
(unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } +exit: + kfree(reply); + return err; +} + +static int +wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_results_cache_t *results, *iter; + uint32 reply_len, complete = 0, num_results_iter; + int32 mem_needed; + wifi_gscan_result_t *ptr; + uint16 num_scan_ids, num_results; + struct sk_buff *skb; + struct nlattr *scan_hdr; + + dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg)); + dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len); + + if (!results) { + WL_ERR(("No results to send %d\n", err)); + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + results, 0); + + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + return err; + } + num_scan_ids = reply_len & 0xFFFF; + num_results = (reply_len & 0xFFFF0000) >> 16; + mem_needed = (num_results * sizeof(wifi_gscan_result_t)) + + (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) + + VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN; + + if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) { + mem_needed = (int32)NLMSG_DEFAULT_SIZE; + complete = 0; + } else { + complete = 1; + } + + WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed, + (int)NLMSG_DEFAULT_SIZE)); + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + return -ENOMEM; + } + iter = results; + + nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete); + mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD); + while (iter && ((mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) > 0)) { + + scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS); + nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id); + nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag); + + num_results_iter = + (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t); + + if ((iter->tot_count - iter->tot_consumed) < num_results_iter) + num_results_iter = iter->tot_count - iter->tot_consumed; + nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter); + if (num_results_iter) { + ptr = &iter->results[iter->tot_consumed]; + iter->tot_consumed += num_results_iter; + nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS, + num_results_iter * sizeof(wifi_gscan_result_t), ptr); + } + nla_nest_end(skb, scan_hdr); + mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN + + (num_results_iter * sizeof(wifi_gscan_result_t)); + iter = iter->next; + } + + dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg)); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + + return cfg80211_vendor_cmd_reply(skb); +} + +static int +wl_cfgvendor_initiate_gscan(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type, tmp = len; + int run = 0xFF; + int flush = 0; + const struct nlattr *iter; + + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE) + run = nla_get_u32(iter); + else if 
(type == GSCAN_ATTRIBUTE_FLUSH_FEATURE) + flush = nla_get_u32(iter); + } + + if (run != 0xFF) { + err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush); + + if (unlikely(err)) { + WL_ERR(("Could not run gscan:%d \n", err)); + } + return err; + } else { + return -EINVAL; + } + + +} + +static int +wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + bool real_time = FALSE; + + type = nla_type(data); + + if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) { + real_time = nla_get_u32(data); + + err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time); + + if (unlikely(err)) { + WL_ERR(("Could not run gscan:%d \n", err)); + } + + } else { + err = -EINVAL; + } + + return err; +} + +static int +wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_scan_params_t *scan_param; + int j = 0; + int type, tmp, tmp1, tmp2, k = 0; + const struct nlattr *iter, *iter1, *iter2; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL); + if (!scan_param) { + WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n")); + err = -EINVAL; + return err; + + } + + scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC; + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + + if (j >= GSCAN_MAX_CH_BUCKETS) { + break; + } + + switch (type) { + case GSCAN_ATTRIBUTE_BASE_PERIOD: + scan_param->scan_fr = nla_get_u32(iter)/1000; + break; + case GSCAN_ATTRIBUTE_NUM_BUCKETS: + scan_param->nchannel_buckets = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_CH_BUCKET_1: + case GSCAN_ATTRIBUTE_CH_BUCKET_2: + case GSCAN_ATTRIBUTE_CH_BUCKET_3: + case GSCAN_ATTRIBUTE_CH_BUCKET_4: + case GSCAN_ATTRIBUTE_CH_BUCKET_5: + case GSCAN_ATTRIBUTE_CH_BUCKET_6: + case GSCAN_ATTRIBUTE_CH_BUCKET_7: + nla_for_each_nested(iter1, iter, tmp1) { + type = nla_type(iter1); + ch_bucket = + scan_param->channel_bucket; + + switch (type) { + case GSCAN_ATTRIBUTE_BUCKET_ID: + break; + case GSCAN_ATTRIBUTE_BUCKET_PERIOD: + ch_bucket[j].bucket_freq_multiple = + nla_get_u32(iter1)/1000; + break; + case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS: + ch_bucket[j].num_channels = + nla_get_u32(iter1); + break; + case GSCAN_ATTRIBUTE_BUCKET_CHANNELS: + nla_for_each_nested(iter2, iter1, tmp2) { + if (k >= PFN_SWC_RSSI_WINDOW_MAX) + break; + ch_bucket[j].chan_list[k] = + nla_get_u32(iter2); + k++; + } + k = 0; + break; + case GSCAN_ATTRIBUTE_BUCKETS_BAND: + ch_bucket[j].band = (uint16) + nla_get_u32(iter1); + break; + case GSCAN_ATTRIBUTE_REPORT_EVENTS: + ch_bucket[j].report_flag = (uint8) + nla_get_u32(iter1); + break; + default: + WL_ERR(("bucket attribute type error %d\n", + type)); + break; + } + } + j++; + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_SCAN_CFG_ID, scan_param, 0) < 0) { + WL_ERR(("Could not set GSCAN scan cfg\n")); + err = -EINVAL; + } + + kfree(scan_param); + return err; + +} + +static int +wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_hotlist_scan_params_t *hotlist_params; + int tmp, tmp1, tmp2, type, j = 0, dummy; + const struct nlattr *outer, *inner, *iter; + 
uint8 flush = 0; + struct bssid_t *pbssid; + + hotlist_params = (gscan_hotlist_scan_params_t *)kzalloc(len, GFP_KERNEL); + if (!hotlist_params) { + WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len)); + return -ENOMEM; + } + + hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT; + + nla_for_each_attr(iter, data, len, tmp2) { + type = nla_type(iter); + switch (type) { + case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS: + pbssid = hotlist_params->bssid; + nla_for_each_nested(outer, iter, tmp) { + nla_for_each_nested(inner, outer, tmp1) { + type = nla_type(inner); + + switch (type) { + case GSCAN_ATTRIBUTE_BSSID: + memcpy(&(pbssid[j].macaddr), + nla_data(inner), ETHER_ADDR_LEN); + break; + case GSCAN_ATTRIBUTE_RSSI_LOW: + pbssid[j].rssi_reporting_threshold = + (int8) nla_get_u8(inner); + break; + case GSCAN_ATTRIBUTE_RSSI_HIGH: + dummy = (int8) nla_get_u8(inner); + break; + default: + WL_ERR(("ATTR unknown %d\n", + type)); + break; + } + } + j++; + } + hotlist_params->nbssid = j; + break; + case GSCAN_ATTRIBUTE_HOTLIST_FLUSH: + flush = nla_get_u8(iter); + break; + case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE: + hotlist_params->lost_ap_window = nla_get_u32(iter); + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + hotlist_params, flush) < 0) { + WL_ERR(("Could not set GSCAN HOTLIST cfg\n")); + err = -EINVAL; + goto exit; + } +exit: + kfree(hotlist_params); + return err; +} +static int +wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0, tmp, type; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_batch_params_t batch_param; + const struct nlattr *iter; + + batch_param.mscan = batch_param.bestn = 0; + batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + + switch (type) { + case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN: + batch_param.bestn = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE: + batch_param.mscan = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_REPORT_THRESHOLD: + batch_param.buffer_threshold = nla_get_u32(iter); + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_BATCH_SCAN_CFG_ID, + &batch_param, 0) < 0) { + WL_ERR(("Could not set batch cfg\n")); + err = -EINVAL; + return err; + } + + return err; +} + +static int +wl_cfgvendor_significant_change_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_swc_params_t *significant_params; + int tmp, tmp1, tmp2, type, j = 0; + const struct nlattr *outer, *inner, *iter; + uint8 flush = 0; + wl_pfn_significant_bssid_t *bssid; + + significant_params = (gscan_swc_params_t *) kzalloc(len, GFP_KERNEL); + if (!significant_params) { + WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len)); + return -ENOMEM; + } + + nla_for_each_attr(iter, data, len, tmp2) { + type = nla_type(iter); + + switch (type) { + case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH: + flush = nla_get_u8(iter); + break; + case GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE: + significant_params->rssi_window = nla_get_u16(iter); + break; + case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE: + significant_params->lost_ap_window = nla_get_u16(iter); + break; + case 
GSCAN_ATTRIBUTE_MIN_BREACHING: + significant_params->swc_threshold = nla_get_u16(iter); + break; + case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS: + bssid = significant_params->bssid_elem_list; + nla_for_each_nested(outer, iter, tmp) { + nla_for_each_nested(inner, outer, tmp1) { + switch (nla_type(inner)) { + case GSCAN_ATTRIBUTE_BSSID: + memcpy(&(bssid[j].macaddr), + nla_data(inner), + ETHER_ADDR_LEN); + break; + case GSCAN_ATTRIBUTE_RSSI_HIGH: + bssid[j].rssi_high_threshold + = (int8) nla_get_u8(inner); + break; + case GSCAN_ATTRIBUTE_RSSI_LOW: + bssid[j].rssi_low_threshold + = (int8) nla_get_u8(inner); + break; + default: + WL_ERR(("ATTR unknown %d\n", + type)); + break; + } + } + j++; + } + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + significant_params->nbssid = j; + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + significant_params, flush) < 0) { + WL_ERR(("Could not set GSCAN significant cfg\n")); + err = -EINVAL; + goto exit; + } +exit: + kfree(significant_params); + return err; +} +#endif /* GSCAN_SUPPORT */ + +#ifdef RTT_SUPPORT +void +wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data) +{ + struct wireless_dev *wdev = (struct wireless_dev *)ctx; + struct wiphy *wiphy; + struct sk_buff *skb; + uint32 tot_len = NLMSG_DEFAULT_SIZE, entry_len = 0; + gfp_t kflags; + rtt_report_t *rtt_report = NULL; + rtt_result_t *rtt_result = NULL; + struct list_head *rtt_list; + wiphy = wdev->wiphy; + + WL_DBG(("In\n")); + /* Push the data to the skb */ + if (!rtt_data) { + WL_ERR(("rtt_data is NULL\n")); + goto exit; + } + rtt_list = (struct list_head *)rtt_data; + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + goto exit; + } + /* fill in the rtt results on each entry */ + list_for_each_entry(rtt_result, rtt_list, list) { + entry_len = 0; + entry_len = sizeof(rtt_report_t); + rtt_report = kzalloc(entry_len, kflags); + if (!rtt_report) { + WL_ERR(("rtt_report alloc failed")); + kfree_skb(skb); + goto exit; + } + rtt_report->addr = rtt_result->peer_mac; + rtt_report->num_measurement = 1; /* ONE SHOT */ + rtt_report->status = rtt_result->err_code; + rtt_report->type = + (rtt_result->TOF_type == TOF_TYPE_ONE_WAY) ? 
RTT_ONE_WAY: RTT_TWO_WAY; + rtt_report->peer = rtt_result->target_info->peer; + rtt_report->channel = rtt_result->target_info->channel; + rtt_report->rssi = rtt_result->avg_rssi; + /* tx_rate */ + rtt_report->tx_rate = rtt_result->tx_rate; + /* RTT */ + rtt_report->rtt = rtt_result->meanrtt; + rtt_report->rtt_sd = rtt_result->sdrtt/10; + /* convert to centi meter */ + if (rtt_result->distance != 0xffffffff) + rtt_report->distance = (rtt_result->distance >> 2) * 25; + else /* invalid distance */ + rtt_report->distance = -1; + rtt_report->ts = rtt_result->ts; + nla_append(skb, entry_len, rtt_report); + kfree(rtt_report); + } + cfg80211_vendor_event(skb, kflags); +exit: + return; +} + +static int +wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) { + int err = 0, rem, rem1, rem2, type; + rtt_config_params_t rtt_param; + rtt_target_info_t* rtt_target = NULL; + const struct nlattr *iter, *iter1, *iter2; + int8 eabuf[ETHER_ADDR_STR_LEN]; + int8 chanbuf[CHANSPEC_STR_LEN]; + int32 feature_set = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + feature_set = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg)); + + WL_DBG(("In\n")); + err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt); + if (err < 0) { + WL_ERR(("failed to register rtt_noti_callback\n")); + goto exit; + } + memset(&rtt_param, 0, sizeof(rtt_param)); + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case RTT_ATTRIBUTE_TARGET_CNT: + rtt_param.rtt_target_cnt = nla_get_u8(iter); + if (rtt_param.rtt_target_cnt > RTT_MAX_TARGET_CNT) { + WL_ERR(("exceed max target count : %d\n", + rtt_param.rtt_target_cnt)); + err = BCME_RANGE; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_INFO: + rtt_target = rtt_param.target_info; + nla_for_each_nested(iter1, iter, rem1) { + nla_for_each_nested(iter2, iter1, rem2) { + type = nla_type(iter2); + switch (type) { + case RTT_ATTRIBUTE_TARGET_MAC: + memcpy(&rtt_target->addr, nla_data(iter2), + ETHER_ADDR_LEN); + break; + case RTT_ATTRIBUTE_TARGET_TYPE: + rtt_target->type = nla_get_u8(iter2); + if (!(feature_set & WIFI_FEATURE_D2D_RTT)) { + if (rtt_target->type == RTT_TWO_WAY || + rtt_target->type == RTT_INVALID) { + WL_ERR(("doesn't support RTT type" + " : %d\n", + rtt_target->type)); + err = -EINVAL; + goto exit; + } else if (rtt_target->type == RTT_AUTO) { + rtt_target->type = RTT_ONE_WAY; + } + } else if (rtt_target->type == RTT_INVALID) { + WL_ERR(("doesn't support RTT type" + " : %d\n", + rtt_target->type)); + err = -EINVAL; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_PEER: + rtt_target->peer = nla_get_u8(iter2); + if (rtt_target->peer != RTT_PEER_AP) { + WL_ERR(("doesn't support peer type : %d\n", + rtt_target->peer)); + err = -EINVAL; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_CHAN: + memcpy(&rtt_target->channel, nla_data(iter2), + sizeof(rtt_target->channel)); + break; + case RTT_ATTRIBUTE_TARGET_MODE: + rtt_target->continuous = nla_get_u8(iter2); + break; + case RTT_ATTRIBUTE_TARGET_INTERVAL: + rtt_target->interval = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT: + rtt_target->measure_cnt = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_PKT: + rtt_target->ftm_cnt = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_RETRY: + rtt_target->retry_cnt = nla_get_u32(iter2); + } + } + /* convert to chanspec value */ + rtt_target->chanspec = + dhd_rtt_convert_to_chspec(rtt_target->channel); + if 
(rtt_target->chanspec == 0) { + WL_ERR(("Channel is not valid \n")); + goto exit; + } + WL_INFORM(("Target addr %s, Channel : %s for RTT \n", + bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, + eabuf), + wf_chspec_ntoa(rtt_target->chanspec, chanbuf))); + rtt_target++; + } + break; + } + } + WL_DBG(("leave :target_cnt : %d\n", rtt_param.rtt_target_cnt)); + if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) { + WL_ERR(("Could not set RTT configuration\n")); + err = -EINVAL; + } +exit: + return err; +} + +static int +wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int err = 0, rem, type, target_cnt = 0; + int target_cnt_chk = 0; + const struct nlattr *iter; + struct ether_addr *mac_list = NULL, *mac_addr = NULL; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case RTT_ATTRIBUTE_TARGET_CNT: + if (mac_list != NULL) { + WL_ERR(("mac_list is not NULL\n")); + goto exit; + } + target_cnt = nla_get_u8(iter); + mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN, + GFP_KERNEL); + if (mac_list == NULL) { + WL_ERR(("failed to allocate mem for mac list\n")); + goto exit; + } + mac_addr = &mac_list[0]; + break; + case RTT_ATTRIBUTE_TARGET_MAC: + if (mac_addr) { + memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN); + target_cnt_chk++; + if (target_cnt_chk > target_cnt) { + WL_ERR(("over target count\n")); + goto exit; + } + break; + } else { + WL_ERR(("mac_list is NULL\n")); + goto exit; + } + } + } + if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) { + WL_ERR(("Could not cancel RTT configuration\n")); + err = -EINVAL; + goto exit; + } + +exit: + if (mac_list) { + kfree(mac_list); + } + return err; +} +static int +wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + rtt_capabilities_t capability; + + err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability); + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + goto exit; + } + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + &capability, sizeof(capability)); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } +exit: + return err; +} + +#endif /* RTT_SUPPORT */ + +static int +wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int err = 0; + int data_len = 0; + + WL_INFORM(("%s: Enter \n", __func__)); + + bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN); + + if (strncmp((char *)data, BRCM_VENDOR_SCMD_CAPA, strlen(BRCM_VENDOR_SCMD_CAPA)) == 0) { + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cap", NULL, 0, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + data_len = strlen(cfg->ioctl_buf); + cfg->ioctl_buf[data_len] = '\0'; + } + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + cfg->ioctl_buf, data_len+1); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + else + WL_INFORM(("Vendor Command reply sent successfully!\n")); + + return err; +} + +#ifdef LINKSTAT_SUPPORT +#define NUM_RATE 32 +#define NUM_PEER 1 +#define NUM_CHAN 11 +#define HEADER_SIZE sizeof(ver_len) +static 
int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + static char iovar_buf[WLC_IOCTL_MAXLEN]; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int err = 0, i; + wifi_iface_stat *iface; + wifi_radio_stat *radio; + wl_wme_cnt_t *wl_wme_cnt; + wl_cnt_v_le10_mcst_t *macstat_cnt; + wl_cnt_wlc_t *wlc_cnt; + scb_val_t scbval; + char *output; + + WL_INFORM(("%s: Enter \n", __func__)); + RETURN_EIO_IF_NOT_UP(cfg); + + bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN); + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + + output = cfg->ioctl_buf; + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (err != BCME_OK && err != BCME_UNSUPPORTED) { + WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat))); + return err; + } + radio = (wifi_radio_stat *)iovar_buf; + radio->num_channels = NUM_CHAN; + memcpy(output, iovar_buf+HEADER_SIZE, sizeof(wifi_radio_stat)-HEADER_SIZE); + + output += (sizeof(wifi_radio_stat) - HEADER_SIZE); + output += (NUM_CHAN*sizeof(wifi_channel_stat)); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf; + iface = (wifi_iface_stat *)output; + + iface->ac[WIFI_AC_VO].ac = WIFI_AC_VO; + iface->ac[WIFI_AC_VO].tx_mpdu = wl_wme_cnt->tx[AC_VO].packets; + iface->ac[WIFI_AC_VO].rx_mpdu = wl_wme_cnt->rx[AC_VO].packets; + iface->ac[WIFI_AC_VO].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VO].packets; + + iface->ac[WIFI_AC_VI].ac = WIFI_AC_VI; + iface->ac[WIFI_AC_VI].tx_mpdu = wl_wme_cnt->tx[AC_VI].packets; + iface->ac[WIFI_AC_VI].rx_mpdu = wl_wme_cnt->rx[AC_VI].packets; + iface->ac[WIFI_AC_VI].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VI].packets; + + iface->ac[WIFI_AC_BE].ac = WIFI_AC_BE; + iface->ac[WIFI_AC_BE].tx_mpdu = wl_wme_cnt->tx[AC_BE].packets; + iface->ac[WIFI_AC_BE].rx_mpdu = wl_wme_cnt->rx[AC_BE].packets; + iface->ac[WIFI_AC_BE].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BE].packets; + + iface->ac[WIFI_AC_BK].ac = WIFI_AC_BK; + iface->ac[WIFI_AC_BK].tx_mpdu = wl_wme_cnt->tx[AC_BK].packets; + iface->ac[WIFI_AC_BK].rx_mpdu = wl_wme_cnt->rx[AC_BK].packets; + iface->ac[WIFI_AC_BK].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BK].packets; + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_wlc_t))); + return err; + } + + /* Translate traditional (ver <= 10) counters struct to new xtlv type struct */ + err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); + if (err != BCME_OK) { + WL_ERR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", __FUNCTION__, err)); + return err; + } + + if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { + WL_ERR(("%s wlc_cnt NULL!\n", __FUNCTION__)); + return BCME_ERROR; + } + + iface->ac[WIFI_AC_BE].retries = wlc_cnt->txretry; + + if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data, + ((wl_cnt_info_t *)iovar_buf)->datalen, + WL_CNT_XTLV_CNTV_LE10_UCODE, NULL, + BCM_XTLV_OPTION_ALIGN32)) == NULL) { + macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data, + ((wl_cnt_info_t *)iovar_buf)->datalen, + WL_CNT_XTLV_LT40_UCODE_V1, NULL, + BCM_XTLV_OPTION_ALIGN32); + } + + if (macstat_cnt == NULL) { + printf("wlmTxGetAckedPackets: 
macstat_cnt NULL!\n"); + return FALSE; + } + + iface->beacon_rx = macstat_cnt->rxbeaconmbss; + + err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &scbval); + if (unlikely(err)) { + WL_ERR(("get_rssi error (%d)\n", err)); + return err; + } + iface->rssi_mgmt = scbval.val; + + iface->num_peers = NUM_PEER; + iface->peer_info->num_rate = NUM_RATE; + + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + output = (char *)iface + sizeof(wifi_iface_stat) + NUM_PEER*sizeof(wifi_peer_info); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (err != BCME_OK && err != BCME_UNSUPPORTED) { + WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat))); + return err; + } + for (i = 0; i < NUM_RATE; i++) + memcpy(output, iovar_buf+HEADER_SIZE+i*sizeof(wifi_rate_stat), + sizeof(wifi_rate_stat)-HEADER_SIZE); + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + cfg->ioctl_buf, + sizeof(wifi_radio_stat)-HEADER_SIZE + + NUM_CHAN*sizeof(wifi_channel_stat) + + sizeof(wifi_iface_stat)+NUM_PEER*sizeof(wifi_peer_info) + + NUM_RATE*(sizeof(wifi_rate_stat)-HEADER_SIZE)); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + + return err; +} +#endif /* LINKSTAT_SUPPORT */ + +static const struct wiphy_vendor_command wl_vendor_cmds [] = { + { + { + .vendor_id = OUI_BRCM, + .subcmd = BRCM_VENDOR_SCMD_BCM_STR + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_priv_bcm_handler + }, +#ifdef GSCAN_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_CAPABILITIES + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_capabilities + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_scan_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_batch_scan_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_initiate_gscan + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_enable_full_scan_result + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_HOTLIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_hotlist_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_significant_change_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_batch_results + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_channel_list + }, +#endif /* GSCAN_SUPPORT */ +#ifdef RTT_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_SET_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + 
.doit = wl_cfgvendor_rtt_set_config + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_CANCEL_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_cancel_config + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_GETCAPABILITY + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_get_capability + }, +#endif /* RTT_SUPPORT */ + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_get_feature_set + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_get_feature_set_matrix + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_PNO_RANDOM_MAC_OUI + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_pno_mac_oui + }, +#ifdef CUSTOM_FORCE_NODFS_FLAG + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_NODFS_CHANNELS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_nodfs_flag + + }, +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef LINKSTAT_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = LSTATS_SUBCMD_GET_INFO + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_lstats_get_info + }, +#endif /* LINKSTAT_SUPPORT */ +}; + +static const struct nl80211_vendor_cmd_info wl_vendor_events [] = { + { OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC }, + { OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR }, +#ifdef GSCAN_SUPPORT + { OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT }, + { OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT }, +#endif /* GSCAN_SUPPORT */ +#ifdef RTT_SUPPORT + { OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT }, +#endif /* RTT_SUPPORT */ +#ifdef GSCAN_SUPPORT + { OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT }, +#endif /* GSCAN_SUPPORT */ + { OUI_BRCM, BRCM_VENDOR_EVENT_IDSUP_STATUS } +}; + +int wl_cfgvendor_attach(struct wiphy *wiphy) +{ + + WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n", + NL80211_CMD_VENDOR)); + + wiphy->vendor_commands = wl_vendor_cmds; + wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds); + wiphy->vendor_events = wl_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(wl_vendor_events); + + return 0; +} + +int wl_cfgvendor_detach(struct wiphy *wiphy) +{ + WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n")); + + wiphy->vendor_commands = NULL; + wiphy->vendor_events = NULL; + wiphy->n_vendor_commands = 0; + wiphy->n_vendor_events = 0; + + return 0; +} +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h new file mode 100644 index 000000000000..0d9b4842931d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h @@ -0,0 +1,267 @@ +/* + * Linux cfg80211 Vendor Extension Code + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgvendor.h 605796 2015-12-11 13:45:36Z $ + */ + + +#ifndef _wl_cfgvendor_h_ +#define _wl_cfgvendor_h_ + +#define OUI_BRCM 0x001018 +#define OUI_GOOGLE 0x001A11 +#define BRCM_VENDOR_SUBCMD_PRIV_STR 1 +#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4) +#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN +#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN +#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN) + +#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN +#define SCAN_INDEX_HDR_LEN (NLA_HDRLEN) +#define SCAN_ID_HDR_LEN ATTRIBUTE_U32_LEN +#define SCAN_FLAGS_HDR_LEN ATTRIBUTE_U32_LEN +#define GSCAN_NUM_RESULTS_HDR_LEN ATTRIBUTE_U32_LEN +#define GSCAN_RESULTS_HDR_LEN (NLA_HDRLEN) +#define GSCAN_BATCH_RESULT_HDR_LEN (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \ + SCAN_FLAGS_HDR_LEN + \ + GSCAN_NUM_RESULTS_HDR_LEN + \ + GSCAN_RESULTS_HDR_LEN) + +#define VENDOR_REPLY_OVERHEAD (VENDOR_ID_OVERHEAD + \ + VENDOR_SUBCMD_OVERHEAD + \ + VENDOR_DATA_OVERHEAD) + +#define GSCAN_ATTR_SET1 10 +#define GSCAN_ATTR_SET2 20 +#define GSCAN_ATTR_SET3 30 +#define GSCAN_ATTR_SET4 40 +#define GSCAN_ATTR_SET5 50 +#define GSCAN_ATTR_SET6 60 + +typedef enum { + /* don't use 0 as a valid subcommand */ + VENDOR_NL80211_SUBCMD_UNSPECIFIED, + + /* define all vendor startup commands between 0x0 and 0x0FFF */ + VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001, + VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF, + + /* define all GScan related commands between 0x1000 and 0x10FF */ + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000, + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF, + + /* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF, + + /* define all RTT related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF, + + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200, + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF, + + ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300, + ANDROID_NL80211_SUBCMD_TDLS_RANGE_END = 0x13FF, + /* This is reserved for future usage */ + +} ANDROID_VENDOR_SUB_COMMAND; + +enum andr_vendor_subcmd { + GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START, + GSCAN_SUBCMD_SET_CONFIG, + GSCAN_SUBCMD_SET_SCAN_CONFIG, + GSCAN_SUBCMD_ENABLE_GSCAN, + GSCAN_SUBCMD_GET_SCAN_RESULTS, + GSCAN_SUBCMD_SCAN_RESULTS, + GSCAN_SUBCMD_SET_HOTLIST, + GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG, + GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS, + GSCAN_SUBCMD_GET_CHANNEL_LIST, + /* ANDR_WIFI_XXX although not related to gscan are defined here */ + ANDR_WIFI_SUBCMD_GET_FEATURE_SET, + 
ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX, + ANDR_WIFI_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_NODFS_CHANNELS, + RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START, + RTT_SUBCMD_CANCEL_CONFIG, + RTT_SUBCMD_GETCAPABILITY, + + LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START, + /* Add more sub commands here */ + VENDOR_SUBCMD_MAX +}; + +enum gscan_attributes { + GSCAN_ATTRIBUTE_NUM_BUCKETS = GSCAN_ATTR_SET1, + GSCAN_ATTRIBUTE_BASE_PERIOD, + GSCAN_ATTRIBUTE_BUCKETS_BAND, + GSCAN_ATTRIBUTE_BUCKET_ID, + GSCAN_ATTRIBUTE_BUCKET_PERIOD, + GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS, + GSCAN_ATTRIBUTE_BUCKET_CHANNELS, + GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN, + GSCAN_ATTRIBUTE_REPORT_THRESHOLD, + GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE, + GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND, + + GSCAN_ATTRIBUTE_ENABLE_FEATURE = GSCAN_ATTR_SET2, + GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, + GSCAN_ATTRIBUTE_FLUSH_FEATURE, + GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS, + GSCAN_ATTRIBUTE_REPORT_EVENTS, + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_NUM_OF_RESULTS = GSCAN_ATTR_SET3, + GSCAN_ATTRIBUTE_FLUSH_RESULTS, + GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */ + GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */ + GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */ + GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */ + GSCAN_ATTRIBUTE_NUM_CHANNELS, + GSCAN_ATTRIBUTE_CHANNEL_LIST, + + /* remaining reserved for additional attributes */ + + GSCAN_ATTRIBUTE_SSID = GSCAN_ATTR_SET4, + GSCAN_ATTRIBUTE_BSSID, + GSCAN_ATTRIBUTE_CHANNEL, + GSCAN_ATTRIBUTE_RSSI, + GSCAN_ATTRIBUTE_TIMESTAMP, + GSCAN_ATTRIBUTE_RTT, + GSCAN_ATTRIBUTE_RTTSD, + + /* remaining reserved for additional attributes */ + + GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = GSCAN_ATTR_SET5, + GSCAN_ATTRIBUTE_RSSI_LOW, + GSCAN_ATTRIBUTE_RSSI_HIGH, + GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM, + GSCAN_ATTRIBUTE_HOTLIST_FLUSH, + + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6, + GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE, + GSCAN_ATTRIBUTE_MIN_BREACHING, + GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS, + GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH, + GSCAN_ATTRIBUTE_MAX +}; + +enum gscan_bucket_attributes { + GSCAN_ATTRIBUTE_CH_BUCKET_1, + GSCAN_ATTRIBUTE_CH_BUCKET_2, + GSCAN_ATTRIBUTE_CH_BUCKET_3, + GSCAN_ATTRIBUTE_CH_BUCKET_4, + GSCAN_ATTRIBUTE_CH_BUCKET_5, + GSCAN_ATTRIBUTE_CH_BUCKET_6, + GSCAN_ATTRIBUTE_CH_BUCKET_7 +}; + +enum gscan_ch_attributes { + GSCAN_ATTRIBUTE_CH_ID_1, + GSCAN_ATTRIBUTE_CH_ID_2, + GSCAN_ATTRIBUTE_CH_ID_3, + GSCAN_ATTRIBUTE_CH_ID_4, + GSCAN_ATTRIBUTE_CH_ID_5, + GSCAN_ATTRIBUTE_CH_ID_6, + GSCAN_ATTRIBUTE_CH_ID_7 +}; + +enum rtt_attributes { + RTT_ATTRIBUTE_TARGET_CNT, + RTT_ATTRIBUTE_TARGET_INFO, + RTT_ATTRIBUTE_TARGET_MAC, + RTT_ATTRIBUTE_TARGET_TYPE, + RTT_ATTRIBUTE_TARGET_PEER, + RTT_ATTRIBUTE_TARGET_CHAN, + RTT_ATTRIBUTE_TARGET_MODE, + RTT_ATTRIBUTE_TARGET_INTERVAL, + RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT, + RTT_ATTRIBUTE_TARGET_NUM_PKT, + RTT_ATTRIBUTE_TARGET_NUM_RETRY +}; + +typedef enum wl_vendor_event { + BRCM_VENDOR_EVENT_UNSPEC, + BRCM_VENDOR_EVENT_PRIV_STR, + GOOGLE_GSCAN_SIGNIFICANT_EVENT, + GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT, + GOOGLE_GSCAN_BATCH_SCAN_EVENT, + GOOGLE_SCAN_FULL_RESULTS_EVENT, + GOOGLE_RTT_COMPLETE_EVENT, + GOOGLE_SCAN_COMPLETE_EVENT, + GOOGLE_GSCAN_GEOFENCE_LOST_EVENT, + BRCM_VENDOR_EVENT_IDSUP_STATUS +} wl_vendor_event_t; + +enum andr_wifi_attr { + ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, + 
ANDR_WIFI_ATTRIBUTE_FEATURE_SET, + ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_ATTRIBUTE_NODFS_SET +}; + +typedef enum wl_vendor_gscan_attribute { + ATTR_START_GSCAN, + ATTR_STOP_GSCAN, + ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */ + ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */ + ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */ + ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */ + ATTR_GET_GSCAN_CAPABILITIES_ID, + /* Add more sub commands here */ + ATTR_GSCAN_MAX +} wl_vendor_gscan_attribute_t; + +typedef enum gscan_batch_attribute { + ATTR_GSCAN_BATCH_BESTN, + ATTR_GSCAN_BATCH_MSCAN, + ATTR_GSCAN_BATCH_BUFFER_THRESHOLD +} gscan_batch_attribute_t; + +typedef enum gscan_geofence_attribute { + ATTR_GSCAN_NUM_HOTLIST_BSSID, + ATTR_GSCAN_HOTLIST_BSSID +} gscan_geofence_attribute_t; + +typedef enum gscan_complete_event { + WIFI_SCAN_BUFFER_FULL, + WIFI_SCAN_COMPLETE +} gscan_complete_event_t; + +#if defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT) +extern int wl_cfgvendor_attach(struct wiphy *wiphy); +extern int wl_cfgvendor_detach(struct wiphy *wiphy); +extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy, + struct net_device *dev, int event_id, const void *data, int len); +extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy, + struct net_device *dev, void *data, int len, wl_vendor_event_t event); +#else +static INLINE int cfgvendor_attach(struct wiphy *wiphy) { return 0; } +static INLINE int cfgvendor_detach(struct wiphy *wiphy) { return 0; } +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + +#endif /* _wl_cfgvendor_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h new file mode 100644 index 000000000000..291911611daa --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_dbg.h @@ -0,0 +1,211 @@ +/* + * Minimal debug/trace/assert driver definitions for + * Broadcom 802.11 Networking Adapter. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_dbg.h 519338 2014-12-05 21:23:30Z $ + */ + + +#ifndef _wl_dbg_h_ +#define _wl_dbg_h_ + +/* wl_msg_level is a bit vector with defs in wlioctl.h */ +extern uint32 wl_msg_level; +extern uint32 wl_msg_level2; + +#define WL_TIMESTAMP() + +#define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0) + +#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN) +#define _WL_SRSCAN(fmt, ...) 
EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__) +#define WL_SRSCAN(args) _WL_SRSCAN args +#else +#define WL_SRSCAN(args) +#endif + +#if defined(BCMCONDITIONAL_LOGGING) + +/* Ideally this should be some include file that vendors can include to conditionalize logging */ + +/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when + * BCMDBG is defined. + */ +#define DBGONLY(x) + +/* To disable a message completely ... until you need it again */ +#define WL_NONE(args) +#define WL_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args);} while (0) +#define WL_TRACE(args) +#define WL_PRHDRS_MSG(args) +#define WL_PRHDRS(i, p, f, t, r, l) +#define WL_PRPKT(m, b, n) +#define WL_INFORM(args) +#define WL_TMP(args) +#define WL_OID(args) +#define WL_RATE(args) do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0) +#define WL_ASSOC(args) do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args);} while (0) +#define WL_PRUSR(m, b, n) +#define WL_PS(args) do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0) + +#define WL_PORT(args) +#define WL_DUAL(args) +#define WL_REGULATORY(args) do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args);} while (0) + +#define WL_MPC(args) +#define WL_APSTA(args) +#define WL_APSTA_BCN(args) +#define WL_APSTA_TX(args) +#define WL_APSTA_TSF(args) +#define WL_APSTA_BSSID(args) +#define WL_BA(args) +#define WL_MBSS(args) +#define WL_PROTO(args) + +#define WL_CAC(args) do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0) +#define WL_AMSDU(args) +#define WL_AMPDU(args) +#define WL_FFPLD(args) +#define WL_MCHAN(args) + +#define WL_DFS(args) +#define WL_WOWL(args) +#define WL_DPT(args) +#define WL_ASSOC_OR_DPT(args) +#define WL_SCAN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0) +#define WL_COEX(args) +#define WL_RTDC(w, s, i, j) +#define WL_RTDC2(w, s, i, j) +#define WL_CHANINT(args) +#define WL_BTA(args) +#define WL_P2P(args) +#define WL_ITFR(args) +#define WL_TDLS(args) +#define WL_MCNX(args) +#define WL_PROT(args) +#define WL_PSTA(args) +#define WL_WFDS(m, b, n) +#define WL_TRF_MGMT(args) +#define WL_L2FILTER(args) +#define WL_MQ(args) +#define WL_TXBF(args) +#define WL_P2PO(args) +#define WL_ROAM(args) +#define WL_WNM(args) + + +#define WL_AMPDU_UPDN(args) +#define WL_AMPDU_RX(args) +#define WL_AMPDU_ERR(args) +#define WL_AMPDU_TX(args) +#define WL_AMPDU_CTL(args) +#define WL_AMPDU_HW(args) +#define WL_AMPDU_HWTXS(args) +#define WL_AMPDU_HWDBG(args) +#define WL_AMPDU_STAT(args) +#define WL_AMPDU_ERR_ON() 0 +#define WL_AMPDU_HW_ON() 0 +#define WL_AMPDU_HWTXS_ON() 0 + +#define WL_APSTA_UPDN(args) +#define WL_APSTA_RX(args) +#define WL_WSEC(args) +#define WL_WSEC_DUMP(args) +#define WL_PCIE(args) +#define WL_TSLOG(w, s, i, j) +#define WL_FBT(args) + +#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL) +#define WL_TRACE_ON() 0 +#define WL_PRHDRS_ON() 0 +#define WL_PRPKT_ON() 0 +#define WL_INFORM_ON() 0 +#define WL_TMP_ON() 0 +#define WL_OID_ON() 0 +#define WL_RATE_ON() (wl_msg_level & WL_RATE_VAL) +#define WL_ASSOC_ON() (wl_msg_level & WL_ASSOC_VAL) +#define WL_PRUSR_ON() 0 +#define WL_PS_ON() (wl_msg_level & WL_PS_VAL) +#define WL_PORT_ON() 0 +#define WL_WSEC_ON() 0 +#define WL_WSEC_DUMP_ON() 0 +#define WL_MPC_ON() 0 +#define WL_REGULATORY_ON() (wl_msg_level & WL_REGULATORY_VAL) +#define WL_APSTA_ON() 0 +#define WL_DFS_ON() 0 +#define WL_MBSS_ON() 0 +#define WL_CAC_ON() (wl_msg_level & WL_CAC_VAL) +#define WL_AMPDU_ON() 0 +#define WL_DPT_ON() 0 +#define WL_WOWL_ON() 0 +#define WL_SCAN_ON() 
(wl_msg_level2 & WL_SCAN_VAL) +#define WL_BTA_ON() 0 +#define WL_P2P_ON() 0 +#define WL_ITFR_ON() 0 +#define WL_MCHAN_ON() 0 +#define WL_TDLS_ON() 0 +#define WL_MCNX_ON() 0 +#define WL_PROT_ON() 0 +#define WL_PSTA_ON() 0 +#define WL_TRF_MGMT_ON() 0 +#define WL_LPC_ON() 0 +#define WL_L2FILTER_ON() 0 +#define WL_TXBF_ON() 0 +#define WL_P2PO_ON() 0 +#define WL_TSLOG_ON() 0 +#define WL_WNM_ON() 0 +#define WL_PCIE_ON() 0 + +#else /* !BCMDBG */ + +/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when + * BCMDBG is defined. + */ +#define DBGONLY(x) + +/* To disable a message completely ... until you need it again */ +#define WL_NONE(args) + +#define WL_ERROR(args) +#define WL_TRACE(args) +#define WL_APSTA_UPDN(args) +#define WL_APSTA_RX(args) +#ifdef WLMSG_WSEC +#define WL_WSEC(args) WL_PRINT(args) +#define WL_WSEC_DUMP(args) WL_PRINT(args) +#else +#define WL_WSEC(args) +#define WL_WSEC_DUMP(args) +#endif +#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0) +#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL) +#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0) +#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL) +#endif + +extern uint32 wl_msg_level; +extern uint32 wl_msg_level2; +#endif /* _wl_dbg_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c new file mode 100644 index 000000000000..92d9aa1614a2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_iw.c @@ -0,0 +1,3811 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_iw.c 591286 2015-10-07 11:59:26Z $ + */ + +#if defined(USE_IW) +#define LINUX_PORT + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +typedef const struct si_pub si_t; + +#include +#include + + +/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */ +#ifndef IW_AUTH_KEY_MGMT_FT_802_1X +#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04 +#endif + +#ifndef IW_AUTH_KEY_MGMT_FT_PSK +#define IW_AUTH_KEY_MGMT_FT_PSK 0x08 +#endif + +#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE +#define IW_ENC_CAPA_FW_ROAM_ENABLE 0x00000020 +#endif + + +/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest + * version 22. 
+ */ +#ifndef IW_ENCODE_ALG_PMK +#define IW_ENCODE_ALG_PMK 4 +#endif +#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE +#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 +#endif +/* End FC9. */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include +#endif +#if defined(SOFTAP) +struct net_device *ap_net_dev = NULL; +tsk_ctl_t ap_eth_ctl; /* apsta AP netdev waiter thread */ +#endif /* SOFTAP */ + +extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status, + uint32 reason, char* stringBuf, uint buflen); + +uint wl_msg_level = WL_ERROR_VAL; + +#define MAX_WLIW_IOCTL_LEN 1024 + +/* IOCTL swapping mode for Big Endian host with Little Endian dongle. Default to off */ +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +extern int dhd_wait_pend8021x(struct net_device *dev); + +#if WIRELESS_EXT < 19 +#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) +#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) +#endif /* WIRELESS_EXT < 19 */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define DAEMONIZE(a) do { \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); \ + } while (0) +#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#define ISCAN_STATE_IDLE 0 +#define ISCAN_STATE_SCANING 1 + +/* the buf lengh can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */ +#define WLC_IW_ISCAN_MAXLEN 2048 +typedef struct iscan_buf { + struct iscan_buf * next; + char iscan_buf[WLC_IW_ISCAN_MAXLEN]; +} iscan_buf_t; + +typedef struct iscan_info { + struct net_device *dev; + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + int iscan_state; + iscan_buf_t * list_hdr; + iscan_buf_t * list_cur; + + /* Thread to work on iscan */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + struct task_struct *kthread; +#endif + long sysioc_pid; + struct semaphore sysioc_sem; + struct completion sysioc_exited; + + + char ioctlbuf[WLC_IOCTL_SMLEN]; +} iscan_info_t; +iscan_info_t *g_iscan = NULL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_iw_timerfunc(struct timer_list *t); +#else +static void wl_iw_timerfunc(ulong data); +#endif +static void wl_iw_set_event_mask(struct net_device *dev); +static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); + +/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */ +typedef struct priv_link { + wl_iw_t *wliw; +} priv_link_t; + +/* dev to priv_link */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv) +#else +#define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev) +#endif + +/* dev to wl_iw_t */ +#define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw) + +static void swap_key_from_BE( + wl_wsec_key_t *key +) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + 
key->iv_initialized = htod32(key->iv_initialized); +} + +static void swap_key_to_BE( + wl_wsec_key_t *key +) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + +static int +dev_wlc_ioctl( + struct net_device *dev, + int cmd, + void *arg, + int len +) +{ + struct ifreq ifr; + wl_ioctl_t ioc; + mm_segment_t fs; + int ret; + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + + strncpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name)); + ifr.ifr_name[sizeof(ifr.ifr_name) - 1] = '\0'; + ifr.ifr_data = (caddr_t) &ioc; + + fs = get_fs(); + set_fs(get_ds()); +#if defined(WL_USE_NETDEV_OPS) + ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#else + ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#endif + set_fs(fs); + + return ret; +} + +/* +set named driver variable to int value and return error indication +calling example: dev_wlc_intvar_set(dev, "arate", rate) +*/ + +static int +dev_wlc_intvar_set( + struct net_device *dev, + char *name, + int val) +{ + char buf[WLC_IOCTL_SMLEN]; + uint len; + + val = htod32(val); + len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf)); + ASSERT(len); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); +} + +static int +dev_iw_iovar_setbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + BCM_REFERENCE(iolen); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen)); +} + +static int +dev_iw_iovar_getbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + BCM_REFERENCE(iolen); + + return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); +} + +#if WIRELESS_EXT > 17 +static int +dev_wlc_bufvar_set( + struct net_device *dev, + char *name, + char *buf, int len) +{ + char *ioctlbuf; + uint buflen; + int error; + + ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); + if (!ioctlbuf) + return -ENOMEM; + + buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN); + ASSERT(buflen); + error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen); + + kfree(ioctlbuf); + return error; +} +#endif /* WIRELESS_EXT > 17 */ + +/* +get named driver variable to int value and return error indication +calling example: dev_wlc_bufvar_get(dev, "arate", &rate) +*/ + +static int +dev_wlc_bufvar_get( + struct net_device *dev, + char *name, + char *buf, int buflen) +{ + char *ioctlbuf; + int error; + + uint len; + + ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); + if (!ioctlbuf) + return -ENOMEM; + len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN); + ASSERT(len); + BCM_REFERENCE(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN); + if (!error) + bcopy(ioctlbuf, buf, buflen); + + kfree(ioctlbuf); + return (error); +} + +/* +get named driver variable to int value and return error indication +calling example: dev_wlc_intvar_get(dev, "arate", &rate) +*/ + +static int +dev_wlc_intvar_get( + struct net_device *dev, + char *name, + int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + uint len; + uint data_null; 
+ + len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); + + *retval = dtoh32(var.val); + + return (error); +} + +/* Maintain backward compatibility */ +#if WIRELESS_EXT < 13 +struct iw_request_info +{ + __u16 cmd; /* Wireless Extension command */ + __u16 flags; /* More to come ;-) */ +}; + +typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info, + void *wrqu, char *extra); +#endif /* WIRELESS_EXT < 13 */ + +#if WIRELESS_EXT > 12 +static int +wl_iw_set_leddc( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int dc = *(int *)extra; + int error; + + error = dev_wlc_intvar_set(dev, "leddc", dc); + return error; +} + +static int +wl_iw_set_vlanmode( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int mode = *(int *)extra; + int error; + + mode = htod32(mode); + error = dev_wlc_intvar_set(dev, "vlan_mode", mode); + return error; +} + +static int +wl_iw_set_pm( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int pm = *(int *)extra; + int error; + + pm = htod32(pm); + error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); + return error; +} + +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ +#endif /* WIRELESS_EXT > 12 */ + +int +wl_iw_send_priv_event( + struct net_device *dev, + char *flag +) +{ + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd; + + cmd = IWEVCUSTOM; + memset(&wrqu, 0, sizeof(wrqu)); + if (strlen(flag) > sizeof(extra)) + return -1; + + strncpy(extra, flag, sizeof(extra)); + extra[sizeof(extra) - 1] = '\0'; + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); + + return 0; +} + +static int +wl_iw_config_commit( + struct net_device *dev, + struct iw_request_info *info, + void *zwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + struct sockaddr bssid; + + WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) + return error; + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + if (!ssid.SSID_len) + return 0; + + bzero(&bssid, sizeof(struct sockaddr)); + if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) { + WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error)); + return error; + } + + return 0; +} + +static int +wl_iw_get_name( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *cwrq, + char *extra +) +{ + int phytype, err; + uint band[3]; + char cap[5]; + + WL_TRACE(("%s: SIOCGIWNAME\n", dev->name)); + + cap[0] = 0; + if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0) + goto done; + if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0) + goto done; + + band[0] = dtoh32(band[0]); + switch (phytype) { + case WLC_PHY_TYPE_A: + strncpy(cap, "a", sizeof(cap)); + break; + case WLC_PHY_TYPE_B: + strncpy(cap, "b", sizeof(cap)); + break; + case WLC_PHY_TYPE_G: + if (band[0] >= 2) + strncpy(cap, "abg", sizeof(cap)); + else + strncpy(cap, "bg", sizeof(cap)); + break; + case WLC_PHY_TYPE_N: + if (band[0] >= 2) + strncpy(cap, "abgn", sizeof(cap)); + else + strncpy(cap, "bgn", sizeof(cap)); + break; + } +done: + (void)snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap); + + return 0; +} + +static int +wl_iw_set_freq( + 
struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + int error, chan; + uint sf = 0; + + WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name)); + + /* Setting by channel number */ + if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) { + chan = fwrq->m; + } + + /* Setting by frequency */ + else { + /* Convert to MHz as best we can */ + if (fwrq->e >= 6) { + fwrq->e -= 6; + while (fwrq->e--) + fwrq->m *= 10; + } else if (fwrq->e < 6) { + while (fwrq->e++ < 6) + fwrq->m /= 10; + } + /* handle 4.9GHz frequencies as Japan 4 GHz based channelization */ + if (fwrq->m > 4000 && fwrq->m < 5000) + sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */ + + chan = wf_mhz2channel(fwrq->m, sf); + } + chan = htod32(chan); + if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) + return error; + + /* -EINPROGRESS: Call commit handler */ + return -EINPROGRESS; +} + +static int +wl_iw_get_freq( + struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + channel_info_t ci; + int error; + + WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) + return error; + + /* Return radio channel in channel form */ + fwrq->m = dtoh32(ci.hw_channel); + fwrq->e = dtoh32(0); + return 0; +} + +static int +wl_iw_set_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int infra = 0, ap = 0, error = 0; + + WL_TRACE(("%s: SIOCSIWMODE\n", dev->name)); + + switch (*uwrq) { + case IW_MODE_MASTER: + infra = ap = 1; + break; + case IW_MODE_ADHOC: + case IW_MODE_AUTO: + break; + case IW_MODE_INFRA: + infra = 1; + break; + default: + return -EINVAL; + } + infra = htod32(infra); + ap = htod32(ap); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap)))) + return error; + + /* -EINPROGRESS: Call commit handler */ + return -EINPROGRESS; +} + +static int +wl_iw_get_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int error, infra = 0, ap = 0; + + WL_TRACE(("%s: SIOCGIWMODE\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap)))) + return error; + + infra = dtoh32(infra); + ap = dtoh32(ap); + *uwrq = infra ? ap ? 
IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC; + + return 0; +} + +static int +wl_iw_get_range( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + struct iw_range *range = (struct iw_range *) extra; + static int channels[MAXCHANNEL+1]; + wl_uint32_list_t *list = (wl_uint32_list_t *) channels; + wl_rateset_t rateset; + int error, i, k; + uint sf, ch; + + int phytype; + int bw_cap = 0, sgi_tx = 0, nmode = 0; + channel_info_t ci; + uint8 nrate_list2copy = 0; + uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130}, + {14, 29, 43, 58, 87, 116, 130, 144}, + {27, 54, 81, 108, 162, 216, 243, 270}, + {30, 60, 90, 120, 180, 240, 270, 300}}; + int fbt_cap = 0; + + WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name)); + + if (!extra) + return -EINVAL; + + dwrq->length = sizeof(struct iw_range); + memset(range, 0, sizeof(*range)); + + /* We don't use nwids */ + range->min_nwid = range->max_nwid = 0; + + /* Set available channels/frequencies */ + list->count = htod32(MAXCHANNEL); + if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels)))) + return error; + for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) { + range->freq[i].i = dtoh32(list->element[i]); + + ch = dtoh32(list->element[i]); + if (ch <= CH_MAX_2G_CHANNEL) + sf = WF_CHAN_FACTOR_2_4_G; + else + sf = WF_CHAN_FACTOR_5_G; + + range->freq[i].m = wf_channel2mhz(ch, sf); + range->freq[i].e = 6; + } + range->num_frequency = range->num_channels = i; + + /* Link quality (use NDIS cutoffs) */ + range->max_qual.qual = 5; + /* Signal level (use RSSI) */ + range->max_qual.level = 0x100 - 200; /* -200 dBm */ + /* Noise level (use noise) */ + range->max_qual.noise = 0x100 - 200; /* -200 dBm */ + /* Signal level threshold range (?) 
*/ + range->sensitivity = 65535; + +#if WIRELESS_EXT > 11 + /* Link quality (use NDIS cutoffs) */ + range->avg_qual.qual = 3; + /* Signal level (use RSSI) */ + range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD; + /* Noise level (use noise) */ + range->avg_qual.noise = 0x100 - 75; /* -75 dBm */ +#endif /* WIRELESS_EXT > 11 */ + + /* Set available bitrates */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) + return error; + rateset.count = dtoh32(rateset.count); + range->num_bitrates = rateset.count; + for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++) + range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */ + if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode))) + return error; + if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)))) + return error; + if (nmode == 1 && (((phytype == WLC_PHY_TYPE_LCN) || + (phytype == WLC_PHY_TYPE_LCN40)))) { + if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap))) + return error; + if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx))) + return error; + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t)))) + return error; + ci.hw_channel = dtoh32(ci.hw_channel); + + if (bw_cap == 0 || + (bw_cap == 2 && ci.hw_channel <= 14)) { + if (sgi_tx == 0) + nrate_list2copy = 0; + else + nrate_list2copy = 1; + } + if (bw_cap == 1 || + (bw_cap == 2 && ci.hw_channel >= 36)) { + if (sgi_tx == 0) + nrate_list2copy = 2; + else + nrate_list2copy = 3; + } + range->num_bitrates += 8; + ASSERT(range->num_bitrates < IW_MAX_BITRATES); + for (k = 0; i < range->num_bitrates; k++, i++) { + /* convert to bps */ + range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000; + } + } + + /* Set an indication of the max TCP throughput + * in bit/s that we can expect using this interface. + * May be use for QoS stuff... 
Jean II + */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) + return error; + i = dtoh32(i); + if (i == WLC_PHY_TYPE_A) + range->throughput = 24000000; /* 24 Mbits/s */ + else + range->throughput = 1500000; /* 1.5 Mbits/s */ + + /* RTS and fragmentation thresholds */ + range->min_rts = 0; + range->max_rts = 2347; + range->min_frag = 256; + range->max_frag = 2346; + + range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS; + range->num_encoding_sizes = 4; + range->encoding_size[0] = WEP1_KEY_SIZE; + range->encoding_size[1] = WEP128_KEY_SIZE; +#if WIRELESS_EXT > 17 + range->encoding_size[2] = TKIP_KEY_SIZE; +#else + range->encoding_size[2] = 0; +#endif + range->encoding_size[3] = AES_KEY_SIZE; + + /* Do not support power micro-management */ + range->min_pmp = 0; + range->max_pmp = 0; + range->min_pmt = 0; + range->max_pmt = 0; + range->pmp_flags = 0; + range->pm_capa = 0; + + /* Transmit Power - values are in mW */ + range->num_txpower = 2; + range->txpower[0] = 1; + range->txpower[1] = 255; + range->txpower_capa = IW_TXPOW_MWATT; + +#if WIRELESS_EXT > 10 + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 19; + + /* Only support retry limits */ + range->retry_capa = IW_RETRY_LIMIT; + range->retry_flags = IW_RETRY_LIMIT; + range->r_time_flags = 0; + /* SRL and LRL limits */ + range->min_retry = 1; + range->max_retry = 255; + /* Retry lifetime limits unsupported */ + range->min_r_time = 0; + range->max_r_time = 0; +#endif /* WIRELESS_EXT > 10 */ + +#if WIRELESS_EXT > 17 + range->enc_capa = IW_ENC_CAPA_WPA; + range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP; + range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP; + range->enc_capa |= IW_ENC_CAPA_WPA2; + + /* Determine driver FBT capability. */ + if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) { + if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) { + /* Tell the host (e.g. 
wpa_supplicant) to let driver do the handshake */ + range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE; + } + } + +#ifdef BCMFW_ROAM_ENABLE_WEXT + /* Advertise firmware roam capability to the external supplicant */ + range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE; +#endif /* BCMFW_ROAM_ENABLE_WEXT */ + + /* Event capability (kernel) */ + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + /* Event capability (driver) */ + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); + IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND); + +#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID) + /* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */ + range->scan_capa = IW_SCAN_CAPA_ESSID; +#endif +#endif /* WIRELESS_EXT > 17 */ + + return 0; +} + +static int +rssi_to_qual(int rssi) +{ + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + return 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + return 1; + else if (rssi <= WL_IW_RSSI_LOW) + return 2; + else if (rssi <= WL_IW_RSSI_GOOD) + return 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + return 4; + else + return 5; +} + +static int +wl_iw_set_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + int i; + + WL_TRACE(("%s: SIOCSIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length); + for (i = 0; i < iw->spy_num; i++) + memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN); + memset(iw->spy_qual, 0, sizeof(iw->spy_qual)); + + return 0; +} + +static int +wl_iw_get_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num]; + int i; + + WL_TRACE(("%s: SIOCGIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + dwrq->length = iw->spy_num; + for (i = 0; i < iw->spy_num; i++) { + memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN); + addr[i].sa_family = AF_UNIX; + memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality)); + iw->spy_qual[i].updated = 0; + } + + return 0; +} + +static int +wl_iw_set_wap( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + int error = -EINVAL; + + WL_TRACE(("%s: SIOCSIWAP\n", dev->name)); + + if (awrq->sa_family != ARPHRD_ETHER) { + WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__)); + return -EINVAL; + } + + /* Ignore "auto" or "off" */ + if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) { + scb_val_t scbval; + bzero(&scbval, sizeof(scb_val_t)); + if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) { + WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error)); + } + return 0; + } + /* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data), + * eabuf))); + */ + /* Reassociate to the specified AP */ + if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) { + WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error)); + return error; + } + + return 0; +} + +static int +wl_iw_get_wap( 
+ struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWAP\n", dev->name)); + + awrq->sa_family = ARPHRD_ETHER; + memset(awrq->sa_data, 0, ETHER_ADDR_LEN); + + /* Ignore error (may be down or disassociated) */ + (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN); + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_mlme( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + struct iw_mlme *mlme; + scb_val_t scbval; + int error = -EINVAL; + + WL_TRACE(("%s: SIOCSIWMLME\n", dev->name)); + + mlme = (struct iw_mlme *)extra; + if (mlme == NULL) { + WL_ERROR(("Invalid ioctl data.\n")); + return error; + } + + scbval.val = mlme->reason_code; + bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN); + + if (mlme->cmd == IW_MLME_DISASSOC) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); + } + else if (mlme->cmd == IW_MLME_DEAUTH) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, + sizeof(scb_val_t)); + } + else { + WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__)); + return error; + } + + return error; +} +#endif /* WIRELESS_EXT > 17 */ + +static int +wl_iw_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int error, i; + uint buflen = dwrq->length; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* Get scan results (too large to put on the stack) */ + list = kmalloc(buflen, GFP_KERNEL); + if (!list) + return -ENOMEM; + memset(list, 0, buflen); + list->buflen = htod32(buflen); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { + WL_ERROR(("%d: Scan results error %d\n", __LINE__, error)); + kfree(list); + return error; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + ASSERT(list->version == WL_BSS_INFO_VERSION); + + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + buflen)); + + /* Infrastructure only */ + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + /* BSSID */ + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + /* Updated qual, level, and noise */ +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif /* WIRELESS_EXT > 18 */ + + dwrq->length++; + } + + kfree(list); + + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + /* Provided qual */ + dwrq->flags = 1; + } + + return 0; +} + +static int +wl_iw_iscan_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + iscan_buf_t * buf; + iscan_info_t *iscan = g_iscan; + + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int i; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((!iscan) || (iscan->sysioc_pid < 0)) { + return wl_iw_get_aplist(dev, info, dwrq, extra); + } + + buf = iscan->list_hdr; + /* Get scan results (too large to put on the stack) */ + while (buf) { + list = &((wl_iscan_results_t*)buf->iscan_buf)->results; + ASSERT(list->version == WL_BSS_INFO_VERSION); + + bi = NULL; + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + /* Infrastructure only */ + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + /* BSSID */ + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + /* Updated qual, level, and noise */ +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif /* WIRELESS_EXT > 18 */ + + dwrq->length++; + } + buf = buf->next; + } + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + /* Provided qual */ + dwrq->flags = 1; + } + + return 0; +} + +#if WIRELESS_EXT > 13 +static int +wl_iw_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + wlc_ssid_t ssid; + + WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name)); + + /* default Broadcast scan */ + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + /* check for given essid */ + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + } + } +#endif + /* Ignore error (most likely scan in progress) */ + (void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid)); + + return 0; +} + +static int +wl_iw_iscan_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + wlc_ssid_t ssid; + iscan_info_t *iscan = g_iscan; + + WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name)); + + /* use backup if our thread is not successful */ + if ((!iscan) || (iscan->sysioc_pid < 0)) { + return wl_iw_set_scan(dev, info, wrqu, extra); + } + if (iscan->iscan_state == ISCAN_STATE_SCANING) { + return 0; + } + + /* default Broadcast scan */ + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + /* check for given essid */ + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + } + } +#endif + + iscan->list_cur = iscan->list_hdr; + iscan->iscan_state = ISCAN_STATE_SCANING; + + + wl_iw_set_event_mask(dev); + wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START); + + iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); + add_timer(&iscan->timer); + iscan->timer_on = 1; + + return 0; +} + +#if WIRELESS_EXT > 17 +static bool +ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len) +{ +/* Is this body of this tlvs entry a WPA entry? 
If */ +/* not update the tlvs buffer pointer/length */ + uint8 *ie = *wpaie; + + /* If the contents match the WPA_OUI and type=1 */ + if ((ie[1] >= 6) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) { + return TRUE; + } + + /* point to the next ie */ + ie += ie[1] + 2; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + return FALSE; +} + +static bool +ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len) +{ +/* Is this body of this tlvs entry a WPS entry? If */ +/* not update the tlvs buffer pointer/length */ + uint8 *ie = *wpsie; + + /* If the contents match the WPA_OUI and type=4 */ + if ((ie[1] >= 4) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) { + return TRUE; + } + + /* point to the next ie */ + ie += ie[1] + 2; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + return FALSE; +} +#endif /* WIRELESS_EXT > 17 */ + + +static int +wl_iw_handle_scanresults_ies(char **event_p, char *end, + struct iw_request_info *info, wl_bss_info_t *bi) +{ +#if WIRELESS_EXT > 17 + struct iw_event iwe; + char *event; + + event = *event_p; + if (bi->ie_length) { + /* look for wpa/rsn ies in the ie list... */ + bcm_tlv_t *ie; + uint8 *ptr = ((uint8 *)bi) + bi->ie_offset; + int ptr_len = bi->ie_length; + + /* OSEN IE */ + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) && + ie->len > WFA_OUI_LEN + 1 && + !bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) && + ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + bi->ie_offset; + + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + bi->ie_offset; + + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + bi->ie_offset; + + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + /* look for WPS IE */ + if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + ptr = ((uint8 *)bi) + bi->ie_offset; + ptr_len = bi->ie_length; + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + *event_p = event; + } + +#endif /* WIRELESS_EXT > 17 */ + return 0; +} +static int +wl_iw_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + channel_info_t ci; + wl_scan_results_t *list; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + int error, i, j; + char *event = extra, *end = extra + dwrq->length, *value; + uint buflen = dwrq->length; + + WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* Check for scan in progress */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, 
sizeof(ci)))) + return error; + ci.scan_channel = dtoh32(ci.scan_channel); + if (ci.scan_channel) + return -EAGAIN; + + /* Get scan results (too large to put on the stack) */ + list = kmalloc(buflen, GFP_KERNEL); + if (!list) + return -ENOMEM; + memset(list, 0, buflen); + list->buflen = htod32(buflen); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { + kfree(list); + return error; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + + ASSERT(list->version == WL_BSS_INFO_VERSION); + + for (i = 0; i < list->count && i < IW_MAX_AP; i++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + buflen)); + + /* First entry must be the BSSID */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + /* SSID */ + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + /* Mode */ + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + /* Channel */ + iwe.cmd = SIOCGIWFREQ; + + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), + (CHSPEC_IS2G(bi->chanspec)) ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + /* Channel quality */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + /* Encryption */ + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + /* Rates */ + if (bi->rateset.count) { + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... 
*/ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + + kfree(list); + + dwrq->length = event - extra; + dwrq->flags = 0; /* todo */ + + return 0; +} + +static int +wl_iw_iscan_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + int ii, j; + int apcnt; + char *event = extra, *end = extra + dwrq->length, *value; + iscan_info_t *iscan = g_iscan; + iscan_buf_t * p_buf; + + WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* use backup if our thread is not successful */ + if ((!iscan) || (iscan->sysioc_pid < 0)) { + return wl_iw_get_scan(dev, info, dwrq, extra); + } + + /* Check for scan in progress */ + if (iscan->iscan_state == ISCAN_STATE_SCANING) + return -EAGAIN; + + apcnt = 0; + p_buf = iscan->list_hdr; + /* Get scan results */ + while (p_buf != iscan->list_cur) { + list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version)); + } + + bi = NULL; + for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + /* overflow check cover fields before wpa IEs */ + if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN + + IW_EV_QUAL_LEN >= end) + return -E2BIG; + /* First entry must be the BSSID */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + /* SSID */ + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + /* Mode */ + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + /* Channel */ + iwe.cmd = SIOCGIWFREQ; + + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), + (CHSPEC_IS2G(bi->chanspec)) ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + /* Channel quality */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + /* Encryption */ + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + /* Rates */ + if (bi->rateset.count <= sizeof(bi->rateset.rates)) { + if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) + return -E2BIG; + + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... */ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + p_buf = p_buf->next; + } /* while (p_buf) */ + + dwrq->length = event - extra; + dwrq->flags = 0; /* todo */ + + return 0; +} + +#endif /* WIRELESS_EXT > 13 */ + + +static int +wl_iw_set_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + + WL_TRACE(("%s: SIOCSIWESSID\n", dev->name)); + + /* default Broadcast SSID */ + memset(&ssid, 0, sizeof(ssid)); + if (dwrq->length && extra) { +#if WIRELESS_EXT > 20 + ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length); +#else + ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1); +#endif + memcpy(ssid.SSID, extra, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) + return error; + } + /* If essid null then it is "iwconfig essid off" command */ + else { + scb_val_t scbval; + bzero(&scbval, sizeof(scb_val_t)); + if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) + return error; + } + return 0; +} + +static int +wl_iw_get_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + + WL_TRACE(("%s: SIOCGIWESSID\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) { + WL_ERROR(("Error getting the SSID\n")); + return error; + } + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + /* Get the current SSID */ + memcpy(extra, ssid.SSID, ssid.SSID_len); + + dwrq->length = ssid.SSID_len; + + dwrq->flags = 1; /* active */ + + return 0; +} + +static int +wl_iw_set_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* Check the size of the string */ + if (dwrq->length > sizeof(iw->nickname)) + return -E2BIG; + + memcpy(iw->nickname, extra, dwrq->length); + iw->nickname[dwrq->length - 1] = '\0'; + + return 0; +} + +static int +wl_iw_get_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + 
WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + strcpy(extra, iw->nickname); + dwrq->length = strlen(extra) + 1; + + return 0; +} + +static int wl_iw_set_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + wl_rateset_t rateset; + int error, rate, i, error_bg, error_a; + + WL_TRACE(("%s: SIOCSIWRATE\n", dev->name)); + + /* Get current rateset */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) + return error; + + rateset.count = dtoh32(rateset.count); + + if (vwrq->value < 0) { + /* Select maximum rate */ + rate = rateset.rates[rateset.count - 1] & 0x7f; + } else if (vwrq->value < rateset.count) { + /* Select rate by rateset index */ + rate = rateset.rates[vwrq->value] & 0x7f; + } else { + /* Specified rate in bps */ + rate = vwrq->value / 500000; + } + + if (vwrq->fixed) { + /* + Set rate override, + Since the is a/b/g-blind, both a/bg_rate are enforced. + */ + error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate); + error_a = dev_wlc_intvar_set(dev, "a_rate", rate); + + if (error_bg && error_a) + return (error_bg | error_a); + } else { + /* + clear rate override + Since the is a/b/g-blind, both a/bg_rate are enforced. + */ + /* 0 is for clearing rate override */ + error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0); + /* 0 is for clearing rate override */ + error_a = dev_wlc_intvar_set(dev, "a_rate", 0); + + if (error_bg && error_a) + return (error_bg | error_a); + + /* Remove rates above selected rate */ + for (i = 0; i < rateset.count; i++) + if ((rateset.rates[i] & 0x7f) > rate) + break; + rateset.count = htod32(i); + + /* Set current rateset */ + if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset)))) + return error; + } + + return 0; +} + +static int wl_iw_get_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rate; + + WL_TRACE(("%s: SIOCGIWRATE\n", dev->name)); + + /* Report the current tx rate */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate)))) + return error; + rate = dtoh32(rate); + vwrq->value = rate * 500000; + + return 0; +} + +static int +wl_iw_set_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCSIWRTS\n", dev->name)); + + if (vwrq->disabled) + rts = DOT11_DEFAULT_RTS_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN) + return -EINVAL; + else + rts = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts))) + return error; + + return 0; +} + +static int +wl_iw_get_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCGIWRTS\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts))) + return error; + + vwrq->value = rts; + vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, frag; + + WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name)); + + if (vwrq->disabled) + frag = DOT11_DEFAULT_FRAG_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN) + return -EINVAL; + else + frag = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag))) + return error; + + return 0; +} + +static int 
+wl_iw_get_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, fragthreshold; + + WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold))) + return error; + + vwrq->value = fragthreshold; + vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable; + uint16 txpwrmw; + WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name)); + + /* Make sure radio is off or on as far as software is concerned */ + disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0; + disable += WL_RADIO_SW_DISABLE << 16; + + disable = htod32(disable); + if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable)))) + return error; + + /* If Radio is off, nothing more to do */ + if (disable & WL_RADIO_SW_DISABLE) + return 0; + + /* Only handle mW */ + if (!(vwrq->flags & IW_TXPOW_MWATT)) + return -EINVAL; + + /* Value < 0 means just "on" or "off" */ + if (vwrq->value < 0) + return 0; + + if (vwrq->value > 0xffff) txpwrmw = 0xffff; + else txpwrmw = (uint16)vwrq->value; + + + error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw))); + return error; +} + +static int +wl_iw_get_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable, txpwrdbm; + uint8 result; + + WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) || + (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm))) + return error; + + disable = dtoh32(disable); + result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE); + vwrq->value = (int32)bcm_qdbm_to_mw(result); + vwrq->fixed = 0; + vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 
1 : 0; + vwrq->flags = IW_TXPOW_MWATT; + + return 0; +} + +#if WIRELESS_EXT > 10 +static int +wl_iw_set_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name)); + + /* Do not handle "off" or "lifetime" */ + if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME)) + return -EINVAL; + + /* Handle "[min|max] limit" */ + if (vwrq->flags & IW_RETRY_LIMIT) { + /* "max limit" or just "limit" */ +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) || + !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) { +#else + if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) { +#endif /* WIRELESS_EXT > 20 */ + + lrl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl)))) + return error; + } + /* "min limit" or just "limit" */ +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) || + !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) { +#else + if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) { +#endif /* WIRELESS_EXT > 20 */ + + srl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl)))) + return error; + } + } + + return 0; +} + +static int +wl_iw_get_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name)); + + vwrq->disabled = 0; /* Can't be disabled */ + + /* Do not handle lifetime queries */ + if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) + return -EINVAL; + + /* Get retry limits */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) || + (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl)))) + return error; + + lrl = dtoh32(lrl); + srl = dtoh32(srl); + + /* Note : by default, display the min retry number */ + if (vwrq->flags & IW_RETRY_MAX) { + vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; + vwrq->value = lrl; + } else { + vwrq->flags = IW_RETRY_LIMIT; + vwrq->value = srl; + if (srl != lrl) + vwrq->flags |= IW_RETRY_MIN; + } + + return 0; +} +#endif /* WIRELESS_EXT > 10 */ + +static int +wl_iw_set_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec; + + WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name)); + + memset(&key, 0, sizeof(key)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + /* Find the current key */ + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + /* Default to 0 */ + if (key.index == DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + } else { + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + return -EINVAL; + } + + /* Interpret "off" to mean no encryption */ + wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 
0 : WEP_ENABLED; + + if ((error = dev_wlc_intvar_set(dev, "wsec", wsec))) + return error; + + /* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */ + if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) { + /* Just select a new current key */ + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + } else { + key.len = dwrq->length; + + if (dwrq->length > sizeof(key.data)) + return -EINVAL; + + memcpy(key.data, extra, dwrq->length); + + key.flags = WL_PRIMARY_KEY; + switch (key.len) { + case WEP1_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WEP128_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP128; + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + case TKIP_KEY_SIZE: + key.algo = CRYPTO_ALGO_TKIP; + break; +#endif + case AES_KEY_SIZE: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + return -EINVAL; + } + + /* Set the new key/index */ + swap_key_from_BE(&key); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)))) + return error; + } + + /* Interpret "restricted" to mean shared key authentication */ + val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0; + val = htod32(val); + if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val)))) + return error; + + return 0; +} + +static int +wl_iw_get_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec, auth; + + WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name)); + + /* assure default values of zero for things we don't touch */ + bzero(&key, sizeof(wl_wsec_key_t)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + /* Find the current key */ + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = key.index; + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + } else + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + + /* Get info */ + + if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth)))) + return error; + + swap_key_to_BE(&key); + + wsec = dtoh32(wsec); + auth = dtoh32(auth); + /* Get key length */ + dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len); + + /* Get flags */ + dwrq->flags = key.index + 1; + if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) { + /* Interpret "off" to mean no encryption */ + dwrq->flags |= IW_ENCODE_DISABLED; + } + if (auth) { + /* Interpret "restricted" to mean shared key authentication */ + dwrq->flags |= IW_ENCODE_RESTRICTED; + } + + /* Get key */ + if (dwrq->length && extra) + memcpy(extra, key.data, dwrq->length); + + return 0; +} + +static int +wl_iw_set_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name)); + + pm = vwrq->disabled ? PM_OFF : PM_MAX; + + pm = htod32(pm); + if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)))) + return error; + + return 0; +} + +static int +wl_iw_get_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)))) + return error; + + pm = dtoh32(pm); + vwrq->disabled = pm ? 
0 : 1; + vwrq->flags = IW_POWER_ALL_R; + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_set_wpaie( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *iwp, + char *extra +) +{ + dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length); + + return 0; +} + +static int +wl_iw_get_wpaie( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *iwp, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name)); + iwp->length = 64; + dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length); + return 0; +} + +static int +wl_iw_set_encodeext( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error; + struct iw_encode_ext *iwe; + + WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name)); + + memset(&key, 0, sizeof(key)); + iwe = (struct iw_encode_ext *)extra; + + /* disable encryption completely */ + if (dwrq->flags & IW_ENCODE_DISABLED) { + + } + + /* get the key index */ + key.index = 0; + if (dwrq->flags & IW_ENCODE_INDEX) + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + key.len = iwe->key_len; + + /* Instead of bcast for ea address for default wep keys, driver needs it to be Null */ + if (!ETHER_ISMULTI(iwe->addr.sa_data)) + bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN); + + /* check for key index change */ + if (key.len == 0) { + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("Changing the the primary Key to %d\n", key.index)); + /* change the key index .... */ + key.index = htod32(key.index); + error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, + &key.index, sizeof(key.index)); + if (error) + return error; + } + /* key delete */ + else { + swap_key_from_BE(&key); + error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + if (error) + return error; + } + } + /* This case is used to allow an external 802.1x supplicant + * to pass the PMK to the in-driver supplicant for use in + * the 4-way handshake. 
+ */ + else if (iwe->alg == IW_ENCODE_ALG_PMK) { + int j; + wsec_pmk_t pmk; + char keystring[WSEC_MAX_PSK_LEN + 1]; + char* charptr = keystring; + uint len; + + /* copy the raw hex key to the appropriate format */ + for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) { + (void)snprintf(charptr, 3, "%02x", iwe->key[j]); + charptr += 2; + } + len = strlen(keystring); + pmk.key_len = htod16(len); + bcopy(keystring, pmk.key, len); + pmk.flags = htod16(WSEC_PASSPHRASE); + + error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk)); + if (error) + return error; + } + + else { + if (iwe->key_len > sizeof(key.data)) + return -EINVAL; + + WL_WSEC(("Setting the key index %d\n", key.index)); + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("key is a Primary Key\n")); + key.flags = WL_PRIMARY_KEY; + } + + bcopy((void *)iwe->key, key.data, iwe->key_len); + + if (iwe->alg == IW_ENCODE_ALG_TKIP) { + uint8 keybuf[8]; + bcopy(&key.data[24], keybuf, sizeof(keybuf)); + bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); + bcopy(keybuf, &key.data[16], sizeof(keybuf)); + } + + /* rx iv */ + if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { + uchar *ivptr; + ivptr = (uchar *)iwe->rx_seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = TRUE; + } + + switch (iwe->alg) { + case IW_ENCODE_ALG_NONE: + key.algo = CRYPTO_ALGO_OFF; + break; + case IW_ENCODE_ALG_WEP: + if (iwe->key_len == WEP1_KEY_SIZE) + key.algo = CRYPTO_ALGO_WEP1; + else + key.algo = CRYPTO_ALGO_WEP128; + break; + case IW_ENCODE_ALG_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + break; + case IW_ENCODE_ALG_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + break; + } + swap_key_from_BE(&key); + + dhd_wait_pend8021x(dev); + + error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + if (error) + return error; + } + return 0; +} + + +#if WIRELESS_EXT > 17 +struct { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID-1]; +} pmkid_list; +static int +wl_iw_set_pmksa( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + struct iw_pmksa *iwpmksa; + uint i; + char eabuf[ETHER_ADDR_STR_LEN]; + pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid; + + WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name)); + iwpmksa = (struct iw_pmksa *)extra; + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + if (iwpmksa->cmd == IW_PMKSA_FLUSH) { + WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n")); + bzero((char *)&pmkid_list, sizeof(pmkid_list)); + } + if (iwpmksa->cmd == IW_PMKSA_REMOVE) { + pmkid_list_t pmkid, *pmkidptr; + pmkidptr = &pmkid; + bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN); + { + uint j; + WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ", + bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j])); + WL_TRACE(("\n")); + } + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) + if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID, + ETHER_ADDR_LEN)) + break; + for (; i < pmkid_list.pmkids.npmkid; i++) { + bcopy(&pmkid_array[i+1].BSSID, + &pmkid_array[i].BSSID, + ETHER_ADDR_LEN); + bcopy(&pmkid_array[i+1].PMKID, + &pmkid_array[i].PMKID, + WPA2_PMKID_LEN); + } + pmkid_list.pmkids.npmkid--; + } + if (iwpmksa->cmd == IW_PMKSA_ADD) { + bcopy(&iwpmksa->bssid.sa_data[0], + &pmkid_array[pmkid_list.pmkids.npmkid].BSSID, + 
ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID, + WPA2_PMKID_LEN); + { + uint j; + uint k; + k = pmkid_list.pmkids.npmkid; + BCM_REFERENCE(k); + WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ", + bcm_ether_ntoa(&pmkid_array[k].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_TRACE(("%02x ", pmkid_array[k].PMKID[j])); + WL_TRACE(("\n")); + } + pmkid_list.pmkids.npmkid++; + } + WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid)); + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) { + uint j; + WL_TRACE(("PMKID[%d]: %s = ", i, + bcm_ether_ntoa(&pmkid_array[i].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_TRACE(("%02x ", pmkid_array[i].PMKID[j])); + printf("\n"); + } + WL_TRACE(("\n")); + dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list)); + return 0; +} +#endif /* WIRELESS_EXT > 17 */ + +static int +wl_iw_get_encodeext( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name)); + return 0; +} + +static int +wl_iw_set_wpaauth( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error = 0; + int paramid; + int paramval; + uint32 cipher_combined; + int val = 0; + wl_iw_t *iw = IW_DEV_IF(dev); + + WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name)); + + paramid = vwrq->flags & IW_AUTH_INDEX; + paramval = vwrq->value; + + WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n", + dev->name, paramid, paramval)); + + switch (paramid) { + + case IW_AUTH_WPA_VERSION: + /* supported wpa version disabled or wpa or wpa2 */ + if (paramval & IW_AUTH_WPA_VERSION_DISABLED) + val = WPA_AUTH_DISABLED; + else if (paramval & (IW_AUTH_WPA_VERSION_WPA)) + val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; + else if (paramval & IW_AUTH_WPA_VERSION_WPA2) + val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; + WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) + return error; + break; + + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: { + int fbt_cap = 0; + + if (paramid == IW_AUTH_CIPHER_PAIRWISE) { + iw->pwsec = paramval; + } + else { + iw->gwsec = paramval; + } + + if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) + return error; + + cipher_combined = iw->gwsec | iw->pwsec; + val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED); + if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) + val |= WEP_ENABLED; + if (cipher_combined & IW_AUTH_CIPHER_TKIP) + val |= TKIP_ENABLED; + if (cipher_combined & IW_AUTH_CIPHER_CCMP) + val |= AES_ENABLED; + + if (iw->privacy_invoked && !val) { + WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming " + "we're a WPS enrollee\n", dev->name, __FUNCTION__)); + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { + WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); + return error; + } + } else if (val) { + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { + WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + return error; + } + } + + if ((error = dev_wlc_intvar_set(dev, "wsec", val))) + return error; + + /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way + * handshake. 
+ */ + if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) { + if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) { + if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) { + if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) + return error; + } + else if (val == 0) { + if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) + return error; + } + } + } + break; + } + + case IW_AUTH_KEY_MGMT: + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + + if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { + if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK)) + val = WPA_AUTH_PSK; + else + val = WPA_AUTH_UNSPECIFIED; + if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK)) + val |= WPA2_AUTH_FT; + } + else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { + if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK)) + val = WPA2_AUTH_PSK; + else + val = WPA2_AUTH_UNSPECIFIED; + if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK)) + val |= WPA2_AUTH_FT; + } + WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) + return error; + break; + + case IW_AUTH_TKIP_COUNTERMEASURES: + dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)¶mval, 1); + break; + + case IW_AUTH_80211_AUTH_ALG: + /* open shared */ + WL_ERROR(("Setting the D11auth %d\n", paramval)); + if (paramval & IW_AUTH_ALG_OPEN_SYSTEM) + val = 0; + else if (paramval & IW_AUTH_ALG_SHARED_KEY) + val = 1; + else + error = 1; + if (!error && (error = dev_wlc_intvar_set(dev, "auth", val))) + return error; + break; + + case IW_AUTH_WPA_ENABLED: + if (paramval == 0) { + val = 0; + WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val)); + error = dev_wlc_intvar_set(dev, "wpa_auth", val); + return error; + } + else { + /* If WPA is enabled, wpa_auth is set elsewhere */ + } + break; + + case IW_AUTH_DROP_UNENCRYPTED: + dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)¶mval, 1); + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)¶mval, 1); + break; + +#if WIRELESS_EXT > 17 + + case IW_AUTH_ROAMING_CONTROL: + WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__)); + /* driver control or user space app control */ + break; + + case IW_AUTH_PRIVACY_INVOKED: { + int wsec; + + if (paramval == 0) { + iw->privacy_invoked = FALSE; + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { + WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + return error; + } + } else { + iw->privacy_invoked = TRUE; + if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec))) + return error; + + if (!WSEC_ENABLED(wsec)) { + /* if privacy is true, but wsec is false, we are a WPS enrollee */ + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) { + WL_WSEC(("Failed to set iovar is_WPS_enrollee\n")); + return error; + } + } else { + if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) { + WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n")); + return error; + } + } + } + break; + } + + +#endif /* WIRELESS_EXT > 17 */ + + + default: + break; + } + return 0; +} +#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK)) + +static int +wl_iw_get_wpaauth( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error; + int paramid; + int paramval = 0; + int val; + wl_iw_t *iw = IW_DEV_IF(dev); + + WL_TRACE(("%s: SIOCGIWAUTH\n", 
dev->name)); + + paramid = vwrq->flags & IW_AUTH_INDEX; + + switch (paramid) { + case IW_AUTH_WPA_VERSION: + /* supported wpa version disabled or wpa or wpa2 */ + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED)) + paramval = IW_AUTH_WPA_VERSION_DISABLED; + else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) + paramval = IW_AUTH_WPA_VERSION_WPA; + else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) + paramval = IW_AUTH_WPA_VERSION_WPA2; + break; + + case IW_AUTH_CIPHER_PAIRWISE: + paramval = iw->pwsec; + break; + + case IW_AUTH_CIPHER_GROUP: + paramval = iw->gwsec; + break; + + case IW_AUTH_KEY_MGMT: + /* psk, 1x */ + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + if (VAL_PSK(val)) + paramval = IW_AUTH_KEY_MGMT_PSK; + else + paramval = IW_AUTH_KEY_MGMT_802_1X; + + break; + case IW_AUTH_TKIP_COUNTERMEASURES: + dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)¶mval, 1); + break; + + case IW_AUTH_DROP_UNENCRYPTED: + dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)¶mval, 1); + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)¶mval, 1); + break; + + case IW_AUTH_80211_AUTH_ALG: + /* open, shared, leap */ + if ((error = dev_wlc_intvar_get(dev, "auth", &val))) + return error; + if (!val) + paramval = IW_AUTH_ALG_OPEN_SYSTEM; + else + paramval = IW_AUTH_ALG_SHARED_KEY; + break; + case IW_AUTH_WPA_ENABLED: + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + if (val) + paramval = TRUE; + else + paramval = FALSE; + break; + +#if WIRELESS_EXT > 17 + + case IW_AUTH_ROAMING_CONTROL: + WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__)); + /* driver control or user space app control */ + break; + + case IW_AUTH_PRIVACY_INVOKED: + paramval = iw->privacy_invoked; + break; + +#endif /* WIRELESS_EXT > 17 */ + } + vwrq->value = paramval; + return 0; +} +#endif /* WIRELESS_EXT > 17 */ + +static const iw_handler wl_iw_handler[] = +{ + (iw_handler) wl_iw_config_commit, /* SIOCSIWCOMMIT */ + (iw_handler) wl_iw_get_name, /* SIOCGIWNAME */ + (iw_handler) NULL, /* SIOCSIWNWID */ + (iw_handler) NULL, /* SIOCGIWNWID */ + (iw_handler) wl_iw_set_freq, /* SIOCSIWFREQ */ + (iw_handler) wl_iw_get_freq, /* SIOCGIWFREQ */ + (iw_handler) wl_iw_set_mode, /* SIOCSIWMODE */ + (iw_handler) wl_iw_get_mode, /* SIOCGIWMODE */ + (iw_handler) NULL, /* SIOCSIWSENS */ + (iw_handler) NULL, /* SIOCGIWSENS */ + (iw_handler) NULL, /* SIOCSIWRANGE */ + (iw_handler) wl_iw_get_range, /* SIOCGIWRANGE */ + (iw_handler) NULL, /* SIOCSIWPRIV */ + (iw_handler) NULL, /* SIOCGIWPRIV */ + (iw_handler) NULL, /* SIOCSIWSTATS */ + (iw_handler) NULL, /* SIOCGIWSTATS */ + (iw_handler) wl_iw_set_spy, /* SIOCSIWSPY */ + (iw_handler) wl_iw_get_spy, /* SIOCGIWSPY */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) wl_iw_set_wap, /* SIOCSIWAP */ + (iw_handler) wl_iw_get_wap, /* SIOCGIWAP */ +#if WIRELESS_EXT > 17 + (iw_handler) wl_iw_mlme, /* SIOCSIWMLME */ +#else + (iw_handler) NULL, /* -- hole -- */ +#endif + (iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */ +#if WIRELESS_EXT > 13 + (iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */ + (iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */ +#else /* WIRELESS_EXT > 13 */ + (iw_handler) NULL, /* SIOCSIWSCAN */ + (iw_handler) NULL, /* SIOCGIWSCAN */ +#endif /* WIRELESS_EXT > 13 */ + (iw_handler) wl_iw_set_essid, /* SIOCSIWESSID */ + (iw_handler) wl_iw_get_essid, /* 
SIOCGIWESSID */ + (iw_handler) wl_iw_set_nick, /* SIOCSIWNICKN */ + (iw_handler) wl_iw_get_nick, /* SIOCGIWNICKN */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) wl_iw_set_rate, /* SIOCSIWRATE */ + (iw_handler) wl_iw_get_rate, /* SIOCGIWRATE */ + (iw_handler) wl_iw_set_rts, /* SIOCSIWRTS */ + (iw_handler) wl_iw_get_rts, /* SIOCGIWRTS */ + (iw_handler) wl_iw_set_frag, /* SIOCSIWFRAG */ + (iw_handler) wl_iw_get_frag, /* SIOCGIWFRAG */ + (iw_handler) wl_iw_set_txpow, /* SIOCSIWTXPOW */ + (iw_handler) wl_iw_get_txpow, /* SIOCGIWTXPOW */ +#if WIRELESS_EXT > 10 + (iw_handler) wl_iw_set_retry, /* SIOCSIWRETRY */ + (iw_handler) wl_iw_get_retry, /* SIOCGIWRETRY */ +#endif /* WIRELESS_EXT > 10 */ + (iw_handler) wl_iw_set_encode, /* SIOCSIWENCODE */ + (iw_handler) wl_iw_get_encode, /* SIOCGIWENCODE */ + (iw_handler) wl_iw_set_power, /* SIOCSIWPOWER */ + (iw_handler) wl_iw_get_power, /* SIOCGIWPOWER */ +#if WIRELESS_EXT > 17 + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) wl_iw_set_wpaie, /* SIOCSIWGENIE */ + (iw_handler) wl_iw_get_wpaie, /* SIOCGIWGENIE */ + (iw_handler) wl_iw_set_wpaauth, /* SIOCSIWAUTH */ + (iw_handler) wl_iw_get_wpaauth, /* SIOCGIWAUTH */ + (iw_handler) wl_iw_set_encodeext, /* SIOCSIWENCODEEXT */ + (iw_handler) wl_iw_get_encodeext, /* SIOCGIWENCODEEXT */ + (iw_handler) wl_iw_set_pmksa, /* SIOCSIWPMKSA */ +#endif /* WIRELESS_EXT > 17 */ +}; + +#if WIRELESS_EXT > 12 +enum { + WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV, + WL_IW_SET_VLANMODE, + WL_IW_SET_PM, +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ + WL_IW_SET_LAST +}; + +static iw_handler wl_iw_priv_handler[] = { + wl_iw_set_leddc, + wl_iw_set_vlanmode, + wl_iw_set_pm, +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ + NULL +}; + +static struct iw_priv_args wl_iw_priv_args[] = { + { + WL_IW_SET_LEDDC, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 0, + "set_leddc" + }, + { + WL_IW_SET_VLANMODE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 0, + "set_vlanmode" + }, + { + WL_IW_SET_PM, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 0, + "set_pm" + }, +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ + { 0, 0, 0, { 0 } } +}; + +const struct iw_handler_def wl_iw_handler_def = +{ + .num_standard = ARRAYSIZE(wl_iw_handler), + .num_private = ARRAY_SIZE(wl_iw_priv_handler), + .num_private_args = ARRAY_SIZE(wl_iw_priv_args), + .standard = (const iw_handler *) wl_iw_handler, + .private = wl_iw_priv_handler, + .private_args = wl_iw_priv_args, +#if WIRELESS_EXT >= 19 + get_wireless_stats: dhd_get_wireless_stats, +#endif /* WIRELESS_EXT >= 19 */ + }; +#endif /* WIRELESS_EXT > 12 */ + +int +wl_iw_ioctl( + struct net_device *dev, + struct ifreq *rq, + int cmd +) +{ + struct iwreq *wrq = (struct iwreq *) rq; + struct iw_request_info info; + iw_handler handler; + char *extra = NULL; + size_t token_size = 1; + int max_tokens = 0, ret = 0; + + if (cmd < SIOCIWFIRST || + IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) || + !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) + return -EOPNOTSUPP; + + switch (cmd) { + + case SIOCSIWESSID: + case SIOCGIWESSID: + case SIOCSIWNICKN: + case SIOCGIWNICKN: + max_tokens = IW_ESSID_MAX_SIZE + 1; + break; + + case SIOCSIWENCODE: + case SIOCGIWENCODE: +#if WIRELESS_EXT > 17 + case SIOCSIWENCODEEXT: + case SIOCGIWENCODEEXT: +#endif + max_tokens = IW_ENCODING_TOKEN_MAX; + break; + + case SIOCGIWRANGE: + max_tokens = sizeof(struct iw_range); + break; + + case SIOCGIWAPLIST: + token_size = sizeof(struct sockaddr) + 
sizeof(struct iw_quality); + max_tokens = IW_MAX_AP; + break; + +#if WIRELESS_EXT > 13 + case SIOCGIWSCAN: + if (g_iscan) + max_tokens = wrq->u.data.length; + else + max_tokens = IW_SCAN_MAX_DATA; + break; +#endif /* WIRELESS_EXT > 13 */ + + case SIOCSIWSPY: + token_size = sizeof(struct sockaddr); + max_tokens = IW_MAX_SPY; + break; + + case SIOCGIWSPY: + token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); + max_tokens = IW_MAX_SPY; + break; + default: + break; + } + + if (max_tokens && wrq->u.data.pointer) { + if (wrq->u.data.length > max_tokens) + return -E2BIG; + + if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) { + kfree(extra); + return -EFAULT; + } + } + + info.cmd = cmd; + info.flags = 0; + + ret = handler(dev, &info, &wrq->u, extra); + + if (extra) { + if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) { + kfree(extra); + return -EFAULT; + } + + kfree(extra); + } + + return ret; +} + +/* Convert a connection status event into a connection status string. + * Returns TRUE if a matching connection status string was found. + */ +bool +wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, + char* stringBuf, uint buflen) +{ + typedef struct conn_fail_event_map_t { + uint32 inEvent; /* input: event type to match */ + uint32 inStatus; /* input: event status code to match */ + uint32 inReason; /* input: event reason code to match */ + const char* outName; /* output: failure type */ + const char* outCause; /* output: failure cause */ + } conn_fail_event_map_t; + + /* Map of WLC_E events to connection failure strings */ +# define WL_IW_DONT_CARE 9999 + const conn_fail_event_map_t event_map [] = { + /* inEvent inStatus inReason */ + /* outName outCause */ + {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE, + "Conn", "Success"}, + {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE, + "Conn", "NoNetworks"}, + {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ConfigMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH, + "Conn", "EncrypMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH, + "Conn", "RsnMismatch"}, + {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "AuthTimeout"}, + {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "AuthFail"}, + {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE, + "Conn", "AuthNoAck"}, + {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ReassocFail"}, + {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "ReassocTimeout"}, + {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE, + "Conn", "ReassocAbort"}, + {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE, + "Sup", "ConnSuccess"}, + {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Sup", "WpaHandshakeFail"}, + {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Deauth"}, + {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "DisassocInd"}, + {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Disassoc"} + }; + + const char* name = ""; + const char* cause = NULL; + int i; + + /* Search the event map table for a matching event */ + for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) { + const conn_fail_event_map_t* row = &event_map[i]; + if (row->inEvent == event_type && + (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) && + (row->inReason == reason || row->inReason == 
WL_IW_DONT_CARE)) { + name = row->outName; + cause = row->outCause; + break; + } + } + + /* If found, generate a connection failure string and return TRUE */ + if (cause) { + memset(stringBuf, 0, buflen); + (void)snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason); + WL_TRACE(("Connection status: %s\n", stringBuf)); + return TRUE; + } else { + return FALSE; + } +} + +#if (WIRELESS_EXT > 14) +/* Check if we have received an event that indicates connection failure + * If so, generate a connection failure report string. + * The caller supplies a buffer to hold the generated string. + */ +static bool +wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen) +{ + uint32 event = ntoh32(e->event_type); + uint32 status = ntoh32(e->status); + uint32 reason = ntoh32(e->reason); + + if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) { + return TRUE; + } else + { + return FALSE; + } +} +#endif /* WIRELESS_EXT > 14 */ + +#ifndef IW_CUSTOM_MAX +#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */ +#endif /* IW_CUSTOM_MAX */ + +void +wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) +{ +#if WIRELESS_EXT > 13 + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd = 0; + uint32 event_type = ntoh32(e->event_type); + uint16 flags = ntoh16(e->flags); + uint32 datalen = ntoh32(e->datalen); + uint32 status = ntoh32(e->status); + + memset(&wrqu, 0, sizeof(wrqu)); + memset(extra, 0, sizeof(extra)); + + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + + switch (event_type) { + case WLC_E_TXFAIL: + cmd = IWEVTXDROP; + break; +#if WIRELESS_EXT > 14 + case WLC_E_JOIN: + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + cmd = IWEVREGISTERED; + break; + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + cmd = SIOCGIWAP; + wrqu.data.length = strlen(extra); + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + break; + + case WLC_E_LINK: + cmd = SIOCGIWAP; + wrqu.data.length = strlen(extra); + if (!(flags & WLC_EVENT_MSG_LINK)) { + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + } + break; + case WLC_E_ACTION_FRAME: + cmd = IWEVCUSTOM; + if (datalen + 1 <= sizeof(extra)) { + wrqu.data.length = datalen + 1; + extra[0] = WLC_E_ACTION_FRAME; + memcpy(&extra[1], data, datalen); + WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length)); + } + break; + + case WLC_E_ACTION_FRAME_COMPLETE: + cmd = IWEVCUSTOM; + if (sizeof(status) + 1 <= sizeof(extra)) { + wrqu.data.length = sizeof(status) + 1; + extra[0] = WLC_E_ACTION_FRAME_COMPLETE; + memcpy(&extra[1], &status, sizeof(status)); + WL_TRACE(("wl_iw_event status %d \n", status)); + } + break; +#endif /* WIRELESS_EXT > 14 */ +#if WIRELESS_EXT > 17 + case WLC_E_MIC_ERROR: { + struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra; + cmd = IWEVMICHAELMICFAILURE; + wrqu.data.length = sizeof(struct iw_michaelmicfailure); + if (flags & WLC_EVENT_MSG_GROUP) + micerrevt->flags |= IW_MICFAILURE_GROUP; + else + micerrevt->flags |= IW_MICFAILURE_PAIRWISE; + memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN); + micerrevt->src_addr.sa_family = ARPHRD_ETHER; + + break; + } + + case WLC_E_ASSOC_REQ_IE: + cmd = IWEVASSOCREQIE; + wrqu.data.length = datalen; + if (datalen < sizeof(extra)) + memcpy(extra, data, datalen); + break; + + case WLC_E_ASSOC_RESP_IE: + cmd = IWEVASSOCRESPIE; + wrqu.data.length = datalen; + if (datalen < sizeof(extra)) + 
memcpy(extra, data, datalen); + break; + + case WLC_E_PMKID_CACHE: { + struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra; + pmkid_cand_list_t *pmkcandlist; + pmkid_cand_t *pmkidcand; + int count; + + if (data == NULL) + break; + + cmd = IWEVPMKIDCAND; + pmkcandlist = data; + count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand); + wrqu.data.length = sizeof(struct iw_pmkid_cand); + pmkidcand = pmkcandlist->pmkid_cand; + while (count) { + bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand)); + if (pmkidcand->preauth) + iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH; + bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data, + ETHER_ADDR_LEN); + wireless_send_event(dev, cmd, &wrqu, extra); + pmkidcand++; + count--; + } + break; + } +#endif /* WIRELESS_EXT > 17 */ + + case WLC_E_SCAN_COMPLETE: +#if WIRELESS_EXT > 14 + cmd = SIOCGIWSCAN; +#endif + WL_TRACE(("event WLC_E_SCAN_COMPLETE\n")); + if ((g_iscan) && (g_iscan->sysioc_pid >= 0) && + (g_iscan->iscan_state != ISCAN_STATE_IDLE)) + up(&g_iscan->sysioc_sem); + break; + + default: + /* Cannot translate event */ + break; + } + + if (cmd) { + if (cmd == SIOCGIWSCAN) + wireless_send_event(dev, cmd, &wrqu, NULL); + else + wireless_send_event(dev, cmd, &wrqu, extra); + } + +#if WIRELESS_EXT > 14 + /* Look for WLC events that indicate a connection failure. + * If found, generate an IWEVCUSTOM event. + */ + memset(extra, 0, sizeof(extra)); + if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) { + cmd = IWEVCUSTOM; + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + } +#endif /* WIRELESS_EXT > 14 */ + +#endif /* WIRELESS_EXT > 13 */ +} + +static int wl_iw_get_wireless_stats_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len) +{ + struct iw_statistics *wstats = ctx; + int res = BCME_OK; + + switch (type) { + case WL_CNT_XTLV_WLC: { + wl_cnt_wlc_t *cnt = (wl_cnt_wlc_t *)data; + if (len > sizeof(wl_cnt_wlc_t)) { + printf("counter structure length invalid! %d > %d\n", + len, (int)sizeof(wl_cnt_wlc_t)); + } + wstats->discard.nwid = 0; + wstats->discard.code = dtoh32(cnt->rxundec); + wstats->discard.fragment = dtoh32(cnt->rxfragerr); + wstats->discard.retries = dtoh32(cnt->txfail); + wstats->discard.misc = dtoh32(cnt->rxrunt) + dtoh32(cnt->rxgiant); + wstats->miss.beacon = 0; + WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", + dtoh32(cnt->txframe), dtoh32(cnt->txbyte))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", + dtoh32(cnt->rxundec))); + WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", + dtoh32(cnt->txfail))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", + dtoh32(cnt->rxfragerr))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", + dtoh32(cnt->rxrunt))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", + dtoh32(cnt->rxgiant))); + break; + } + case WL_CNT_XTLV_CNTV_LE10_UCODE: + case WL_CNT_XTLV_LT40_UCODE_V1: + case WL_CNT_XTLV_GE40_UCODE_V1: + { + /* Offsets of rxfrmtoolong and rxbadplcp are the same in + * wl_cnt_v_le10_mcst_t, wl_cnt_lt40mcst_v1_t, and wl_cnt_ge40mcst_v1_t. + * So we can just cast to wl_cnt_v_le10_mcst_t here. + */ + wl_cnt_v_le10_mcst_t *cnt = (wl_cnt_v_le10_mcst_t *)data; + if (len != WL_CNT_MCST_STRUCT_SZ) { + printf("counter structure length mismatch! 
%d != %d\n", + len, WL_CNT_MCST_STRUCT_SZ); + } + WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", + dtoh32(cnt->rxfrmtoolong))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", + dtoh32(cnt->rxbadplcp))); + BCM_REFERENCE(cnt); + break; + } + default: + WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type)); + break; + } + return res; +} + +int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats) +{ + int res = 0; + int phy_noise; + int rssi; + scb_val_t scb_val; +#if WIRELESS_EXT > 11 + char *cntbuf = NULL; + wl_cnt_info_t *cntinfo; + uint16 ver; + uint32 corerev = 0; +#endif /* WIRELESS_EXT > 11 */ + + phy_noise = 0; + if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) + goto done; + + phy_noise = dtoh32(phy_noise); + WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise)); + + scb_val.val = 0; + if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) + goto done; + + rssi = dtoh32(scb_val.val); + WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi)); + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + wstats->qual.qual = 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + wstats->qual.qual = 1; + else if (rssi <= WL_IW_RSSI_LOW) + wstats->qual.qual = 2; + else if (rssi <= WL_IW_RSSI_GOOD) + wstats->qual.qual = 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + wstats->qual.qual = 4; + else + wstats->qual.qual = 5; + + /* Wraps to 0 if RSSI is 0 */ + wstats->qual.level = 0x100 + rssi; + wstats->qual.noise = 0x100 + phy_noise; +#if WIRELESS_EXT > 18 + wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM); +#else + wstats->qual.updated |= 7; +#endif /* WIRELESS_EXT > 18 */ + +#if WIRELESS_EXT > 11 + WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", WL_CNTBUF_MAX_SIZE)); + + if (WL_CNTBUF_MAX_SIZE > MAX_WLIW_IOCTL_LEN) + { + WL_ERROR(("wl_iw_get_wireless_stats buffer too short %d < %d\n", + WL_CNTBUF_MAX_SIZE, MAX_WLIW_IOCTL_LEN)); + res = BCME_BUFTOOSHORT; + goto done; + } + + cntbuf = kmalloc(WL_CNTBUF_MAX_SIZE, GFP_KERNEL); + if (!cntbuf) { + res = BCME_NOMEM; + goto done; + } + + memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE); + res = dev_wlc_bufvar_get(dev, "counters", cntbuf, WL_CNTBUF_MAX_SIZE); + if (res) + { + WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res)); + goto done; + } + + cntinfo = (wl_cnt_info_t *)cntbuf; + cntinfo->version = dtoh16(cntinfo->version); + cntinfo->datalen = dtoh16(cntinfo->datalen); + ver = cntinfo->version; + if (ver > WL_CNT_T_VERSION) { + WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n", + WL_CNT_T_VERSION, ver)); + res = BCME_VERSION; + goto done; + } + + if (ver == WL_CNT_VERSION_11) { + wlc_rev_info_t revinfo; + memset(&revinfo, 0, sizeof(revinfo)); + res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo)); + if (res) { + WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res)); + goto done; + } + corerev = dtoh32(revinfo.corerev); + } + + res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, WL_CNTBUF_MAX_SIZE, corerev); + if (res) { + WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", __FUNCTION__, res)); + goto done; + } + + if ((res = bcm_unpack_xtlv_buf(wstats, cntinfo->data, cntinfo->datalen, + BCM_XTLV_OPTION_ALIGN32, wl_iw_get_wireless_stats_cbfn))) { + goto done; + } +#endif /* WIRELESS_EXT > 11 */ + +done: +#if WIRELESS_EXT > 11 + if (cntbuf) { + kfree(cntbuf); + } +#endif /* WIRELESS_EXT > 11 */ + return res; +} + +static void +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +wl_iw_timerfunc(struct timer_list *t) +#else +wl_iw_timerfunc(ulong data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + iscan_info_t *iscan = from_timer(iscan, t, timer); +#else + iscan_info_t *iscan = (iscan_info_t *)data; +#endif + iscan->timer_on = 0; + if (iscan->iscan_state != ISCAN_STATE_IDLE) { + WL_TRACE(("timer trigger\n")); + up(&iscan->sysioc_sem); + } +} + +static void +wl_iw_set_event_mask(struct net_device *dev) +{ + char eventmask[WL_EVENTING_MASK_LEN]; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + + dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf)); + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + setbit(eventmask, WLC_E_SCAN_COMPLETE); + dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, + iovbuf, sizeof(iovbuf)); + +} + +static int +wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid) +{ + int err = 0; + + memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = 0; + params->nprobes = -1; + params->active_time = -1; + params->passive_time = -1; + params->home_time = -1; + params->channel_num = 0; + + params->nprobes = htod32(params->nprobes); + params->active_time = htod32(params->active_time); + params->passive_time = htod32(params->passive_time); + params->home_time = htod32(params->home_time); + if (ssid && ssid->SSID_len) + memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t)); + + return err; +} + +static int +wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action) +{ + int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)); + wl_iscan_params_t *params; + int err = 0; + + if (ssid && ssid->SSID_len) { + params_size += sizeof(wlc_ssid_t); + } + params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL); + if (params == NULL) { + return -ENOMEM; + } + memset(params, 0, params_size); + ASSERT(params_size < WLC_IOCTL_SMLEN); + + err = wl_iw_iscan_prep(&params->params, ssid); + + if (!err) { + params->version = htod32(ISCAN_REQ_VERSION); + params->action = htod16(action); + params->scan_duration = htod16(0); + + /* params_size += OFFSETOF(wl_iscan_params_t, params); */ + (void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size, + iscan->ioctlbuf, WLC_IOCTL_SMLEN); + } + + kfree(params); + return err; +} + +static uint32 +wl_iw_iscan_get(iscan_info_t *iscan) +{ + iscan_buf_t * buf; + iscan_buf_t * ptr; + wl_iscan_results_t * list_buf; + wl_iscan_results_t list; + wl_scan_results_t *results; + uint32 status; + + /* buffers are allocated on demand */ + if (iscan->list_cur) { + buf = iscan->list_cur; + iscan->list_cur = buf->next; + } + else { + buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL); + if (!buf) + return WL_SCAN_RESULTS_ABORTED; + buf->next = NULL; + if (!iscan->list_hdr) + iscan->list_hdr = buf; + else { + ptr = iscan->list_hdr; + while (ptr->next) { + ptr = ptr->next; + } + ptr->next = buf; + } + } + memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)buf->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + (void) dev_iw_iovar_getbuf( + iscan->dev, + "iscanresults", + &list, + WL_ISCAN_RESULTS_FIXED_SIZE, + buf->iscan_buf, + WLC_IW_ISCAN_MAXLEN); + results->buflen = dtoh32(results->buflen); + results->version
= dtoh32(results->version); + results->count = dtoh32(results->count); + WL_TRACE(("results->count = %d\n", results->count)); + + WL_TRACE(("results->buflen = %d\n", results->buflen)); + status = dtoh32(list_buf->status); + return status; +} + +static void wl_iw_send_scan_complete(iscan_info_t *iscan) +{ + union iwreq_data wrqu; + + memset(&wrqu, 0, sizeof(wrqu)); + + /* wext expects to get no data for SIOCGIWSCAN Event */ + wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); +} + +static int +_iscan_sysioc_thread(void *data) +{ + uint32 status; + iscan_info_t *iscan = (iscan_info_t *)data; + + DAEMONIZE("iscan_sysioc"); + + status = WL_SCAN_RESULTS_PARTIAL; + while (down_interruptible(&iscan->sysioc_sem) == 0) { + if (iscan->timer_on) { + del_timer(&iscan->timer); + iscan->timer_on = 0; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + status = wl_iw_iscan_get(iscan); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + + switch (status) { + case WL_SCAN_RESULTS_PARTIAL: + WL_TRACE(("iscanresults incomplete\n")); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + /* make sure our buffer size is enough before going next round */ + wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + /* Reschedule the timer */ + iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); + add_timer(&iscan->timer); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_SUCCESS: + WL_TRACE(("iscanresults complete\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + wl_iw_send_scan_complete(iscan); + break; + case WL_SCAN_RESULTS_PENDING: + WL_TRACE(("iscanresults pending\n")); + /* Reschedule the timer */ + iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); + add_timer(&iscan->timer); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_ABORTED: + WL_TRACE(("iscanresults aborted\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + wl_iw_send_scan_complete(iscan); + break; + default: + WL_TRACE(("iscanresults returned unknown status %d\n", status)); + break; + } + } + complete_and_exit(&iscan->sysioc_exited, 0); +} + +int +wl_iw_attach(struct net_device *dev, void * dhdp) +{ + iscan_info_t *iscan = NULL; + + if (!dev) + return 0; + + iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL); + if (!iscan) + return -ENOMEM; + memset(iscan, 0, sizeof(iscan_info_t)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + iscan->kthread = NULL; +#endif + iscan->sysioc_pid = -1; + /* we only care about main interface so save a global here */ + g_iscan = iscan; + iscan->dev = dev; + iscan->iscan_state = ISCAN_STATE_IDLE; + + + /* Set up the timer */ + iscan->timer_ms = 2000; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&iscan->timer, wl_iw_timerfunc, 0); +#else + init_timer(&iscan->timer); + iscan->timer.data = (ulong)iscan; + iscan->timer.function = wl_iw_timerfunc; +#endif + + sema_init(&iscan->sysioc_sem, 0); + init_completion(&iscan->sysioc_exited); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc"); + iscan->sysioc_pid = iscan->kthread->pid; +#else + iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0); +#endif + if (iscan->sysioc_pid < 0) + return -ENOMEM; + return 0; +} + +void wl_iw_detach(void) +{ + iscan_buf_t *buf; + iscan_info_t *iscan = g_iscan; + if (!iscan) + return; + if (iscan->sysioc_pid >= 0) { + 
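The wl_iw_timerfunc()/wl_iw_attach() code above is split on KERNEL_VERSION(4, 15, 0) because kernel 4.15 dropped init_timer() and the timer_list.data cookie in favour of timer_setup() and from_timer(). A minimal, self-contained sketch of that compatibility pattern, using a hypothetical my_ctx structure (not part of this patch) in place of the driver's iscan_info_t:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/version.h>

struct my_ctx {				/* hypothetical stand-in for iscan_info_t */
	struct timer_list timer;	/* callback recovers the context from this member */
	int timer_on;
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
static void my_timer_cb(struct timer_list *t)
{
	struct my_ctx *ctx = from_timer(ctx, t, timer);	/* container_of() on 'timer' */

	ctx->timer_on = 0;
}
#else
static void my_timer_cb(unsigned long data)
{
	struct my_ctx *ctx = (struct my_ctx *)data;	/* cookie stored in timer.data */

	ctx->timer_on = 0;
}
#endif

static void my_ctx_arm(struct my_ctx *ctx, unsigned int ms)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
	timer_setup(&ctx->timer, my_timer_cb, 0);
#else
	init_timer(&ctx->timer);
	ctx->timer.data = (unsigned long)ctx;
	ctx->timer.function = my_timer_cb;
#endif
	ctx->timer.expires = jiffies + msecs_to_jiffies(ms);
	add_timer(&ctx->timer);
	ctx->timer_on = 1;
}

On 4.15 and later, from_timer() is simply container_of() on the embedded member, which is why the context structure embeds the timer instead of carrying an opaque pointer, exactly as iscan_info_t does above.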
KILL_PROC(iscan->sysioc_pid, SIGTERM); + wait_for_completion(&iscan->sysioc_exited); + } + + while (iscan->list_hdr) { + buf = iscan->list_hdr->next; + kfree(iscan->list_hdr); + iscan->list_hdr = buf; + } + kfree(iscan); + g_iscan = NULL; +} + +#endif /* USE_IW */ diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h new file mode 100644 index 000000000000..6b86bb13a32c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_iw.h @@ -0,0 +1,164 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_iw.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _wl_iw_h_ +#define _wl_iw_h_ + +#include + +#include +#include +#include + +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#define BAND_GET_CMD "GETBAND" +#define BAND_SET_CMD "SETBAND" +#define DTIM_SKIP_GET_CMD "DTIMSKIPGET" +#define DTIM_SKIP_SET_CMD "DTIMSKIPSET" +#define SETSUSPEND_CMD "SETSUSPENDOPT" +#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR" +/* Lin - Is the extra space needed? */ +#define PNOSETUP_SET_CMD "PNOSETUP " /* TLV command has extra end space */ +#define PNOENABLE_SET_CMD "PNOFORCE" +#define PNODEBUG_SET_CMD "PNODEBUG" +#define TXPOWER_SET_CMD "TXPOWER" + +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" + +/* Structure to keep global parameters */ +typedef struct wl_iw_extra_params { + int target_channel; /* target channel */ +} wl_iw_extra_params_t; + +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; +/* ============================================== */ +/* Defines from wlc_pub.h */ +#define WL_IW_RSSI_MINVAL -200 /* Low value, e.g. 
for forcing roam */ +#define WL_IW_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */ +#define WL_IW_RSSI_VERY_LOW -80 /* Very low quality cutoffs */ +#define WL_IW_RSSI_LOW -70 /* Low quality cutoffs */ +#define WL_IW_RSSI_GOOD -68 /* Good quality cutoffs */ +#define WL_IW_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */ +#define WL_IW_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */ +#define WL_IW_RSSI_INVALID 0 /* invalid RSSI value */ +#define MAX_WX_STRING 80 +#define SSID_FMT_BUF_LEN ((4 * 32) + 1) +#define isprint(c) bcm_isprint(c) +#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1) +#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3) +#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5) +#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7) +#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9) +#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11) +#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13) + +#define G_SCAN_RESULTS 8*1024 +#define WE_ADD_EVENT_FIX 0x80 +#define G_WLAN_SET_ON 0 +#define G_WLAN_SET_OFF 1 + + +typedef struct wl_iw { + char nickname[IW_ESSID_MAX_SIZE]; + + struct iw_statistics wstats; + + int spy_num; + uint32 pwsec; /* pairwise wsec setting */ + uint32 gwsec; /* group wsec setting */ + bool privacy_invoked; /* IW_AUTH_PRIVACY_INVOKED setting */ + struct ether_addr spy_addr[IW_MAX_SPY]; + struct iw_quality spy_qual[IW_MAX_SPY]; + void *wlinfo; +} wl_iw_t; + +struct wl_ctrl { + struct timer_list *timer; + struct net_device *dev; + long sysioc_pid; + struct semaphore sysioc_sem; + struct completion sysioc_exited; +}; + + +#if WIRELESS_EXT > 12 +#include +extern const struct iw_handler_def wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ + +extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data); +extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats); +int wl_iw_attach(struct net_device *dev, void * dhdp); +int wl_iw_send_priv_event(struct net_device *dev, char *flag); + +void wl_iw_detach(void); + +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(info, stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(info, event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(info, stream, ends, iwe, extra) +#else +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(stream, ends, iwe, extra) +#endif + +#endif /* _wl_iw_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c new file mode 100644 index 000000000000..65e624092e80 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c @@ -0,0 +1,406 @@ +/* + * Broadcom Dongle Host 
Driver (DHD), Linux monitor network interface + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_linux_mon.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +typedef enum monitor_states +{ + MONITOR_STATE_DEINIT = 0x0, + MONITOR_STATE_INIT = 0x1, + MONITOR_STATE_INTERFACE_ADDED = 0x2, + MONITOR_STATE_INTERFACE_DELETED = 0x4 +} monitor_states_t; +int dhd_add_monitor(char *name, struct net_device **new_ndev); +extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); +int dhd_del_monitor(struct net_device *ndev); +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + +/** + * Local declarations and defintions (not exposed) + */ +#ifndef DHD_MAX_IFS +#define DHD_MAX_IFS 16 +#endif +#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__) +#define MON_TRACE MON_PRINT + +typedef struct monitor_interface { + int radiotap_enabled; + struct net_device* real_ndev; /* The real interface that the monitor is on */ + struct net_device* mon_ndev; +} monitor_interface; + +typedef struct dhd_linux_monitor { + void *dhd_pub; + monitor_states_t monitor_state; + monitor_interface mon_if[DHD_MAX_IFS]; + struct mutex lock; /* lock to protect mon_if */ +} dhd_linux_monitor_t; + +static dhd_linux_monitor_t g_monitor; + +static struct net_device* lookup_real_netdev(char *name); +static monitor_interface* ndev_to_monif(struct net_device *ndev); +static int dhd_mon_if_open(struct net_device *ndev); +static int dhd_mon_if_stop(struct net_device *ndev); +static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev); +static void dhd_mon_if_set_multicast_list(struct net_device *ndev); +static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr); + +static const struct net_device_ops dhd_mon_if_ops = { + .ndo_open = dhd_mon_if_open, + .ndo_stop = dhd_mon_if_stop, + .ndo_start_xmit = dhd_mon_if_subif_start_xmit, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_mon_if_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_mon_if_set_multicast_list, +#endif + .ndo_set_mac_address = dhd_mon_if_change_mac, +}; + +/** + * Local static function defintions + */ + +/* Look up dhd's net device table to find a match (e.g. 
interface "eth0" is a match for "mon.eth0" + * "p2p-eth0-0" is a match for "mon.p2p-eth0-0") + */ +static struct net_device* lookup_real_netdev(char *name) +{ + struct net_device *ndev_found = NULL; + + int i; + int len = 0; + int last_name_len = 0; + struct net_device *ndev; + + /* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0", + * Once mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and corresponding mon + * iface would be mon-p2p0-0. + */ + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = dhd_idx2net(g_monitor.dhd_pub, i); + + /* Skip "p2p" and look for "-p2p0-x" in monitor interface name. If it + * it matches, then this netdev is the corresponding real_netdev. + */ + if (ndev && strstr(ndev->name, "p2p-p2p0")) { + len = strlen("p2p"); + } else { + /* if p2p- is not present, then the IFNAMSIZ have reached and name + * would have got reset. In this casse,look for p2p0-x in mon-p2p0-x + */ + len = 0; + } + if (ndev && strstr(name, (ndev->name + len))) { + if (strlen(ndev->name) > last_name_len) { + ndev_found = ndev; + last_name_len = strlen(ndev->name); + } + } + } + + return ndev_found; +} + +static monitor_interface* ndev_to_monif(struct net_device *ndev) +{ + int i; + + for (i = 0; i < DHD_MAX_IFS; i++) { + if (g_monitor.mon_if[i].mon_ndev == ndev) + return &g_monitor.mon_if[i]; + } + + return NULL; +} + +static int dhd_mon_if_open(struct net_device *ndev) +{ + int ret = 0; + + MON_PRINT("enter\n"); + return ret; +} + +static int dhd_mon_if_stop(struct net_device *ndev) +{ + int ret = 0; + + MON_PRINT("enter\n"); + return ret; +} + +static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + int ret = 0; + int rtap_len; + int qos_len = 0; + int dot11_hdr_len = 24; + int snap_len = 6; + unsigned char *pdata; + unsigned short frame_ctl; + unsigned char src_mac_addr[6]; + unsigned char dst_mac_addr[6]; + struct ieee80211_hdr *dot11_hdr; + struct ieee80211_radiotap_header *rtap_hdr; + monitor_interface* mon_if; + + MON_PRINT("enter\n"); + + mon_if = ndev_to_monif(ndev); + if (mon_if == NULL || mon_if->real_ndev == NULL) { + MON_PRINT(" cannot find matched net dev, skip the packet\n"); + goto fail; + } + + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + goto fail; + + rtap_hdr = (struct ieee80211_radiotap_header *)skb->data; + if (unlikely(rtap_hdr->it_version)) + goto fail; + + rtap_len = ieee80211_get_radiotap_len(skb->data); + if (unlikely(skb->len < rtap_len)) + goto fail; + + MON_PRINT("radiotap len (should be 14): %d\n", rtap_len); + + /* Skip the ratio tap header */ + skb_pull(skb, rtap_len); + + dot11_hdr = (struct ieee80211_hdr *)skb->data; + frame_ctl = le16_to_cpu(dot11_hdr->frame_control); + /* Check if the QoS bit is set */ + if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { + /* Check if this ia a Wireless Distribution System (WDS) frame + * which has 4 MAC addresses + */ + if (dot11_hdr->frame_control & 0x0080) + qos_len = 2; + if ((dot11_hdr->frame_control & 0x0300) == 0x0300) + dot11_hdr_len += 6; + + memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr)); + memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr)); + + /* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for + * for two MAC addresses + */ + skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2); + pdata = (unsigned char*)skb->data; + memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr)); + memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, 
sizeof(src_mac_addr)); + PKTSETPRIO(skb, 0); + + MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); + + /* Use the real net device to transmit the packet */ + ret = dhd_start_xmit(skb, mon_if->real_ndev); + + return ret; + } +fail: + dev_kfree_skb(skb); + return 0; +} + +static void dhd_mon_if_set_multicast_list(struct net_device *ndev) +{ + monitor_interface* mon_if; + + mon_if = ndev_to_monif(ndev); + if (mon_if == NULL || mon_if->real_ndev == NULL) { + MON_PRINT(" cannot find matched net dev, skip the packet\n"); + } else { + MON_PRINT("enter, if name: %s, matched if name %s\n", + ndev->name, mon_if->real_ndev->name); + } +} + +static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr) +{ + int ret = 0; + monitor_interface* mon_if; + + mon_if = ndev_to_monif(ndev); + if (mon_if == NULL || mon_if->real_ndev == NULL) { + MON_PRINT(" cannot find matched net dev, skip the packet\n"); + } else { + MON_PRINT("enter, if name: %s, matched if name %s\n", + ndev->name, mon_if->real_ndev->name); + } + return ret; +} + +/** + * Global function definitions (declared in dhd_linux_mon.h) + */ + +int dhd_add_monitor(char *name, struct net_device **new_ndev) +{ + int i; + int idx = -1; + int ret = 0; + struct net_device* ndev = NULL; + dhd_linux_monitor_t **dhd_mon; + + mutex_lock(&g_monitor.lock); + + MON_TRACE("enter, if name: %s\n", name); + if (!name || !new_ndev) { + MON_PRINT("invalid parameters\n"); + ret = -EINVAL; + goto out; + } + + /* + * Find a vacancy + */ + for (i = 0; i < DHD_MAX_IFS; i++) + if (g_monitor.mon_if[i].mon_ndev == NULL) { + idx = i; + break; + } + if (idx == -1) { + MON_PRINT("exceeds maximum interfaces\n"); + ret = -EFAULT; + goto out; + } + + ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); + if (!ndev) { + MON_PRINT("failed to allocate memory\n"); + ret = -ENOMEM; + goto out; + } + + ndev->type = ARPHRD_IEEE80211_RADIOTAP; + strncpy(ndev->name, name, IFNAMSIZ); + ndev->name[IFNAMSIZ - 1] = 0; + ndev->netdev_ops = &dhd_mon_if_ops; + + ret = register_netdevice(ndev); + if (ret) { + MON_PRINT(" register_netdevice failed (%d)\n", ret); + goto out; + } + + *new_ndev = ndev; + g_monitor.mon_if[idx].radiotap_enabled = TRUE; + g_monitor.mon_if[idx].mon_ndev = ndev; + g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); + dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); + *dhd_mon = &g_monitor; + g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; + MON_PRINT("net device returned: 0x%p\n", ndev); + MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); + +out: + if (ret && ndev) + free_netdev(ndev); + + mutex_unlock(&g_monitor.lock); + return ret; + +} + +int dhd_del_monitor(struct net_device *ndev) +{ + int i; + if (!ndev) + return -EINVAL; + mutex_lock(&g_monitor.lock); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (g_monitor.mon_if[i].mon_ndev == ndev || + g_monitor.mon_if[i].real_ndev == ndev) { + + g_monitor.mon_if[i].real_ndev = NULL; + unregister_netdevice(g_monitor.mon_if[i].mon_ndev); + free_netdev(g_monitor.mon_if[i].mon_ndev); + g_monitor.mon_if[i].mon_ndev = NULL; + g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; + break; + } + } + + if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED) + MON_PRINT("IF not found in monitor array, is this a monitor IF? 
0x%p\n", ndev); + mutex_unlock(&g_monitor.lock); + + return 0; +} + +int dhd_monitor_init(void *dhd_pub) +{ + if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { + g_monitor.dhd_pub = dhd_pub; + mutex_init(&g_monitor.lock); + g_monitor.monitor_state = MONITOR_STATE_INIT; + } + return 0; +} + +int dhd_monitor_uninit(void) +{ + int i; + struct net_device *ndev; + mutex_lock(&g_monitor.lock); + if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = g_monitor.mon_if[i].mon_ndev; + if (ndev) { + unregister_netdevice(ndev); + free_netdev(ndev); + g_monitor.mon_if[i].real_ndev = NULL; + g_monitor.mon_if[i].mon_ndev = NULL; + } + } + g_monitor.monitor_state = MONITOR_STATE_DEINIT; + } + mutex_unlock(&g_monitor.lock); + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wl_roam.c b/drivers/net/wireless/bcmdhd/wl_roam.c new file mode 100644 index 000000000000..bddc755f8204 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_roam.c @@ -0,0 +1,28 @@ +/* + * Linux roam cache + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_roam.c 599089 2015-11-12 10:41:33Z $ + */ diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c new file mode 100644 index 000000000000..8478de93b7c8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wldev_common.c @@ -0,0 +1,456 @@ +/* + * Common function shared by Linux WEXT, cfg80211 and p2p drivers + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wldev_common.c 585478 2015-09-10 13:33:58Z $ + */ + +#include +#include +#include +#include + +#include +#include + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#define WLDEV_ERROR(args) \ + do { \ + printk(KERN_ERR "WLDEV-ERROR) "); \ + printk args; \ + } while (0) + +#define WLDEV_INFO(args) \ + do { \ + printk(KERN_INFO "WLDEV-INFO) "); \ + printk args; \ + } while (0) + +extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd); + +s32 wldev_ioctl( + struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set) +{ + s32 ret = 0; + struct wl_ioctl ioc; + + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + ret = dhd_ioctl_entry_local(dev, &ioc, cmd); + + return ret; +} + +/* Format a iovar buffer, not bsscfg indexed. The bsscfg index will be + * taken care of in dhd_ioctl_entry. Internal use only, not exposed to + * wl_iw, wl_cfg80211 and wl_cfgp2p + */ +static s32 wldev_mkiovar( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, u32 buflen) +{ + s32 iolen = 0; + + iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen); + return iolen; +} + +s32 wldev_iovar_getbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + s32 ret = 0; + if (buf_sync) { + mutex_lock(buf_sync); + } + wldev_mkiovar(iovar_name, param, paramlen, buf, buflen); + ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE); + if (buf_sync) + mutex_unlock(buf_sync); + return ret; +} + + +s32 wldev_iovar_setbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen); + if (iovar_len > 0) + ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE); + else + ret = BCME_BUFTOOSHORT; + + if (buf_sync) + mutex_unlock(buf_sync); + return ret; +} + +s32 wldev_iovar_setint( + struct net_device *dev, s8 *iovar, s32 val) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + + val = htod32(val); + memset(iovar_buf, 0, sizeof(iovar_buf)); + return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf, + sizeof(iovar_buf), NULL); +} + + +s32 wldev_iovar_getint( + struct net_device *dev, s8 *iovar, s32 *pval) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + s32 err; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf, + sizeof(iovar_buf), NULL); + if (err == 0) + { + memcpy(pval, iovar_buf, sizeof(*pval)); + *pval = dtoh32(*pval); + } + return err; +} + +/** Format a bsscfg indexed iovar buffer. The bsscfg index will be + * taken care of in dhd_ioctl_entry. 
Internal use only, not exposed to + * wl_iw, wl_cfg80211 and wl_cfgp2p + */ +s32 wldev_mkiovar_bsscfg( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, s32 buflen, s32 bssidx) +{ + const s8 *prefix = "bsscfg:"; + s8 *p; + u32 prefixlen; + u32 namelen; + u32 iolen; + + if (bssidx == 0) { + return wldev_mkiovar(iovar_name, param, paramlen, + iovar_buf, buflen); + } + + prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */ + namelen = (u32) strlen(iovar_name) + 1; /* lengh of iovar name + null */ + iolen = prefixlen + namelen + sizeof(u32) + paramlen; + + if (buflen < 0 || iolen > (u32)buflen) + { + WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__)); + return BCME_BUFTOOSHORT; + } + + p = (s8 *)iovar_buf; + + /* copy prefix, no null */ + memcpy(p, prefix, prefixlen); + p += prefixlen; + + /* copy iovar name including null */ + memcpy(p, iovar_name, namelen); + p += namelen; + + /* bss config index as first param */ + bssidx = htod32(bssidx); + memcpy(p, &bssidx, sizeof(u32)); + p += sizeof(u32); + + /* parameter buffer follows */ + if (paramlen) + memcpy(p, param, paramlen); + + return iolen; + +} + +s32 wldev_iovar_getbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + s32 ret = 0; + if (buf_sync) { + mutex_lock(buf_sync); + } + + wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx); + ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE); + if (buf_sync) { + mutex_unlock(buf_sync); + } + return ret; + +} + +s32 wldev_iovar_setbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx); + if (iovar_len > 0) + ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE); + else { + ret = BCME_BUFTOOSHORT; + } + + if (buf_sync) { + mutex_unlock(buf_sync); + } + return ret; +} + +s32 wldev_iovar_setint_bsscfg( + struct net_device *dev, s8 *iovar, s32 val, s32 bssidx) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + + val = htod32(val); + memset(iovar_buf, 0, sizeof(iovar_buf)); + return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf, + sizeof(iovar_buf), bssidx, NULL); +} + + +s32 wldev_iovar_getint_bsscfg( + struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + s32 err; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf, + sizeof(iovar_buf), bssidx, NULL); + if (err == 0) + { + memcpy(pval, iovar_buf, sizeof(*pval)); + *pval = dtoh32(*pval); + } + return err; +} + +int wldev_get_link_speed( + struct net_device *dev, int *plink_speed) +{ + int error; + + if (!plink_speed) + return -ENOMEM; + error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0); + if (unlikely(error)) + return error; + + /* Convert internal 500Kbps to Kbps */ + *plink_speed *= 500; + return error; +} + +int wldev_get_rssi( + struct net_device *dev, scb_val_t *scb_val) +{ + int error; + + if (!scb_val) + return -ENOMEM; + + error = wldev_ioctl(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t), 0); + if (unlikely(error)) + return error; + + return error; +} + +int wldev_get_ssid( + struct net_device *dev, wlc_ssid_t *pssid) +{ + int error; + + if (!pssid) + return -ENOMEM; + error = wldev_ioctl(dev, 
WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0); + if (unlikely(error)) + return error; + pssid->SSID_len = dtoh32(pssid->SSID_len); + return error; +} + +int wldev_get_band( + struct net_device *dev, uint *pband) +{ + int error; + + error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0); + return error; +} + +int wldev_set_band( + struct net_device *dev, uint band) +{ + int error = -1; + + if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) { + error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true); + if (!error) + dhd_bus_band_set(dev, band); + } + return error; +} +int wldev_get_datarate(struct net_device *dev, int *datarate) +{ + int error = 0; + + error = wldev_ioctl(dev, WLC_GET_RATE, datarate, sizeof(int), false); + if (error) { + return -1; + } else { + *datarate = dtoh32(*datarate); + } + + return error; +} + +extern chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec); +#define WL_EXTRA_BUF_MAX 2048 +int wldev_get_mode( + struct net_device *dev, uint8 *cap) +{ + int error = 0; + int chanspec = 0; + uint16 band = 0; + uint16 bandwidth = 0; + wl_bss_info_t *bss = NULL; + char* buf = kmalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (!buf) + return -1; + *(u32*) buf = htod32(WL_EXTRA_BUF_MAX); + error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void*)buf, WL_EXTRA_BUF_MAX, false); + if (error) { + WLDEV_ERROR(("%s:failed:%d\n", __FUNCTION__, error)); + return -1; + } + bss = (struct wl_bss_info *)(buf + 4); + chanspec = wl_chspec_driver_to_host(bss->chanspec); + + band = chanspec & WL_CHANSPEC_BAND_MASK; + bandwidth = chanspec & WL_CHANSPEC_BW_MASK; + + if (band == WL_CHANSPEC_BAND_2G) { + if (bss->n_cap) + strcpy(cap, "n"); + else + strcpy(cap, "bg"); + } else if (band == WL_CHANSPEC_BAND_5G) { + if (bandwidth == WL_CHANSPEC_BW_80) + strcpy(cap, "ac"); + else if ((bandwidth == WL_CHANSPEC_BW_40) || (bandwidth == WL_CHANSPEC_BW_20)) { + if ((bss->nbss_cap & 0xf00) && (bss->n_cap)) + strcpy(cap, "n|ac"); + else if (bss->n_cap) + strcpy(cap, "n"); + else if (bss->vht_cap) + strcpy(cap, "ac"); + else + strcpy(cap, "a"); + } else { + WLDEV_ERROR(("%s:Mode get failed\n", __FUNCTION__)); + return -1; + } + + } + return error; +} +int wldev_set_country( + struct net_device *dev, char *country_code, bool notify, bool user_enforced, int revinfo) +{ + int error = -1; + wl_country_t cspec = {{0}, 0, {0}}; + scb_val_t scbval; + char smbuf[WLC_IOCTL_SMLEN]; + + if (!country_code) + return error; + + bzero(&scbval, sizeof(scb_val_t)); + error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cspec, sizeof(cspec), NULL); + if (error < 0) { + WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error)); + return error; + } + + if ((error < 0) || + dhd_force_country_change(dev) || + (strncmp(country_code, cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) { + + if (user_enforced) { + bzero(&scbval, sizeof(scb_val_t)); + error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true); + if (error < 0) { + WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n", + __FUNCTION__, error)); + return error; + } + } + + cspec.rev = revinfo; + memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ); + memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ); + dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec); + error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec), + smbuf, sizeof(smbuf), NULL); + if (error < 0) { + WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n", + __FUNCTION__, country_code, cspec.ccode, 
cspec.rev)); + return error; + } + dhd_bus_country_set(dev, &cspec, notify); + WLDEV_INFO(("%s: set country for %s as %s rev %d\n", + __FUNCTION__, country_code, cspec.ccode, cspec.rev)); + } + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h new file mode 100644 index 000000000000..4cf421cfa19e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wldev_common.h @@ -0,0 +1,123 @@ +/* + * Common function shared by Linux WEXT, cfg80211 and p2p drivers + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wldev_common.h 556083 2015-05-12 14:03:00Z $ + */ +#ifndef __WLDEV_COMMON_H__ +#define __WLDEV_COMMON_H__ + +#include + +/* wl_dev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or + * netdev_ops->ndo_do_ioctl in new kernels) + * @dev: the net_device handle + */ +s32 wldev_ioctl( + struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set); + +/** Retrieve named IOVARs, this function calls wl_dev_ioctl with + * WLC_GET_VAR IOCTL code + */ +s32 wldev_iovar_getbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync); + +/** Set named IOVARs, this function calls wl_dev_ioctl with + * WLC_SET_VAR IOCTL code + */ +s32 wldev_iovar_setbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync); + +s32 wldev_iovar_setint( + struct net_device *dev, s8 *iovar, s32 val); + +s32 wldev_iovar_getint( + struct net_device *dev, s8 *iovar, s32 *pval); + +/** The following function can be implemented if there is a need for bsscfg + * indexed IOVARs + */ + +s32 wldev_mkiovar_bsscfg( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, s32 buflen, s32 bssidx); + +/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with + * WLC_GET_VAR IOCTL code + */ +s32 wldev_iovar_getbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen, + void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync); + +/** Set named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with + * WLC_SET_VAR IOCTL code + */ +s32 wldev_iovar_setbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen, + void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync); + +s32 wldev_iovar_getint_bsscfg( + struct net_device *dev, s8 *iovar, s32 *pval, s32 
bssidx); + +s32 wldev_iovar_setint_bsscfg( + struct net_device *dev, s8 *iovar, s32 val, s32 bssidx); + +extern int dhd_net_set_fw_path(struct net_device *dev, char *fw); +extern int dhd_net_bus_suspend(struct net_device *dev); +extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage); +extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, + unsigned long delay_msec); +extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec); +extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify); +extern bool dhd_force_country_change(struct net_device *dev); +extern void dhd_bus_band_set(struct net_device *dev, uint band); +extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify, + bool user_enforced, int revinfo); +extern int net_os_wake_lock(struct net_device *dev); +extern int net_os_wake_unlock(struct net_device *dev); +extern int net_os_wake_lock_timeout(struct net_device *dev); +extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val); +extern int net_os_set_dtim_skip(struct net_device *dev, int val); +extern int net_os_set_suspend_disable(struct net_device *dev, int val); +extern int net_os_set_suspend(struct net_device *dev, int val, int force); +extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, + int max, int *bytes_left); + +/* Get the link speed from dongle, speed is in kpbs */ +int wldev_get_link_speed(struct net_device *dev, int *plink_speed); + +int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi); + +int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid); + +int wldev_get_band(struct net_device *dev, uint *pband); +int wldev_get_mode(struct net_device *dev, uint8 *pband); +int wldev_get_datarate(struct net_device *dev, int *datarate); +int wldev_set_band(struct net_device *dev, uint band); + +#endif /* __WLDEV_COMMON_H__ */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 2ce675ab40ef..450f2216fac2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) * @dev_addr: optional device address. * * P2P needs mac addresses for P2P device and interface. If no device - * address it specified, these are derived from the primary net device, ie. - * the permanent ethernet address of the device. + * address it specified, these are derived from a random ethernet + * address. */ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) { - struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - bool local_admin = false; + bool random_addr = false; - if (!dev_addr || is_zero_ether_addr(dev_addr)) { - dev_addr = pri_ifp->mac_addr; - local_admin = true; - } + if (!dev_addr || is_zero_ether_addr(dev_addr)) + random_addr = true; - /* Generate the P2P Device Address. This consists of the device's - * primary MAC address with the locally administered bit set. + /* Generate the P2P Device Address obtaining a random ethernet + * address with the locally administered bit set. */ - memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); - if (local_admin) - p2p->dev_addr[0] |= 0x02; + if (random_addr) + eth_random_addr(p2p->dev_addr); + else + memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); /* Generate the P2P Interface Address. 
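The p2p.c hunk above stops deriving the P2P device address from the primary interface MAC and calls eth_random_addr() instead; that helper already yields a unicast, locally administered address, which is the property the removed `dev_addr[0] |= 0x02` line enforced by hand. A small sketch of that property, illustrative only and not code from brcmfmac:

#include <linux/bug.h>
#include <linux/etherdevice.h>

/* Illustrative only: eth_random_addr() fills the buffer with random bytes,
 * clears the multicast bit and sets the locally administered bit, so the
 * result is always usable as a software-assigned station address.
 */
static void p2p_addr_property_demo(void)
{
	u8 addr[ETH_ALEN];

	eth_random_addr(addr);

	WARN_ON(is_multicast_ether_addr(addr));	/* bit 0x01 of addr[0] is clear */
	WARN_ON(!is_local_ether_addr(addr));	/* bit 0x02 of addr[0] is set */
}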
If the discovery and connection * BSSCFGs need to simultaneously co-exist, then this address must be diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc02..eccd25febfe6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2064,7 +2064,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt) return head_pad; } -/** +/* * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for * bus layer usage. */ @@ -4096,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, sdio_release_host(sdiodev->func[1]); fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); - device_release_driver(dev); device_release_driver(&sdiodev->func[2]->dev); + device_release_driver(dev); } struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e8b5ff42f5a8..c8e7b54a538a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -72,18 +72,21 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-" #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" -#define IWL9000_MODULE_FIRMWARE(api) \ - IWL9000_FW_PRE "-" __stringify(api) ".ucode" +#define IWL9000A_MODULE_FIRMWARE(api) \ + IWL9000A_FW_PRE __stringify(api) ".ucode" +#define IWL9000B_MODULE_FIRMWARE(api) \ + IWL9000B_FW_PRE __stringify(api) ".ucode" #define IWL9000RFB_MODULE_FIRMWARE(api) \ - IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode" + IWL9000RFB_FW_PRE __stringify(api) ".ucode" #define IWL9260A_MODULE_FIRMWARE(api) \ - IWL9260A_FW_PRE "-" __stringify(api) ".ucode" + IWL9260A_FW_PRE __stringify(api) ".ucode" #define IWL9260B_MODULE_FIRMWARE(api) \ - IWL9260B_FW_PRE "-" __stringify(api) ".ucode" + IWL9260B_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_9000 10 @@ -193,7 +196,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl9460_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9460", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9461_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9462_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless 
AC 9462", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { @@ -205,10 +249,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - .integrated = true, }; -MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +const struct iwl_cfg iwl9560_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9560", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; +MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index a440140ed8dd..7eade165b747 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -80,15 +80,15 @@ #define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ - IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ - IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 5a40092febfb..3bfc657f6b42 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -531,6 +531,8 @@ struct iwl_scan_config_v1 { } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ #define SCAN_TWO_LMACS 2 +#define SCAN_LB_LMAC_IDX 0 +#define SCAN_HB_LMAC_IDX 1 struct iwl_scan_config { __le32 flags; @@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), + IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), }; /** @@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail { * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum 
iwl_umac_scan_general_flags - * @reserved2: for future use and alignment * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan + * @adwell_default_n_aps: for adaptive dwell the default number of APs + * per channel + * @adwell_default_n_aps_social: for adaptive dwell the default + * number of APs per social (1,6,11) channel + * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added + * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs @@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail { * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request * @reserved: for future use and alignment + * @reserved2: for future use and alignment + * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ @@ -651,41 +661,64 @@ struct iwl_scan_req_umac { __le32 flags; __le32 uid; __le32 ooc_priority; - /* SCAN_GENERAL_PARAMS_API_S_VER_4 */ __le16 general_flags; - u8 reserved2; + u8 reserved; u8 scan_start_mac_id; - u8 extended_dwell; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; union { struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ + struct { + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 reserved3; + __le16 adwell_max_budget; + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved2; + u8 data[]; + } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(u8) - sizeof(__le16)) #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) + 2 * sizeof(__le32) - 2 * sizeof(u8) - \ + sizeof(__le16)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 87b4434224a1..dfa111bb411e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -68,6 +68,9 @@ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames * 
@IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using + * monitor mode. Note this queue is the same as the queue for P2P device + * but we can't have active monitor mode along with P2P device anyway. * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. @@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_CMD_QUEUE = 0, IWL_MVM_DQA_AUX_QUEUE = 1, IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 9c889a32fe24..223fb77a3aa9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt) static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { - iwl_fw_dbg_stop_recording(fwrt); - fwrt->dump.conf = FW_DBG_INVALID; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 279248cd9cfb..e988e4c371c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -262,6 +262,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ + IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 71cb1ecde0f7..e226179c32fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -364,6 +364,7 @@ struct iwl_cfg { u32 dccm2_len; u32 smem_offset; u32 smem_len; + u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; u16 rx_with_siso_diversity:1, @@ -471,6 +472,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; +extern const struct iwl_cfg iwl9460_2ac_cfg_soc; +extern const struct iwl_cfg iwl9461_2ac_cfg_soc; +extern const struct iwl_cfg iwl9462_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index e90abbfba718..ecd5c1df811c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -117,6 +117,7 @@ #define FH_RSCSR_FRAME_INVALID 0x55550000 #define FH_RSCSR_FRAME_ALIGN 0x40 #define FH_RSCSR_RPA_EN BIT(25) +#define FH_RSCSR_RADA_EN BIT(26) #define FH_RSCSR_RXQ_POS 16 #define FH_RSCSR_RXQ_MASK 0x3F0000 @@ -128,7 +129,8 @@ struct iwl_rx_packet { * 31: flag flush RB request * 30: flag ignore TC (terminal counter) request * 29: 
flag fast IRQ request - * 28-26: Reserved + * 28-27: Reserved + * 26: RADA enabled * 25: Offload enabled * 24: RPF enabled * 23: RSS enabled diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index a2bf530eeae4..2f22e14e00fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; - u32 tfd_queue_msk = 0; + u32 tfd_queue_msk = BIT(mvm->snif_queue); int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 949e63418299..2ec27ceb8af9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -954,6 +954,7 @@ struct iwl_mvm { /* Tx queues */ u16 aux_queue; + u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; @@ -1042,6 +1043,7 @@ struct iwl_mvm { * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running + * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1053,6 +1055,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, IWL_MVM_STATUS_FIRMWARE_RUNNING, + IWL_MVM_STATUS_NEED_FLUSH_P2P, }; /* Keep track of completed init configuration */ @@ -1124,6 +1127,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } +static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332..9fb40955d5f4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -622,6 +622,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0; mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 0fe723ca844e..d22cef7381ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1881,12 +1881,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, struct rs_rate *rate = &search_tbl->rate; const struct rs_tx_column *column = &rs_tx_columns[col_id]; const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); unsigned long rate_mask = 0; u32 rate_idx = 0; - memcpy(search_tbl, tbl, sz); + memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win)); rate->sgi = column->sgi; rate->ant = column->ant; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 248699c2c4bf..819e6f66a5b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -232,8 +232,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, - struct iwl_rx_mpdu_desc *desc, int queue, - u8 *crypt_len) + struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, + int queue, u8 *crypt_len) { u16 status = le16_to_cpu(desc->status); @@ -253,6 +253,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, return -1; stats->flag |= RX_FLAG_DECRYPTED; + if (pkt_flags & FH_RSCSR_RADA_EN) + stats->flag |= RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: @@ -270,6 +272,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_WEP) *crypt_len = IEEE80211_WEP_IV_LEN; + + if (pkt_flags & FH_RSCSR_RADA_EN) + stats->flag |= RX_FLAG_ICV_STRIPPED; + return 0; case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) @@ -810,7 +816,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, + le32_to_cpu(pkt->len_n_flags), queue, + &crypt_len)) { kfree_skb(skb); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 774122fed454..e4fd476e9ccb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -130,6 +130,19 @@ struct iwl_mvm_scan_params { u32 measurement_dwell; }; +static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) +{ + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + return (void *)&cmd->v7.data; + + if (iwl_mvm_has_new_tx_api(mvm)) + return (void *)&cmd->v6.data; + + return (void *)&cmd->v1.data; +} + static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, { struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + if (iwl_mvm_is_regular_scan(params)) 
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + if (params->measurement_dwell) { + cmd->v7.active_dwell = params->measurement_dwell; + cmd->v7.passive_dwell = params->measurement_dwell; + } else { + cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + } + cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + + cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + } + + return; + } + if (params->measurement_dwell) { - cmd->active_dwell = params->measurement_dwell; - cmd->passive_dwell = params->measurement_dwell; - cmd->extended_dwell = params->measurement_dwell; + cmd->v1.active_dwell = params->measurement_dwell; + cmd->v1.passive_dwell = params->measurement_dwell; + cmd->v1.extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; - cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; - cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; + cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; } - cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time); + cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->v6.max_out_time[1] = + cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[1] = + cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); } } else { @@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } - - if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ? 
- (void *)&cmd->v6.data : (void *)&cmd->v1.data; + void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; @@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + cmd->v7.channel_flags = channel_flags; + cmd->v7.n_channels = params->n_channels; + } else if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.channel_flags = channel_flags; cmd->v6.n_channels = params->n_channels; } else { @@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_has_new_tx_api(mvm)) - base_size = IWL_SCAN_REQ_UMAC_SIZE; + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; + else if (iwl_mvm_has_new_tx_api(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return base_size + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c4a343534c5e..0d7929799942 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1700,29 +1700,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) sta->sta_id = IWL_MVM_INVALID_STA; } -static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) +static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, + u8 sta_id, u8 fifo) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? mvm->cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, - mvm->aux_sta.sta_id, - IWL_MAX_TID_COUNT, - wdg_timeout); - mvm->aux_queue = queue; + int tvqm_queue = + iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + *queue = tvqm_queue; } else { struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvm->aux_sta.sta_id, + .fifo = fifo, + .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; - iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, - wdg_timeout); + iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); } } @@ -1741,7 +1741,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) /* Map Aux queue to fifo - needs to happen before adding Aux station */ if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_queue(mvm); + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MVM_TX_FIFO_MCAST); ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0); @@ -1755,7 +1757,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_queue(mvm); + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MVM_TX_FIFO_MCAST); return 0; } @@ -1763,10 +1767,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; lockdep_assert_held(&mvm->mutex); - return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, + + /* Map snif queue 
to fifo - must happen before adding snif station */ + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, + mvm->snif_sta.sta_id, + IWL_MVM_TX_FIFO_BE); + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, mvmvif->id, 0); + if (ret) + return ret; + + /* + * For 22000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + if (iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, + mvm->snif_sta.sta_id, + IWL_MVM_TX_FIFO_BE); + + return 0; } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1775,6 +1800,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, + IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 4d0314912e94..e25cda9fbf6c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * executed, and a new time event means a new command. */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); + + /* Do the same for the P2P device queue (STA) */ + if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { + struct iwl_mvm_vif *mvmvif; + + /* + * NB: access to this pointer would be racy, but the flush bit + * can only be set when we had a P2P-Device VIF, and we have a + * flush of this work in iwl_mvm_prepare_mac_removal() so it's + * not really racy. 
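The time-event hunk here defers the broadcast-station flush from the ROC-stop path to iwl_mvm_roc_done_wk() through the IWL_MVM_STATUS_NEED_FLUSH_P2P bit. Below is a minimal standalone sketch of that set-flag/consume-flag hand-off, using C11 atomics in user space instead of the kernel's set_bit()/test_and_clear_bit(); roc_stop() and roc_done_worker() are invented names for the two sides of the pattern, not driver API.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the driver status word; bit 0 plays the role of the
 * "need flush" flag set on ROC stop and consumed later by the worker. */
static atomic_uint status;
#define NEED_FLUSH_BIT 0x1u

/* Fast path: only record that a flush is owed, do not perform it here. */
static void roc_stop(void)
{
	atomic_fetch_or(&status, NEED_FLUSH_BIT);
}

/* Deferred path: claim the flag exactly once and act on it. */
static void roc_done_worker(void)
{
	unsigned int old = atomic_fetch_and(&status, ~NEED_FLUSH_BIT);

	if (old & NEED_FLUSH_BIT)
		printf("flushing broadcast station queues\n");
	else
		printf("nothing to flush\n");
}

int main(void)
{
	roc_stop();
	roc_done_worker();	/* flushes */
	roc_done_worker();	/* flag already cleared, no double flush */
	return 0;
}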
+ */ + + if (!WARN_ON(!mvm->p2p_device_vif)) { + mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); + iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, + CMD_ASYNC); + } + } } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - else + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); + } iwl_mvm_roc_finished(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219..887a504ce64a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { - queue = mvm->aux_queue; + queue = mvm->snif_queue; + sta_id = mvm->snif_sta.sta_id; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 2ea74abad73d..43ab172d31cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -603,6 +603,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { + if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { + IWL_ERR(mvm, + "DEVICE_ENABLED bit is not set. Aborting dump.\n"); + return; + } + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); if (mvm->error_event_table[1]) @@ -1143,9 +1149,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, unsigned int default_timeout = cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { + /* + * We can't know when the station is asleep or awake, so we + * must disable the queue hang detection. + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && + vif && vif->type == NL80211_IFTYPE_AP) + return IWL_WATCHDOG_DISABLED; return iwlmvm_mod_params.tfd_q_hang_detect ? 
default_timeout : IWL_WATCHDOG_DISABLED; + } trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); txq_timer = (void *)trigger->data; @@ -1172,6 +1187,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, return le32_to_cpu(txq_timer->p2p_go); case NL80211_IFTYPE_P2P_DEVICE: return le32_to_cpu(txq_timer->p2p_device); + case NL80211_IFTYPE_MONITOR: + return default_timeout; default: WARN_ON(1); return mvm->cfg->base_params->wd_timeout; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 858765fed8f8..0f7bd37bf172 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -465,6 +465,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, @@ -483,6 +485,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, @@ -508,67 +511,144 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 
0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, + 
{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 
0x2A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, /* a000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, @@ -576,8 +656,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)}, + #endif /* CONFIG_IWLMVM */ {0} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 4fb7647995c3..9875ab5ce18c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -666,11 +666,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) return index & (q->n_window - 1); } -static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, +static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, struct iwl_txq *txq, int idx) { - return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, - idx); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->cfg->use_tfh) + idx = iwl_pcie_get_cmd_index(txq, idx); + + return txq->tfds + trans_pcie->tfd_size * idx; } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index c59f4581e972..ac05fd1e74c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -49,6 +49,7 @@ * *****************************************************************************/ #include "iwl-trans.h" +#include "iwl-prph.h" #include 
"iwl-context-info.h" #include "internal.h" @@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; + /* Stop dbgc before stopping device */ + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95a..12a9b86d71ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1138,6 +1138,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; + /* Stop dbgc before stopping device */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + } else { + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + } + /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index d74613fcb756..6f45c8148b27 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ @@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) lockdep_assert_held(&txq->lock); iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_pcie_get_tfd(trans_pcie, txq, idx)); + iwl_pcie_get_tfd(trans, txq, idx)); /* free SKB */ if (txq->entries) { @@ -367,11 +365,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, idx); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; bool amsdu; int i, len, tb1_len, tb2_len, hdr_len; @@ -568,8 +564,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, u8 group_id = iwl_cmd_groupid(cmd->id); const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index c645d10d3707..4704137a26e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i, num_tbs; - void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); + void *tfd = iwl_pcie_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = 
iwl_pcie_tfd_get_num_tbs(trans, tfd); @@ -1999,7 +1999,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), + iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -2073,7 +2073,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), + iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); @@ -2406,7 +2406,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); - tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), iwl_pcie_tfd_get_num_tbs(trans, tfd)); diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index d5a3bf91a03e..ab6d39e12069 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; -#ifdef CONFIG_P54_LEDS - p54_unregister_leds(priv); -#endif /* CONFIG_P54_LEDS */ - if (priv->registered) { priv->registered = false; +#ifdef CONFIG_P54_LEDS + p54_unregister_leds(priv); +#endif /* CONFIG_P54_LEDS */ ieee80211_unregister_hw(dev); } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811..d148dbf3beeb 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -727,16 +727,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) val != PS_MANUAL_POLL) return -EINVAL; - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); if (val == PS_MANUAL_POLL) { + if (data->ps != PS_ENABLED) + return -EINVAL; + local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + local_bh_enable(); + return 0; + } + old_ps = data->ps; + data->ps = val; + + local_bh_disable(); + if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); @@ -3108,6 +3113,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; + int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; @@ -3118,6 +3124,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { + GENL_SET_ERR_MSG(info, "too many channels specified"); + return -EINVAL; + } + if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; @@ -3147,7 +3158,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) param.regd = hwsim_world_regdom_custom[idx]; } - return 
mac80211_hwsim_new_radio(info, ¶m); + ret = mac80211_hwsim_new_radio(info, ¶m); + kfree(hwname); + return ret; } static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) @@ -3212,7 +3225,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) continue; - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) { res = -ENOMEM; goto out_err; diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 32c5074da84c..68aa0c7a8139 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype; + if (priv->scan_request) { + mwifiex_dbg(priv->adapter, ERROR, + "change virtual interface: scan in process\n"); + return -EBUSY; + } + switch (curr_iftype) { case NL80211_IFTYPE_ADHOC: switch (type) { diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index cd314946452c..9511f5fe62f4 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2781,7 +2781,10 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - pci_reset_function(card->dev); + /* We can't afford to wait here; remove() might be waiting on us. If we + * can't grab the device lock, maybe we'll get another chance later. + */ + pci_try_reset_function(card->dev); } static void mwifiex_pcie_work(struct work_struct *work) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 69131965a298..146e42a132e7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -643,11 +643,11 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) { if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, priv->tx_bd_num)) { - pr_err_ratelimited("reclaim full Tx queue\n"); qtnf_pcie_data_tx_reclaim(priv); if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, priv->tx_bd_num)) { + pr_warn_ratelimited("reclaim full Tx queue\n"); priv->tx_full_count++; return 0; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index ecc96312a370..6fe0c6abe0d6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -142,15 +142,25 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, if (!rt2x00dev->ops->hw->set_rts_threshold && (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT))) { - if (rt2x00queue_available(queue) <= 1) - goto exit_fail; + if (rt2x00queue_available(queue) <= 1) { + /* + * Recheck for full queue under lock to avoid race + * conditions with rt2x00lib_txdone(). 
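The rt2x00mac_tx() change here re-checks the queue threshold and pauses the queue while holding tx_lock, so the decision cannot race with the completion path. A small sketch of that check-then-pause step follows, using a pthread mutex and plain counters; toy_queue, queue_below_threshold() and the limits are made-up stand-ins, only the locking order is the point.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy queue: 'available' is decremented by the tx path and refilled by
 * a (not shown) completion path under 'lock'. */
struct toy_queue {
	pthread_mutex_t lock;
	int available;
	int threshold;
	bool paused;
};

static bool queue_below_threshold(struct toy_queue *q)
{
	return q->available <= q->threshold;
}

/* Mirrors the fixed flow: the cheap unlocked test only decides whether
 * to take the lock; the authoritative test and the pause both happen
 * with the lock held, so the completion path cannot slip in between. */
static void tx_one_frame(struct toy_queue *q)
{
	if (q->available <= 1) {
		pthread_mutex_lock(&q->lock);
		if (queue_below_threshold(q))
			q->paused = true;
		pthread_mutex_unlock(&q->lock);
		return;		/* caller frees the frame, as in exit_free_skb */
	}

	pthread_mutex_lock(&q->lock);
	q->available--;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.available = 2,
		.threshold = 1,
	};

	tx_one_frame(&q);	/* consumes one entry */
	tx_one_frame(&q);	/* queue now low: pauses under the lock */
	printf("paused=%d available=%d\n", q.paused, q.available);
	return 0;
}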
+ */ + spin_lock(&queue->tx_lock); + if (rt2x00queue_threshold(queue)) + rt2x00queue_pause_queue(queue); + spin_unlock(&queue->tx_lock); + + goto exit_free_skb; + } if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) - goto exit_fail; + goto exit_free_skb; } if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) - goto exit_fail; + goto exit_free_skb; /* * Pausing queue has to be serialized with rt2x00lib_txdone(). Note @@ -164,10 +174,6 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, return; - exit_fail: - spin_lock(&queue->tx_lock); - rt2x00queue_pause_queue(queue); - spin_unlock(&queue->tx_lock); exit_free_skb: ieee80211_free_txskb(hw, skb); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index e2f4f5778267..086aad22743d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, if (status >= 0) return 0; - if (status == -ENODEV) { + if (status == -ENODEV || status == -ENOENT) { /* Device has disappeared. */ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); break; @@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - if (status == -ENODEV) + if (status == -ENODEV || status == -ENOENT) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); @@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - if (status == -ENODEV) + if (status == -ENODEV || status == -ENOENT) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 121b94f09714..9a1d15b3ce45 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -1450,6 +1450,7 @@ static int rtl8187_probe(struct usb_interface *intf, goto err_free_dev; } mutex_init(&priv->io_mutex); + mutex_init(&priv->conf_mutex); SET_IEEE80211_DEV(dev, &intf->dev); usb_set_intfdata(intf, dev); @@ -1625,7 +1626,6 @@ static int rtl8187_probe(struct usb_interface *intf, printk(KERN_ERR "rtl8187: Cannot register device\n"); goto err_free_dmabuf; } - mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index ea18aa7afecb..93256f8bc0b5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1664,7 +1664,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw, void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) { struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - u8 reject_agg, ctrl_agg_size = 0, agg_size; + u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0; if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg, diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 08dc8919ef60..d7331225c5f3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c 
@@ -1568,7 +1568,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) dev_kfree_skb_irq(skb); ring->idx = (ring->idx + 1) % ring->entries; } + + if (rtlpriv->use_new_trx_flow) { + rtlpci->tx_ring[i].cur_tx_rp = 0; + rtlpci->tx_ring[i].cur_tx_wp = 0; + } + ring->idx = 0; + ring->entries = rtlpci->txringcount[i]; } } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 7eae27f8e173..f9563ae301ad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -682,7 +682,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - + bool rtstatus; u32 totalpacketlen; u8 u1rsvdpageloc[5] = { 0 }; bool b_dlok = false; @@ -768,7 +768,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) skb = dev_alloc_skb(totalpacketlen); skb_put_data(skb, &reserved_page_packet, totalpacketlen); - b_dlok = true; + rtstatus = rtl_cmd_send_packet(hw, skb); + if (rtstatus) + b_dlok = true; if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 4d47b97adfed..0034ebd3e5ba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1123,7 +1123,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) /* Configuration Space offset 0x70f BIT7 is used to control L0S */ tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); - _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); + _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) | + ASPM_L1_LATENCY << 3); /* Configuration Space offset 0x719 Bit3 is for L1 * BIT4 is for clock request diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 1d431d4bf6d2..b82e5b363c05 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) } if (0 == tmp) { read_addr = REG_DBI_RDATA + addr % 4; - ret = rtl_read_word(rtlpriv, read_addr); + ret = rtl_read_byte(rtlpriv, read_addr); } return ret; } @@ -1164,7 +1164,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) } tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); - _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) | + ASPM_L1_LATENCY << 3); tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); @@ -1372,6 +1373,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw) ppsc->wakeup_reason = 0; + do_gettimeofday(&ts); rtlhal->last_suspend_sec = ts.tv_sec; switch (fw_reason) { diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 1ab1024330fb..25c4e3e55921 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -99,6 +99,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c 
b/drivers/net/wireless/rsi/rsi_91x_usb.c index 81df09dd2636..f90c10b3c921 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, u8 *buf; int status = -ENOMEM; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!buf) return status; - if (len > RSI_USB_CTRL_BUF_SIZE) - return -EINVAL; - status = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), USB_VENDOR_REGISTER_READ, @@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, u8 *usb_reg_buf; int status = -ENOMEM; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!usb_reg_buf) return status; - if (len > RSI_USB_CTRL_BUF_SIZE) - return -EINVAL; - usb_reg_buf[0] = (value & 0x00ff); usb_reg_buf[1] = (value & 0xff00) >> 8; usb_reg_buf[2] = 0x0; diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index a52224836a2b..666b88cb2cfe 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, priv->bss_loss_state++; - skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); WARN_ON(!skb); if (skb) cw1200_tx(priv->hw, NULL, skb); @@ -2266,7 +2266,7 @@ static int cw1200_upload_null(struct cw1200_common *priv) .rate = 0xFF, }; - frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); if (!frame.skb) return -ENOMEM; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 9915d83a4a30..037defd10b91 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl) size = sizeof(struct wl12xx_null_data_template); ptr = NULL; } else { - skb = ieee80211_nullfunc_get(wl->hw, wl->vif); + skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false); if (!skb) goto out; size = skb->len; @@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; - wl1251_acx_arp_ip_filter(wl, enable, addr); - + ret = wl1251_acx_arp_ip_filter(wl, enable, addr); if (ret < 0) goto out_sleep; } diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 2bfc12fdc929..761cf8573a80 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) ptr = NULL; } else { skb = ieee80211_nullfunc_get(wl->hw, - wl12xx_wlvif_to_vif(wlvif)); + wl12xx_wlvif_to_vif(wlvif), + false); if (!skb) goto out; size = skb->len; @@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, struct sk_buff *skb = NULL; int ret = -ENOMEM; - skb = ieee80211_nullfunc_get(wl->hw, vif); + skb = ieee80211_nullfunc_get(wl->hw, vif, false); if (!skb) goto out; diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index 58898b99d3f7..145e10a8be55 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -549,6 +549,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif 
*wlvif) { int ret; + /* Disable filtering */ + ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0); + if (ret < 0) + return ret; + ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); if (ret < 0) return ret; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8b8689c6d887..c980cdbd6e53 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,8 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); + struct netfront_stats { u64 packets; u64 bytes; @@ -1324,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); + xenbus_switch_state(dev, XenbusStateInitialising); return netdev; exit: @@ -2021,10 +2024,12 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateClosed: + wake_up_all(&module_unload_q); if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: + wake_up_all(&module_unload_q); xenbus_frontend_closed(dev); break; } @@ -2130,6 +2135,20 @@ static int xennet_remove(struct xenbus_device *dev) dev_dbg(&dev->dev, "%s\n", dev->nodename); + if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + xenbus_switch_state(dev, XenbusStateClosing); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing); + + xenbus_switch_state(dev, XenbusStateClosed); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown); + } + xennet_disconnect_backend(info); unregister_netdev(info->netdev); diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index c4da50e07bbc..08a4f82a2965 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -176,6 +176,16 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb) /* Packet that contains a length */ if (tmp[0] == 0 && tmp[1] == 0) { phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3; + /* + * Ensure next_read_size does not exceed sizeof(tmp) + * for reading that many bytes during next iteration + */ + if (phy->next_read_size > FDP_NCI_I2C_MAX_PAYLOAD) { + dev_dbg(&client->dev, "%s: corrupted packet\n", + __func__); + phy->next_read_size = 5; + goto flush; + } } else { phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index fd08be2917e6..3420c5104c94 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -217,7 +217,8 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, atr_req = (struct st21nfca_atr_req *)skb->data; - if (atr_req->length < sizeof(struct st21nfca_atr_req)) { + if (atr_req->length < sizeof(struct st21nfca_atr_req) || + atr_req->length > skb->len) { r = -EPROTO; goto exit; } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 3a98563d4a12..6e84e120150d 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -320,23 +320,33 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, * AID 81 5 to 16 * PARAMETERS 82 0 to 255 */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && + if (skb->len < NFC_MIN_AID_LENGTH + 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; + /* + * Buffer should have enough space for at least + * two tag fields + two length fields + aid_len (skb->data[1]) + */ 
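The st21nfca se.c hunk here validates the AID and PARAMETERS fields against skb->len before anything is copied out of the event. The sketch below shows the same tag/length/value bounds checking on a raw byte buffer; the tag values and parse_transaction() are invented for the example, only the check-length-before-memcpy discipline carries over from the fix.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_AID    0x81
#define TAG_PARAMS 0x82

/* Expected layout: [TAG_AID][aid_len][aid...][TAG_PARAMS][params_len][params...]
 * Returns 0 on success, -1 if the buffer is too short or malformed. */
static int parse_transaction(const uint8_t *buf, size_t len,
			     uint8_t *aid, uint8_t *aid_len,
			     uint8_t *params, uint8_t *params_len)
{
	size_t off;

	/* Need at least the AID tag and its length byte. */
	if (len < 2 || buf[0] != TAG_AID)
		return -1;
	*aid_len = buf[1];

	/* AID value plus the PARAMS tag and length byte must fit. */
	if (len < (size_t)*aid_len + 4)
		return -1;
	memcpy(aid, &buf[2], *aid_len);
	off = 2 + *aid_len;

	if (buf[off] != TAG_PARAMS)
		return -1;
	*params_len = buf[off + 1];

	/* PARAMS value must fit inside the buffer as well. */
	if (len < off + 2 + (size_t)*params_len)
		return -1;
	memcpy(params, &buf[off + 2], *params_len);
	return 0;
}

int main(void)
{
	const uint8_t evt[] = { TAG_AID, 3, 0xA0, 0x00, 0x01,
				TAG_PARAMS, 2, 0xDE, 0xAD };
	uint8_t aid[16], params[16], aid_len, params_len;

	if (parse_transaction(evt, sizeof(evt), aid, &aid_len,
			      params, &params_len) == 0)
		printf("aid_len=%u params_len=%u\n", aid_len, params_len);
	else
		printf("malformed event\n");
	return 0;
}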
+ if (skb->len < skb->data[1] + 4) + return -EPROTO; + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); transaction->aid_len = skb->data[1]; memcpy(transaction->aid, &skb->data[2], transaction->aid_len); + transaction->params_len = skb->data[transaction->aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ + /* Check next byte is PARAMETERS tag (82) and the length field */ if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + NFC_EVT_TRANSACTION_PARAMS_TAG || + skb->len < transaction->aid_len + transaction->params_len + 4) { + devm_kfree(dev, transaction); return -EPROTO; + } - transaction->params_len = skb->data[transaction->aid_len + 3]; memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 345acca576b3..1bd7b3734751 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) disk->queue = q; disk->flags = GENHD_FL_EXT_DEVT; nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); - set_capacity(disk, 0); - device_add_disk(dev, disk); if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) return -ENOMEM; @@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) } set_capacity(disk, available_disk_size >> SECTOR_SHIFT); + device_add_disk(dev, disk); revalidate_disk(disk); return 0; } diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index d5612bd1cc81..b2feda35966b 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -210,12 +210,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, return ret; } -static int btt_log_read_pair(struct arena_info *arena, u32 lane, - struct log_entry *ent) +static int btt_log_group_read(struct arena_info *arena, u32 lane, + struct log_group *log) { return arena_read_bytes(arena, - arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, - 2 * LOG_ENT_SIZE, 0); + arena->logoff + (lane * LOG_GRP_SIZE), log, + LOG_GRP_SIZE, 0); } static struct dentry *debugfs_root; @@ -255,6 +255,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent, debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); debugfs_create_x32("flags", S_IRUGO, d, &a->flags); + debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]); + debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]); } static void btt_debugfs_init(struct btt *btt) @@ -273,6 +275,11 @@ static void btt_debugfs_init(struct btt *btt) } } +static u32 log_seq(struct log_group *log, int log_idx) +{ + return le32_to_cpu(log->ent[log_idx].seq); +} + /* * This function accepts two log entries, and uses the * sequence number to find the 'older' entry. @@ -282,8 +289,10 @@ static void btt_debugfs_init(struct btt *btt) * * TODO The logic feels a bit kludge-y. make it better.. 
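btt_log_get_old(), reworked in the hunk that follows, decides which of two log slots is older purely from sequence numbers that cycle 1 -> 2 -> 3 -> 1. A standalone rendering of that comparison is below; get_old() and the sample pairs are illustrative, but the rules mirror the checks in the diff: equal sequences or a sum above 5 mean corruption, otherwise the entry one step behind in the cycle is the old one.

#include <stdint.h>
#include <stdio.h>

/* Returns the index (0 or 1) of the older entry, or -1 on corruption.
 * Sequence 0 means the slot was never written. */
static int get_old(uint32_t seq0, uint32_t seq1)
{
	if (seq0 == 0)
		return 0;			/* unused slot: treat as old */
	if (seq0 == seq1 || seq0 + seq1 > 5)
		return -1;			/* corrupted pair */

	if (seq0 < seq1)
		return (seq1 - seq0 == 1) ? 0 : 1;
	return (seq0 - seq1 == 1) ? 1 : 0;
}

int main(void)
{
	/* (3,1): 1 follows 3 in the cycle, so slot 0 holds the old entry. */
	printf("old(1,2)=%d old(2,3)=%d old(3,1)=%d old(2,2)=%d\n",
	       get_old(1, 2), get_old(2, 3), get_old(3, 1), get_old(2, 2));
	return 0;
}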
*/ -static int btt_log_get_old(struct log_entry *ent) +static int btt_log_get_old(struct arena_info *a, struct log_group *log) { + int idx0 = a->log_index[0]; + int idx1 = a->log_index[1]; int old; /* @@ -291,23 +300,23 @@ static int btt_log_get_old(struct log_entry *ent) * the next time, the following logic works out to put this * (next) entry into [1] */ - if (ent[0].seq == 0) { - ent[0].seq = cpu_to_le32(1); + if (log_seq(log, idx0) == 0) { + log->ent[idx0].seq = cpu_to_le32(1); return 0; } - if (ent[0].seq == ent[1].seq) + if (log_seq(log, idx0) == log_seq(log, idx1)) return -EINVAL; - if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5) + if (log_seq(log, idx0) + log_seq(log, idx1) > 5) return -EINVAL; - if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) { - if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1) + if (log_seq(log, idx0) < log_seq(log, idx1)) { + if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1) old = 0; else old = 1; } else { - if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1) + if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1) old = 1; else old = 0; @@ -327,17 +336,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane, { int ret; int old_ent, ret_ent; - struct log_entry log[2]; + struct log_group log; - ret = btt_log_read_pair(arena, lane, log); + ret = btt_log_group_read(arena, lane, &log); if (ret) return -EIO; - old_ent = btt_log_get_old(log); + old_ent = btt_log_get_old(arena, &log); if (old_ent < 0 || old_ent > 1) { dev_err(to_dev(arena), "log corruption (%d): lane %d seq [%d, %d]\n", - old_ent, lane, log[0].seq, log[1].seq); + old_ent, lane, log.ent[arena->log_index[0]].seq, + log.ent[arena->log_index[1]].seq); /* TODO set error state? */ return -EIO; } @@ -345,7 +355,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, ret_ent = (old_flag ? old_ent : (1 - old_ent)); if (ent != NULL) - memcpy(ent, &log[ret_ent], LOG_ENT_SIZE); + memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); return ret_ent; } @@ -359,17 +369,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane, u32 sub, struct log_entry *ent, unsigned long flags) { int ret; - /* - * Ignore the padding in log_entry for calculating log_half. - * The entry is 'committed' when we write the sequence number, - * and we want to ensure that that is the last thing written. 
- * We don't bother writing the padding as that would be extra - * media wear and write amplification - */ - unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2; - u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE); + u32 group_slot = arena->log_index[sub]; + unsigned int log_half = LOG_ENT_SIZE / 2; void *src = ent; + u64 ns_off; + ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + + (group_slot * LOG_ENT_SIZE); /* split the 16B write into atomic, durable halves */ ret = arena_write_bytes(arena, ns_off, src, log_half, flags); if (ret) @@ -452,7 +458,7 @@ static int btt_log_init(struct arena_info *arena) { size_t logsize = arena->info2off - arena->logoff; size_t chunk_size = SZ_4K, offset = 0; - struct log_entry log; + struct log_entry ent; void *zerobuf; int ret; u32 i; @@ -484,11 +490,11 @@ static int btt_log_init(struct arena_info *arena) } for (i = 0; i < arena->nfree; i++) { - log.lba = cpu_to_le32(i); - log.old_map = cpu_to_le32(arena->external_nlba + i); - log.new_map = cpu_to_le32(arena->external_nlba + i); - log.seq = cpu_to_le32(LOG_SEQ_INIT); - ret = __btt_log_write(arena, i, 0, &log, 0); + ent.lba = cpu_to_le32(i); + ent.old_map = cpu_to_le32(arena->external_nlba + i); + ent.new_map = cpu_to_le32(arena->external_nlba + i); + ent.seq = cpu_to_le32(LOG_SEQ_INIT); + ret = __btt_log_write(arena, i, 0, &ent, 0); if (ret) goto free; } @@ -593,6 +599,123 @@ static int btt_freelist_init(struct arena_info *arena) return 0; } +static bool ent_is_padding(struct log_entry *ent) +{ + return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) + && (ent->seq == 0); +} + +/* + * Detecting valid log indices: We read a log group (see the comments in btt.h + * for a description of a 'log_group' and its 'slots'), and iterate over its + * four slots. We expect that a padding slot will be all-zeroes, and use this + * to detect a padding slot vs. an actual entry. + * + * If a log_group is in the initial state, i.e. hasn't been used since the + * creation of this BTT layout, it will have three of the four slots with + * zeroes. We skip over these log_groups for the detection of log_index. If + * all log_groups are in the initial state (i.e. the BTT has never been + * written to), it is safe to assume the 'new format' of log entries in slots + * (0, 1). + */ +static int log_set_indices(struct arena_info *arena) +{ + bool idx_set = false, initial_state = true; + int ret, log_index[2] = {-1, -1}; + u32 i, j, next_idx = 0; + struct log_group log; + u32 pad_count = 0; + + for (i = 0; i < arena->nfree; i++) { + ret = btt_log_group_read(arena, i, &log); + if (ret < 0) + return ret; + + for (j = 0; j < 4; j++) { + if (!idx_set) { + if (ent_is_padding(&log.ent[j])) { + pad_count++; + continue; + } else { + /* Skip if index has been recorded */ + if ((next_idx == 1) && + (j == log_index[0])) + continue; + /* valid entry, record index */ + log_index[next_idx] = j; + next_idx++; + } + if (next_idx == 2) { + /* two valid entries found */ + idx_set = true; + } else if (next_idx > 2) { + /* too many valid indices */ + return -ENXIO; + } + } else { + /* + * once the indices have been set, just verify + * that all subsequent log groups are either in + * their initial state or follow the same + * indices. 
+ */ + if (j == log_index[0]) { + /* entry must be 'valid' */ + if (ent_is_padding(&log.ent[j])) + return -ENXIO; + } else if (j == log_index[1]) { + ; + /* + * log_index[1] can be padding if the + * lane never got used and it is still + * in the initial state (three 'padding' + * entries) + */ + } else { + /* entry must be invalid (padding) */ + if (!ent_is_padding(&log.ent[j])) + return -ENXIO; + } + } + } + /* + * If any of the log_groups have more than one valid, + * non-padding entry, then the we are no longer in the + * initial_state + */ + if (pad_count < 3) + initial_state = false; + pad_count = 0; + } + + if (!initial_state && !idx_set) + return -ENXIO; + + /* + * If all the entries in the log were in the initial state, + * assume new padding scheme + */ + if (initial_state) + log_index[1] = 1; + + /* + * Only allow the known permutations of log/padding indices, + * i.e. (0, 1), and (0, 2) + */ + if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2))) + ; /* known index possibilities */ + else { + dev_err(to_dev(arena), "Found an unknown padding scheme\n"); + return -ENXIO; + } + + arena->log_index[0] = log_index[0]; + arena->log_index[1] = log_index[1]; + dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]); + dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]); + return 0; +} + static int btt_rtt_init(struct arena_info *arena) { arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); @@ -649,8 +772,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, available -= 2 * BTT_PG_SIZE; /* The log takes a fixed amount of space based on nfree */ - logsize = roundup(2 * arena->nfree * sizeof(struct log_entry), - BTT_PG_SIZE); + logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); available -= logsize; /* Calculate optimal split between map and data area */ @@ -667,6 +789,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, arena->mapoff = arena->dataoff + datasize; arena->logoff = arena->mapoff + mapsize; arena->info2off = arena->logoff + logsize; + + /* Default log indices are (0,1) */ + arena->log_index[0] = 0; + arena->log_index[1] = 1; return arena; } @@ -757,6 +883,13 @@ static int discover_arenas(struct btt *btt) arena->external_lba_start = cur_nlba; parse_arena_meta(arena, super, cur_off); + ret = log_set_indices(arena); + if (ret) { + dev_err(to_dev(arena), + "Unable to deduce log/padding indices\n"); + goto out; + } + mutex_init(&arena->err_lock); ret = btt_freelist_init(arena); if (ret) @@ -1409,8 +1542,6 @@ static int btt_blk_init(struct btt *btt) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); btt->btt_queue->queuedata = btt; - set_capacity(btt->btt_disk, 0); - device_add_disk(&btt->nd_btt->dev, btt->btt_disk); if (btt_meta_size(btt)) { int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); @@ -1422,6 +1553,7 @@ static int btt_blk_init(struct btt *btt) } } set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); + device_add_disk(&btt->nd_btt->dev, btt->btt_disk); btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; revalidate_disk(btt->btt_disk); diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 578c2057524d..2609683c4167 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -27,6 +27,7 @@ #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT) #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT))) #define MAP_ENT_NORMAL 0xC0000000 +#define LOG_GRP_SIZE sizeof(struct log_group) #define LOG_ENT_SIZE sizeof(struct log_entry) #define 
ARENA_MIN_SIZE (1UL << 24) /* 16 MB */ #define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */ @@ -50,12 +51,52 @@ enum btt_init_state { INIT_READY }; +/* + * A log group represents one log 'lane', and consists of four log entries. + * Two of the four entries are valid entries, and the remaining two are + * padding. Due to an old bug in the padding location, we need to perform a + * test to determine the padding scheme being used, and use that scheme + * thereafter. + * + * In kernels prior to 4.15, 'log group' would have actual log entries at + * indices (0, 2) and padding at indices (1, 3), where as the correct/updated + * format has log entries at indices (0, 1) and padding at indices (2, 3). + * + * Old (pre 4.15) format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------+-----------------+ + * + * New format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | lba/old/new/seq | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | pad | pad | + * +-----------------+-----------------+ + * + * We detect during start-up which format is in use, and set + * arena->log_index[(0, 1)] with the detected format. + */ + struct log_entry { __le32 lba; __le32 old_map; __le32 new_map; __le32 seq; - __le64 padding[2]; +}; + +struct log_group { + struct log_entry ent[4]; }; struct btt_sb { @@ -125,6 +166,7 @@ struct aligned_lock { * @list: List head for list of arenas * @debugfs_dir: Debugfs dentry * @flags: Arena flags - may signify error states. + * @log_index: Indices of the valid log entries in a log_group * * arena_info is a per-arena handle. Once an arena is narrowed down for an * IO, this struct is passed around for the duration of the IO. 
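The layout comment above explains why two padding schemes exist; the detection added by log_set_indices() earlier in this patch reduces to telling all-zero padding slots apart from live entries and checking that the live slots land on one of the two known index pairs. A user-space sketch of that classification for a single log group (the kernel additionally walks every lane and skips groups still in their initial state); the structure layout mirrors btt.h, the function names are illustrative only:

#include <stdint.h>
#include <stdio.h>

struct log_entry {
	uint32_t lba, old_map, new_map, seq;	/* 16 bytes, no padding members */
};

struct log_group {
	struct log_entry ent[4];		/* two live slots + two pad slots */
};

static int ent_is_padding(const struct log_entry *e)
{
	return !e->lba && !e->old_map && !e->new_map && !e->seq;
}

/* Return 0 and fill idx[2] with the valid slots, or -1 on an unknown layout. */
static int detect_log_indices(const struct log_group *g, int idx[2])
{
	int i, found = 0;

	for (i = 0; i < 4 && found < 2; i++)
		if (!ent_is_padding(&g->ent[i]))
			idx[found++] = i;

	if (found != 2)
		return -1;
	/* only (0, 1) (current format) and (0, 2) (pre-4.15) are legal */
	if (idx[0] != 0 || (idx[1] != 1 && idx[1] != 2))
		return -1;
	return 0;
}

int main(void)
{
	struct log_group old_fmt = { .ent = {
		{ .lba = 7, .seq = 1 }, { 0 }, { .lba = 7, .seq = 2 }, { 0 } } };
	int idx[2];

	if (!detect_log_indices(&old_fmt, idx))
		printf("valid slots: %d and %d\n", idx[0], idx[1]);	/* 0 and 2 */
	return 0;
}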
@@ -157,6 +199,7 @@ struct arena_info { /* Arena flags */ u32 flags; struct mutex err_lock; + int log_index[2]; }; /** diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index e0f0e3ce1a32..98466d762c8f 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -68,6 +68,7 @@ static int nvdimm_probe(struct device *dev) rc = nd_label_reserve_dpa(ndd); if (ndd->ns_current >= 0) nvdimm_set_aliasing(dev); + nvdimm_clear_locked(dev); nvdimm_bus_unlock(dev); if (rc) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index f0d1b7e5de01..5f1385b96b13 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev) set_bit(NDD_LOCKED, &nvdimm->flags); } +void nvdimm_clear_locked(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + + clear_bit(NDD_LOCKED, &nvdimm->flags); +} + static void nvdimm_release(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 9c5f108910e3..de66c02f6140 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels) nsindex = to_namespace_index(ndd, 0); memset(nsindex, 0, ndd->nsarea.config_size); for (i = 0; i < 2; i++) { - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT); + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT); if (rc) return rc; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 3e4d1e7998da..0af988739a06 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj, if (a == &dev_attr_resource.attr) { if (is_namespace_blk(dev)) return 0; - return a->mode; + return 0400; } if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 9c758a91372b..156be00e1f76 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -254,6 +254,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, unsigned int len); void nvdimm_set_aliasing(struct device *dev); void nvdimm_set_locked(struct device *dev); +void nvdimm_clear_locked(struct device *dev); struct nd_btt *to_nd_btt(struct device *dev); struct nd_gen_sb { diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 9576c444f0ab..2adada1a5855 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = { NULL, }; +static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n) +{ + if (a == &dev_attr_resource.attr) + return 0400; + return a->mode; +} + struct attribute_group nd_pfn_attribute_group = { .attrs = nd_pfn_attributes, + .is_visible = pfn_visible, }; static const struct attribute_group *nd_pfn_attribute_groups[] = { @@ -356,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region) int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; - unsigned long align; enum nd_pfn_mode mode; struct nd_namespace_io *nsio; + unsigned long align, start_pad; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; struct nd_namespace_common *ndns = nd_pfn->ndns; const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev); @@ -402,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) align = le32_to_cpu(pfn_sb->align); offset = le64_to_cpu(pfn_sb->dataoff); + start_pad = 
le32_to_cpu(pfn_sb->start_pad); if (align == 0) align = 1UL << ilog2(offset); mode = le32_to_cpu(pfn_sb->mode); @@ -460,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) return -EBUSY; } - if ((align && !IS_ALIGNED(offset, align)) + if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) || !IS_ALIGNED(offset, PAGE_SIZE)) { dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled align: %#lx\n", @@ -574,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, return altmap; } +static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) +{ + return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys), + ALIGN_DOWN(phys, nd_pfn->align)); +} + static int nd_pfn_init(struct nd_pfn *nd_pfn) { u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; @@ -629,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) start = nsio->res.start; size = PHYS_SECTION_ALIGN_UP(start + size) - start; if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED) { + IORES_DESC_NONE) == REGION_MIXED + || !IS_ALIGNED(start + resource_size(&nsio->res), + nd_pfn->align)) { size = resource_size(&nsio->res); - end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size); + end_trunc = start + size - phys_pmem_align_down(nd_pfn, + start + size); } if (start_pad + end_trunc) - dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n", + dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", dev_name(&ndns->dev), start_pad + end_trunc); /* diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 829d760f651c..abaf38c61220 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) return 0; - if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr) - return 0; + if (a == &dev_attr_resource.attr) { + if (is_nd_pmem(dev)) + return 0400; + else + return 0; + } if (a == &dev_attr_deep_flush.attr) { int has_flush = nvdimm_has_flush(nd_region); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 37f9039bb9ca..dd956311a85a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1515,7 +1515,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); } - if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) + if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && + is_power_of_2(ctrl->max_hw_sectors)) blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); blk_queue_virt_boundary(q, ctrl->page_size - 1); if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) @@ -2299,7 +2300,8 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->ns_id == nsid) { - kref_get(&ns->kref); + if (!kref_get_unless_zero(&ns->kref)) + continue; ret = ns; break; } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 555c976cc2ee..8cd42544c90e 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void) return NULL; kref_init(&host->ref); + uuid_gen(&host->id); snprintf(host->nqn, NVMF_NQN_SIZE, "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); diff --git a/drivers/nvme/host/fabrics.h 
b/drivers/nvme/host/fabrics.h index bf33663218cd..9ff8529a64a9 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -142,4 +142,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts); int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); +static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl, + struct request *rq) +{ + struct nvme_command *cmd = nvme_req(rq)->cmd; + + /* + * We cannot accept any other command until the connect command has + * completed, so only allow connect to pass. + */ + if (!blk_rq_is_passthrough(rq) || + cmd->common.opcode != nvme_fabrics_command || + cmd->fabrics.fctype != nvme_fabrics_type_connect) { + /* + * Reconnecting state means transport disruption, which can take + * a long time and even might fail permanently, fail fast to + * give upper layers a chance to failover. + * Deleting state means that the ctrl will never accept commands + * again, fail it permanently. + */ + if (ctrl->state == NVME_CTRL_RECONNECTING || + ctrl->state == NVME_CTRL_DELETING) { + nvme_req(rq)->status = NVME_SC_ABORT_REQ; + return BLK_STS_IOERR; + } + return BLK_STS_RESOURCE; /* try again later */ + } + + return BLK_STS_OK; +} + #endif /* _NVME_FABRICS_H */ diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index be49d0f79381..7deb7b5d8683 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -41,6 +41,7 @@ enum nvme_fc_queue_flags { NVME_FC_Q_CONNECTED = (1 << 0), + NVME_FC_Q_LIVE = (1 << 1), }; #define NVMEFC_QUEUE_DELAY 3 /* ms units */ @@ -1654,6 +1655,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) return; + clear_bit(NVME_FC_Q_LIVE, &queue->flags); /* * Current implementation never disconnects a single queue. * It always terminates a whole association. So there is never @@ -1661,7 +1663,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) */ queue->connection_id = 0; - clear_bit(NVME_FC_Q_CONNECTED, &queue->flags); } static void @@ -1740,6 +1741,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) break; + + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); } return ret; @@ -2048,6 +2051,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, return BLK_STS_RESOURCE; } +static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; +} + static blk_status_t nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) @@ -2063,6 +2074,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, u32 data_len; blk_status_t ret; + ret = nvme_fc_is_ready(queue, rq); + if (unlikely(ret)) + return ret; + ret = nvme_setup_cmd(ns, rq, sqe); if (ret) return ret; @@ -2398,6 +2413,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ret) goto out_disconnect_admin_queue; + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); + /* * Check controller capabilities * @@ -2859,7 +2876,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); /* Remove core ctrl ref. 
*/ nvme_put_ctrl(&ctrl->ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index d3f3c4447515..044af553204c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -108,7 +108,7 @@ static inline struct nvme_request *nvme_req(struct request *req) * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was * found empirically. */ -#define NVME_QUIRK_DELAY_AMOUNT 2000 +#define NVME_QUIRK_DELAY_AMOUNT 2300 enum nvme_ctrl_state { NVME_CTRL_NEW, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 3f5a04c586ce..cdd2fd509ddc 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1617,6 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev) dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; + dev->nr_host_mem_descs = 0; } static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, @@ -1645,7 +1646,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, if (!bufs) goto out_free_descs; - for (size = 0; size < preferred; size += len) { + for (size = 0; size < preferred && i < max_entries; size += len) { dma_addr_t dma_addr; len = min_t(u64, chunk_size, preferred - size); @@ -2282,7 +2283,7 @@ static int nvme_dev_map(struct nvme_dev *dev) return -ENODEV; } -static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) +static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) { if (pdev->vendor == 0x144d && pdev->device == 0xa802) { /* @@ -2297,6 +2298,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) return NVME_QUIRK_NO_DEEPEST_PS; + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { + /* + * Samsung SSD 960 EVO drops off the PCIe bus after system + * suspend on a Ryzen board, ASUS PRIME B350M-A. 
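An easy-to-miss part of the host-memory-buffer change above is the loop bound: __nvme_alloc_host_mem() now stops either when the preferred size has been described or when the descriptor array is full, whichever comes first, so a small chunk size can no longer overrun the array. A toy user-space model of that dual bound, with purely illustrative numbers:

#include <stdio.h>
#include <stddef.h>

#define MAX_ENTRIES 8

int main(void)
{
	size_t preferred = 1000, chunk = 100, size = 0;
	size_t lens[MAX_ENTRIES];
	unsigned int i = 0;

	for (size = 0; size < preferred && i < MAX_ENTRIES; i++) {
		size_t len = preferred - size < chunk ? preferred - size : chunk;

		lens[i] = len;		/* stand-in for allocating one chunk */
		size += len;
	}

	/* the array bound stops the loop after 8 chunks / 800 bytes here */
	printf("first chunk %zu bytes, %u chunks, %zu bytes total\n",
	       lens[0], i, size);
	return 0;
}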
+ */ + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && + dmi_match(DMI_BOARD_NAME, "PRIME B350M-A")) + return NVME_QUIRK_NO_APST; } return 0; @@ -2336,7 +2345,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto unmap; - quirks |= check_dell_samsung_bug(pdev); + quirks |= check_vendor_combination_bug(pdev); result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks); @@ -2519,6 +2528,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0ebb539f3bd3..93a082e0bdd4 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -67,6 +67,9 @@ struct nvme_rdma_request { struct nvme_request req; struct ib_mr *mr; struct nvme_rdma_qe sqe; + union nvme_result result; + __le16 status; + refcount_t ref; struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; u32 num_sge; int nents; @@ -85,7 +88,6 @@ enum nvme_rdma_queue_flags { struct nvme_rdma_queue { struct nvme_rdma_qe *rsp_ring; - atomic_t sig_count; int queue_size; size_t cmnd_capsule_len; struct nvme_rdma_ctrl *ctrl; @@ -518,7 +520,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, queue->cmnd_capsule_len = sizeof(struct nvme_command); queue->queue_size = queue_size; - atomic_set(&queue->sig_count, 0); queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, RDMA_PS_TCP, IB_QPT_RC); @@ -1177,6 +1178,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, req->num_sge = 1; req->inline_data = false; req->mr->need_inval = false; + refcount_set(&req->ref, 2); /* send and recv completions */ c->common.flags |= NVME_CMD_SGL_METABUF; @@ -1213,25 +1215,24 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { - if (unlikely(wc->status != IB_WC_SUCCESS)) - nvme_rdma_wr_error(cq, wc, "SEND"); -} + struct nvme_rdma_qe *qe = + container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); + struct nvme_rdma_request *req = + container_of(qe, struct nvme_rdma_request, sqe); + struct request *rq = blk_mq_rq_from_pdu(req); -/* - * We want to signal completion at least every queue depth/2. This returns the - * largest power of two that is not above half of (queue size + 1) to optimize - * (avoid divisions). 
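The nvme-rdma comment being removed above documents the old workaround of signalling only every few sends; the replacement signals every send and finishes a request only once both the send completion and the response completion have run, via refcount_set(&req->ref, 2) and refcount_dec_and_test() in the two completion handlers. A user-space model of that two-completion pattern using C11 atomics; the names are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int ref;
	int status;
};

static void end_request(struct request *rq)
{
	printf("request completed, status %d\n", rq->status);
}

static void put_ref(struct request *rq)
{
	/* whichever completion drops the last reference ends the request */
	if (atomic_fetch_sub(&rq->ref, 1) == 1)
		end_request(rq);
}

static void send_done(struct request *rq)
{
	put_ref(rq);
}

static void recv_done(struct request *rq, int status)
{
	rq->status = status;	/* record the result before the final put */
	put_ref(rq);
}

int main(void)
{
	struct request rq;

	rq.status = -1;
	atomic_init(&rq.ref, 2);	/* one for the send, one for the response */
	recv_done(&rq, 0);		/* completions may arrive in either order */
	send_done(&rq);			/* the second put ends the request */
	return 0;
}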
- */ -static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) -{ - int limit = 1 << ilog2((queue->queue_size + 1) / 2); + if (unlikely(wc->status != IB_WC_SUCCESS)) { + nvme_rdma_wr_error(cq, wc, "SEND"); + return; + } - return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0; + if (refcount_dec_and_test(&req->ref)) + nvme_end_request(rq, req->status, req->result); } static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, - struct ib_send_wr *first, bool flush) + struct ib_send_wr *first) { struct ib_send_wr wr, *bad_wr; int ret; @@ -1240,31 +1241,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, sge->length = sizeof(struct nvme_command), sge->lkey = queue->device->pd->local_dma_lkey; - qe->cqe.done = nvme_rdma_send_done; - wr.next = NULL; wr.wr_cqe = &qe->cqe; wr.sg_list = sge; wr.num_sge = num_sge; wr.opcode = IB_WR_SEND; - wr.send_flags = 0; - - /* - * Unsignalled send completions are another giant desaster in the - * IB Verbs spec: If we don't regularly post signalled sends - * the send queue will fill up and only a QP reset will rescue us. - * Would have been way to obvious to handle this in hardware or - * at least the RDMA stack.. - * - * Always signal the flushes. The magic request used for the flush - * sequencer is not allocated in our driver's tagset and it's - * triggered to be freed by blk_cleanup_queue(). So we need to - * always mark it as signaled to ensure that the "wr_cqe", which is - * embedded in request's payload, is not freed when __ib_process_cq() - * calls wr_cqe->done(). - */ - if (nvme_rdma_queue_sig_limit(queue) || flush) - wr.send_flags |= IB_SEND_SIGNALED; + wr.send_flags = IB_SEND_SIGNALED; if (first) first->next = ≀ @@ -1314,6 +1296,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) return queue->ctrl->tag_set.tags[queue_idx - 1]; } +static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "ASYNC"); +} + static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); @@ -1335,10 +1323,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx) cmd->common.flags |= NVME_CMD_SGL_METABUF; nvme_rdma_set_sg_null(cmd); + sqe->cqe.done = nvme_rdma_async_done; + ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); - ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false); + ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); WARN_ON_ONCE(ret); } @@ -1359,14 +1349,19 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, } req = blk_mq_rq_to_pdu(rq); - if (rq->tag == tag) - ret = 1; + req->status = cqe->status; + req->result = cqe->result; if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && wc->ex.invalidate_rkey == req->mr->rkey) req->mr->need_inval = false; - nvme_end_request(rq, cqe->status, cqe->result); + if (refcount_dec_and_test(&req->ref)) { + if (rq->tag == tag) + ret = 1; + nvme_end_request(rq, req->status, req->result); + } + return ret; } @@ -1603,31 +1598,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved) * We cannot accept any other command until the Connect command has completed. 
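The open-coded readiness check above is replaced by the shared nvmf_check_init_req() helper added in fabrics.h earlier in this patch, gated by a per-queue LIVE bit in each transport (FC, RDMA, loop): until the queue is live only the Fabrics Connect command may pass, and a reconnecting or deleting controller fails other requests immediately rather than requeueing them. A compact user-space model of that decision, with illustrative names and return codes:

#include <stdio.h>
#include <stdbool.h>

enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING, CTRL_DELETING };
enum verdict    { STS_OK, STS_RESOURCE, STS_IOERR };

static enum verdict queue_is_ready(bool q_live, enum ctrl_state state,
				   bool is_connect_cmd)
{
	if (q_live)
		return STS_OK;		/* queue initialised, anything goes */
	if (is_connect_cmd)
		return STS_OK;		/* Connect itself must be let through */
	if (state == CTRL_RECONNECTING || state == CTRL_DELETING)
		return STS_IOERR;	/* fail fast so upper layers can fail over */
	return STS_RESOURCE;		/* transient: ask the block layer to retry */
}

int main(void)
{
	printf("%d\n", queue_is_ready(false, CTRL_LIVE, true));	/* 0: connect ok */
	printf("%d\n", queue_is_ready(false, CTRL_LIVE, false));	/* 1: retry later */
	printf("%d\n", queue_is_ready(false, CTRL_RECONNECTING, false)); /* 2: error */
	printf("%d\n", queue_is_ready(true, CTRL_LIVE, false));	/* 0: live queue */
	return 0;
}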
*/ static inline blk_status_t -nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) -{ - if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { - struct nvme_command *cmd = nvme_req(rq)->cmd; - - if (!blk_rq_is_passthrough(rq) || - cmd->common.opcode != nvme_fabrics_command || - cmd->fabrics.fctype != nvme_fabrics_type_connect) { - /* - * reconnecting state means transport disruption, which - * can take a long time and even might fail permanently, - * fail fast to give upper layers a chance to failover. - * deleting state means that the ctrl will never accept - * commands again, fail it permanently. - */ - if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING || - queue->ctrl->ctrl.state == NVME_CTRL_DELETING) { - nvme_req(rq)->status = NVME_SC_ABORT_REQ; - return BLK_STS_IOERR; - } - return BLK_STS_RESOURCE; /* try again later */ - } - } - - return 0; +nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq) +{ + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; } static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1639,14 +1614,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_qe *sqe = &req->sqe; struct nvme_command *c = sqe->data; - bool flush = false; struct ib_device *dev; blk_status_t ret; int err; WARN_ON_ONCE(rq->tag < 0); - ret = nvme_rdma_queue_is_ready(queue, rq); + ret = nvme_rdma_is_ready(queue, rq); if (unlikely(ret)) return ret; @@ -1668,13 +1642,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, goto err; } + sqe->cqe.done = nvme_rdma_send_done; + ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); - if (req_op(rq) == REQ_OP_FLUSH) - flush = true; err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, - req->mr->need_inval ? &req->reg_wr.wr : NULL, flush); + req->mr->need_inval ? 
&req->reg_wr.wr : NULL); if (unlikely(err)) { nvme_rdma_unmap_data(queue, rq); goto err; diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 58e010bdda3e..8e21211b904b 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -532,15 +532,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); + /* release the queue lookup reference on the completed IO */ + nvmet_fc_tgt_q_put(queue); + spin_lock_irqsave(&queue->qlock, flags); deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, struct nvmet_fc_defer_fcp_req, req_list); if (!deferfcp) { list_add_tail(&fod->fcp_list, &fod->queue->fod_list); spin_unlock_irqrestore(&queue->qlock, flags); - - /* Release reference taken at queue lookup and fod allocation */ - nvmet_fc_tgt_q_put(queue); return; } @@ -759,6 +759,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) tgtport->ops->fcp_req_release(&tgtport->fc_target_port, deferfcp->fcp_req); + /* release the queue lookup reference */ + nvmet_fc_tgt_q_put(queue); + kfree(deferfcp); spin_lock_irqsave(&queue->qlock, flags); diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 7b75d9de55ab..c0080f6ab2f5 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -204,6 +204,10 @@ struct fcloop_lport { struct completion unreg_done; }; +struct fcloop_lport_priv { + struct fcloop_lport *lport; +}; + struct fcloop_rport { struct nvme_fc_remote_port *remoteport; struct nvmet_fc_target_port *targetport; @@ -370,6 +374,7 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work) spin_lock(&tfcp_req->reqlock); fcpreq = tfcp_req->fcpreq; + tfcp_req->fcpreq = NULL; spin_unlock(&tfcp_req->reqlock); if (tport->remoteport && fcpreq) { @@ -611,11 +616,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, if (!tfcp_req) /* abort has already been called */ - return; - - if (rport->targetport) - nvmet_fc_rcv_fcp_abort(rport->targetport, - &tfcp_req->tgt_fcp_req); + goto finish; /* break initiator/target relationship for io */ spin_lock(&tfcp_req->reqlock); @@ -623,6 +624,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, tfcp_req->fcpreq = NULL; spin_unlock(&tfcp_req->reqlock); + if (rport->targetport) + nvmet_fc_rcv_fcp_abort(rport->targetport, + &tfcp_req->tgt_fcp_req); + +finish: /* post the aborted io completion */ fcpreq->status = -ECANCELED; schedule_work(&inireq->iniwork); @@ -657,7 +663,8 @@ fcloop_nport_get(struct fcloop_nport *nport) static void fcloop_localport_delete(struct nvme_fc_local_port *localport) { - struct fcloop_lport *lport = localport->private; + struct fcloop_lport_priv *lport_priv = localport->private; + struct fcloop_lport *lport = lport_priv->lport; /* release any threads waiting for the unreg to complete */ complete(&lport->unreg_done); @@ -697,7 +704,7 @@ static struct nvme_fc_port_template fctemplate = { .max_dif_sgl_segments = FCLOOP_SGL_SEGS, .dma_boundary = FCLOOP_DMABOUND_4G, /* sizes of additional private data for data structures */ - .local_priv_sz = sizeof(struct fcloop_lport), + .local_priv_sz = sizeof(struct fcloop_lport_priv), .remote_priv_sz = sizeof(struct fcloop_rport), .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), @@ -728,11 +735,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, struct fcloop_ctrl_options *opts; struct nvme_fc_local_port *localport; struct fcloop_lport *lport; - int ret; + struct 
fcloop_lport_priv *lport_priv; + unsigned long flags; + int ret = -ENOMEM; + + lport = kzalloc(sizeof(*lport), GFP_KERNEL); + if (!lport) + return -ENOMEM; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) - return -ENOMEM; + goto out_free_lport; ret = fcloop_parse_options(opts, buf); if (ret) @@ -752,23 +765,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport); if (!ret) { - unsigned long flags; - /* success */ - lport = localport->private; + lport_priv = localport->private; + lport_priv->lport = lport; + lport->localport = localport; INIT_LIST_HEAD(&lport->lport_list); spin_lock_irqsave(&fcloop_lock, flags); list_add_tail(&lport->lport_list, &fcloop_lports); spin_unlock_irqrestore(&fcloop_lock, flags); - - /* mark all of the input buffer consumed */ - ret = count; } out_free_opts: kfree(opts); +out_free_lport: + /* free only if we're going to fail */ + if (ret) + kfree(lport); + return ret ? ret : count; } @@ -790,6 +805,8 @@ __wait_localport_unreg(struct fcloop_lport *lport) wait_for_completion(&lport->unreg_done); + kfree(lport); + return ret; } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 92628c432926..02aff5cc48bf 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -61,10 +61,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) return container_of(ctrl, struct nvme_loop_ctrl, ctrl); } +enum nvme_loop_queue_flags { + NVME_LOOP_Q_LIVE = 0, +}; + struct nvme_loop_queue { struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvme_loop_ctrl *ctrl; + unsigned long flags; }; static struct nvmet_port *nvmet_loop_port; @@ -153,6 +158,14 @@ nvme_loop_timeout(struct request *rq, bool reserved) return BLK_EH_HANDLED; } +static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; +} + static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -162,6 +175,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; + ret = nvme_loop_is_ready(queue, req); + if (unlikely(ret)) + return ret; + ret = nvme_setup_cmd(ns, req, &iod->cmd); if (ret) return ret; @@ -275,6 +292,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -305,8 +323,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->ctrl.queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) { + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + } } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) @@ -346,6 +366,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) return ret; + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); } return 0; @@ -387,6 +408,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) if (error) goto out_cleanup_queue; + set_bit(NVME_LOOP_Q_LIVE, 
&ctrl->queues[0].flags); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); if (error) { dev_err(ctrl->ctrl.device, diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 76d2bb793afe..3333d417b248 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1512,15 +1512,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = { static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) { - struct nvmet_rdma_queue *queue; + struct nvmet_rdma_queue *queue, *tmp; /* Device is being removed, delete all queues using this device */ mutex_lock(&nvmet_rdma_queue_mutex); - list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { + list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, + queue_list) { if (queue->dev->device != ib_device) continue; pr_info("Removing queue %d\n", queue->idx); + list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ce30c9a588a4..179cbc5929b8 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1109,42 +1109,66 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, return 0; } +/* + * Convert configs to something easy to use in C code + */ +#if defined(CONFIG_CMDLINE_FORCE) +static const int overwrite_incoming_cmdline = 1; +static const int read_dt_cmdline; +static const int concat_cmdline; +#elif defined(CONFIG_CMDLINE_EXTEND) +static const int overwrite_incoming_cmdline; +static const int read_dt_cmdline = 1; +static const int concat_cmdline = 1; +#else /* CMDLINE_FROM_BOOTLOADER */ +static const int overwrite_incoming_cmdline; +static const int read_dt_cmdline = 1; +static const int concat_cmdline; +#endif + +#ifdef CONFIG_CMDLINE +static const char *config_cmdline = CONFIG_CMDLINE; +#else +static const char *config_cmdline = ""; +#endif + int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data) { - int l; - const char *p; + int l = 0; + const char *p = NULL; + char *cmdline = data; pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); - if (depth != 1 || !data || + if (depth != 1 || !cmdline || (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) return 0; early_init_dt_check_for_initrd(node); - /* Retrieve command line */ - p = of_get_flat_dt_prop(node, "bootargs", &l); - if (p != NULL && l > 0) - strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE)); - - /* - * CONFIG_CMDLINE is meant to be a default in case nothing else - * managed to set the command line, unless CONFIG_CMDLINE_FORCE - * is set in which case we override whatever was found earlier. 
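In the of/fdt change below, the #ifdef ladder being removed is replaced by three always-compiled const flags (overwrite_incoming_cmdline, read_dt_cmdline, concat_cmdline), and the concat path copies at most the DT property length and what still fits in the buffer, terminating explicitly. A plain C sketch of the resulting composition rules, using a local buffer size and a made-up helper:

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 64

static void compose(char *cmdline, const char *config, const char *bootargs,
		    int force_config, int concat)
{
	/* use the built-in line when forcing, or when nothing came in */
	if (force_config || !cmdline[0]) {
		strncpy(cmdline, config, CMDLINE_SIZE - 1);
		cmdline[CMDLINE_SIZE - 1] = '\0';
	}

	if (force_config || !bootargs)
		return;

	if (concat) {
		size_t len = strlen(cmdline);

		/* append " <bootargs>", never writing past the buffer */
		snprintf(cmdline + len, CMDLINE_SIZE - len, " %s", bootargs);
	} else {
		strncpy(cmdline, bootargs, CMDLINE_SIZE - 1);
		cmdline[CMDLINE_SIZE - 1] = '\0';
	}
}

int main(void)
{
	char cmdline[CMDLINE_SIZE] = "earlycon";

	compose(cmdline, "console=ttyS0", "root=/dev/mmcblk0p2", 0, 1);
	printf("%s\n", cmdline);	/* "earlycon root=/dev/mmcblk0p2" */
	return 0;
}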
- */ -#ifdef CONFIG_CMDLINE -#if defined(CONFIG_CMDLINE_EXTEND) - strlcat(data, " ", COMMAND_LINE_SIZE); - strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#elif defined(CONFIG_CMDLINE_FORCE) - strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#else - /* No arguments from boot loader, use kernel's cmdl*/ - if (!((char *)data)[0]) - strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif -#endif /* CONFIG_CMDLINE */ + /* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */ + if (overwrite_incoming_cmdline || !cmdline[0]) + strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE); + + /* Retrieve command line unless forcing */ + if (read_dt_cmdline) + p = of_get_flat_dt_prop(node, "bootargs", &l); + + if (p != NULL && l > 0) { + if (concat_cmdline) { + int cmdline_len; + int copy_len; + strlcat(cmdline, " ", COMMAND_LINE_SIZE); + cmdline_len = strlen(cmdline); + copy_len = COMMAND_LINE_SIZE - cmdline_len - 1; + copy_len = min((int)l, copy_len); + strncpy(cmdline + cmdline_len, p, copy_len); + cmdline[cmdline_len + copy_len] = '\0'; + } else { + strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE)); + } + } pr_debug("Command line is: %s\n", (char*)data); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 98258583abb0..8c1819230ed2 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -228,7 +228,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) rc = of_mdiobus_register_phy(mdio, child, addr); else rc = of_mdiobus_register_device(mdio, child, addr); - if (rc) + + if (rc == -ENODEV) + dev_err(&mdio->dev, + "MDIO device at address %d is missing.\n", + addr); + else if (rc) goto unregister; } @@ -252,7 +257,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) if (of_mdiobus_child_is_phy(child)) { rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc) + if (rc && rc != -ENODEV) goto unregister; } } diff --git a/drivers/of/unittest-data/.gitignore b/drivers/of/unittest-data/.gitignore deleted file mode 100644 index 4b3cf8b16de2..000000000000 --- a/drivers/of/unittest-data/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -testcases.dtb -testcases.dtb.S diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig new file mode 100644 index 000000000000..a7fbb93f302c --- /dev/null +++ b/drivers/opp/Kconfig @@ -0,0 +1,13 @@ +config PM_OPP + bool + select SRCU + ---help--- + SOCs have a standard set of tuples consisting of frequency and + voltage pairs that the device will support per voltage domain. This + is called Operating Performance Point or OPP. The actual definitions + of OPP varies over silicon within the same family of devices. + + OPP layer organizes the data internally using device pointers + representing individual voltage domains and provides SOC + implementations a ready to use framework to manage OPPs. 
+ For more information, read diff --git a/drivers/base/power/opp/Makefile b/drivers/opp/Makefile similarity index 100% rename from drivers/base/power/opp/Makefile rename to drivers/opp/Makefile diff --git a/drivers/base/power/opp/core.c b/drivers/opp/core.c similarity index 99% rename from drivers/base/power/opp/core.c rename to drivers/opp/core.c index a6de32530693..0459b1204694 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/opp/core.c @@ -296,7 +296,7 @@ int dev_pm_opp_get_opp_count(struct device *dev) opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { count = PTR_ERR(opp_table); - dev_err(dev, "%s: OPP table not found (%d)\n", + dev_dbg(dev, "%s: OPP table not found (%d)\n", __func__, count); return count; } diff --git a/drivers/base/power/opp/cpu.c b/drivers/opp/cpu.c similarity index 100% rename from drivers/base/power/opp/cpu.c rename to drivers/opp/cpu.c diff --git a/drivers/base/power/opp/debugfs.c b/drivers/opp/debugfs.c similarity index 100% rename from drivers/base/power/opp/debugfs.c rename to drivers/opp/debugfs.c diff --git a/drivers/base/power/opp/of.c b/drivers/opp/of.c similarity index 99% rename from drivers/base/power/opp/of.c rename to drivers/opp/of.c index 0b718886479b..87509cb69f79 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/opp/of.c @@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); _dev_pm_opp_remove_table(opp_table, dev, false); + of_node_put(np); goto put_opp_table; } } diff --git a/drivers/base/power/opp/opp.h b/drivers/opp/opp.h similarity index 100% rename from drivers/base/power/opp/opp.h rename to drivers/opp/opp.h diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a25fed52f7e9..41b740aed3a3 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) iounmap(base_addr); } + +/* + * The design of the Diva management card in rp34x0 machines (rp3410, rp3440) + * seems rushed, so that many built-in components simply don't work. + * The following quirks disable the serial AUX port and the built-in ATI RV100 + * Radeon 7000 graphics card which both don't have any external connectors and + * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as + * such makes those machines the only PARISC machines on which we can't use + * ttyS0 as boot console. 
+ */ +static void quirk_diva_ati_card(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x1292) + return; + + dev_info(&dev->dev, "Hiding Diva built-in ATI card"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, + quirk_diva_ati_card); + +static void quirk_diva_aux_disable(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x1291) + return; + + dev_info(&dev->dev, "Hiding Diva built-in AUX serial device"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, + quirk_diva_aux_disable); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 489492b608cf..380916bff9e0 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards { netmos_9901, netmos_9865, quatech_sppxp100, + wch_ch382l, }; @@ -2708,6 +2709,7 @@ static struct parport_pc_pci { /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, + /* wch_ch382l */ { 1, { { 2, -1 }, } }, }; static const struct pci_device_id parport_pc_pci_tbl[] = { @@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, + /* WCH CH382L PCI-E single parallel port card */ + { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 34427a6a15af..362607f727ee 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -594,6 +595,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) int i; int phy_count; struct phy **phy; + struct device_link **link; void __iomem *base; struct resource *res; struct dw_pcie *pci; @@ -649,11 +651,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; + link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); + if (!link) + return -ENOMEM; + for (i = 0; i < phy_count; i++) { snprintf(name, sizeof(name), "pcie-phy%d", i); phy[i] = devm_phy_get(dev, name); if (IS_ERR(phy[i])) return PTR_ERR(phy[i]); + + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + ret = -EINVAL; + goto err_link; + } } dra7xx->base = base; @@ -732,6 +744,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); +err_link: + while (--i >= 0) + device_link_del(link[i]); + return ret; } diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index 5bee3af47588..39405598b22d 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -178,7 +178,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, } /* interrupt controller is in a child node */ - *np_temp = of_find_node_by_name(np_pcie, controller); + *np_temp = of_get_child_by_name(np_pcie, controller); if (!(*np_temp)) { dev_err(dev, "Node for %s is absent\n", controller); return -EINVAL; @@ -187,6 +187,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, temp = of_irq_count(*np_temp); if 
(!temp) { dev_err(dev, "No IRQ entries in %s\n", controller); + of_node_put(*np_temp); return -EINVAL; } @@ -204,6 +205,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, break; } + of_node_put(*np_temp); + if (temp) { *num_irqs = temp; return 0; diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c index d53d5f168363..7c621877a939 100644 --- a/drivers/pci/dwc/pcie-designware-ep.c +++ b/drivers/pci/dwc/pcie-designware-ep.c @@ -197,20 +197,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr, static int dw_pcie_ep_get_msi(struct pci_epc *epc) { int val; - u32 lower_addr; - u32 upper_addr; struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL); - val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; - - lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); - upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); - - if (!(lower_addr || upper_addr)) + val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); + if (!(val & MSI_CAP_MSI_EN_MASK)) return -EINVAL; + val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; return val; } diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 81e2157a7cfb..bc3e2d8d0cce 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c @@ -607,7 +607,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) /* setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); val &= 0xff000000; - val |= 0x00010100; + val |= 0x00ff0100; dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); /* setup command register */ diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index e5d9d77b778e..cb493bcae8b4 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h @@ -101,6 +101,7 @@ #define MSI_MESSAGE_CONTROL 0x52 #define MSI_CAP_MMC_SHIFT 1 #define MSI_CAP_MME_SHIFT 4 +#define MSI_CAP_MSI_EN_MASK 0x1 #define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) #define MSI_MESSAGE_ADDR_L32 0x54 #define MSI_MESSAGE_ADDR_U32 0x58 diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index 424fdd6ed1ca..16cec66b1d0b 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -109,7 +109,10 @@ static int pci_epc_epf_link(struct config_item *epc_item, goto err_add_epf; func_no = find_first_zero_bit(&epc_group->function_num_map, - sizeof(epc_group->function_num_map)); + BITS_PER_LONG); + if (func_no >= BITS_PER_LONG) + return -EINVAL; + set_bit(func_no, &epc_group->function_num_map); epf->func_no = func_no; diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 0fe3ea164ee5..73b724143be0 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -457,7 +457,6 @@ struct hv_pcibus_device { spinlock_t device_list_lock; /* Protect lists below */ void __iomem *cfg_addr; - struct semaphore enum_sem; struct list_head resources_for_children; struct list_head children; @@ -471,6 +470,8 @@ struct hv_pcibus_device { struct retarget_msi_interrupt retarget_msi_interrupt_params; spinlock_t retarget_msi_interrupt_lock; + + struct workqueue_struct *wq; }; /* @@ -879,7 +880,7 @@ static void hv_irq_unmask(struct irq_data *data) int cpu; u64 res; - dest = irq_data_get_affinity_mask(data); + dest = irq_data_get_effective_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus 
= container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1042,6 +1043,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; + struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct { @@ -1056,6 +1058,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) int ret; pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); @@ -1081,14 +1084,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) switch (pci_protocol_version) { case PCI_PROTOCOL_VERSION_1_1: size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, - irq_data_get_affinity_mask(data), + dest, hpdev->desc.win_slot.slot, cfg->vector); break; case PCI_PROTOCOL_VERSION_1_2: size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, - irq_data_get_affinity_mask(data), + dest, hpdev->desc.win_slot.slot, cfg->vector); break; @@ -1602,12 +1605,8 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, * It must also treat the omission of a previously observed device as * notification that the device no longer exists. * - * Note that this function is a work item, and it may not be - * invoked in the order that it was queued. Back to back - * updates of the list of present devices may involve queuing - * multiple work items, and this one may run before ones that - * were sent later. As such, this function only does something - * if is the last one in the queue. + * Note that this function is serialized with hv_eject_device_work(), + * because both are pushed to the ordered workqueue hbus->wq. */ static void pci_devices_present_work(struct work_struct *work) { @@ -1628,11 +1627,6 @@ static void pci_devices_present_work(struct work_struct *work) INIT_LIST_HEAD(&removed); - if (down_interruptible(&hbus->enum_sem)) { - put_hvpcibus(hbus); - return; - } - /* Pull this off the queue and process it if it was the last one. 
*/ spin_lock_irqsave(&hbus->device_list_lock, flags); while (!list_empty(&hbus->dr_list)) { @@ -1649,7 +1643,6 @@ static void pci_devices_present_work(struct work_struct *work) spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (!dr) { - up(&hbus->enum_sem); put_hvpcibus(hbus); return; } @@ -1736,7 +1729,6 @@ static void pci_devices_present_work(struct work_struct *work) break; } - up(&hbus->enum_sem); put_hvpcibus(hbus); kfree(dr); } @@ -1782,7 +1774,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, spin_unlock_irqrestore(&hbus->device_list_lock, flags); get_hvpcibus(hbus); - schedule_work(&dr_wrk->wrk); + queue_work(hbus->wq, &dr_wrk->wrk); } /** @@ -1860,7 +1852,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev) get_pcichild(hpdev, hv_pcidev_ref_pnp); INIT_WORK(&hpdev->wrk, hv_eject_device_work); get_hvpcibus(hpdev->hbus); - schedule_work(&hpdev->wrk); + queue_work(hpdev->hbus->wq, &hpdev->wrk); } /** @@ -2473,13 +2465,18 @@ static int hv_pci_probe(struct hv_device *hdev, spin_lock_init(&hbus->config_lock); spin_lock_init(&hbus->device_list_lock); spin_lock_init(&hbus->retarget_msi_interrupt_lock); - sema_init(&hbus->enum_sem, 1); init_completion(&hbus->remove_event); + hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, + hbus->sysdata.domain); + if (!hbus->wq) { + ret = -ENOMEM; + goto free_bus; + } ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) - goto free_bus; + goto destroy_wq; hv_set_drvdata(hdev, hbus); @@ -2548,6 +2545,8 @@ static int hv_pci_probe(struct hv_device *hdev, hv_free_config_window(hbus); close: vmbus_close(hdev->channel); +destroy_wq: + destroy_workqueue(hbus->wq); free_bus: free_page((unsigned long)hbus); return ret; @@ -2627,6 +2626,7 @@ static int hv_pci_remove(struct hv_device *hdev) irq_domain_free_fwnode(hbus->sysdata.fwnode); put_hvpcibus(hbus); wait_for_completion(&hbus->remove_event); + destroy_workqueue(hbus->wq); free_page((unsigned long)hbus); return 0; } diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index a5073a921a04..32228d41f746 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) pcie->need_ob_cfg = true; } + /* + * DT nodes are not used by all platforms that use the iProc PCIe + * core driver. 
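In the hv_pci changes above, the enum_sem semaphore is replaced by an ordered workqueue: alloc_ordered_workqueue() returns a queue that executes at most one item at a time, in submission order, so pci_devices_present_work() and hv_eject_device_work() serialize against each other by construction. A minimal module-style sketch of that pattern (not the hv_pci code; names are illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static void first_fn(struct work_struct *w)
{
	pr_info("first work item\n");
}

static void second_fn(struct work_struct *w)
{
	/* guaranteed not to run until first_fn() has returned */
	pr_info("second work item\n");
}

static DECLARE_WORK(first_work, first_fn);
static DECLARE_WORK(second_work, second_fn);

static int __init ordered_wq_init(void)
{
	wq = alloc_ordered_workqueue("example_ordered", 0);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &first_work);
	queue_work(wq, &second_work);
	return 0;
}

static void __exit ordered_wq_exit(void)
{
	destroy_workqueue(wq);	/* flushes any pending work first */
}

module_init(ordered_wq_init);
module_exit(ordered_wq_exit);
MODULE_LICENSE("GPL");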
For platforms that require explict inbound mapping + * configuration, "dma-ranges" would have been present in DT + */ + pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); + /* PHY use is optional */ pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) { diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 3a8b9d20ee57..c0ecc9f35667 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -1396,9 +1396,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) } } - ret = iproc_pcie_map_dma_ranges(pcie); - if (ret && ret != -ENOENT) - goto err_power_off_phy; + if (pcie->need_ib_cfg) { + ret = iproc_pcie_map_dma_ranges(pcie); + if (ret && ret != -ENOENT) + goto err_power_off_phy; + } #ifdef CONFIG_ARM pcie->sysdata.private_data = pcie; diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index a6b55cec9a66..4ac6282f2bfd 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -74,6 +74,7 @@ struct iproc_msi; * @ob: outbound mapping related parameters * @ob_map: outbound mapping related parameters specific to the controller * + * @need_ib_cfg: indicates SW needs to configure the inbound mapping window * @ib: inbound mapping related parameters * @ib_map: outbound mapping region related parameters * @@ -101,6 +102,7 @@ struct iproc_pcie { struct iproc_pcie_ob ob; const struct iproc_pcie_ob_map *ob_map; + bool need_ib_cfg; struct iproc_pcie_ib ib; const struct iproc_pcie_ib_map *ib_map; diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 4e0b25d09b0c..8f44a7d14bff 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -1141,17 +1141,19 @@ static int rcar_pcie_probe(struct platform_device *pdev) INIT_LIST_HEAD(&pcie->resources); - rcar_pcie_parse_request_of_pci_ranges(pcie); + err = rcar_pcie_parse_request_of_pci_ranges(pcie); + if (err) + goto err_free_bridge; err = rcar_pcie_get_resources(pcie); if (err < 0) { dev_err(dev, "failed to request resources: %d\n", err); - goto err_free_bridge; + goto err_free_resource_list; } err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); if (err) - goto err_free_bridge; + goto err_free_resource_list; pm_runtime_enable(dev); err = pm_runtime_get_sync(dev); @@ -1194,9 +1196,10 @@ static int rcar_pcie_probe(struct platform_device *pdev) err_pm_disable: pm_runtime_disable(dev); +err_free_resource_list: + pci_free_resource_list(&pcie->resources); err_free_bridge: pci_free_host_bridge(bridge); - pci_free_resource_list(&pcie->resources); return err; } diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index ac41c8be9200..0fd8e164339c 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -162,7 +162,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset) pci_device_add(virtfn, virtfn->bus); - pci_bus_add_device(virtfn); sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) @@ -173,6 +172,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset) kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); + pci_bus_add_device(virtfn); + return 0; failed2: diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 496ed9130600..ca3c81a5ea07 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -190,7 +190,7 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); } -static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) +void __iomem 
*pci_msix_desc_addr(struct msi_desc *desc) { return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; @@ -294,7 +294,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) } } -void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +void native_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 11bd267fc137..55d1d1435b9a 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -680,13 +680,6 @@ static int pci_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; - /* - * Devices having power.ignore_children set may still be necessary for - * suspending their children in the next phase of device suspend. - */ - if (dev->power.ignore_children) - pm_runtime_resume(dev); - if (drv && drv->pm && drv->pm->prepare) { int error = drv->pm->prepare(dev); if (error) @@ -805,6 +798,9 @@ static int pci_pm_suspend_noirq(struct device *dev) pci_prepare_to_sleep(pci_dev); } + dev_dbg(dev, "PCI PM: Suspend power state: %s\n", + pci_power_name(pci_dev->current_state)); + pci_pm_set_unknown_state(pci_dev); /* @@ -968,7 +964,12 @@ static int pci_pm_thaw_noirq(struct device *dev) if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); - pci_update_current_state(pci_dev, PCI_D0); + /* + * pci_restore_state() requires the device to be in D0 (because of MSI + * restoration among other things), so force it into D0 in case the + * driver's "freeze" callbacks put it into a low-power state directly. + */ + pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6078dfc11b11..74f1c57ab93b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4356,6 +4356,10 @@ static bool pci_bus_resetable(struct pci_bus *bus) { struct pci_dev *dev; + + if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) + return false; + list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resetable(dev->subordinate))) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fdb02c1f94bb..04cdbe55cbe5 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -180,6 +180,8 @@ static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); } +void __iomem *pci_msix_desc_addr(struct msi_desc *desc); + void pci_realloc_get_opt(char *); static inline int pci_no_d1d2(struct pci_dev *dev) diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 890efcc574cb..744805232155 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, * If the error is reported by an end point, we think this * error is related to the upstream link of the end point. */ - pci_walk_bus(dev->bus, cb, &result_data); + if (state == pci_channel_io_normal) + /* + * the error is non fatal so the bus is ok, just invoke + * the callback for the function that logged the error. 
+ */ + cb(dev, &result_data); + else + pci_walk_bus(dev->bus, cb, &result_data); } return result_data.result; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 83e4a892b14b..633e55c57b13 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -453,7 +453,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, /* Choose the greater of the two T_cmn_mode_rstr_time */ val1 = (upreg->l1ss_cap >> 8) & 0xFF; - val2 = (upreg->l1ss_cap >> 8) & 0xFF; + val2 = (dwreg->l1ss_cap >> 8) & 0xFF; if (val1 > val2) link->l1ss.ctl1 |= val1 << 8; else @@ -658,7 +658,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) 0xFF00, link->l1ss.ctl1); /* Program LTR L1.2 threshold in both ports */ - pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, 0xE3FF0000, link->l1ss.ctl1); pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, 0xE3FF0000, link->l1ss.ctl1); @@ -803,10 +803,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) /* * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe - * hierarchies. + * hierarchies. Note that some PCIe host implementations omit + * the root ports entirely, in which case a downstream port on + * a switch may become the root of the link state chain for all + * its subordinate endpoints. */ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) { + pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE || + !pdev->bus->parent->self) { link->root = link; } else { struct pcie_link_state *parent; diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index fafdb165dd2e..df290aa58dce 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -226,6 +226,9 @@ static void pcie_pme_work_fn(struct work_struct *work) break; pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); + if (rtsta == (u32) ~0) + break; + if (rtsta & PCI_EXP_RTSTA_PME) { /* * Clear PME status of the port. 
If there are other @@ -273,7 +276,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) spin_lock_irqsave(&data->lock, flags); pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); - if (!(rtsta & PCI_EXP_RTSTA_PME)) { + if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) { spin_unlock_irqrestore(&data->lock, flags); return IRQ_NONE; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ff94b69738a8..27c0b09fd343 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -18,7 +18,6 @@ #include #include #include "pci.h" - #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 @@ -41,6 +40,70 @@ struct pci_domain_busn_res { int domain_nr; }; +#define PCI_IGNORE_MAX 8 + +static u16 devices_ignore_table[PCI_IGNORE_MAX]; +static int devices_ignore_cnt; + +static void parse_ignore_device(char *bdf_str) +{ + int fields; + unsigned int bus; + unsigned int dev; + unsigned int func; + + if (devices_ignore_cnt >= PCI_IGNORE_MAX - 1) + return; + + fields = sscanf(bdf_str, "%x:%x:%x", &bus, &dev, &func); + if (fields != 3) + return; + + devices_ignore_table[devices_ignore_cnt++] = + PCI_DEVID(bus, PCI_DEVFN(dev, func)); +} + +static int __init pci_deivces_ignore(char *str) +{ + int len; + char *start, *end; + char bdf[16]; + + devices_ignore_cnt = 0; + + while ((start = strchr(str, '('))) { + + end = strchr(start, ')'); + if (end == NULL) + break; + + len = end - start - 1; + if (len >= 16) /*invalid string*/ + break; + + memcpy((void *)bdf, (void *)(start+1), len); + bdf[len] = '\0'; + parse_ignore_device(bdf); + str = end + 1; + } + + return 1; +} +__setup("pci_devices_ignore=", pci_deivces_ignore); + +static bool device_on_ignore_list(int bus, int dev, int func) +{ + int i; + + for (i = 0; i < devices_ignore_cnt; i++) + if ((PCI_BUS_NUM(devices_ignore_table[i]) == bus) && + (PCI_SLOT(devices_ignore_table[i]) == dev) && + (PCI_FUNC(devices_ignore_table[i]) == func)) + return true; + + return false; +} + static struct resource *get_pci_domain_busn_res(int domain_nr) { struct pci_domain_busn_res *r; @@ -1076,7 +1139,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) child = pci_add_new_bus(bus, dev, max+1); if (!child) goto out; - pci_bus_insert_busn_res(child, max+1, 0xff); + pci_bus_insert_busn_res(child, max+1, + bus->busn_res.end); } max++; buses = (buses & 0xff000000) @@ -2133,6 +2197,11 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) return dev; } + if (device_on_ignore_list(bus->number, + PCI_SLOT(devfn), + PCI_FUNC(devfn))) + return NULL; + dev = pci_scan_device(bus, devfn); if (!dev) return NULL; @@ -2433,6 +2502,10 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus) if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) { if (max - bus->busn_res.start < pci_hotplug_bus_size - 1) max = bus->busn_res.start + pci_hotplug_bus_size - 1; + + /* Do not allocate more buses than we have room left */ + if (max > bus->busn_res.end) + max = bus->busn_res.end; } /* diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 911b3b65c8b2..05fadcc4f9d2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1636,8 +1636,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); 
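Referring back to the drivers/pci/probe.c hunk above: it introduces a pci_devices_ignore=(bus:dev:func)... kernel command-line parameter whose entries are packed with PCI_DEVID()/PCI_DEVFN() and later checked during device enumeration. Below is a minimal standalone sketch of that encoding, not part of the patch itself; the macros are restated here to mirror their <linux/pci.h> definitions, and the command-line value is an invented example.

/*
 * Standalone illustration (user-space C, not kernel code) of how each
 * "pci_devices_ignore=(bus:dev:func)" entry is packed into a 16-bit
 * device ID, as done by the probe.c hunk above.
 */
#include <stdio.h>
#include <string.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)
#define PCI_DEVID(bus, devfn)	((((bus) & 0xff) << 8) | (devfn))
#define PCI_BUS_NUM(devid)	(((devid) >> 8) & 0xff)

int main(void)
{
	/* made-up example value for the boot parameter */
	const char *cmdline = "pci_devices_ignore=(03:00:1)(05:02:0)";
	const char *p = strchr(cmdline, '=');
	unsigned int bus, dev, func;

	while (p && (p = strchr(p, '(')) != NULL) {
		if (sscanf(p, "(%x:%x:%x)", &bus, &dev, &func) == 3) {
			unsigned int devid = PCI_DEVID(bus, PCI_DEVFN(dev, func));

			/* a matching B:D.F would be skipped during the bus scan */
			printf("ignore %02x:%02x.%x (devid 0x%04x)\n",
			       PCI_BUS_NUM(devid), PCI_SLOT(devid & 0xff),
			       PCI_FUNC(devid & 0xff), devid);
		}
		p++;
	}
	return 0;
}

In the patch itself the comparison happens in device_on_ignore_list(), which pci_scan_single_device() consults before calling pci_scan_device(), so a listed function is never enumerated.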
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); /* * It's possible for the MSI to get corrupted if shpc and acpi @@ -3412,22 +3412,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { - pci_set_vpd_size(dev, 8192); + int chip = (dev->device & 0xf000) >> 12; + int func = (dev->device & 0x0f00) >> 8; + int prod = (dev->device & 0x00ff) >> 0; + + /* + * If this is a T3-based adapter, there's a 1KB VPD area at offset + * 0xc00 which contains the preferred VPD values. If this is a T4 or + * later based adapter, the special VPD is at offset 0x400 for the + * Physical Functions (the SR-IOV Virtual Functions have no VPD + * Capabilities). The PCI VPD Access core routines will normally + * compute the size of the VPD by parsing the VPD Data Structure at + * offset 0x000. This will result in silent failures when attempting + * to accesses these other VPD areas which are beyond those computed + * limits. + */ + if (chip == 0x0 && prod >= 0x20) + pci_set_vpd_size(dev, 8192); + else if (chip >= 0x4 && func < 0x8) + pci_set_vpd_size(dev, 2048); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, + quirk_chelsio_extend_vpd); #ifdef CONFIG_ACPI /* @@ -3892,6 +3899,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, + quirk_dma_func1_alias); /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB388_ESD, @@ -4212,17 +4221,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) #endif } +static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) +{ + /* + * Effectively selects all downstream ports for whole ThunderX 1 + * family by 0xf800 mask (which represents 8 SoCs), while the lower + * bits of device ID are used to indicate which subdevice is used + * within the SoC. 
+ */ + return (pci_is_pcie(dev) && + (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) && + ((dev->device & 0xf800) == 0xa000)); +} + static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { /* - * Cavium devices matching this quirk do not perform peer-to-peer - * with other functions, allowing masking out these bits as if they - * were unimplemented in the ACS capability. + * Cavium root ports don't advertise an ACS capability. However, + * the RTL internally implements similar protection as if ACS had + * Request Redirection, Completion Redirection, Source Validation, + * and Upstream Forwarding features enabled. Assert that the + * hardware implements and enables equivalent ACS functionality for + * these flags. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); - if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) + if (!pci_quirk_cavium_acs_match(dev)) return -ENOTTY; return acs_flags ? 0 : 1; diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 73a03d382590..2fa0dbde36b7 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev) pci_pme_active(dev, false); if (dev->is_added) { + device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - device_release_driver(&dev->dev); dev->is_added = 0; } diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index accaaaccb662..6601ad0dfb3a 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev, int irq, error; irq = platform_get_irq_byname(pdev, name); - if (!irq) + if (irq < 0) return -ENODEV; error = devm_request_threaded_irq(ddata->dev, irq, NULL, diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a268f4d6f3e9..48a365e303e5 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index) if (ret) return ERR_PTR(-ENODEV); + /* This phy type handled by the usb-phy subsystem for now */ + if (of_device_is_compatible(args.np, "usb-nop-xceiv")) + return ERR_PTR(-ENODEV); + mutex_lock(&phy_provider_mutex); phy_provider = of_phy_provider_lookup(args.np); if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c index 43865ef340e2..3afba145f2e6 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-ufs.c @@ -689,3 +689,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy) return 0; } EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); + +MODULE_AUTHOR("Yaniv Gardi "); +MODULE_AUTHOR("Vivek Gautam "); +MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 4307bf0013e1..63e916d4d069 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match); static struct device_node * tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. 
- */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *pads, *np; + + pads = of_get_child_by_name(padctl->dev->of_node, "pads"); + if (!pads) + return NULL; - np = of_find_node_by_name(np, "pads"); - if (np) - np = of_find_node_by_name(np, name); + np = of_get_child_by_name(pads, name); + of_node_put(pads); return np; } @@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) static struct device_node * tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(pad->dev.of_node); + struct device_node *np, *lanes; - np = of_find_node_by_name(np, "lanes"); - if (!np) + lanes = of_get_child_by_name(pad->dev.of_node, "lanes"); + if (!lanes) return NULL; - return of_find_node_by_name(np, pad->soc->lanes[index].name); + np = of_get_child_by_name(lanes, pad->soc->lanes[index].name); + of_node_put(lanes); + + return np; } static int @@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad, unsigned int i; int err; - children = of_find_node_by_name(pad->dev.of_node, "lanes"); + children = of_get_child_by_name(pad->dev.of_node, "lanes"); if (!children) return -ENODEV; @@ -444,21 +444,21 @@ static struct device_node * tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *ports, *np; + char *name; - np = of_find_node_by_name(np, "ports"); - if (np) { - char *name; + ports = of_get_child_by_name(padctl->dev->of_node, "ports"); + if (!ports) + return NULL; - name = kasprintf(GFP_KERNEL, "%s-%u", type, index); - if (!name) - return ERR_PTR(-ENOMEM); - np = of_find_node_by_name(np, name); - kfree(name); + name = kasprintf(GFP_KERNEL, "%s-%u", type, index); + if (!name) { + of_node_put(ports); + return ERR_PTR(-ENOMEM); } + np = of_get_child_by_name(ports, name); + kfree(name); + of_node_put(ports); return np; } @@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl) static int tegra_xusb_padctl_probe(struct platform_device *pdev) { - struct device_node *np = of_node_get(pdev->dev.of_node); + struct device_node *np = pdev->dev.of_node; const struct tegra_xusb_padctl_soc *soc; struct tegra_xusb_padctl *padctl; const struct of_device_id *match; @@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) int err; /* for backwards compatibility with old device trees */ - np = of_find_node_by_name(np, "pads"); + np = of_get_child_by_name(np, "pads"); if (!np) { dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n"); return tegra_xusb_padctl_legacy_probe(pdev); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 82cd8b08d71f..a73c794bed03 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -33,7 +33,8 @@ config DEBUG_PINCTRL config PINCTRL_ADI2 bool "ADI pin controller driver" - depends on BLACKFIN + depends on (BF54x || BF60x) + depends on !GPIO_ADI select PINMUX select IRQ_DOMAIN help diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 56fbe4c3e800..c55517312485 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1189,19 +1189,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p, EXPORT_SYMBOL_GPL(pinctrl_lookup_state); /** - * 
pinctrl_select_state() - select/activate/program a pinctrl state to HW + * pinctrl_commit_state() - select/activate/program a pinctrl state to HW * @p: the pinctrl handle for the device that requests configuration * @state: the state handle to select/activate/program */ -int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state) { struct pinctrl_setting *setting, *setting2; struct pinctrl_state *old_state = p->state; int ret; - if (p->state == state) - return 0; - if (p->state) { /* * For each pinmux setting in the old state, forget SW's record @@ -1265,6 +1262,19 @@ int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) return ret; } + +/** + * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * @p: the pinctrl handle for the device that requests configuration + * @state: the state handle to select/activate/program + */ +int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +{ + if (p->state == state) + return 0; + + return pinctrl_commit_state(p, state); +} EXPORT_SYMBOL_GPL(pinctrl_select_state); static void devm_pinctrl_release(struct device *dev, void *res) @@ -1430,7 +1440,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map) int pinctrl_force_sleep(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep)) - return pinctrl_select_state(pctldev->p, pctldev->hog_sleep); + return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_sleep); @@ -1442,7 +1452,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep); int pinctrl_force_default(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default)) - return pinctrl_select_state(pctldev->p, pctldev->hog_default); + return pinctrl_commit_state(pctldev->p, pctldev->hog_default); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_default); diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 0f3a02495aeb..beeb7cbb5015 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -46,6 +46,9 @@ #define BYT_TRIG_POS BIT(25) #define BYT_TRIG_LVL BIT(24) #define BYT_DEBOUNCE_EN BIT(20) +#define BYT_GLITCH_FILTER_EN BIT(19) +#define BYT_GLITCH_F_SLOW_CLK BIT(17) +#define BYT_GLITCH_F_FAST_CLK BIT(16) #define BYT_PULL_STR_SHIFT 9 #define BYT_PULL_STR_MASK (3 << BYT_PULL_STR_SHIFT) #define BYT_PULL_STR_2K (0 << BYT_PULL_STR_SHIFT) @@ -1579,6 +1582,9 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) */ value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + /* Enable glitch filtering */ + value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK | + BYT_GLITCH_F_FAST_CLK; writel(value, reg); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index fadbca907c7c..28d0a145efdb 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1620,6 +1620,38 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) clear_bit(i, chip->irq_valid_mask); } + /* + * The same set of machines in chv_no_valid_mask[] have incorrectly + * configured GPIOs that generate spurious interrupts so we use + * this same list to apply another quirk for them. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953. 
+ */ + if (!need_valid_mask) { + /* + * Mask all interrupts the community is able to generate + * but leave the ones that can only generate GPEs unmasked. + */ + chv_writel(GENMASK(31, pctrl->community->nirqs), + pctrl->regs + CHV_INTMASK); + } + /* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c index 4500880240f2..6572550cfe78 100644 --- a/drivers/pinctrl/intel/pinctrl-denverton.c +++ b/drivers/pinctrl/intel/pinctrl-denverton.c @@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 }; static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 }; static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 }; static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 }; -static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 }; +static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 }; static const unsigned int dnv_emmc_pins[] = { 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, }; diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 71df0f70b61f..dace48c647df 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -30,8 +30,6 @@ #define PADBAR 0x00c #define GPI_IS 0x100 -#define GPI_GPE_STS 0x140 -#define GPI_GPE_EN 0x160 #define PADOWN_BITS 4 #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) @@ -427,6 +425,18 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) writel(value, padcfg0); } +static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) +{ + u32 value; + + /* Put the pad into GPIO mode */ + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; + /* Disable SCI/SMI/NMI generation */ + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); + writel(value, padcfg0); +} + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) @@ -434,7 +444,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; unsigned long flags; - u32 value; raw_spin_lock_irqsave(&pctrl->lock, flags); @@ -444,13 +453,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, } padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - /* Put the pad into GPIO mode */ - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; - /* Disable SCI/SMI/NMI generation */ - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); - writel(value, padcfg0); - + intel_gpio_set_gpio_mode(padcfg0); /* Disable TX buffer and enable RX (this will be input) */ __intel_gpio_set_direction(padcfg0, true); @@ -818,7 +821,7 @@ static void intel_gpio_irq_ack(struct irq_data *d) community = intel_get_community(pctrl, pin); if (community) { const struct intel_padgroup *padgrp; - unsigned
gpp, gpp_offset, is_offset; padgrp = intel_community_get_padgroup(community, pin); if (!padgrp) @@ -826,9 +829,10 @@ static void intel_gpio_irq_ack(struct irq_data *d) gpp = padgrp->reg_num; gpp_offset = padgroup_offset(padgrp, pin); + is_offset = community->is_offset + gpp * 4; raw_spin_lock(&pctrl->lock); - writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); + writel(BIT(gpp_offset), community->regs + is_offset); raw_spin_unlock(&pctrl->lock); } } @@ -843,7 +847,7 @@ static void intel_gpio_irq_enable(struct irq_data *d) community = intel_get_community(pctrl, pin); if (community) { const struct intel_padgroup *padgrp; - unsigned gpp, gpp_offset; + unsigned gpp, gpp_offset, is_offset; unsigned long flags; u32 value; @@ -853,10 +857,11 @@ static void intel_gpio_irq_enable(struct irq_data *d) gpp = padgrp->reg_num; gpp_offset = padgroup_offset(padgrp, pin); + is_offset = community->is_offset + gpp * 4; raw_spin_lock_irqsave(&pctrl->lock, flags); /* Clear interrupt status first to avoid unexpected interrupt */ - writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); + writel(BIT(gpp_offset), community->regs + is_offset); value = readl(community->regs + community->ie_offset + gpp * 4); value |= BIT(gpp_offset); @@ -935,6 +940,8 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) raw_spin_lock_irqsave(&pctrl->lock, flags); + intel_gpio_set_gpio_mode(reg); + value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); @@ -991,7 +998,8 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, const struct intel_padgroup *padgrp = &community->gpps[gpp]; unsigned long pending, enabled, gpp_offset; - pending = readl(community->regs + GPI_IS + padgrp->reg_num * 4); + pending = readl(community->regs + community->is_offset + + padgrp->reg_num * 4); enabled = readl(community->regs + community->ie_offset + padgrp->reg_num * 4); @@ -1241,6 +1249,9 @@ int intel_pinctrl_probe(struct platform_device *pdev, community->regs = regs; community->pad_regs = regs + padbar; + if (!community->is_offset) + community->is_offset = GPI_IS; + ret = intel_pinctrl_add_padgroups(pctrl, community); if (ret) return ret; @@ -1356,7 +1367,7 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) for (gpp = 0; gpp < community->ngpps; gpp++) { /* Mask and clear all interrupts */ writel(0, base + community->ie_offset + gpp * 4); - writel(0xffff, base + GPI_IS + gpp * 4); + writel(0xffff, base + community->is_offset + gpp * 4); } } } diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index 7fdb07753c2d..13b0bd6eb2a2 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -73,6 +73,8 @@ struct intel_padgroup { * @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it * is assumed that the host owns the pin (rather than * ACPI). + * @is_offset: Register offset of GPI_IS from @regs. If %0 then uses the + * default (%0x100). * @ie_offset: Register offset of GPI_IE from @regs. 
* @pin_base: Starting pin of pins in this community * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK, @@ -98,6 +100,7 @@ struct intel_community { unsigned padown_offset; unsigned padcfglock_offset; unsigned hostown_offset; + unsigned is_offset; unsigned ie_offset; unsigned pin_base; unsigned gpp_size; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 71b944748304..c5fe7d4a9065 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, { struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = OUTPUT_EN; - unsigned int mask; + unsigned int mask, val, ret; armada_37xx_update_reg(®, offset); mask = BIT(offset); - return regmap_update_bits(info->regmap, reg, mask, mask); + ret = regmap_update_bits(info->regmap, reg, mask, mask); + + if (ret) + return ret; + + reg = OUTPUT_VAL; + val = value ? mask : 0; + regmap_update_bits(info->regmap, reg, mask, val); + + return 0; } static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 9c950bbf07ba..447763aad815 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -891,16 +891,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, goto fail; } - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); - if (ret < 0) - goto fail; - if (mcp->irq && mcp->irq_controller) { ret = mcp23s08_irq_setup(mcp); if (ret) goto fail; } + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); + if (ret < 0) + goto fail; + mcp->pinctrl_desc.name = "mcp23xxx-pinctrl"; mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops; mcp->pinctrl_desc.confops = &mcp_pinconf_ops; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index b5cb7858ffdc..a9bc1e01f982 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -1989,8 +1989,16 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { struct rockchip_pin_bank *bank = gpiochip_get_data(chip); u32 data; + int ret; + ret = clk_enable(bank->clk); + if (ret < 0) { + dev_err(bank->drvdata->dev, + "failed to enable clock for bank %s\n", bank->name); + return ret; + } data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); + clk_disable(bank->clk); return !(data & BIT(offset)); } diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 7450f5118445..70a0228f4e7f 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1144,6 +1144,27 @@ static int sx150x_probe(struct i2c_client *client, if (ret) return ret; + /* Pinctrl_desc */ + pctl->pinctrl_desc.name = "sx150x-pinctrl"; + pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; + pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; + pctl->pinctrl_desc.pins = pctl->data->pins; + pctl->pinctrl_desc.npins = pctl->data->npins; + pctl->pinctrl_desc.owner = THIS_MODULE; + + ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc, + pctl, &pctl->pctldev); + if (ret) { + dev_err(dev, "Failed to register pinctrl device\n"); + return ret; + } + + ret = pinctrl_enable(pctl->pctldev); + if (ret) { + dev_err(dev, "Failed to enable pinctrl device\n"); + return ret; + } + /* Register GPIO controller */ pctl->gpio.label = devm_kstrdup(dev, client->name, 
GFP_KERNEL); pctl->gpio.base = -1; @@ -1172,6 +1193,11 @@ static int sx150x_probe(struct i2c_client *client, if (ret) return ret; + ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev), + 0, 0, pctl->data->npins); + if (ret) + return ret; + /* Add Interrupt support if an irq is specified */ if (client->irq > 0) { pctl->irq_chip.name = devm_kstrdup(dev, client->name, @@ -1217,20 +1243,6 @@ static int sx150x_probe(struct i2c_client *client, client->irq); } - /* Pinctrl_desc */ - pctl->pinctrl_desc.name = "sx150x-pinctrl"; - pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; - pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; - pctl->pinctrl_desc.pins = pctl->data->pins; - pctl->pinctrl_desc.npins = pctl->data->npins; - pctl->pinctrl_desc.owner = THIS_MODULE; - - pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl); - if (IS_ERR(pctl->pctldev)) { - dev_err(dev, "Failed to register pinctrl device\n"); - return PTR_ERR(pctl->pctldev); - } - return 0; } diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c index 866aa3ce1ac9..6cf0006d4c8d 100644 --- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c +++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c @@ -436,3 +436,7 @@ int pxa2xx_pinctrl_exit(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit); + +MODULE_AUTHOR("Robert Jarzmik "); +MODULE_DESCRIPTION("Marvell PXA2xx pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index 071084d3ee9c..92aeea174a56 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -129,7 +129,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = { EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), }; -const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = s5pv210_pin_bank, @@ -142,6 +142,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = { + .ctrl = s5pv210_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s5pv210_pin_ctrl), +}; + /* Pad retention control code for accessing PMU regmap */ static atomic_t exynos_shared_retention_refcnt; @@ -204,7 +209,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes * two gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos3250_pin_banks0, @@ -225,6 +230,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = { + .ctrl = exynos3250_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos3250_pin_ctrl), +}; + /* pin banks of exynos4210 pin-controller 0 */ static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -308,7 +318,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes * three gpio/pin-mux/pinconfig controllers. 
*/ -const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos4210_pin_banks0, @@ -334,6 +344,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = { + .ctrl = exynos4210_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos4210_pin_ctrl), +}; + /* pin banks of exynos4x12 pin-controller 0 */ static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -396,7 +411,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes * four gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos4x12_pin_banks0, @@ -432,6 +447,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = { + .ctrl = exynos4x12_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos4x12_pin_ctrl), +}; + /* pin banks of exynos5250 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -492,7 +512,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes * four gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5250_pin_banks0, @@ -528,6 +548,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = { + .ctrl = exynos5250_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5250_pin_ctrl), +}; + /* pin banks of exynos5260 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), @@ -572,7 +597,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes * three gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5260_pin_banks0, @@ -592,6 +617,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = { + .ctrl = exynos5260_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5260_pin_ctrl), +}; + /* pin banks of exynos5410 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -662,7 +692,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes * four gpio/pin-mux/pinconfig controllers. 
*/ -const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5410_pin_banks0, @@ -695,6 +725,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = { + .ctrl = exynos5410_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5410_pin_ctrl), +}; + /* pin banks of exynos5420 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), @@ -779,7 +814,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes * four gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5420_pin_banks0, @@ -813,3 +848,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { .retention_data = &exynos4_audio_retention_data, }, }; + +const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = { + .ctrl = exynos5420_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5420_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index 08e9fdb58fd2..0ab88fc268ea 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -180,7 +180,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes * ten gpio/pin-mux/pinconfig controllers. 
*/ -const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5433_pin_banks0, @@ -265,6 +265,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = { + .ctrl = exynos5433_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5433_pin_ctrl), +}; + /* pin banks of exynos7 pin-controller - ALIVE */ static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), @@ -344,7 +349,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = { EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), }; -const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { { /* pin-controller instance 0 Alive data */ .pin_banks = exynos7_pin_banks0, @@ -397,3 +402,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { .eint_gpio_init = exynos_eint_gpio_init, }, }; + +const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = { + .ctrl = exynos7_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index edf27264b603..67da1cf18b68 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -570,7 +570,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = { PIN_BANK_2BIT(13, 0x080, "gpj"), }; -const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { { .pin_banks = s3c2412_pin_banks, .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), @@ -578,6 +578,11 @@ const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = { + .ctrl = s3c2412_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl), +}; + static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { PIN_BANK_A(27, 0x000, "gpa"), PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -592,7 +597,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { PIN_BANK_2BIT(2, 0x100, "gpm"), }; -const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { { .pin_banks = s3c2416_pin_banks, .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), @@ -600,6 +605,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = { + .ctrl = s3c2416_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl), +}; + static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { PIN_BANK_A(25, 0x000, "gpa"), PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -612,7 +622,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { PIN_BANK_2BIT(13, 0x0d0, "gpj"), }; -const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { { .pin_banks = s3c2440_pin_banks, .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), @@ -620,6 +630,11 @@ const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = { + .ctrl = 
s3c2440_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl), +}; + static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { PIN_BANK_A(28, 0x000, "gpa"), PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -635,10 +650,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { PIN_BANK_2BIT(2, 0x100, "gpm"), }; -const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { { .pin_banks = s3c2450_pin_banks, .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), .eint_wkup_init = s3c24xx_eint_init, }, }; + +const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = { + .ctrl = s3c2450_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index e63663b32907..0bdc1e683181 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -794,7 +794,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = { * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes * one gpio/pin-mux/pinconfig controller. */ -const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { { /* pin-controller instance 1 data */ .pin_banks = s3c64xx_pin_banks0, @@ -803,3 +803,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { .eint_wkup_init = s3c64xx_eint_eint0_init, }, }; + +const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = { + .ctrl = s3c64xx_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c64xx_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index e04f7fe0a65d..26e8fab736f1 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -947,12 +947,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev, return 0; } +static const struct samsung_pin_ctrl * +samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct samsung_pinctrl_of_match_data *of_data; + int id; + + id = of_alias_get_id(node, "pinctrl"); + if (id < 0) { + dev_err(&pdev->dev, "failed to get alias id\n"); + return NULL; + } + + of_data = of_device_get_match_data(&pdev->dev); + if (id >= of_data->num_ctrl) { + dev_err(&pdev->dev, "invalid alias id %d\n", id); + return NULL; + } + + return &(of_data->ctrl[id]); +} + /* retrieve the soc specific data */ static const struct samsung_pin_ctrl * samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, struct platform_device *pdev) { - int id; struct device_node *node = pdev->dev.of_node; struct device_node *np; const struct samsung_pin_bank_data *bdata; @@ -962,13 +983,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; unsigned int i; - id = of_alias_get_id(node, "pinctrl"); - if (id < 0) { - dev_err(&pdev->dev, "failed to get alias id\n"); + ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev); + if (!ctrl) return ERR_PTR(-ENOENT); - } - ctrl = of_device_get_match_data(&pdev->dev); - ctrl += id; d->suspend = ctrl->suspend; d->resume = ctrl->resume; @@ -1193,41 +1210,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev) static const struct of_device_id samsung_pinctrl_dt_match[] = { #ifdef 
CONFIG_PINCTRL_EXYNOS_ARM { .compatible = "samsung,exynos3250-pinctrl", - .data = exynos3250_pin_ctrl }, + .data = &exynos3250_of_data }, { .compatible = "samsung,exynos4210-pinctrl", - .data = exynos4210_pin_ctrl }, + .data = &exynos4210_of_data }, { .compatible = "samsung,exynos4x12-pinctrl", - .data = exynos4x12_pin_ctrl }, + .data = &exynos4x12_of_data }, { .compatible = "samsung,exynos5250-pinctrl", - .data = exynos5250_pin_ctrl }, + .data = &exynos5250_of_data }, { .compatible = "samsung,exynos5260-pinctrl", - .data = exynos5260_pin_ctrl }, + .data = &exynos5260_of_data }, { .compatible = "samsung,exynos5410-pinctrl", - .data = exynos5410_pin_ctrl }, + .data = &exynos5410_of_data }, { .compatible = "samsung,exynos5420-pinctrl", - .data = exynos5420_pin_ctrl }, + .data = &exynos5420_of_data }, { .compatible = "samsung,s5pv210-pinctrl", - .data = s5pv210_pin_ctrl }, + .data = &s5pv210_of_data }, #endif #ifdef CONFIG_PINCTRL_EXYNOS_ARM64 { .compatible = "samsung,exynos5433-pinctrl", - .data = exynos5433_pin_ctrl }, + .data = &exynos5433_of_data }, { .compatible = "samsung,exynos7-pinctrl", - .data = exynos7_pin_ctrl }, + .data = &exynos7_of_data }, #endif #ifdef CONFIG_PINCTRL_S3C64XX { .compatible = "samsung,s3c64xx-pinctrl", - .data = s3c64xx_pin_ctrl }, + .data = &s3c64xx_of_data }, #endif #ifdef CONFIG_PINCTRL_S3C24XX { .compatible = "samsung,s3c2412-pinctrl", - .data = s3c2412_pin_ctrl }, + .data = &s3c2412_of_data }, { .compatible = "samsung,s3c2416-pinctrl", - .data = s3c2416_pin_ctrl }, + .data = &s3c2416_of_data }, { .compatible = "samsung,s3c2440-pinctrl", - .data = s3c2440_pin_ctrl }, + .data = &s3c2440_of_data }, { .compatible = "samsung,s3c2450-pinctrl", - .data = s3c2450_pin_ctrl }, + .data = &s3c2450_of_data }, #endif {}, }; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index 9af07af6cad6..ae932e0c05f2 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -285,6 +285,16 @@ struct samsung_pinctrl_drv_data { void (*resume)(struct samsung_pinctrl_drv_data *); }; +/** + * struct samsung_pinctrl_of_match_data: OF match device specific configuration data. + * @ctrl: array of pin controller data. + * @num_ctrl: size of array @ctrl. + */ +struct samsung_pinctrl_of_match_data { + const struct samsung_pin_ctrl *ctrl; + unsigned int num_ctrl; +}; + /** * struct samsung_pin_group: represent group of pins of a pinmux function. * @name: name of the pin group, used to lookup the group. 
@@ -313,20 +323,20 @@ struct samsung_pmx_func { }; /* list of all exported SoC specific data */ -extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; -extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; +extern const struct samsung_pinctrl_of_match_data exynos3250_of_data; +extern const struct samsung_pinctrl_of_match_data exynos4210_of_data; +extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5250_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5260_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5410_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5420_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5433_of_data; +extern const struct samsung_pinctrl_of_match_data exynos7_of_data; +extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; +extern const struct samsung_pinctrl_of_match_data s5pv210_of_data; #endif /* __PINCTRL_SAMSUNG_H */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 10bd35f8c894..c01ef02d326b 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -4826,6 +4826,10 @@ static const char * const can0_groups[] = { "can0_data_d", "can0_data_e", "can0_data_f", + /* + * Retained for backwards compatibility, use can_clk_groups in new + * designs. + */ "can_clk", "can_clk_b", "can_clk_c", @@ -4837,6 +4841,21 @@ static const char * const can1_groups[] = { "can1_data_b", "can1_data_c", "can1_data_d", + /* + * Retained for backwards compatibility, use can_clk_groups in new + * designs. + */ + "can_clk", + "can_clk_b", + "can_clk_c", + "can_clk_d", +}; + +/* + * can_clk_groups allows for independent configuration, use can_clk function + * in new designs. 
+ */ +static const char * const can_clk_groups[] = { "can_clk", "can_clk_b", "can_clk_c", @@ -5308,7 +5327,7 @@ static const char * const vin2_groups[] = { }; static const struct { - struct sh_pfc_function common[56]; + struct sh_pfc_function common[57]; struct sh_pfc_function r8a779x[2]; } pinmux_functions = { .common = { @@ -5316,6 +5335,7 @@ static const struct { SH_PFC_FUNCTION(avb), SH_PFC_FUNCTION(can0), SH_PFC_FUNCTION(can1), + SH_PFC_FUNCTION(can_clk), SH_PFC_FUNCTION(du), SH_PFC_FUNCTION(du0), SH_PFC_FUNCTION(du1), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c index 95fd0994893a..ad037534aa13 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c @@ -1397,7 +1397,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP16_27_24, AUDIO_CLKOUT_B, SEL_ADG_1), PINMUX_IPSR_MSEL(IP16_27_24, SSI_SCK2_B, SEL_SSI_1), PINMUX_IPSR_MSEL(IP16_27_24, TS_SDEN1_D, SEL_TSIF1_3), - PINMUX_IPSR_MSEL(IP16_27_24, STP_ISEN_1_D, SEL_SSP1_1_2), + PINMUX_IPSR_MSEL(IP16_27_24, STP_ISEN_1_D, SEL_SSP1_1_3), PINMUX_IPSR_MSEL(IP16_27_24, STP_OPWM_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D0_B, SEL_DRIF3_1), PINMUX_IPSR_MSEL(IP16_27_24, TCLK2_B, SEL_TIMER_TMU_1), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c index 4f2a726bbaeb..f5f77432ce6f 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c @@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */ - SUNXI_FUNCTION(0x4, "uart0")), /* RX */ + SUNXI_FUNCTION(0x3, "uart0")), /* RX */ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c index bc14e954d7a2..b7ca9a40cc66 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c @@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */ - SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */ SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */ - SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */ SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */ - SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 8dfa7fcb1248..e7bbdf947bbc 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -60,12 +60,14 @@ static int send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) { int ret; + int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg); if (ec_dev->proto_version > 2) - ret = ec_dev->pkt_xfer(ec_dev, msg); + xfer_fxn = ec_dev->pkt_xfer; else - ret 
= ec_dev->cmd_xfer(ec_dev, msg); + xfer_fxn = ec_dev->cmd_xfer; + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; struct cros_ec_command *status_msg; @@ -88,7 +90,7 @@ static int send_command(struct cros_ec_device *ec_dev, for (i = 0; i < EC_COMMAND_RETRIES; i++) { usleep_range(10000, 11000); - ret = ec_dev->cmd_xfer(ec_dev, status_msg); + ret = (*xfer_fxn)(ec_dev, status_msg); if (ret < 0) break; diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index f3baf9973989..24f1630a8b3f 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c +++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev, count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: EC error %d\n", msg->result); else { - msg->data[sizeof(msg->data) - 1] = '\0'; + msg->data[EC_HOST_PARAM_SIZE - 1] = '\0'; count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: %s\n", msg->data); } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 80b87954f6dd..c853d1a48271 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1139,6 +1139,23 @@ config SILEAD_DMI with the OS-image for the device. This option supplies the missing information. Enable this for x86 tablets with Silead touchscreens. +config INTEL_PSTORE_PRAM + tristate "Intel pstore RAM backend driver (PRAM BIOS feature)" + depends on ACPI + depends on PSTORE_RAM + ---help--- + This driver provides a RAM backend for pstore, managed by the BIOS + as the PRAM (Persisted RAM buffer) debug feature. + + The PRAM BIOS feature is configurable through BIOS setup or the + PRAM_Conf EFI variable (GUID ecb54cd9-e5ae-4fdc-a971-e877756068f7). + Accepted values for the variable are 0, 1, 2 and 3 as an ASCII + string; these configure the PRAM feature as Disabled, 4 MB, + 16 MB and 64 MB respectively. + + It is safe to say Y; the driver will not bind if your BIOS doesn't + support this feature. 
+ endif # X86_PLATFORM_DEVICES config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index f9e3ae683bbe..9e056fe7d6c0 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -84,3 +84,4 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_MLX_CPLD_PLATFORM) += mlxcpld-hotplug.o obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o +obj-$(CONFIG_INTEL_PSTORE_PRAM) += intel_pstore_pram.o diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 623d322447a2..7c4eb86c851e 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -54,7 +53,6 @@ struct apple_gmux_data { bool indexed; struct mutex index_lock; - struct pci_dev *pdev; struct backlight_device *bdev; /* switcheroo data */ @@ -599,23 +597,6 @@ static int gmux_resume(struct device *dev) return 0; } -static struct pci_dev *gmux_get_io_pdev(void) -{ - struct pci_dev *pdev = NULL; - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { - u16 cmd; - - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_IO)) - continue; - - return pdev; - } - - return NULL; -} - static int is_thunderbolt(struct device *dev, void *data) { return to_pci_dev(dev)->is_thunderbolt; @@ -631,7 +612,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) int ret = -ENXIO; acpi_status status; unsigned long long gpe; - struct pci_dev *pdev = NULL; if (apple_gmux_data) return -EBUSY; @@ -682,7 +662,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) ver_minor = (version >> 16) & 0xff; ver_release = (version >> 8) & 0xff; } else { - pr_info("gmux device not present or IO disabled\n"); + pr_info("gmux device not present\n"); ret = -ENODEV; goto err_release; } @@ -690,23 +670,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, ver_release, (gmux_data->indexed ? "indexed" : "classic")); - /* - * Apple systems with gmux are EFI based and normally don't use - * VGA. In addition changing IO+MEM ownership between IGP and dGPU - * disables IO/MEM used for backlight control on some systems. - * Lock IO+MEM to GPU with active IO to prevent switch. 
- */ - pdev = gmux_get_io_pdev(); - if (pdev && vga_tryget(pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) { - pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n", - pci_name(pdev)); - ret = -EBUSY; - goto err_release; - } else if (pdev) - pr_info("locked IO for PCI:%s\n", pci_name(pdev)); - gmux_data->pdev = pdev; - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); @@ -822,10 +785,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) err_notify: backlight_device_unregister(bdev); err_release: - if (gmux_data->pdev) - vga_put(gmux_data->pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); - pci_dev_put(pdev); release_region(gmux_data->iostart, gmux_data->iolen); err_free: kfree(gmux_data); @@ -845,11 +804,6 @@ static void gmux_remove(struct pnp_dev *pnp) &gmux_notify_handler); } - if (gmux_data->pdev) { - vga_put(gmux_data->pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); - pci_dev_put(gmux_data->pdev); - } backlight_device_unregister(gmux_data->bdev); release_region(gmux_data->iostart, gmux_data->iolen); diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index f3796164329e..d4aeac3477f5 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c @@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event) return; } input_report_key(data->idev, KEY_RFKILL, 1); + input_sync(data->idev); input_report_key(data->idev, KEY_RFKILL, 0); input_sync(data->idev); } diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index f42159fd2031..7424e53157b0 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -49,6 +49,7 @@ struct quirk_entry { u8 touchpad_led; + u8 kbd_led_levels_off_1; int needs_kbd_timeouts; /* @@ -79,6 +80,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = { .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, }; +static struct quirk_entry quirk_dell_latitude_e6410 = { + .kbd_led_levels_off_1 = 1, +}; + static struct platform_driver platform_driver = { .driver = { .name = "dell-laptop", @@ -280,6 +285,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = { }, .driver_data = &quirk_dell_xps13_9333, }, + { + .callback = dmi_matched, + .ident = "Dell Latitude E6410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"), + }, + .driver_data = &quirk_dell_latitude_e6410, + }, { } }; @@ -1200,6 +1214,9 @@ static int kbd_get_info(struct kbd_info *info) units = (buffer->output[2] >> 8) & 0xFF; info->levels = (buffer->output[2] >> 16) & 0xFF; + if (quirks && quirks->kbd_led_levels_off_1 && info->levels) + info->levels--; + if (units & BIT(0)) info->seconds = (buffer->output[3] >> 0) & 0xFF; if (units & BIT(1)) diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index b4ed3dc983d5..b4224389febe 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask) if (state < 0) return state; - return state & 0x1; + return !!(state & mask); } static int __init hp_wmi_bios_2008_later(void) diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 493d8910a74e..7b12abe86b94 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = 
{ AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), + AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted), AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), diff --git a/drivers/platform/x86/intel_pstore_pram.c b/drivers/platform/x86/intel_pstore_pram.c new file mode 100644 index 000000000000..8c8b1291545f --- /dev/null +++ b/drivers/platform/x86/intel_pstore_pram.c @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#define SZ_4K 0x00001000 +#define SZ_2M 0x00200000 + +/* PRAM stands for 'Persisted RAM' from BIOS point of view */ +#define ACPI_SIG_PRAM "PRAM" + +/* + * Following parameters match to those defined in fs/pstore/ram.c in + * order to keep campatibility between driver intefaces, please refer + * to it for implementaion details. + */ +static ulong pram_record_size = SZ_4K; +module_param_named(record_size, pram_record_size, ulong, 0400); +MODULE_PARM_DESC(record_size, "size of each dump done on oops/panic"); + +static ulong pram_console_size = SZ_2M; +module_param_named(console_size, pram_console_size, ulong, 0400); +MODULE_PARM_DESC(console_size, "size of kernel console log"); + +static ulong pram_ftrace_size = 2*SZ_4K; +module_param_named(ftrace_size, pram_ftrace_size, ulong, 0400); +MODULE_PARM_DESC(ftrace_size, "size of ftrace log"); + +static int pram_dump_oops = 1; +module_param_named(dump_oops, pram_dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); + +static int pram_ecc; +module_param_named(ecc, pram_ecc, int, 0600); +MODULE_PARM_DESC(ecc, + "if non-zero, the option enables SW ECC support, provided by" + "fs/pstore/ram_core.c, and specifies ECC buffer size in bytes" + "(1 is a special value, means 16 bytes ECC)"); + +static struct ramoops_platform_data *pram_data; +static struct platform_device *pram_dev; + +struct acpi_table_pram { + struct acpi_table_header header; + u64 addr; + u32 size; +} __packed; + +static int register_pram_dev(unsigned long mem_address, + unsigned long mem_size) +{ + pram_data = kzalloc(sizeof(*pram_data), GFP_KERNEL); + if (!pram_data) { + pr_err("could not allocate pram_data\n"); + return -ENOMEM; + } + + pram_data->mem_address = mem_address; + pram_data->mem_size = mem_size; + pram_data->record_size = pram_record_size; + pram_data->console_size = pram_console_size; + pram_data->ftrace_size = pram_ftrace_size; + pram_data->dump_oops = pram_dump_oops; + /* + * For backwards compatibility with previous + * fs/pstore/ram_core.c implementation, + * intel_pstore_pram.ecc=1 means 16 bytes ECC. + */ + pram_data->ecc_info.ecc_size = pram_ecc == 1 ? 
16 : pram_ecc; + + pram_dev = platform_device_register_data(NULL, "ramoops", -1, + pram_data, sizeof(struct ramoops_platform_data)); + if (IS_ERR(pram_dev)) { + pr_err("could not create platform device: %ld\n", + PTR_ERR(pram_dev)); + kfree(pram_data); + return PTR_ERR(pram_dev); + } + + pr_info("registered pram device, addr=0x%lx, size=0x%lx\n", + (unsigned long)pram_data->mem_address, (unsigned long)pram_data->mem_size); + + return 0; +} + +static int __init intel_pram_init(void) +{ + acpi_status status; + struct acpi_table_pram *pramt; + + status = acpi_get_table(ACPI_SIG_PRAM, 0, + (struct acpi_table_header **)&pramt); + if (status == AE_NOT_FOUND) { + pr_debug("PRAM table not found\n"); + return -ENODEV; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err("Failed to get PRAM table: %s\n", msg); + return -EINVAL; + } + + if (!pramt->addr || !pramt->size) { + pr_debug("PRAM: bad address (0x%llx) or size (0x%lx)\n", + (unsigned long long)pramt->addr, + (unsigned long)pramt->size); + return -ENODEV; + } + + return register_pram_dev(pramt->addr, pramt->size); +} +postcore_initcall(intel_pram_init); + +static void __exit intel_pram_exit(void) +{ + platform_device_unregister(pram_dev); + kfree(pram_data); +} +module_exit(intel_pram_exit); diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index a47a41fc10ad..b5b890127479 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev) * - GTDRIVER_IPC BASE_IFACE */ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 3); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 4); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 5); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index bc98ef95514a..2da48ecc90c1 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) } } +/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */ +static const struct dmi_system_id peaq_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"), + DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), + }, + }, + {} +}; + static int __init peaq_wmi_init(void) { + /* WMI GUID is not unique, also check for a DMI match */ + if (!dmi_check_system(peaq_dmi_table)) + return -ENODEV; + if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return -ENODEV; @@ -86,6 +102,9 @@ static int __init peaq_wmi_init(void) static void __exit peaq_wmi_exit(void) { + if (!dmi_check_system(peaq_dmi_table)) + return; + if 
(!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return; diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 0765b1797d4c..7f8fa42a1084 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -1268,5 +1268,5 @@ static void __exit acpi_wmi_exit(void) bus_unregister(&wmi_bus_type); } -subsys_initcall(acpi_wmi_init); +subsys_initcall_sync(acpi_wmi_init); module_exit(acpi_wmi_exit); diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c index 7549c7f74a3c..c03e96e6a041 100644 --- a/drivers/power/reset/zx-reboot.c +++ b/drivers/power/reset/zx-reboot.c @@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = { }, }; module_platform_driver(zx_reboot_driver); + +MODULE_DESCRIPTION("ZTE SoCs reset driver"); +MODULE_AUTHOR("Jun Nie "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 4ebbcce45c48..5a76c6d343de 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di) } /* Enable backup battery charging */ - abx500_mask_and_set_register_interruptible(di->dev, + ret = abx500_mask_and_set_register_interruptible(di->dev, AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, RTC_BUP_CH_ENA); - if (ret < 0) + if (ret < 0) { dev_err(di->dev, "%s mask and set failed\n", __func__); + goto out; + } if (is_ab8540(di->parent)) { ret = abx500_mask_and_set_register_interruptible(di->dev, diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index d51ebd1da65e..9dc7590e07cb 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -785,6 +785,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) return 0; } +static void axp288_charger_cancel_work(void *data) +{ + struct axp288_chrg_info *info = data; + + cancel_work_sync(&info->otg.work); + cancel_work_sync(&info->cable.work); +} + static int axp288_charger_probe(struct platform_device *pdev) { int ret, i, pirq; @@ -836,6 +844,11 @@ static int axp288_charger_probe(struct platform_device *pdev) return ret; } + /* Cancel our work on cleanup, register this before the notifiers */ + ret = devm_add_action(dev, axp288_charger_cancel_work, info); + if (ret) + return ret; + /* Register for extcon notification */ INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker); info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt; diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 8e2c41ded171..8eda2a6b42b8 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,35 @@ #define BQ25890_IRQ_PIN "bq25890_irq" #define BQ25890_ID 3 +#define BQ25892_ID 0 + +/* pmic charger info */ + +#define PMIC_CHG_DEF_CC 2600000 /* in uA */ +#define PMIC_CHG_DEF_IPRECHG 256000 /* in uA */ +#define PMIC_CHG_DEF_ITERM 128000 /* in uA */ +#define PMIC_CHG_DEF_CV 4350000 /* in uV */ +#define PMIC_CHG_DEF_VSYSMIN 3500000 /* in uV */ +#define PMIC_CHG_DEF_BOOSTV 4700000 /* in uV */ +#define PMIC_CHG_DEF_BOOSTI 500000 /* in uA */ +#define PMIC_CHG_MAX_CC 2600000 /* in uA */ +#define PMIC_CHG_MAX_CV 4350000 /* in uV */ +#define PMIC_CHG_MAX_TEMP 100 /* in DegC */ +#define PMIC_CHG_MIN_TEMP 0 /* in DegC */ + +static struct bq25890_platform_data charger_drvdata = { 
+ .def_cc = PMIC_CHG_DEF_CC, + .def_cv = PMIC_CHG_DEF_CV, + .iterm = PMIC_CHG_DEF_ITERM, + .iprechg = PMIC_CHG_DEF_IPRECHG, + .sysvmin = PMIC_CHG_DEF_VSYSMIN, + .boostv = PMIC_CHG_DEF_BOOSTV, + .boosti = PMIC_CHG_DEF_BOOSTI, + .max_cc = PMIC_CHG_MAX_CC, + .max_cv = PMIC_CHG_MAX_CV, + .min_temp = PMIC_CHG_MIN_TEMP, + .max_temp = PMIC_CHG_MAX_TEMP, +}; enum bq25890_fields { F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */ @@ -88,6 +118,7 @@ struct bq25890_state { struct bq25890_device { struct i2c_client *client; + struct bq25890_platform_data *pdata; struct device *dev; struct power_supply *charger; @@ -104,6 +135,11 @@ struct bq25890_device { struct bq25890_state state; struct mutex lock; /* protect state data */ + + int cc; + int cv; + int inlmt; + int chg_cntl_max; }; static const struct regmap_range bq25890_readonly_reg_ranges[] = { @@ -732,6 +768,7 @@ static int bq25890_irq_probe(struct bq25890_device *bq) return gpiod_to_irq(irq); } +#ifdef CONFIG_OF static int bq25890_fw_read_u32_props(struct bq25890_device *bq) { int ret; @@ -791,6 +828,37 @@ static int bq25890_fw_probe(struct bq25890_device *bq) return 0; } +#else +static int bq25890_fw_probe(struct bq25890_device *bq) +{ + struct bq25890_init_data *init = &bq->init_data; + + /* initialize the BXT platform data */ + init->ichg = bq25890_find_idx(bq->pdata->def_cc, TBL_ICHG); + init->vreg = bq25890_find_idx(bq->pdata->def_cv, TBL_VREG); + init->iterm = bq25890_find_idx(bq->pdata->iterm, TBL_ITERM); + init->iprechg = bq25890_find_idx(bq->pdata->iprechg, TBL_IPRECHG); + init->sysvmin = bq25890_find_idx(bq->pdata->sysvmin, TBL_SYSVMIN); + init->boostv = bq25890_find_idx(bq->pdata->boostv, TBL_BOOSTV); + init->boosti = bq25890_find_idx(bq->pdata->boosti, TBL_BOOSTI); + init->treg = bq25890_find_idx(bq->pdata->max_temp, TBL_TREG); + init->boostf = bq->pdata->boostf_low; + init->ilim_en = bq->pdata->en_ilim_pin; + + bq->cc = bq->pdata->def_cc; + bq->cv = bq->pdata->def_cv; + + /* + * divide the limit into 100mA steps so that + * total available steps will be n + 2 including + * zero and High-Z mode. 
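+ * For example, the default limit of 2600000 uA (PMIC_CHG_DEF_CC) gives + * 26 + 2 = 28 charge control levels.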
+ */ + bq->chg_cntl_max = (bq->pdata->def_cc / 100000) + 2; + + return 0; +} +#endif + static int bq25890_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -840,12 +908,13 @@ static int bq25890_probe(struct i2c_client *client, return bq->chip_id; } - if (bq->chip_id != BQ25890_ID) { + if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25892_ID)) { dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id); return -ENODEV; } if (!dev->platform_data) { + bq->pdata = &charger_drvdata; ret = bq25890_fw_probe(bq); if (ret < 0) { dev_err(dev, "Cannot read device properties.\n"); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 5204f115970f..da589c3decb2 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -121,7 +121,10 @@ static ssize_t power_supply_show_property(struct device *dev, else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); - return sprintf(buf, "%d\n", value.intval); + if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT) + return sprintf(buf, "%lld\n", value.int64val); + else + return sprintf(buf, "%d\n", value.intval); } static ssize_t power_supply_store_property(struct device *dev, @@ -245,6 +248,12 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(precharge_current), POWER_SUPPLY_ATTR(charge_term_current), POWER_SUPPLY_ATTR(calibrate), + /* Local extensions */ + POWER_SUPPLY_ATTR(usb_hc), + POWER_SUPPLY_ATTR(usb_otg), + POWER_SUPPLY_ATTR(charge_enabled), + /* Local extensions of type int64_t */ + POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(model_name), POWER_SUPPLY_ATTR(manufacturer), diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c index e464582a390a..3439f1e902cb 100644 --- a/drivers/pwm/pwm-stmpe.c +++ b/drivers/pwm/pwm-stmpe.c @@ -145,7 +145,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, break; case 2: - offset = STMPE24XX_PWMIC1; + offset = STMPE24XX_PWMIC2; break; default: diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 5beb0c361076..76afe1449cab 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -963,7 +963,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, req->sgt.sgl, req->sgt.nents, dir); if (nents == -EFAULT) { rmcd_error("Failed to map SG list"); - return -EFAULT; + ret = -EFAULT; + goto err_pg; } ret = do_dma_request(req, xfer, sync, nents); diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 72c8b3e1022b..e0a9c445ed67 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) * arbitrary timeout. 
*/ ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, - !(val & STM32_VRR), 650, 10000); + val & STM32_VRR, 650, 10000); if (ret) { dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5dcc9bf1c5bc..e8e12c2b1d0e 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -227,6 +227,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, init_completion(&channel->open_req); init_completion(&channel->open_ack); + init_completion(&channel->intent_req_comp); INIT_LIST_HEAD(&channel->done_intents); INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); @@ -1616,3 +1617,6 @@ void qcom_glink_native_unregister(struct qcom_glink *glink) device_unregister(glink->dev); } EXPORT_SYMBOL_GPL(qcom_glink_native_unregister); + +MODULE_DESCRIPTION("Qualcomm GLINK driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8cec9a02c0b8..9eb32ead63db 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) } timerqueue_add(&rtc->timerqueue, &timer->node); - if (!next) { + if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c index 9e336184491c..8ff9dc3fe5bf 100644 --- a/drivers/rtc/rtc-ac100.c +++ b/drivers/rtc/rtc-ac100.c @@ -137,13 +137,15 @@ static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw, div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) & ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1); prate = divider_recalc_rate(hw, prate, div, - ac100_clkout_prediv, 0); + ac100_clkout_prediv, 0, + AC100_CLKOUT_PRE_DIV_WIDTH); } div = (reg >> AC100_CLKOUT_DIV_SHIFT) & (BIT(AC100_CLKOUT_DIV_WIDTH) - 1); return divider_recalc_rate(hw, prate, div, NULL, - CLK_DIVIDER_POWER_OF_TWO); + CLK_DIVIDER_POWER_OF_TWO, + AC100_CLKOUT_DIV_WIDTH); } static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate, @@ -567,6 +569,12 @@ static int ac100_rtc_probe(struct platform_device *pdev) return chip->irq; } + chip->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(chip->rtc)) + return PTR_ERR(chip->rtc); + + chip->rtc->ops = &ac100_rtc_ops; + ret = devm_request_threaded_irq(&pdev->dev, chip->irq, NULL, ac100_rtc_irq, IRQF_SHARED | IRQF_ONESHOT, @@ -586,17 +594,16 @@ static int ac100_rtc_probe(struct platform_device *pdev) /* clear counter alarm pending interrupts */ regmap_write(chip->regmap, AC100_ALM_INT_STA, AC100_ALM_INT_ENABLE); - chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-ac100", - &ac100_rtc_ops, THIS_MODULE); - if (IS_ERR(chip->rtc)) { - dev_err(&pdev->dev, "unable to register device\n"); - return PTR_ERR(chip->rtc); - } - ret = ac100_rtc_register_clks(chip); if (ret) return ret; + ret = rtc_register_device(chip->rtc); + if (ret) { + dev_err(&pdev->dev, "unable to register device\n"); + return ret; + } + dev_info(&pdev->dev, "RTC enabled\n"); return 0; diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c index 796ac792a381..6cee61201c30 100644 --- a/drivers/rtc/rtc-brcmstb-waketimer.c +++ b/drivers/rtc/rtc-brcmstb-waketimer.c @@ -253,7 +253,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev) ret = devm_request_irq(dev, 
timer->irq, brcmstb_waketmr_irq, 0, "brcmstb-waketimer", timer); if (ret < 0) - return ret; + goto err_clk; timer->reboot_notifier.notifier_call = brcmstb_waketmr_reboot; register_reboot_notifier(&timer->reboot_notifier); @@ -262,12 +262,21 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev) &brcmstb_waketmr_ops, THIS_MODULE); if (IS_ERR(timer->rtc)) { dev_err(dev, "unable to register device\n"); - unregister_reboot_notifier(&timer->reboot_notifier); - return PTR_ERR(timer->rtc); + ret = PTR_ERR(timer->rtc); + goto err_notifier; } dev_info(dev, "registered, with irq %d\n", timer->irq); + return 0; + +err_notifier: + unregister_reboot_notifier(&timer->reboot_notifier); + +err_clk: + if (timer->clk) + clk_disable_unprepare(timer->clk); + return ret; } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index f4c070ea8384..c90fba3ed861 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -154,6 +154,8 @@ struct m41t80_data { struct rtc_device *rtc; #ifdef CONFIG_COMMON_CLK struct clk_hw sqw; + unsigned long freq; + unsigned int sqwe; #endif }; @@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume); #ifdef CONFIG_COMMON_CLK #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw) -static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long m41t80_decode_freq(int setting) +{ + return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ : + M41T80_SQW_MAX_FREQ >> setting; +} + +static unsigned long m41t80_get_freq(struct m41t80_data *m41t80) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); struct i2c_client *client = m41t80->client; int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ? M41T80_REG_WDAY : M41T80_REG_SQW; int ret = i2c_smbus_read_byte_data(client, reg_sqw); - unsigned long val = M41T80_SQW_MAX_FREQ; if (ret < 0) return 0; + return m41t80_decode_freq(ret >> 4); +} - ret >>= 4; - if (ret == 0) - val = 0; - else if (ret > 1) - val = val / (1 << ret); - - return val; +static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return sqw_to_m41t80_data(hw)->freq; } static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { - int i, freq = M41T80_SQW_MAX_FREQ; - - if (freq <= rate) - return freq; - - for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) { - freq /= 1 << i; - if (freq <= rate) - return freq; - } - - return 0; + if (rate >= M41T80_SQW_MAX_FREQ) + return M41T80_SQW_MAX_FREQ; + if (rate >= M41T80_SQW_MAX_FREQ / 4) + return M41T80_SQW_MAX_FREQ / 4; + if (!rate) + return 0; + return 1 << ilog2(rate); } static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, @@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, M41T80_REG_WDAY : M41T80_REG_SQW; int reg, ret, val = 0; - if (rate) { - if (!is_power_of_2(rate)) - return -EINVAL; - val = ilog2(rate); - if (val == ilog2(M41T80_SQW_MAX_FREQ)) - val = 1; - else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1)) - val = ilog2(M41T80_SQW_MAX_FREQ) - val; - else - return -EINVAL; - } + if (rate >= M41T80_SQW_MAX_FREQ) + val = 1; + else if (rate >= M41T80_SQW_MAX_FREQ / 4) + val = 2; + else if (rate) + val = 15 - ilog2(rate); reg = i2c_smbus_read_byte_data(client, reg_sqw); if (reg < 0) @@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, reg = (reg & 0x0f) | (val << 4); ret = i2c_smbus_write_byte_data(client, reg_sqw, 
reg); - if (ret < 0) - return ret; - - return -EINVAL; + if (!ret) + m41t80->freq = m41t80_decode_freq(val); + return ret; } static int m41t80_sqw_control(struct clk_hw *hw, bool enable) @@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable) else ret &= ~M41T80_ALMON_SQWE; - return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + if (!ret) + m41t80->sqwe = enable; + return ret; } static int m41t80_sqw_prepare(struct clk_hw *hw) @@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw) static int m41t80_sqw_is_prepared(struct clk_hw *hw) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); - struct i2c_client *client = m41t80->client; - int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); - - if (ret < 0) - return ret; - - return !!(ret & M41T80_ALMON_SQWE); + return sqw_to_m41t80_data(hw)->sqwe; } static const struct clk_ops m41t80_sqw_ops = { @@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80) init.parent_names = NULL; init.num_parents = 0; m41t80->sqw.init = &init; + m41t80->freq = m41t80_get_freq(m41t80); /* optional override of the clockname */ of_property_read_string(node, "clock-output-names", &init.name); diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index e2a946c0e667..304e891e35fc 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) { long rc = OPAL_BUSY; + int retries = 10; u32 y_m_d; u64 h_m_s_ms; __be32 __y_m_d; @@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); - else + else if (retries-- && (rc == OPAL_HARDWARE + || rc == OPAL_INTERNAL_ERROR)) msleep(10); + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) + break; } if (rc != OPAL_SUCCESS) @@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) { long rc = OPAL_BUSY; + int retries = 10; u32 y_m_d = 0; u64 h_m_s_ms = 0; @@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) rc = opal_rtc_write(y_m_d, h_m_s_ms); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); - else + else if (retries-- && (rc == OPAL_HARDWARE + || rc == OPAL_INTERNAL_ERROR)) msleep(10); + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) + break; } return rc == OPAL_SUCCESS ? 0 : -EIO; diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c index 4bcfb88674d3..34aea38ebfa6 100644 --- a/drivers/rtc/rtc-palmas.c +++ b/drivers/rtc/rtc-palmas.c @@ -45,6 +45,42 @@ struct palmas_rtc { /* Total number of RTC registers needed to set time*/ #define PALMAS_NUM_TIME_REGS (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1) +/* + * Special bin2bcd mapping to deal with bcd storage of year. + * + * 0-69 -> 0xD0 + * 70-99 (1970 - 1999) -> 0xD0 - 0xF9 (correctly rolls to 0x00) + * 100-199 (2000 - 2099) -> 0x00 - 0x99 (does not roll to 0xA0 :-( ) + * 200-229 (2100 - 2129) -> 0xA0 - 0xC9 (really for completeness) + * 230- -> 0xC9 + * + * Confirmed: the only transition that does not work correctly for this rtc + * clock is the transition from 2099 to 2100, it proceeds to 2000. 
We will + * accept this issue since the clock retains and transitions the year correctly + * in all other conditions. + */ +static unsigned char year_bin2bcd(int val) +{ + if (val < 70) + return 0xD0; + if (val < 100) + return bin2bcd(val - 20) | 0x80; /* KISS leverage of bin2bcd */ + if (val >= 230) + return 0xC9; + if (val >= 200) + return bin2bcd(val - 180) | 0x80; + return bin2bcd(val - 100); +} + +static int year_bcd2bin(unsigned char val) +{ + if (val >= 0xD0) + return bcd2bin(val & 0x7F) + 20; + if (val >= 0xA0) + return bcd2bin(val & 0x7F) + 180; + return bcd2bin(val) + 100; +} + static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned char rtc_data[PALMAS_NUM_TIME_REGS]; @@ -71,7 +107,7 @@ static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_hour = bcd2bin(rtc_data[2]); tm->tm_mday = bcd2bin(rtc_data[3]); tm->tm_mon = bcd2bin(rtc_data[4]) - 1; - tm->tm_year = bcd2bin(rtc_data[5]) + 100; + tm->tm_year = year_bcd2bin(rtc_data[5]); return ret; } @@ -87,7 +123,7 @@ static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_data[2] = bin2bcd(tm->tm_hour); rtc_data[3] = bin2bcd(tm->tm_mday); rtc_data[4] = bin2bcd(tm->tm_mon + 1); - rtc_data[5] = bin2bcd(tm->tm_year - 100); + rtc_data[5] = year_bin2bcd(tm->tm_year); /* Stop RTC while updating the RTC time registers */ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG, @@ -142,7 +178,7 @@ static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->time.tm_hour = bcd2bin(alarm_data[2]); alm->time.tm_mday = bcd2bin(alarm_data[3]); alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1; - alm->time.tm_year = bcd2bin(alarm_data[5]) + 100; + alm->time.tm_year = year_bcd2bin(alarm_data[5]); ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG, &int_val); @@ -173,7 +209,7 @@ static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) alarm_data[2] = bin2bcd(alm->time.tm_hour); alarm_data[3] = bin2bcd(alm->time.tm_mday); alarm_data[4] = bin2bcd(alm->time.tm_mon + 1); - alarm_data[5] = bin2bcd(alm->time.tm_year - 100); + alarm_data[5] = year_bin2bcd(alm->time.tm_year); ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS); diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index cea6ea4df970..8c836c51a508 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw, return 0; buf &= PCF8563_REG_CLKO_F_MASK; - return clkout_rates[ret]; + return clkout_rates[buf]; } static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index e1687e19c59f..a30f24cb6c83 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev) dev_pm_clear_wake_irq(&adev->dev); device_init_wakeup(&adev->dev, false); - free_irq(adev->irq[0], ldata); + if (adev->irq[0]) + free_irq(adev->irq[0], ldata); rtc_device_unregister(ldata->rtc); iounmap(ldata->base); kfree(ldata); @@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_rtc; } - if (request_irq(adev->irq[0], pl031_interrupt, - vendor->irqflags, "rtc-pl031", ldata)) { - ret = -EIO; - goto out_no_irq; + if (adev->irq[0]) { + ret = request_irq(adev->irq[0], pl031_interrupt, + vendor->irqflags, "rtc-pl031", ldata); + if 
(ret) + goto out_no_irq; + dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); } - dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); return 0; out_no_irq: diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index c94b606e0df8..ee14d8e45c97 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) erp = dasd_3990_erp_handle_match_erp(cqr, erp); } + + /* + * For path verification work we need to stick with the path that was + * originally chosen so that the per path configuration data is + * assigned correctly. + */ + if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) { + erp->lpm = cqr->lpm; + } + if (device->features & DASD_FEATURE_ERPLOG) { /* print current erp_chain */ dev_err(&device->cdev->dev, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 8eafcd5fa004..5ede251c52ca 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -530,10 +530,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, pfxdata->validity.define_extent = 1; /* private uid is kept up to date, conf_data may be outdated */ - if (startpriv->uid.type != UA_BASE_DEVICE) { + if (startpriv->uid.type == UA_BASE_PAV_ALIAS) pfxdata->validity.verify_base = 1; - if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) - pfxdata->validity.hyper_pav = 1; + + if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { + pfxdata->validity.verify_base = 1; + pfxdata->validity.hyper_pav = 1; } rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize); @@ -3414,10 +3416,12 @@ static int prepare_itcw(struct itcw *itcw, pfxdata.validity.define_extent = 1; /* private uid is kept up to date, conf_data may be outdated */ - if (startpriv->uid.type != UA_BASE_DEVICE) { + if (startpriv->uid.type == UA_BASE_PAV_ALIAS) + pfxdata.validity.verify_base = 1; + + if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { pfxdata.validity.verify_base = 1; - if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) - pfxdata.validity.hyper_pav = 1; + pfxdata.validity.hyper_pav = 1; } switch (cmd) { diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index a4ad39ba3873..8941e7caaf4d 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, int start, int count, int auto_ack) { - int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; + int rc, tmp_count = count, tmp_start = start, nr = q->nr; unsigned int ccq = 0; qperf_inc(q, eqbs); @@ -149,14 +149,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, qperf_inc(q, eqbs_partial); DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", tmp_count); - /* - * Retry once, if that fails bail out and process the - * extracted buffers before trying again. - */ - if (!retried++) - goto again; - else - return count - tmp_count; + return count - tmp_count; } DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); @@ -212,7 +205,10 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, return 0; } -/* returns number of examined buffers and their common state in *state */ +/* + * Returns number of examined buffers and their common state in *state. + * Requested number of buffers-to-examine must be > 0. 
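+ * The first buffer's state is used as the reference; counting stops at + * the first buffer whose state differs (with PENDING merged into EMPTY + * when merge_pending is set).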
+ */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, int auto_ack, int merge_pending) @@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack); - for (i = 0; i < count; i++) { - if (!__state) { - __state = q->slsb.val[bufnr]; - if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) - __state = SLSB_P_OUTPUT_EMPTY; - } else if (merge_pending) { - if ((q->slsb.val[bufnr] & __state) != __state) - break; - } else if (q->slsb.val[bufnr] != __state) - break; + /* get initial state: */ + __state = q->slsb.val[bufnr]; + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) + __state = SLSB_P_OUTPUT_EMPTY; + + for (i = 1; i < count; i++) { bufnr = next_buf(bufnr); + + /* merge PENDING into EMPTY: */ + if (merge_pending && + q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING && + __state == SLSB_P_OUTPUT_EMPTY) + continue; + + /* stop if next state differs from initial state: */ + if (q->slsb.val[bufnr] != __state) + break; } *state = __state; return i; diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index f20b4d66c75f..4a39b54732d0 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -330,6 +330,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx) { struct ccw1 *ccw = chain->ch_ccw + idx; + if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw)) + return; if (!ccw->count) return; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index b5f4006198b9..a9a56aa9c26b 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -218,8 +218,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, weight += atomic_read(&zq->load); pref_weight += atomic_read(&pref_zq->load); if (weight == pref_weight) - return &zq->queue->total_request_count > - &pref_zq->queue->total_request_count; + return zq->queue->total_request_count > + pref_zq->queue->total_request_count; return weight > pref_weight; } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 47a13c5723c6..6b1e83539a9d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -564,9 +564,9 @@ enum qeth_cq { }; struct qeth_ipato { - int enabled; - int invert4; - int invert6; + bool enabled; + bool invert4; + bool invert6; struct list_head entries; }; @@ -580,6 +580,11 @@ struct qeth_cmd_buffer { void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); }; +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + /** * definition of a qeth channel, used for read and write */ @@ -834,7 +839,7 @@ struct qeth_trap_id { */ static inline int qeth_get_elements_for_range(addr_t start, addr_t end) { - return PFN_UP(end - 1) - PFN_DOWN(start); + return PFN_UP(end) - PFN_DOWN(start); } static inline int qeth_get_micros(void) @@ -985,6 +990,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, int qeth_set_features(struct net_device *, netdev_features_t); int qeth_recover_features(struct net_device *); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); int qeth_vm_request_mac(struct qeth_card *card); int qeth_push_hdr(struct sk_buff *skb, struct 
qeth_hdr **hdr, unsigned int len); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bae7440abc01..939b5b5e97ef 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -19,6 +19,11 @@ #include #include #include +#include +#include +#include +#include + #include #include @@ -521,8 +526,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) queue == card->qdio.no_in_queues - 1; } - -static int qeth_issue_next_read(struct qeth_card *card) +static int __qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -553,6 +557,17 @@ static int qeth_issue_next_read(struct qeth_card *card) return rc; } +static int qeth_issue_next_read(struct qeth_card *card) +{ + int ret; + + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + ret = __qeth_issue_next_read(card); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + return ret; +} + static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; @@ -956,7 +971,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_running_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); - wake_up(&card->wait_q); + wake_up_all(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); @@ -1160,6 +1175,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } rc = qeth_get_problem(cdev, irb); if (rc) { + card->read_or_write_problem = 1; qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1178,7 +1194,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, return; if (channel == &card->read && channel->state == CH_STATE_UP) - qeth_issue_next_read(card); + __qeth_issue_next_read(card); iob = channel->iob; index = channel->buf_no; @@ -1474,9 +1490,9 @@ static int qeth_setup_card(struct qeth_card *card) qeth_set_intial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); - card->ipato.enabled = 0; - card->ipato.invert4 = 0; - card->ipato.invert6 = 0; + card->ipato.enabled = false; + card->ipato.invert4 = false; + card->ipato.invert6 = false; /* init QDIO stuff */ qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); @@ -2068,7 +2084,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); @@ -2082,23 +2098,27 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply->callback = reply_cb; reply->param = reply_param; - if (card->state == CARD_STATE_DOWN) - reply->seqno = QETH_IDX_COMMAND_SEQNO; - else - reply->seqno = card->seqno.ipa++; + init_waitqueue_head(&reply->wait_q); - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; - qeth_prepare_control_data(card, len, iob); - if (IS_IPA(iob->data)) + if (IS_IPA(iob->data)) { + cmd = __ipa_cmd(iob); + cmd->hdr.seqno = card->seqno.ipa++; + reply->seqno = cmd->hdr.seqno; event_timeout = QETH_IPA_TIMEOUT; - else + } else { + reply->seqno = QETH_IDX_COMMAND_SEQNO; event_timeout = QETH_TIMEOUT; + } + 
qeth_prepare_control_data(card, len, iob); + + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); @@ -2123,9 +2143,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - if ((cmd->hdr.command == IPA_CMD_SETIP) && - (cmd->hdr.prot_version == QETH_PROT_IPV4)) { + if (cmd && cmd->hdr.command == IPA_CMD_SETIP && + cmd->hdr.prot_version == QETH_PROT_IPV4) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; @@ -2889,7 +2908,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; - cmd->hdr.seqno = card->seqno.ipa; + /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (__u8) card->info.portno; if (card->options.layer2) @@ -3854,10 +3873,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset) { - int elements = qeth_get_elements_for_range( - (addr_t)skb->data + data_offset, - (addr_t)skb->data + skb_headlen(skb)) + - qeth_get_elements_for_frags(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + addr_t start = (addr_t)skb->data + data_offset; + + if (start != end) + elements += qeth_get_elements_for_range(start, end); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " @@ -5051,8 +5072,6 @@ static void qeth_core_free_card(struct qeth_card *card) QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); - if (card->dev) - free_netdev(card->dev); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); kfree(card); @@ -5440,6 +5459,13 @@ int qeth_poll(struct napi_struct *napi, int budget) } EXPORT_SYMBOL_GPL(qeth_poll); +static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + return cmd->hdr.return_code; +} + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -6299,7 +6325,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, (struct qeth_checksum_cmd *)reply->param; QETH_CARD_TEXT(card, 4, "chkdoccb"); - if (cmd->hdr.return_code) + if (qeth_setassparms_inspect_rc(cmd)) return 0; memset(chksum_cb, 0, sizeof(*chksum_cb)); @@ -6505,6 +6531,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev, } EXPORT_SYMBOL_GPL(qeth_fix_features); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* GSO segmentation builds skbs with + * a (small) linear part for the headers, and + * page frags for the data. + * Compared to a linear skb, the header-only part consumes an + * additional buffer element. This reduces buffer utilization, and + * hurts throughput. So compress small segments into one element. 
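+ * NETIF_F_SG is only cleared when the resulting per-segment linear + * allocation stays order-0.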
+ */ + if (netif_needs_gso(skb, features)) { + /* match skb_segment(): */ + unsigned int doffset = skb->data - skb_mac_header(skb); + unsigned int hsize = skb_shinfo(skb)->gso_size; + unsigned int hroom = skb_headroom(skb); + + /* linearize only if resulting skb allocations are order-0: */ + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) + features &= ~NETIF_F_SG; + } + + return vlan_features_check(skb, features); +} +EXPORT_SYMBOL_GPL(qeth_features_check); + static int __init qeth_core_init(void) { int rc; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 760b023eae95..521293b1f4fa 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -935,8 +935,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) qeth_l2_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } return; @@ -963,6 +963,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_stop = qeth_l2_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_features_check = qeth_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -1009,6 +1010,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { card->dev->hw_features = NETIF_F_SG; card->dev->vlan_features = NETIF_F_SG; + card->dev->features |= NETIF_F_SG; /* OSA 3S and earlier has no RX/TX support */ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { card->dev->hw_features |= NETIF_F_IP_CSUM; @@ -1027,8 +1029,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 194ae9b577cc..8727b9517de8 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -40,8 +40,40 @@ struct qeth_ipaddr { unsigned int pfxlen; } a6; } u; - }; + +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + if (a1->proto != a2->proto) + return false; + if (a1->proto == QETH_PROT_IPV6) + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); + return a1->u.a4.addr == a2->u.a4.addr; +} + +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), + * so 'proto' and 'addr' match for sure. + * + * For ucast: + * - 'mac' is always 0. + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching + * values are required to avoid mixups in takeover eligibility. + * + * For mcast, + * - 'mac' is mapped from the IP, and thus always matches. + * - 'mask'/'pfxlen' is always 0. 
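+ * Hence the comparison below only needs to check 'type' plus + * 'mask'/'pfxlen'.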
+ */ + if (a1->type != a2->type) + return false; + if (a1->proto == QETH_PROT_IPV6) + return a1->u.a6.pfxlen == a2->u.a6.pfxlen; + return a1->u.a4.mask == a2->u.a4.mask; +} + static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) { u64 ret = 0; @@ -82,7 +114,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, const u8 *); -int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); +void qeth_l3_update_ipato(struct qeth_card *card); struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ab661a431f7c..1c62cbbaa66f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -149,6 +149,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto, return -EINVAL; } +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, + struct qeth_ipaddr *query) +{ + u64 key = qeth_l3_ipaddr_hash(query); + struct qeth_ipaddr *addr; + + if (query->is_multicast) { + hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } else { + hash_for_each_possible(card->ip_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } + return NULL; +} + static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) { int i, j; @@ -163,8 +181,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) } } -int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, - struct qeth_ipaddr *addr) +static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, + struct qeth_ipaddr *addr) { struct qeth_ipato_entry *ipatoe; u8 addr_bits[128] = {0, }; @@ -173,6 +191,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, if (!card->ipato.enabled) return 0; + if (addr->type != QETH_IP_TYPE_NORMAL) + return 0; qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, (addr->proto == QETH_PROT_IPV4)? 
4:16); @@ -200,34 +220,6 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, return rc; } -inline int -qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) -{ - return addr1->proto == addr2->proto && - !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && - !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac)); -} - -static struct qeth_ipaddr * -qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) -{ - struct qeth_ipaddr *addr; - - if (tmp_addr->is_multicast) { - hash_for_each_possible(card->ip_mc_htable, addr, - hnode, qeth_l3_ipaddr_hash(tmp_addr)) - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) - return addr; - } else { - hash_for_each_possible(card->ip_htable, addr, - hnode, qeth_l3_ipaddr_hash(tmp_addr)) - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) - return addr; - } - - return NULL; -} - int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { int rc = 0; @@ -242,23 +234,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); } - addr = qeth_l3_ip_from_hash(card, tmp_addr); - if (!addr) + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) return -ENOENT; addr->ref_counter--; - if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || - addr->type == QETH_IP_TYPE_RXIP)) + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) return rc; if (addr->in_progress) return -EINPROGRESS; - if (!qeth_card_hw_is_reachable(card)) { - addr->disp_flag = QETH_DISP_ADDR_DELETE; - return 0; - } - - rc = qeth_l3_deregister_addr_entry(card, addr); + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l3_deregister_addr_entry(card, addr); hash_del(&addr->hnode); kfree(addr); @@ -270,6 +257,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { int rc = 0; struct qeth_ipaddr *addr; + char buf[40]; QETH_CARD_TEXT(card, 4, "addip"); @@ -280,8 +268,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); } - addr = qeth_l3_ip_from_hash(card, tmp_addr); - if (!addr) { + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (addr) { + if (tmp_addr->type != QETH_IP_TYPE_NORMAL) + return -EADDRINUSE; + if (qeth_l3_addr_match_all(addr, tmp_addr)) { + addr->ref_counter++; + return 0; + } + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, + buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + return -EADDRINUSE; + } else { addr = qeth_l3_get_addr_buffer(tmp_addr->proto); if (!addr) return -ENOMEM; @@ -289,8 +289,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); addr->ref_counter = 1; - if (addr->type == QETH_IP_TYPE_NORMAL && - qeth_l3_is_addr_covered_by_ipato(card, addr)) { + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { QETH_CARD_TEXT(card, 2, "tkovaddr"); addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } @@ -322,19 +321,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) (rc == IPA_RC_LAN_OFFLINE)) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; if (addr->ref_counter < 1) { - qeth_l3_delete_ip(card, addr); + qeth_l3_deregister_addr_entry(card, addr); + hash_del(&addr->hnode); kfree(addr); } } else { hash_del(&addr->hnode); kfree(addr); } - } else { - if (addr->type == QETH_IP_TYPE_NORMAL || - addr->type == 
QETH_IP_TYPE_RXIP) - addr->ref_counter++; } - return rc; } @@ -402,11 +397,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) spin_lock_bh(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { - if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { - qeth_l3_deregister_addr_entry(card, addr); - hash_del(&addr->hnode); - kfree(addr); - } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; spin_unlock_bh(&card->ip_lock); @@ -604,6 +595,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) /* * IP address takeover related functions */ + +/** + * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. + * + * Caller must hold ip_lock. + */ +void qeth_l3_update_ipato(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + unsigned int i; + + hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->type != QETH_IP_TYPE_NORMAL) + continue; + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + else + addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; + } +} + static void qeth_l3_clear_ipato_list(struct qeth_card *card) { struct qeth_ipato_entry *ipatoe, *tmp; @@ -615,6 +627,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) kfree(ipatoe); } + qeth_l3_update_ipato(card); spin_unlock_bh(&card->ip_lock); } @@ -639,8 +652,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, } } - if (!rc) + if (!rc) { list_add_tail(&new->entry, &card->ipato.entries); + qeth_l3_update_ipato(card); + } spin_unlock_bh(&card->ip_lock); @@ -663,6 +678,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, (proto == QETH_PROT_IPV4)? 4:16) && (ipatoe->mask_bits == mask_bits)) { list_del(&ipatoe->entry); + qeth_l3_update_ipato(card); kfree(ipatoe); } } @@ -697,12 +713,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, return -ENOMEM; spin_lock_bh(&card->ip_lock); - - if (qeth_l3_ip_from_hash(card, ipaddr)) - rc = -EEXIST; - else - qeth_l3_add_ip(card, ipaddr); - + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); @@ -765,12 +776,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, return -ENOMEM; spin_lock_bh(&card->ip_lock); - - if (qeth_l3_ip_from_hash(card, ipaddr)) - rc = -EEXIST; - else - qeth_l3_add_ip(card, ipaddr); - + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); @@ -1376,9 +1382,11 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); memcpy(tmp->mac, buf, sizeof(tmp->mac)); + tmp->is_multicast = 1; - ipm = qeth_l3_ip_from_hash(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, tmp); if (ipm) { + /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; } else { ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); @@ -1461,8 +1469,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev) sizeof(struct in6_addr)); tmp->is_multicast = 1; - ipm = qeth_l3_ip_from_hash(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, tmp); if (ipm) { + /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; continue; } @@ -1553,7 +1562,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (!addr) - return; + goto out; spin_lock_bh(&card->ip_lock); @@ -1567,6 +1576,7 @@ static 
void qeth_l3_free_vlan_addresses4(struct qeth_card *card, spin_unlock_bh(&card->ip_lock); kfree(addr); +out: in_dev_put(in_dev); } @@ -1591,7 +1601,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (!addr) - return; + goto out; spin_lock_bh(&card->ip_lock); @@ -1606,6 +1616,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, spin_unlock_bh(&card->ip_lock); kfree(addr); +out: in6_dev_put(in6_dev); #endif /* CONFIG_QETH_IPV6 */ } @@ -2604,11 +2615,12 @@ static void qeth_tso_fill_header(struct qeth_card *card, static int qeth_l3_get_elements_no_tso(struct qeth_card *card, struct sk_buff *skb, int extra_elems) { - addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); - int elements = qeth_get_elements_for_range( - tcpdptr, - (addr_t)skb->data + skb_headlen(skb)) + - qeth_get_elements_for_frags(skb); + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + + if (start != end) + elements += qeth_get_elements_for_range(start, end); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, @@ -2920,6 +2932,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_stop = qeth_l3_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_features_check = qeth_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_do_ioctl, @@ -2960,6 +2973,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->vlan_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; + card->dev->features |= NETIF_F_SG; } } } else if (card->info.type == QETH_CARD_TYPE_IQD) { @@ -2987,8 +3001,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE; + netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * + PAGE_SIZE); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); @@ -3032,8 +3046,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_l3_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 7a829ad77783..1295dd8ec849 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - struct qeth_ipaddr *addr; - int i, rc = 0; + bool enable; + int rc = 0; if (!card) return -EINVAL; @@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, } if (sysfs_streq(buf, "toggle")) { - card->ipato.enabled = (card->ipato.enabled)? 
0 : 1; - } else if (sysfs_streq(buf, "1")) { - card->ipato.enabled = 1; - hash_for_each(card->ip_htable, i, addr, hnode) { - if ((addr->type == QETH_IP_TYPE_NORMAL) && - qeth_l3_is_addr_covered_by_ipato(card, addr)) - addr->set_flags |= - QETH_IPA_SETIP_TAKEOVER_FLAG; - } - } else if (sysfs_streq(buf, "0")) { - card->ipato.enabled = 0; - hash_for_each(card->ip_htable, i, addr, hnode) { - if (addr->set_flags & - QETH_IPA_SETIP_TAKEOVER_FLAG) - addr->set_flags &= - ~QETH_IPA_SETIP_TAKEOVER_FLAG; - } - } else + enable = !card->ipato.enabled; + } else if (kstrtobool(buf, &enable)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.enabled != enable) { + card->ipato.enabled = enable; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + bool invert; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); - if (sysfs_streq(buf, "toggle")) - card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; - else if (sysfs_streq(buf, "1")) - card->ipato.invert4 = 1; - else if (sysfs_streq(buf, "0")) - card->ipato.invert4 = 0; - else + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert4; + } else if (kstrtobool(buf, &invert)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.invert4 != invert) { + card->ipato.invert4 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + bool invert; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); - if (sysfs_streq(buf, "toggle")) - card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; - else if (sysfs_streq(buf, "1")) - card->ipato.invert6 = 1; - else if (sysfs_streq(buf, "0")) - card->ipato.invert6 = 0; - else + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert6; + } else if (kstrtobool(buf, &invert)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.invert6 != invert) { + card->ipato.invert6 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index af3e4d3f9735..7173ae53c526 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -913,8 +913,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) memset(str, ' ', sizeof(*str)); if (sup_adap_info->adapter_type_text[0]) { - char *cp = sup_adap_info->adapter_type_text; int c; + char *cp; + char *cname = kmemdup(sup_adap_info->adapter_type_text, + sizeof(sup_adap_info->adapter_type_text), + GFP_ATOMIC); + if (!cname) + return; + + cp = cname; if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) inqstrcpy("SMC", str->vid); else { @@ -923,7 +930,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) ++cp; c = *cp; *cp = '\0'; - inqstrcpy(sup_adap_info->adapter_type_text, str->vid); + inqstrcpy(cname, str->vid); *cp = c; while (*cp && *cp != ' ') ++cp; @@ -937,8 +944,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) cp[sizeof(str->pid)] = '\0'; } inqstrcpy (cp, str->pid); - if (c) - cp[sizeof(str->pid)] = c; + + kfree(cname); } else { struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 403a639574e5..b0b290f7b8dc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1724,6 +1724,7 @@ struct aac_dev #define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) #define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040) +#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080) /* * Define the command values diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index dfe8e70f8d99..c0a4fcb7fd0a 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1583,6 +1583,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) * will ensure that i/o is queisced and the card is flushed in that * case. 
*/ + aac_free_irq(aac); aac_fib_map_free(aac); dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, aac->comm_phys); @@ -1590,7 +1591,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) aac->comm_phys = 0; kfree(aac->queues); aac->queues = NULL; - aac_free_irq(aac); kfree(aac->fsa_dev); aac->fsa_dev = NULL; @@ -1672,14 +1672,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) out: aac->in_reset = 0; scsi_unblock_requests(host); - /* - * Issue bus rescan to catch any configuration that might have - * occurred - */ - if (!retval) { - dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); - scsi_scan_host(host); - } + if (jafo) { spin_lock_irq(host->host_lock); } @@ -2383,19 +2376,19 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, goto out; } -int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) +int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now) { struct tm cur_tm; char wellness_str[] = "TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; u32 datasize = sizeof(wellness_str); - unsigned long local_time; + time64_t local_time; int ret = -ENODEV; if (!dev->sa_firmware) goto out; - local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60)); - time_to_tm(local_time, 0, &cur_tm); + local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60)); + time64_to_tm(local_time, 0, &cur_tm); cur_tm.tm_mon += 1; cur_tm.tm_year += 1900; wellness_str[8] = bin2bcd(cur_tm.tm_hour); @@ -2412,7 +2405,7 @@ int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) return ret; } -int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) +int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now) { int ret = -ENOMEM; struct fib *fibptr; @@ -2424,7 +2417,7 @@ int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) aac_fib_init(fibptr); info = (__le32 *)fib_data(fibptr); - *info = cpu_to_le32(now->tv_sec); + *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */ ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, 1, 1, NULL, NULL); @@ -2496,7 +2489,7 @@ int aac_command_thread(void *data) } if (!time_before(next_check_jiffies,next_jiffies) && ((difference = next_jiffies - jiffies) <= 0)) { - struct timeval now; + struct timespec64 now; int ret; /* Don't even try to talk to adapter if its sick */ @@ -2506,15 +2499,15 @@ int aac_command_thread(void *data) next_check_jiffies = jiffies + ((long)(unsigned)check_interval) * HZ; - do_gettimeofday(&now); + ktime_get_real_ts64(&now); /* Synchronize our watches */ - if (((1000000 - (1000000 / HZ)) > now.tv_usec) - && (now.tv_usec > (1000000 / HZ))) - difference = (((1000000 - now.tv_usec) * HZ) - + 500000) / 1000000; + if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) + && (now.tv_nsec > (NSEC_PER_SEC / HZ))) + difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ) + + NSEC_PER_SEC / 2) / NSEC_PER_SEC; else { - if (now.tv_usec > 500000) + if (now.tv_nsec > NSEC_PER_SEC / 2) ++now.tv_sec; if (dev->sa_firmware) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index c9252b138c1f..509fe23fafe1 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) info = &aac->hba_map[bus][cid]; if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || info->devtype != AAC_DEVTYPE_NATIVE_RAW) { - fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; + fib->flags |= FIB_CONTEXT_FLAG_EH_RESET; cmd->SCp.phase = 
AAC_OWNER_ERROR_HANDLER; } } diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index b2e8c0dfc79c..1aa46d0763a0 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job) struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; - struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct bfad_im_port_s *im_port = bfad_get_im_port(shost); struct bfad_s *bfad = im_port->bfad; struct request_queue *request_q = job->req->q; void *payload_kbuf; @@ -3357,7 +3358,8 @@ int bfad_im_bsg_els_ct_request(struct bsg_job *job) { struct bfa_bsg_data *bsg_data; - struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct bfad_im_port_s *im_port = bfad_get_im_port(shost); struct bfad_s *bfad = im_port->bfad; bfa_bsg_fcpt_t *bsg_fcpt; struct bfad_fcxp *drv_fcxp; diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 8dcd8c70c7ee..05f523971348 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, struct bfad_s *bfad = port->bfad; struct bfa_s *bfa = &bfad->bfa; struct bfa_ioc_s *ioc = &bfa->ioc; - int addr, len, rc, i; + int addr, rc, i; + u32 len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; @@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); - if (rc < 2) { + if (rc < 2 || len > (UINT_MAX >> 2)) { printk(KERN_INFO "bfad[%d]: %s failed to read user buf\n", bfad->inst_no, __func__); diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 24e657a4ec80..c05d6e91e4bd 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -546,6 +546,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, struct device *dev) { + struct bfad_im_port_pointer *im_portp; int error = 1; mutex_lock(&bfad_mutex); @@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, goto out_free_idr; } - im_port->shost->hostdata[0] = (unsigned long)im_port; + im_portp = shost_priv(im_port->shost); + im_portp->p = im_port; im_port->shost->unique_id = im_port->idr_id; im_port->shost->this_id = -1; im_port->shost->max_id = MAX_FCP_TARGET; @@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) sht->sg_tablesize = bfad->cfg_data.io_max_sge; - return scsi_host_alloc(sht, sizeof(unsigned long)); + return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer)); } void diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index c81ec2a77ef5..06ce4ba2b7bc 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -69,6 +69,16 @@ struct bfad_im_port_s { struct fc_vport *fc_vport; }; +struct bfad_im_port_pointer { + struct bfad_im_port_s *p; +}; + +static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host) +{ + struct bfad_im_port_pointer *im_portp = shost_priv(host); + return im_portp->p; +} + enum bfad_itnim_state { ITNIM_STATE_NONE, ITNIM_STATE_ONLINE, diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 
1d02cf9fe06c..30d5f0ef29bb 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1575,6 +1575,7 @@ static void release_offload_resources(struct cxgbi_sock *csk) csk, csk->state, csk->flags, csk->tid); cxgbi_sock_free_cpl_skbs(csk); + cxgbi_sock_purge_write_queue(csk); if (csk->wr_cred != csk->wr_max_cred) { cxgbi_sock_purge_wr_queue(csk); cxgbi_sock_reset_wr_list(csk); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 76b8b7eed0c0..0b6467206f8e 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) cmd->parent = afu; cmd->hwq_index = hwq_index; + cmd->sa.ioasc = 0; cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 16664f2e15fb..8fa9bb336ad4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -185,13 +185,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; + if (!task->lldd_task) + return; + + task->lldd_task = NULL; + if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) dma_unmap_sg(dev, task->scatter, slot->n_elem, task->data_dir); - task->lldd_task = NULL; - if (sas_dev) atomic64_dec(&sas_dev->running_req); } @@ -199,8 +202,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, if (slot->buf) dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); - list_del_init(&slot->entry); + slot->buf = NULL; slot->task = NULL; slot->port = NULL; hisi_sas_slot_index_free(hisi_hba, slot->idx); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index fe3a0da3ec97..dd9464920456 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev) scsi_proc_hostdir_rm(shost->hostt); + /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */ + rcu_barrier(); + if (shost->tmf_work_q) destroy_workqueue(shost->tmf_work_q); if (shost->ehandler) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4ed3d26ffdde..5fbaf13781b6 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8684,6 +8684,8 @@ static void hpsa_remove_one(struct pci_dev *pdev) destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); + hpsa_delete_sas_host(h); + /* * Call before disabling interrupts. 
* scsi_remove_host can trigger I/O operations especially @@ -8718,8 +8720,6 @@ static void hpsa_remove_one(struct pci_dev *pdev) h->lockup_detected = NULL; /* init_one 2 */ /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ - hpsa_delete_sas_host(h); - kfree(h); /* init_one 1 */ } @@ -9207,9 +9207,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) struct sas_phy *phy = hpsa_sas_phy->phy; sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (hpsa_sas_phy->added_to_port) list_del(&hpsa_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(hpsa_sas_phy); } diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 9a0696f68f37..b81a53c4a9a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes { }; struct ibmvfc_fcp_rsp_info { - __be16 reserved; + u8 reserved[3]; u8 rsp_code; u8 reserved2[4]; }__attribute__((packed, aligned (2))); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f8dc1601efd5..bddbe2da5283 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) */ switch (session->state) { case ISCSI_STATE_FAILED: + /* + * cmds should fail during shutdown, if the session + * state is bad, allowing completion to happen + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + reason = FAILURE_SESSION_FAILED; + sc->result = DID_NO_CONNECT << 16; + break; + } case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; @@ -1980,6 +1989,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) } if (session->state != ISCSI_STATE_LOGGED_IN) { + /* + * During shutdown, if session is prematurely disconnected, + * recovery won't happen and there will be hung cmds. Not + * handling cmds would trigger EH, also bad in this case. + * Instead, handle cmd, allow completion to happen and let + * upper layer to deal with the result. + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + sc->result = DID_NO_CONNECT << 16; + ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); + rc = BLK_EH_HANDLED; + goto done; + } /* * We are probably in the middle of iscsi recovery so let * that complete and handle the error. @@ -2084,7 +2106,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) task->last_timeout = jiffies; spin_unlock(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 
- "timer reset" : "nh"); + "timer reset" : "shutdown or nh"); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 6b4fd2375178..e2ea389fbec3 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) @@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); - if (!res) + if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); @@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: + kfree(req); kfree(resp); return res; @@ -2145,7 +2147,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; - unsigned int reslen = 0; + unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ @@ -2179,12 +2181,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); - if (ret > 0) { - /* positive number is the untransferred residual */ - reslen = ret; + if (ret >= 0) { + /* bsg_job_done() requires the length received */ + rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: - bsg_job_done(job, ret, reslen); + bsg_job_done(job, ret, rcvlen); } diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index ea8ad06ff582..10b17da20176 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type, int sas_eh_abort_handler(struct scsi_cmnd *cmd) { - int res; + int res = TMF_RESP_FUNC_FAILED; struct sas_task *task = TO_SAS_TASK(cmd); struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_internal *i = to_sas_internal(host->transportt); + unsigned long flags; if (!i->dft->lldd_abort_task) return FAILED; - res = i->dft->lldd_abort_task(task); + spin_lock_irqsave(host->host_lock, flags); + /* We cannot do async aborts for SATA devices */ + if (dev_is_sata(dev) && !host->host_eh_scheduled) { + spin_unlock_irqrestore(host->host_lock, flags); + return FAILED; + } + spin_unlock_irqrestore(host->host_lock, flags); + + if (task) + res = i->dft->lldd_abort_task(task); + else + SAS_DPRINTK("no task to abort\n"); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c17677f494af..dc6519b2c53a 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", + pring ? 
pring->txq_max : 0); } static DEVICE_ATTR(txq_hw, S_IRUGO, @@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", + pring ? pring->txcmplq_max : 0); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index fe9e1c079c20..d89816222b23 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, } } - if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { + if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { ret_val = -ENOMEM; goto err_post_rxbufs_exit; } @@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job) struct lpfc_iocbq *check_iocb, *next_iocb; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return -EIO; /* if job's driver data is NULL, the command completed or is in the * the process of completing. In this case, return status to request diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 33417681f5d4..126723a5bc6f 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) "Parse GID_FTrsp: did:x%x flg:x%x x%x", Did, ndlp->nlp_flag, vport->fc_flag); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); /* By default, the driver expects to support FCP FC4 */ if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 468a66371de9..91783dbdf10c 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2088,6 +2088,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp = (struct lpfc_nodelist *) cmdiocb->context1; spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; + + /* Driver supports multiple FC4 types. Counters matter. */ + vport->fc_prli_sent--; + ndlp->fc4_prli_sent--; spin_unlock_irq(shost->host_lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, @@ -2095,9 +2099,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); - /* Ddriver supports multiple FC4 types. Counters matter. */ - vport->fc_prli_sent--; - /* PRLI completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0103 PRLI completes to NPort x%06x " @@ -2111,7 +2112,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { /* Check for retry */ - ndlp->fc4_prli_sent--; if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ goto out; @@ -2190,6 +2190,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_fc4_type |= NLP_FC4_NVME; local_nlp_type = ndlp->nlp_fc4_type; + /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp + * fields here before any of them can complete. + */ + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + ndlp->nvme_fb_size = 0; + send_next_prli: if (local_nlp_type & NLP_FC4_FCP) { /* Payload is 4 + 16 = 20 x14 bytes. 
*/ @@ -2298,6 +2307,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_PRLI_SND; + + /* The vport counters are used for lpfc_scan_finished, but + * the ndlp is used to track outstanding PRLIs for different + * FC4 types. + */ + vport->fc_prli_sent++; + ndlp->fc4_prli_sent++; spin_unlock_irq(shost->host_lock); if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) { @@ -2308,12 +2324,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 1; } - /* The vport counters are used for lpfc_scan_finished, but - * the ndlp is used to track outstanding PRLIs for different - * FC4 types. - */ - vport->fc_prli_sent++; - ndlp->fc4_prli_sent++; /* The driver supports 2 FC4 types. Make sure * a PRLI is issued for all types before exiting. @@ -7430,6 +7440,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) timeout = (uint32_t)(phba->fc_ratov << 1); pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; if ((phba->pport->load_flag & FC_UNLOADING)) return; @@ -9310,6 +9322,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list) { @@ -9416,7 +9431,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, rxid, 1); /* Check if TXQ queue needs to be serviced */ - if (!(list_empty(&pring->txq))) + if (pring && !list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 20808349a80e..d9a03beb76a4 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Unblock ELS traffic */ pring = lpfc_phba_elsring(phba); - pring->flag &= ~LPFC_STOP_IOCB_EVENT; + if (pring) + pring->flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if (mb->mbxStatus) { @@ -4982,7 +4983,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_cancel_retry_delay_tmo(vport, ndlp); if ((ndlp->nlp_flag & NLP_DEFER_RM) && !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && - !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { + !(ndlp->nlp_flag & NLP_RPI_REGISTERED) && + phba->sli_rev != LPFC_SLI_REV4) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ @@ -5430,6 +5432,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) psli = &phba->sli; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; /* Error matching iocb on txq or txcmplq * First check the txq. 
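As an illustrative sketch of the pattern the lpfc hunks above keep adding: lpfc_phba_elsring() may return NULL while the adapter is initialising, recovering or unloading, so each caller now bails out before dereferencing the ring. The caller below is hypothetical (only its name is invented; the helpers and fields appear in the diff):

	/* Hypothetical caller, sketching the NULL-pring guard added above. */
	static void example_els_txq_service(struct lpfc_hba *phba)
	{
		struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);

		/* The ELS ring may not exist yet, or may be getting torn down. */
		if (unlikely(!pring))
			return;

		/* Only after the check is it safe to inspect the transmit queue. */
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
	}
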
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1db0a38683f4..2b145966c73f 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3636,7 +3636,7 @@ struct lpfc_mbx_get_port_name { #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 #define MB_CQE_STATUS_DMA_FAILED 0x5 -#define LPFC_MBX_WR_CONFIG_MAX_BDE 8 +#define LPFC_MBX_WR_CONFIG_MAX_BDE 1 struct lpfc_mbx_wr_object { struct mbox_header header; union { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 100bc4c8798d..25612ccf6ff2 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -9413,44 +9413,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) lpfc_sli4_bar0_register_memmap(phba, if_type); } - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, PCI_64BIT_BAR2))) { - /* - * Map SLI4 if type 0 HBA Control Register base to a kernel - * virtual address and setup the registers. - */ - phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); - bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); - phba->sli4_hba.ctrl_regs_memmap_p = - ioremap(phba->pci_bar1_map, bar1map_len); - if (!phba->sli4_hba.ctrl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA control registers.\n"); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { + /* + * Map SLI4 if type 0 HBA Control Register base to a + * kernel virtual address and setup the registers. + */ + phba->pci_bar1_map = pci_resource_start(pdev, + PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); + phba->sli4_hba.ctrl_regs_memmap_p = + ioremap(phba->pci_bar1_map, + bar1map_len); + if (!phba->sli4_hba.ctrl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA " + "control registers.\n"); + error = -ENOMEM; + goto out_iounmap_conf; + } + phba->pci_bar2_memmap_p = + phba->sli4_hba.ctrl_regs_memmap_p; + lpfc_sli4_bar1_register_memmap(phba); + } else { + error = -ENOMEM; goto out_iounmap_conf; } - phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; - lpfc_sli4_bar1_register_memmap(phba); } - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, PCI_64BIT_BAR4))) { - /* - * Map SLI4 if type 0 HBA Doorbell Register base to a kernel - * virtual address and setup the registers. - */ - phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); - bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); - phba->sli4_hba.drbl_regs_memmap_p = - ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->sli4_hba.drbl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA doorbell registers.\n"); - goto out_iounmap_ctrl; - } - phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; - error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); - if (error) + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { + /* + * Map SLI4 if type 0 HBA Doorbell Register base to + * a kernel virtual address and setup the registers. 
+ */ + phba->pci_bar2_map = pci_resource_start(pdev, + PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar2_map, + bar2map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA" + " doorbell registers.\n"); + error = -ENOMEM; + goto out_iounmap_ctrl; + } + phba->pci_bar4_memmap_p = + phba->sli4_hba.drbl_regs_memmap_p; + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); + if (error) + goto out_iounmap_all; + } else { + error = -ENOMEM; goto out_iounmap_all; + } } return 0; @@ -11404,6 +11422,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Remove FC host and then SCSI host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); + /* + * Bring down the SLI Layer. This step disables all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA FCoE function. + */ + lpfc_debugfs_terminate(vport); + lpfc_sli4_hba_unset(phba); /* Perform ndlp cleanup on the physical port. The nvme and nvmet * localports are destroyed after to cleanup all transport memory. @@ -11412,14 +11437,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) lpfc_nvmet_destroy_targetport(phba); lpfc_nvme_destroy_localport(vport); - /* - * Bring down the SLI Layer. This step disables all interrupts, - * clears the rings, discards all mailbox commands, and resets - * the HBA FCoE function. - */ - lpfc_debugfs_terminate(vport); - lpfc_sli4_hba_unset(phba); + lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 56faeb049b4a..87c08ff37ddd 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); if (rc < 0) { - (rqbp->rqb_free_buffer)(phba, rqb_entry); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6409 Cannot post to RQ %d: %x %x\n", rqb_entry->hrq->queue_id, rqb_entry->hrq->host_index, rqb_entry->hrq->hba_index); + (rqbp->rqb_free_buffer)(phba, rqb_entry); } else { list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); rqbp->buffer_count++; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index f3ad7cac355d..d489f6827cc1 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) pring = lpfc_phba_elsring(phba); /* In case of error recovery path, we might have a NULL pring here */ - if (!pring) + if (unlikely(!pring)) return; /* Abort outstanding I/O on NPort */ @@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, break; } + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + /* Check for Nport to NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { @@ -742,9 +747,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lp = (uint32_t *) pcmd->virt; npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t)); - ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); - ndlp->nlp_fcp_info 
&= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; if ((npr->prliType == PRLI_FCP_TYPE) || (npr->prliType == PRLI_NVME_TYPE)) { if (npr->initiatorFunc) { @@ -769,8 +771,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * type. Target mode does not issue gft_id so doesn't get * the fc4 type set until now. */ - if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE)) + if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; } if (rport) { /* We need to update the rport role values */ @@ -1552,7 +1558,6 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } else { /* RPI registration has not completed. Reject the PRLI * to prevent an illegal state transition when the @@ -1564,10 +1569,11 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); memset(&stat, 0, sizeof(struct ls_rjt)); - stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; - stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; } } else { /* Initiator mode. */ @@ -1922,13 +1928,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; } - /* Check out PRLI rsp */ - ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); - ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - - /* NVME or FCP first burst must be negotiated for each PRLI. */ - ndlp->nlp_flag &= ~NLP_FIRSTBURST; - ndlp->nvme_fb_size = 0; if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, @@ -1945,8 +1944,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; - /* PRLI completed. Decrement count. */ - ndlp->fc4_prli_sent--; } else if (nvpr && (bf_get_be32(prli_acc_rsp_code, nvpr) == PRLI_REQ_EXECUTED) && @@ -1991,8 +1988,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, be32_to_cpu(nvpr->word5), ndlp->nlp_flag, ndlp->nlp_fcp_info, ndlp->nlp_type); - /* PRLI completed. Decrement count. 
*/ - ndlp->fc4_prli_sent--; } if (!(ndlp->nlp_type & NLP_FCP_TARGET) && (vport->port_type == LPFC_NPIV_PORT) && @@ -2016,7 +2011,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); - else + else if (ndlp->nlp_type & + (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } else lpfc_printf_vlog(vport, diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0b7c1a49e203..7ac1a067d780 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1138,9 +1138,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) #endif if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, - "6025 Cannot register NVME targetport " - "x%x\n", error); + "6025 Cannot register NVME targetport x%x: " + "portnm %llx nodenm %llx segs %d qs %d\n", + error, + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); phba->targetport = NULL; + phba->nvmet_support = 0; lpfc_nvmet_cleanup_io_context(phba); @@ -1152,9 +1157,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6026 Registered NVME " "targetport: %p, private %p " - "portnm %llx nodenm %llx\n", + "portnm %llx nodenm %llx segs %d qs %d\n", phba->targetport, tgtp, - pinfo.port_name, pinfo.node_name); + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); atomic_set(&tgtp->rcv_ls_req_in, 0); atomic_set(&tgtp->rcv_ls_req_out, 0); @@ -1457,6 +1464,7 @@ static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) { +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; struct lpfc_nvmet_ctx_info *get_infop; int i; @@ -1504,6 +1512,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, get_infop = get_infop->nvmet_ctx_next_cpu; } +#endif /* Nothing found, all contexts for the MRQ are in-flight */ return NULL; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8b119f87b51d..455f3ce9fda9 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9396,10 +9396,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) * for abort iocb hba_wqidx should already * be setup based on what work queue we used. */ - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { piocb->hba_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb->context1); + piocb->hba_wqidx = piocb->hba_wqidx % + phba->cfg_fcp_io_channel; + } return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; } else { if (unlikely(!phba->sli4_hba.oas_wq)) @@ -10632,6 +10635,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) return 0; + if (!pring) { + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; + goto abort_iotag_exit; + } + /* * If we're unloading, don't abort iocb on the ELS ring, but change * the callback so that nothing happens when it finishes. 
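A short aside on the lpfc_sli4_calc_ring() hunk above: the distribution helper can hand back an index larger than the number of configured FCP I/O channels, so the patch folds it back into range before it is used to index fcp_wq[]. A minimal sketch of that clamp, assuming kernel types (u32); the function name is hypothetical:

	/* Sketch only: keep a distributed index inside the valid WQ range. */
	static inline u32 example_clamp_wqidx(u32 raw_idx, u32 nr_fcp_io_channels)
	{
		/* e.g. raw_idx = 7 with 4 channels maps to fcp_wq[3] */
		return raw_idx % nr_fcp_io_channels;
	}
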
@@ -12500,6 +12511,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, unsigned long iflags; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return NULL; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; spin_lock_irqsave(&pring->ring_lock, iflags); @@ -12507,19 +12520,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); - /* Put the iocb back on the txcmplq */ - lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - if (unlikely(!cmdiocbq)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0386 ELS complete with no corresponding " - "cmdiocb: iotag (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); + "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", + wcqe->word0, wcqe->total_data_placed, + wcqe->parameter, wcqe->word3); lpfc_sli_release_iocbq(phba, irspiocbq); return NULL; } + /* Put the iocb back on the txcmplq */ + lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + /* Fake the irspiocbq and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); @@ -17137,7 +17152,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, if (pcmd && pcmd->virt) dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); - lpfc_sli_release_iocbq(phba, iocbq); + if (iocbq) + lpfc_sli_release_iocbq(phba, iocbq); lpfc_in_buf_free(phba, &dmabuf->dbuf); } @@ -18691,6 +18707,8 @@ lpfc_drain_txq(struct lpfc_hba *phba) uint32_t txq_cnt = 0; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return 0; spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e518dadc8161..4beb4dd2bee8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6605,7 +6605,6 @@ static void megasas_detach_one(struct pci_dev *pdev) u32 pd_seq_map_sz; instance = pci_get_drvdata(pdev); - instance->unload = 1; host = instance->host; fusion = instance->ctrl_context; @@ -6616,6 +6615,7 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); + instance->unload = 1; if (megasas_wait_for_adapter_operational(instance)) goto skip_firing_dcmds; diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index ecc699a65bac..08945142b9f8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, /* * This function will Populate Driver Map using firmware raid map */ -void MR_PopulateDrvRaidMap(struct megasas_instance *instance) +static int MR_PopulateDrvRaidMap(struct megasas_instance *instance) { struct fusion_context *fusion = instance->ctrl_context; struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; @@ -259,7 +259,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); if (ld_count > MAX_LOGICAL_DRIVES_EXT) { dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n"); - return; + return 1; } pDrvRaidMap->ldCount = 
(__le16)cpu_to_le16(ld_count); @@ -285,6 +285,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) fusion->ld_map[(instance->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); + if (ld_count > MAX_LOGICAL_DRIVES) { + dev_dbg(&instance->pdev->dev, + "LD count exposed in RAID map in not valid\n"); + return 1; + } + pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; @@ -300,6 +306,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) sizeof(struct MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); } + + return 0; } /* @@ -317,8 +325,8 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) u16 ld; u32 expected_size; - - MR_PopulateDrvRaidMap(instance); + if (MR_PopulateDrvRaidMap(instance)) + return 0; fusion = instance->ctrl_context; drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 11bd2e698b84..4bf406df051b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -190,36 +190,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, /** * megasas_fire_cmd_fusion - Sends command to the FW * @instance: Adapter soft state - * @req_desc: 32bit or 64bit Request descriptor + * @req_desc: 64bit Request descriptor * - * Perform PCI Write. Ventura supports 32 bit Descriptor. - * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. + * Perform PCI Write. */ static void megasas_fire_cmd_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { - if (instance->is_ventura) - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_single_queue_port); - else { #if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | - le32_to_cpu(req_desc->u.low)); + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); - writeq(req_data, &instance->reg_set->inbound_low_queue_port); + writeq(req_data, &instance->reg_set->inbound_low_queue_port); #else - unsigned long flags; - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc->u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &instance->reg_set->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); #endif - } } /** @@ -772,7 +766,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) const char *sys_info; MFI_CAPABILITIES *drv_ops; u32 scratch_pad_2; - unsigned long flags; fusion = instance->ctrl_context; @@ -900,14 +893,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - /* For Ventura also IOC INIT required 64 bit Descriptor write. 
*/ - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc.u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc.u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + megasas_fire_cmd_fusion(instance, &req_desc); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 87999905bca3..6efa739a1912 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5659,14 +5659,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) } /** - * _wait_for_commands_to_complete - reset controller + * mpt3sas_wait_for_commands_to_complete - reset controller * @ioc: Pointer to MPT_ADAPTER structure * * This function waiting(3s) for all pending commands to complete * prior to putting controller in reset. */ -static void -_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) { u32 ioc_state; unsigned long flags; @@ -5745,7 +5745,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, is_fault = 1; } _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); - _wait_for_commands_to_complete(ioc); + mpt3sas_wait_for_commands_to_complete(ioc); _base_mask_interrupts(ioc); r = _base_make_ioc_ready(ioc, type); if (r) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index a77bb7dc12b1..2948cb7e9ae6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1292,6 +1292,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); + /* scsih shared API */ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 22998cbd538f..139219c994e9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2471,7 +2471,8 @@ scsih_abort(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2533,7 +2534,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2595,7 +2597,8 @@ scsih_target_reset(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { starget_printk(KERN_INFO, starget, "target been deleted! 
scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2652,7 +2655,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) ioc->name, scmd); scsi_print_command(scmd); - if (ioc->is_driver_loading) { + if (ioc->is_driver_loading || ioc->remove_host) { pr_info(MPT3SAS_FMT "Blocking the host reset\n", ioc->name); r = FAILED; @@ -3957,7 +3960,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) _scsih_set_satl_pending(scmd, false); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); - if (ioc->pci_error_recovery) + if (ioc->pci_error_recovery || ioc->remove_host) scmd->result = DID_NO_CONNECT << 16; else scmd->result = DID_RESET << 16; @@ -4103,19 +4106,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } - /* - * Bug work around for firmware SATL handling. The loop - * is based on atomic operations and ensures consistency - * since we're lockless at this point - */ - do { - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { - scmd->result = SAM_STAT_BUSY; - scmd->scsi_done(scmd); - return 0; - } - } while (_scsih_set_satl_pending(scmd, true)); - sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -4141,6 +4131,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) sas_device_priv_data->block) return SCSI_MLQUEUE_DEVICE_BUSY; + /* + * Bug work around for firmware SATL handling. The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; else if (scmd->sc_data_direction == DMA_TO_DEVICE) @@ -4167,6 +4170,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); + _scsih_set_satl_pending(scmd, false); goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); @@ -4197,6 +4201,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (mpi_request->DataLength) { if (ioc->build_sg_scmd(ioc, scmd, smid)) { mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); goto out; } } else @@ -4804,6 +4809,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { scmd->result = DID_RESET << 16; break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; } scmd->result = DID_SOFT_ERROR << 16; break; @@ -8235,6 +8245,10 @@ static void scsih_remove(struct pci_dev *pdev) unsigned long flags; ioc->remove_host = 1; + + mpt3sas_wait_for_commands_to_complete(ioc); + _scsih_flush_running_cmds(ioc); + _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); @@ -8305,6 +8319,10 @@ scsih_shutdown(struct pci_dev *pdev) unsigned long flags; ioc->remove_host = 1; + + mpt3sas_wait_for_commands_to_complete(ioc); + _scsih_flush_running_cmds(ioc); + _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index a4f28b7e4c65..e18877177f1b 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1576,7 +1576,9 @@ 
static struct request *_make_request(struct request_queue *q, bool has_write, return req; for_each_bio(bio) { - ret = blk_rq_append_bio(req, bio); + struct bio *bounce_bio = bio; + + ret = blk_rq_append_bio(req, &bounce_bio); if (ret) return ERR_PTR(ret); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 486c075998f6..67b305531ec3 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -315,6 +315,29 @@ struct srb_cmd { /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) +/* + * 24 bit port ID type definition. + */ +typedef union { + uint32_t b24 : 24; + + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" +#endif + uint8_t rsvd_1; + } b; +} port_id_t; +#define INVALID_PORT_ID 0xFFFFFF + struct els_logo_payload { uint8_t opcode; uint8_t rsvd[3]; @@ -332,6 +355,7 @@ struct ct_arg { u32 rsp_size; void *req; void *rsp; + port_id_t id; }; /* @@ -480,6 +504,7 @@ typedef struct srb { const char *name; int iocbs; struct qla_qpair *qpair; + struct list_head elem; u32 gen1; /* scratch */ u32 gen2; /* scratch */ union { @@ -2144,28 +2169,6 @@ struct imm_ntfy_from_isp { #define REQUEST_ENTRY_SIZE (sizeof(request_t)) -/* - * 24 bit port ID type definition. - */ -typedef union { - uint32_t b24 : 24; - - struct { -#ifdef __BIG_ENDIAN - uint8_t domain; - uint8_t area; - uint8_t al_pa; -#elif defined(__LITTLE_ENDIAN) - uint8_t al_pa; - uint8_t area; - uint8_t domain; -#else -#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" -#endif - uint8_t rsvd_1; - } b; -} port_id_t; -#define INVALID_PORT_ID 0xFFFFFF /* * Switch info gathering structure. @@ -4082,6 +4085,7 @@ typedef struct scsi_qla_host { #define LOOP_READY 5 #define LOOP_DEAD 6 + unsigned long relogin_jif; unsigned long dpc_flags; #define RESET_MARKER_NEEDED 0 /* Send marker to ISP. 
*/ #define RESET_ACTIVE 1 @@ -4223,6 +4227,7 @@ typedef struct scsi_qla_host { wait_queue_head_t fcport_waitQ; wait_queue_head_t vref_waitq; uint8_t min_link_speed_feat; + struct list_head gpnid_list; } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index bc3db6abc9a0..59ecc4eda6cd 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -175,6 +175,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); } break; + case CS_TIMEOUT: + rval = QLA_FUNCTION_TIMEOUT; + /* drop through */ default: ql_dbg(ql_dbg_disc, vha, 0x2033, "%s failed, completion status (%x) on port_id: " @@ -2833,7 +2836,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) } } else { /* fcport->d_id.b24 != ea->id.b24 */ fcport->d_id.b24 = ea->id.b24; - if (fcport->deleted == QLA_SESS_DELETED) { + if (fcport->deleted != QLA_SESS_DELETED) { ql_dbg(ql_dbg_disc, vha, 0x2021, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); @@ -2889,9 +2892,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res) ea.rc = res; ea.event = FCME_GIDPN_DONE; - ql_dbg(ql_dbg_disc, vha, 0x204f, - "Async done-%s res %x, WWPN %8phC ID %3phC \n", - sp->name, res, fcport->port_name, id); + if (res == QLA_FUNCTION_TIMEOUT) { + ql_dbg(ql_dbg_disc, sp->vha, 0xffff, + "Async done-%s WWPN %8phC timed out.\n", + sp->name, fcport->port_name); + qla24xx_post_gidpn_work(sp->vha, fcport); + sp->free(sp); + return; + } else if (res) { + ql_dbg(ql_dbg_disc, sp->vha, 0xffff, + "Async done-%s fail res %x, WWPN %8phC\n", + sp->name, res, fcport->port_name); + } else { + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Async done-%s good WWPN %8phC ID %3phC\n", + sp->name, fcport->port_name, id); + } qla2x00_fcport_event_handler(vha, &ea); @@ -3205,11 +3221,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; struct event_arg ea; struct qla_work_evt *e; + unsigned long flags; - ql_dbg(ql_dbg_disc, vha, 0x2066, - "Async done-%s res %x ID %3phC. %8phC\n", - sp->name, res, ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); + if (res) + ql_dbg(ql_dbg_disc, vha, 0x2066, + "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", + sp->name, res, sp->gen1, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); + else + ql_dbg(ql_dbg_disc, vha, 0x2066, + "Async done-%s good rscn gen %d ID %3phC. 
%8phC\n", + sp->name, sp->gen1, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); memset(&ea, 0, sizeof(ea)); memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); @@ -3220,6 +3243,22 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) ea.rc = res; ea.event = FCME_GPNID_DONE; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_del(&sp->elem); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (res) { + if (res == QLA_FUNCTION_TIMEOUT) + qla24xx_post_gpnid_work(sp->vha, &ea.id); + sp->free(sp); + return; + } else if (sp->gen1) { + /* There was anoter RSNC for this Nport ID */ + qla24xx_post_gpnid_work(sp->vha, &ea.id); + sp->free(sp); + return; + } + qla2x00_fcport_event_handler(vha, &ea); e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE); @@ -3253,8 +3292,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; - srb_t *sp; + srb_t *sp, *tsp; struct ct_sns_pkt *ct_sns; + unsigned long flags; if (!vha->flags.online) goto done; @@ -3265,8 +3305,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->type = SRB_CT_PTHRU_CMD; sp->name = "gpnid"; + sp->u.iocb_cmd.u.ctarg.id = *id; + sp->gen1 = 0; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(tsp, &vha->gpnid_list, elem) { + if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { + tsp->gen1++; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sp->free(sp); + goto done; + } + } + list_add_tail(&sp->elem, &vha->gpnid_list); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b5b48ddca962..2300c02ab5e6 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -102,11 +102,16 @@ qla2x00_async_iocb_timeout(void *data) struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; - ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, - "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", - sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); + if (fcport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); - fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~FCF_ASYNC_SENT; + } else { + pr_info("Async-%s timeout - hdl=%x.\n", + sp->name, sp->handle); + } switch (sp->type) { case SRB_LOGIN_CMD: @@ -864,6 +869,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) int rval = ea->rc; fc_port_t *fcport = ea->fcport; unsigned long flags; + u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10]; fcport->flags &= ~FCF_ASYNC_SENT; @@ -894,7 +900,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - ea->fcport->login_gen++; + if (opt != PDO_FORCE_ADISC) + ea->fcport->login_gen++; ea->fcport->deleted = 0; ea->fcport->logout_on_delete = 1; @@ -918,6 +925,13 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) qla24xx_post_gpsc_work(vha, fcport); } + } else if (ea->fcport->login_succ) { + /* + * We have an existing session. A late RSCN delivery + * must have triggered the session to be re-validate. + * session is still valid. 
+ */ + fcport->disc_state = DSC_LOGIN_COMPLETE; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } /* gpdb event */ @@ -964,7 +978,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20bd, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); - qla24xx_async_gnl(vha, fcport); + qla24xx_post_gnl_work(vha, fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20bf, "%s %d %8phC post login\n", @@ -1133,7 +1147,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", __func__, __LINE__, fcport->port_name); - qla24xx_async_gidpn(vha, fcport); + qla24xx_post_gidpn_work(vha, fcport); return; } @@ -1348,6 +1362,7 @@ qla24xx_abort_sp_done(void *ptr, int res) srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; + del_timer(&sp->u.iocb_cmd.timer); complete(&abt->u.abt.comp); } @@ -1445,6 +1460,8 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { port_id_t cid; /* conflict Nport id */ + u16 lid; + struct fc_port *conflict_fcport; switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: @@ -1460,8 +1477,12 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_prli_work(vha, ea->fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC post gpdb\n", - __func__, __LINE__, ea->fcport->port_name); + "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id, ea->fcport->d_id.b24); + + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; ea->fcport->send_els_logo = 0; @@ -1506,8 +1527,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, ea->fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(ea->fcport); - qla24xx_post_gidpn_work(vha, ea->fcport); + lid = ea->iop[1] & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(ea->fcport->port_name), + ea->fcport->d_id, lid, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another fcport share the same loop_id/nport id. + * Conflict fcport needs to finish cleanup before this + * fcport can proceed to login. + */ + conflict_fcport->conflict = ea->fcport; + ea->fcport->login_pause = 1; + + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b24, lid); + qla2x00_clear_loop_id(ea->fcport); + qla24xx_post_gidpn_work(vha, ea->fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. 
sched delete\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b24, lid); + + qla2x00_clear_loop_id(ea->fcport); + set_bit(lid, vha->hw->loop_id_map); + ea->fcport->loop_id = lid; + ea->fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(ea->fcport, false); + } break; } return; @@ -8047,9 +8098,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) int ret = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = qpair->hw; - if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created) - goto fail; - qpair->delete_in_progress = 1; while (atomic_read(&qpair->ref_count)) msleep(500); @@ -8057,6 +8105,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) goto fail; + ret = qla25xx_delete_rsp_que(vha, qpair->rsp); if (ret != QLA_SUCCESS) goto fail; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2f94159186d7..63bea6a65d51 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2392,26 +2392,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data) srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; - struct qla_hw_data *ha = vha->hw; struct srb_iocb *lio = &sp->u.iocb_cmd; - unsigned long flags = 0; ql_dbg(ql_dbg_io, vha, 0x3069, "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - /* Abort the exchange */ - spin_lock_irqsave(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - ql_dbg(ql_dbg_io, vha, 0x3070, - "mbx abort_command failed.\n"); - } else { - ql_dbg(ql_dbg_io, vha, 0x3071, - "mbx abort_command success.\n"); - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); - complete(&lio->u.els_logo.comp); } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9d9668aac6f6..d95b879c2bca 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1569,7 +1569,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, /* borrowing sts_entry_24xx.comp_status. same location as ct_entry_24xx.comp_status */ - res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, sp->name); sp->done(sp, res); @@ -2341,7 +2341,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int res = 0; uint16_t state_flags = 0; uint16_t retry_delay = 0; - uint8_t no_logout = 0; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -2612,7 +2611,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; case CS_PORT_LOGGED_OUT: - no_logout = 1; case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: @@ -2643,9 +2641,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) port_state_str[atomic_read(&fcport->state)], comp_status); - if (no_logout) - fcport->logout_on_delete = 0; - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); qlt_schedule_sess_for_deletion_lock(fcport); } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 99502fa90810..2d909e12e23a 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -6078,8 +6078,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, } /* Check for logged in state. 
*/ - if (current_login_state != PDS_PRLI_COMPLETE && - last_login_state != PDS_PRLI_COMPLETE) { + if (current_login_state != PDS_PRLI_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x119a, "Unable to verify login-state (%x/%x) for loop_id %x.\n", current_login_state, last_login_state, fcport->loop_id); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c0f8f6c17b79..d77dde89118e 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -343,15 +343,21 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) "FCPort update end.\n"); } - if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && - !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && - atomic_read(&vha->loop_state) != LOOP_DOWN) { - - ql_dbg(ql_dbg_dpc, vha, 0x4018, - "Relogin needed scheduled.\n"); - qla2x00_relogin(vha); - ql_dbg(ql_dbg_dpc, vha, 0x4019, - "Relogin needed end.\n"); + if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && + !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && + atomic_read(&vha->loop_state) != LOOP_DOWN) { + + if (!vha->relogin_jif || + time_after_eq(jiffies, vha->relogin_jif)) { + vha->relogin_jif = jiffies + HZ; + clear_bit(RELOGIN_NEEDED, &vha->dpc_flags); + + ql_dbg(ql_dbg_dpc, vha, 0x4018, + "Relogin needed scheduled.\n"); + qla2x00_relogin(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4019, + "Relogin needed end.\n"); + } } if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && @@ -569,14 +575,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) int qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) { - int ret = -1; + int ret = QLA_SUCCESS; - if (req) { + if (req && vha->flags.qpairs_req_created) { req->options |= BIT_0; ret = qla25xx_init_req_que(vha, req); - } - if (ret == QLA_SUCCESS) + if (ret != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + qla25xx_free_req_que(vha, req); + } return ret; } @@ -584,14 +592,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) int qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { - int ret = -1; + int ret = QLA_SUCCESS; - if (rsp) { + if (rsp && vha->flags.qpairs_rsp_created) { rsp->options |= BIT_0; ret = qla25xx_init_rsp_que(vha, rsp); - } - if (ret == QLA_SUCCESS) + if (ret != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + qla25xx_free_rsp_que(vha, rsp); + } return ret; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dce42a416876..8e7c0626f8b5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -388,7 +388,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list); ha->base_qpair->enable_class_2 = ql2xenableclass2; /* init qpair to this cpu. Will adjust at run time. 
*/ - qla_cpu_update(rsp->qpair, smp_processor_id()); + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); ha->base_qpair->pdev = ha->pdev; if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) @@ -442,7 +442,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, ha->req_q_map[0] = req; set_bit(0, ha->rsp_qid_map); set_bit(0, ha->req_qid_map); - return 1; + return 0; fail_qpair_map: kfree(ha->base_qpair); @@ -1710,6 +1710,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) struct qla_tgt_cmd *cmd; uint8_t trace = 0; + if (!ha->req_q_map) + return; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; @@ -3003,9 +3005,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) base_vha = qla2x00_create_host(sht, ha); if (!base_vha) { ret = -ENOMEM; - qla2x00_mem_free(ha); - qla2x00_free_req_que(ha, req); - qla2x00_free_rsp_que(ha, rsp); goto probe_hw_failed; } @@ -3066,14 +3065,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); if (ret) - goto probe_init_failed; + goto probe_failed; /* Alloc arrays of request and response ring ptrs */ - if (!qla2x00_alloc_queues(ha, req, rsp)) { + ret = qla2x00_alloc_queues(ha, req, rsp); + if (ret) { ql_log(ql_log_fatal, base_vha, 0x003d, "Failed to allocate memory for queue pointers..." "aborting.\n"); - goto probe_init_failed; + goto probe_failed; } if (ha->mqenable && shost_use_blk_mq(host)) { @@ -3177,10 +3177,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); + if (ha->mqenable) { bool mq = false; bool startit = false; - ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); if (QLA_TGT_MODE_ENABLED()) { mq = true; @@ -3349,15 +3350,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; -probe_init_failed: - qla2x00_free_req_que(ha, req); - ha->req_q_map[0] = NULL; - clear_bit(0, ha->req_qid_map); - qla2x00_free_rsp_que(ha, rsp); - ha->rsp_q_map[0] = NULL; - clear_bit(0, ha->rsp_qid_map); - ha->max_req_queues = ha->max_rsp_queues = 0; - probe_failed: if (base_vha->timer_active) qla2x00_stop_timer(base_vha); @@ -3370,10 +3362,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } qla2x00_free_device(base_vha); - scsi_host_put(base_vha->host); + /* + * Need to NULL out local req/rsp after + * qla2x00_free_device => qla2x00_free_queues frees + * what these are pointing to. Or else we'll + * fall over below in qla2x00_free_req/rsp_que. 
+ */ + req = NULL; + rsp = NULL; probe_hw_failed: + qla2x00_mem_free(ha); + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); qla2x00_clear_drv_active(ha); iospace_config_failed: @@ -4061,6 +4063,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, (*rsp)->dma = 0; fail_rsp_ring: kfree(*rsp); + *rsp = NULL; fail_rsp: dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * sizeof(request_t), (*req)->ring, (*req)->dma); @@ -4068,6 +4071,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, (*req)->dma = 0; fail_req_ring: kfree(*req); + *req = NULL; fail_req: dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), ha->ct_sns, ha->ct_sns_dma); @@ -4434,6 +4438,7 @@ qla2x00_mem_free(struct qla_hw_data *ha) if (ha->init_cb) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); + vfree(ha->optrom_buffer); kfree(ha->nvram); kfree(ha->npiv_info); @@ -4454,6 +4459,15 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->ex_init_cb_dma = 0; ha->async_pd = NULL; ha->async_pd_dma = 0; + ha->loop_id_map = NULL; + ha->npiv_info = NULL; + ha->optrom_buffer = NULL; + ha->swl = NULL; + ha->nvram = NULL; + ha->mctp_dump = NULL; + ha->dcbx_tlv = NULL; + ha->xgmac_data = NULL; + ha->sfp_data = NULL; ha->s_dma_pool = NULL; ha->dl_dma_pool = NULL; @@ -4498,6 +4512,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->qp_list); INIT_LIST_HEAD(&vha->gnl.fcports); INIT_LIST_HEAD(&vha->nvme_rport_list); + INIT_LIST_HEAD(&vha->gpnid_list); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); @@ -4732,11 +4747,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) } else { list_add_tail(&fcport->list, &vha->vp_fcports); - if (pla) { - qlt_plogi_ack_link(vha, pla, fcport, - QLT_PLOGI_LINK_SAME_WWN); - pla->ref_count--; - } + } + if (pla) { + qlt_plogi_ack_link(vha, pla, fcport, + QLT_PLOGI_LINK_SAME_WWN); + pla->ref_count--; } } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); @@ -4858,7 +4873,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) */ if (atomic_read(&fcport->state) != FCS_ONLINE && fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { - fcport->login_retry--; + if (fcport->flags & FCF_FABRIC_DEVICE) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2108, "%s %8phC DS %d LS %d\n", __func__, @@ -4869,6 +4884,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) ea.fcport = fcport; qla2x00_fcport_event_handler(vha, &ea); } else { + fcport->login_retry--; status = qla2x00_local_device_login(vha, fcport); if (status == QLA_SUCCESS) { @@ -5851,16 +5867,21 @@ qla2x00_do_dpc(void *data) } /* Retry each device up to login retry count */ - if ((test_and_clear_bit(RELOGIN_NEEDED, - &base_vha->dpc_flags)) && + if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && atomic_read(&base_vha->loop_state) != LOOP_DOWN) { - ql_dbg(ql_dbg_dpc, base_vha, 0x400d, - "Relogin scheduled.\n"); - qla2x00_relogin(base_vha); - ql_dbg(ql_dbg_dpc, base_vha, 0x400e, - "Relogin end.\n"); + if (!base_vha->relogin_jif || + time_after_eq(jiffies, base_vha->relogin_jif)) { + base_vha->relogin_jif = jiffies + HZ; + clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); + + ql_dbg(ql_dbg_dpc, base_vha, 0x400d, + "Relogin scheduled.\n"); + qla2x00_relogin(base_vha); + ql_dbg(ql_dbg_dpc, base_vha, 0x400e, + "Relogin end.\n"); + } } loop_resync_check: if (test_and_clear_bit(LOOP_RESYNC_NEEDED, @@ -6591,9 
+6612,14 @@ qla83xx_disable_laser(scsi_qla_host_t *vha) static int qla2xxx_map_queues(struct Scsi_Host *shost) { + int rc; scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; - return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); + if (USER_CTRL_IRQ(vha->hw)) + rc = blk_mq_map_queues(&shost->tag_set); + else + rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); + return rc; } static const struct pci_error_handlers qla2xxx_err_handler = { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f05cfc83c9c8..d6fe08de59a0 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -665,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); sp->u.iocb_cmd.u.nack.ntfy = ntfy; - + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_nack_sp_done; rval = qla2x00_start_sp(sp); @@ -971,10 +971,11 @@ static void qlt_free_session_done(struct work_struct *work) logo.id = sess->d_id; logo.cmd_count = 0; + sess->send_els_logo = 0; qlt_send_first_logo(vha, &logo); } - if (sess->logout_on_delete) { + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { int rc; rc = qla2x00_post_async_logout_work(vha, sess, NULL); @@ -1033,8 +1034,7 @@ static void qlt_free_session_done(struct work_struct *work) sess->login_succ = 0; } - if (sess->chip_reset != ha->base_qpair->chip_reset) - qla2x00_clear_loop_id(sess); + qla2x00_clear_loop_id(sess); if (sess->conflict) { sess->conflict->login_pause = 0; @@ -1205,7 +1205,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess, ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); - schedule_work(&sess->del_work); + INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); + queue_work(sess->vha->hw->wq, &sess->del_work); } void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess) @@ -1560,8 +1561,11 @@ static void qlt_release(struct qla_tgt *tgt) btree_destroy64(&tgt->lun_qpair_map); - if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target) - ha->tgt.tgt_ops->remove_target(vha); + if (vha->vp_idx) + if (ha->tgt.tgt_ops && + ha->tgt.tgt_ops->remove_target && + vha->vha_tgt.target_lport_ptr) + ha->tgt.tgt_ops->remove_target(vha); vha->vha_tgt.qla_tgt = NULL; @@ -3708,7 +3712,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, term = 1; if (term) - qlt_term_ctio_exchange(qpair, ctio, cmd, status); + qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); return term; } @@ -4584,9 +4588,9 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); - other_sess->keep_nport_handle = 1; - *conflict_sess = other_sess; + if (other_sess->disc_state != DSC_DELETED) + *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess, true); } @@ -5755,7 +5759,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, unsigned long flags; u8 newfcport = 0; - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, "qla_target(%d): Allocation of tmp FC port failed", @@ -5784,6 +5788,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, tfcp->port_type = fcport->port_type; tfcp->supported_classes = fcport->supported_classes; tfcp->flags |= fcport->flags; + tfcp->scan_state = QLA_FCPORT_FOUND; del = fcport; 
fcport = tfcp; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 09ba494f8896..92bc5b2d24ae 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3001,11 +3001,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) + } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) sdev_printk(KERN_INFO, scp->device, - "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", + "%s: %s: lb size=%u, IO sent=%d bytes\n", my_name, "write same", - num * sdebug_sector_size, ret); + sdebug_sector_size, ret); /* Copy first sector to remaining blocks */ for (i = 1 ; i < num ; i++) diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index 01f08c03f2c1..c3765d29fd3f 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq) { struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); - char buf[80]; + const u8 *const cdb = READ_ONCE(cmd->cmnd); + char buf[80] = "(?)"; - __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); + if (cdb) + __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len); seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, cmd->retries, msecs / 1000, msecs % 1000); } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 6bf43d94cdc0..cfc095f45e26 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -161,7 +161,7 @@ static struct { {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, - {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, + {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, @@ -181,7 +181,7 @@ static struct { {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ - {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ + {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"HP", "C1557A", NULL, BLIST_FORCELUN}, @@ -595,17 +595,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev, int key) { struct scsi_dev_info_list *devinfo; - int err; devinfo = scsi_dev_info_list_find(vendor, model, key); if (!IS_ERR(devinfo)) return devinfo->flags; - err = PTR_ERR(devinfo); - if (err != -ENOENT) - return err; - - /* nothing found, return nothing */ + /* key or device not found: return nothing */ if (key != SCSI_DEVINFO_GLOBAL) return 0; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 84addee05be6..a5e30e9449ef 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"IBM", "1815", "rdac", }, {"IBM", "1818", "rdac", }, {"IBM", "3526", "rdac", }, + {"IBM", "3542", "rdac", }, + 
{"IBM", "3552", "rdac", }, {"SGI", "TP9", "rdac", }, {"SGI", "IS", "rdac", }, - {"STK", "OPENstorage D280", "rdac", }, + {"STK", "OPENstorage", "rdac", }, {"STK", "FLEXLINE 380", "rdac", }, + {"STK", "BladeCtlr", "rdac", }, {"SUN", "CSM", "rdac", }, {"SUN", "LCSM100", "rdac", }, {"SUN", "STK6580_6780", "rdac", }, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index dab876c65473..cf70f0bb8375 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -220,6 +220,18 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) } } +static void scsi_eh_inc_host_failed(struct rcu_head *head) +{ + struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); + struct Scsi_Host *shost = scmd->device->host; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + shost->host_failed++; + scsi_eh_wakeup(shost); + spin_unlock_irqrestore(shost->host_lock, flags); +} + /** * scsi_eh_scmd_add - add scsi cmd to error handling. * @scmd: scmd to run eh on. @@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) scsi_eh_reset(scmd); list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); - shost->host_failed++; - scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); + /* + * Ensure that all tasks observe the host state change before the + * host_failed change. + */ + call_rcu(&scmd->rcu, scsi_eh_inc_host_failed); } /** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index bcc1694cebcd..359386730523 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) cmd->cmd_len = scsi_command_size(cmd->cmnd); } -void scsi_device_unbusy(struct scsi_device *sdev) +/* + * Decrement the host_busy counter and wake up the error handler if necessary. + * Avoid as follows that the error handler is not woken up if shost->host_busy + * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination + * with an RCU read lock in this function to ensure that this function in its + * entirety either finishes before scsi_eh_scmd_add() increases the + * host_failed counter or that it notices the shost state change made by + * scsi_eh_scmd_add(). 
+ */ +static void scsi_dec_host_busy(struct Scsi_Host *shost) { - struct Scsi_Host *shost = sdev->host; - struct scsi_target *starget = scsi_target(sdev); unsigned long flags; + rcu_read_lock(); atomic_dec(&shost->host_busy); - if (starget->can_queue > 0) - atomic_dec(&starget->target_busy); - - if (unlikely(scsi_host_in_recovery(shost) && - (shost->host_failed || shost->host_eh_scheduled))) { + if (unlikely(scsi_host_in_recovery(shost))) { spin_lock_irqsave(shost->host_lock, flags); - scsi_eh_wakeup(shost); + if (shost->host_failed || shost->host_eh_scheduled) + scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); } + rcu_read_unlock(); +} + +void scsi_device_unbusy(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct scsi_target *starget = scsi_target(sdev); + + scsi_dec_host_busy(shost); + + if (starget->can_queue > 0) + atomic_dec(&starget->target_busy); atomic_dec(&sdev->device_busy); } @@ -653,6 +670,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (!blk_rq_is_scsi(req)) { WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); cmd->flags &= ~SCMD_INITIALIZED; + destroy_rcu_head(&cmd->rcu); } if (req->mq_ctx) { @@ -1133,6 +1151,7 @@ void scsi_initialize_rq(struct request *rq) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); scsi_req_init(&cmd->req); + init_rcu_head(&cmd->rcu); cmd->jiffies_at_alloc = jiffies; cmd->retries = 0; } @@ -1532,7 +1551,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, list_add_tail(&sdev->starved_entry, &shost->starved_list); spin_unlock_irq(shost->host_lock); out_dec: - atomic_dec(&shost->host_busy); + scsi_dec_host_busy(shost); return 0; } @@ -1993,7 +2012,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; out_dec_host_busy: - atomic_dec(&shost->host_busy); + scsi_dec_host_busy(shost); out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); @@ -2126,11 +2145,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) q->limits.cluster = 0; /* - * set a reasonable default alignment on word boundaries: the - * host and device may alter it using - * blk_queue_update_dma_alignment() later. + * Set a reasonable default alignment: The larger of 32-byte (dword), + * which is a common minimum for HBAs, and the minimum DMA alignment, + * which is set by the platform. + * + * Devices that require a bigger alignment can increase it later. */ - blk_queue_dma_alignment(q, 0x03); + blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); } EXPORT_SYMBOL_GPL(__scsi_init_queue); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index f796bd61f3f0..40406c162d0d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1383,7 +1383,10 @@ static void __scsi_remove_target(struct scsi_target *starget) * check. 
*/ if (sdev->channel != starget->channel || - sdev->id != starget->id || + sdev->id != starget->id) + continue; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL || !get_device(&sdev->sdev_gendev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d175c5c5ccf8..72db0f7d221a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; + bool v; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - sdp->manage_start_stop = simple_strtoul(buf, NULL, 10); + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_start_stop = v; return count; } @@ -253,6 +257,7 @@ static ssize_t allow_restart_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + bool v; struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -262,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr, if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; - sdp->allow_restart = simple_strtoul(buf, NULL, 10); + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->allow_restart = v; return count; } @@ -1284,6 +1292,7 @@ static int sd_init_command(struct scsi_cmnd *cmd) static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; + u8 *cmnd; if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) sd_zbc_write_unlock_zone(SCpnt); @@ -1292,9 +1301,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) __free_page(rq->special_vec.bv_page); if (SCpnt->cmnd != scsi_req(rq)->cmd) { - mempool_free(SCpnt->cmnd, sd_cdb_pool); + cmnd = SCpnt->cmnd; SCpnt->cmnd = NULL; SCpnt->cmd_len = 0; + mempool_free(cmnd, sd_cdb_pool); } } diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 8aa54779aac1..2eb61d54bbb4 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -375,15 +375,15 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, if (sdkp->device->type != TYPE_ZBC) { /* Host-aware */ sdkp->urswrz = 1; - sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]); - sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]); + sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]); + sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]); sdkp->zones_max_open = 0; } else { /* Host-managed */ sdkp->urswrz = buf[4] & 1; sdkp->zones_optimal_open = 0; sdkp->zones_optimal_nonseq = 0; - sdkp->zones_max_open = get_unaligned_be64(&buf[16]); + sdkp->zones_max_open = get_unaligned_be32(&buf[16]); } return 0; diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 11826c5c2dd4..62f04c0511cf 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, } static void ses_match_to_enclosure(struct enclosure_device *edev, - struct scsi_device *sdev) + struct scsi_device *sdev, + int refresh) { + struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent); struct efd efd = { .addr = 0, }; - ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); + if (refresh) + ses_enclosure_data_process(edev, edev_sdev, 0); if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) efd.addr = sas_get_address(sdev); @@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev, struct enclosure_device *prev = NULL; while ((edev = 
enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { - ses_match_to_enclosure(edev, sdev); + ses_match_to_enclosure(edev, sdev, 1); prev = edev; } return -ENODEV; @@ -768,7 +771,7 @@ static int ses_intf_add(struct device *cdev, shost_for_each_device(tmp_sdev, sdev->host) { if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) continue; - ses_match_to_enclosure(edev, tmp_sdev); + ses_match_to_enclosure(edev, tmp_sdev, 0); } return 0; diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile index 0f42a225a664..e6b779930230 100644 --- a/drivers/scsi/smartpqi/Makefile +++ b/drivers/scsi/smartpqi/Makefile @@ -1,3 +1,3 @@ ccflags-y += -I. -obj-m += smartpqi.o +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 5e7200f05873..a3e480e7a257 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -952,10 +952,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, case TEST_UNIT_READY: break; default: - set_host_byte(scmnd, DID_TARGET_FAILURE); + set_host_byte(scmnd, DID_ERROR); } break; case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); do_work = true; process_err_fn = storvsc_remove_lun; break; @@ -1826,8 +1827,10 @@ static int storvsc_probe(struct hv_device *device, fc_host_node_name(host) = stor_device->node_name; fc_host_port_name(host) = stor_device->port_name; stor_device->rport = fc_remote_port_add(host, 0, &ids); - if (!stor_device->rport) + if (!stor_device->rport) { + ret = -ENOMEM; goto err_out3; + } } #endif return 0; diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e27b4d4e6ae2..8b545a9c51dd 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -38,6 +38,7 @@ config SCSI_UFSHCD select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND select NLS + select RPMB ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index c87d770b519a..edd0554652bb 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -50,19 +50,10 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, u32 clk_cycles); -static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, - char *prefix) -{ - print_hex_dump(KERN_ERR, prefix, - len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, - 16, 4, (void __force *)hba->mmio_base + offset, - len * 4, false); -} - static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, - char *prefix, void *priv) + const char *prefix, void *priv) { - ufs_qcom_dump_regs(hba, offset, len, prefix); + ufshcd_dump_regs(hba, offset, len * 4, prefix); } static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) @@ -1436,7 +1427,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv, void (*print_fn)(struct ufs_hba *hba, - int offset, int num_regs, char *str, void *priv)) + int offset, int num_regs, const char *str, void *priv)) { u32 reg; struct ufs_qcom_host *host; @@ -1618,7 +1609,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) static void ufs_qcom_testbus_read(struct ufs_hba *hba) { - ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); + ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); } static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) @@ -1644,8 +1635,8 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) { - ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, - "HCI Vendor Specific Registers "); + ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, + "HCI Vendor Specific Registers "); /* sleep a bit intermittently as we are dumping too much data */ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 54deeb754db5..77ff3c91917c 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -526,7 +526,9 @@ struct ufs_dev_info { */ struct ufs_dev_desc { u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; + char *model; + char *serial_no; + size_t serial_no_len; }; #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 794a4600e952..f814bb9713b8 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -37,10 +37,14 @@ * license terms, and distributes only under these terms. */ +#include #include #include #include #include +#include +#include + #include "ufshcd.h" #include "ufs_quirks.h" #include "unipro.h" @@ -97,8 +101,29 @@ _ret; \ }) -#define ufshcd_hex_dump(prefix_str, buf, len) \ -print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false) +#define ufshcd_hex_dump(prefix_str, buf, len) do { \ + size_t __len = (len); \ + print_hex_dump(KERN_ERR, prefix_str, \ + __len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ + 16, 4, buf, __len, false); \ +} while (0) + +int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix) +{ + u8 *regs; + + regs = kzalloc(len, GFP_KERNEL); + if (!regs) + return -ENOMEM; + + memcpy_fromio(regs, hba->mmio_base + offset, len); + ufshcd_hex_dump(prefix, regs, len); + kfree(regs); + + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_dump_regs); enum { UFSHCD_MAX_CHANNEL = 0, @@ -264,16 +289,6 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) } } -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(char *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -342,15 +357,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, static void ufshcd_print_host_regs(struct ufs_hba *hba) { - /* - * hex_dump reads its data without the readl macro. This might - * cause inconsistency issues on some platform, as the printed - * values may be from cache and not the most recent value. - * To know whether you are looking at an un-cached version verify - * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked - * during platform/pci probe function. - */ - ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", hba->ufs_version, hba->capabilities); dev_err(hba->dev, @@ -3009,7 +3016,7 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, u8 param_offset, - u8 *param_read_buf, + void *param_read_buf, u8 param_size) { int ret; @@ -3077,7 +3084,7 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u8 *buf, + void *buf, u32 size) { return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); @@ -3095,6 +3102,25 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); } +/** + * uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[0]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char blank_non_printable(char ch) +{ + return (ch >= 0x20 || ch <= 0x7e) ? ch : ' '; +} + /** * ufshcd_read_string_desc - read string descriptor * @hba: pointer to adapter instance @@ -3102,43 +3128,54 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) * @buf: pointer to buffer where descriptor would be read * @size: size of buf * @ascii: if true convert from unicode to ascii characters + * null terminated string. * - * Return 0 in case of success, non-zero otherwise + * Return: string size on success. 
+ * -ENOMEM: on allocation failure + * -ETOOSMALL: if supplied buffer is too small */ -#define ASCII_STD true -static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) +#define SD_ASCII_STD true +#define SD_RAW false +static int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + char **buf, bool ascii) { - int err = 0; + struct uc_string_id *uc_str; + char *str; + int ret; - err = ufshcd_read_desc(hba, - QUERY_DESC_IDN_STRING, desc_index, buf, size); + if (!buf) + return -EINVAL; - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", - __func__, QUERY_REQ_RETRIES, err); + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, + desc_index, uc_str, + QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; goto out; } if (ascii) { - int desc_len; - int ascii_len; int i; - char *buff_ascii; + ssize_t ascii_len; - desc_len = buf[0]; /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; goto out; } @@ -3146,22 +3183,29 @@ static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, * the descriptor contains string in UTF16 format * we need to convert to utf-8 so it can be displayed */ - utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], - desc_len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); + for (i = 0; i < ret; i++) + str[i] = blank_non_printable(str[i]); + + str[ret++] = '\0'; - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); + } else { + str = kzalloc(uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + memcpy(str, uc_str, uc_str->len); + ret = uc_str->len; } out: - return err; + *buf = str; + kfree(uc_str); + return ret; } /** @@ -5957,6 +6001,192 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) } +#define SEC_PROTOCOL_UFS 0xEC +#define SEC_SPECIFIC_UFS_RPMB 0x0001 + +#define SEC_PROTOCOL_CMD_SIZE 12 +#define SEC_PROTOCOL_RETRIES 3 +#define SEC_PROTOCOL_RETRIES_ON_RESET 10 +#define SEC_PROTOCOL_TIMEOUT msecs_to_jiffies(1000) + +static int +ufshcd_rpmb_security_out(struct scsi_device *sdev, + struct rpmb_frame *frames, u32 cnt) +{ + struct scsi_sense_hdr sshdr; + u32 trans_len = cnt * sizeof(struct rpmb_frame); + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + 
cmd[0] = SECURITY_PROTOCOL_OUT; + cmd[1] = SEC_PROTOCOL_UFS; + put_unaligned_be16(SEC_SPECIFIC_UFS_RPMB, cmd + 2); + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(trans_len, cmd + 6); /* transfer length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, + frames, trans_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security out", &sshdr); + + return ret; +} + +static int +ufshcd_rpmb_security_in(struct scsi_device *sdev, + struct rpmb_frame *frames, u32 cnt) +{ + struct scsi_sense_hdr sshdr; + u32 alloc_len = cnt * sizeof(struct rpmb_frame); + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_IN; + cmd[1] = SEC_PROTOCOL_UFS; + put_unaligned_be16(SEC_SPECIFIC_UFS_RPMB, cmd + 2); + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(alloc_len, cmd + 6); /* allocation length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, + frames, alloc_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security in", &sshdr); + + return ret; +} + +static int ufshcd_rpmb_cmd_seq(struct device *dev, + struct rpmb_cmd *cmds, u32 ncmds) +{ + unsigned long flags; + struct ufs_hba *hba = dev_get_drvdata(dev); + struct scsi_device *sdev; + struct rpmb_cmd *cmd; + int i; + int ret; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdev = hba->sdev_ufs_rpmb; + if (sdev) { + ret = scsi_device_get(sdev); + if (!ret && !scsi_device_online(sdev)) { + ret = -ENODEV; + scsi_device_put(sdev); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ret) + return ret; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) + ret = ufshcd_rpmb_security_out(sdev, cmd->frames, + cmd->nframes); + else + ret = ufshcd_rpmb_security_in(sdev, cmd->frames, + cmd->nframes); + } + scsi_device_put(sdev); + return ret; +} + +static struct rpmb_ops ufshcd_rpmb_dev_ops = { + .cmd_seq = ufshcd_rpmb_cmd_seq, + .type = RPMB_TYPE_UFS, +}; + +static inline void ufshcd_rpmb_add(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) +{ + struct rpmb_dev *rdev; + int ret; + + ufshcd_rpmb_dev_ops.dev_id = kmemdup(dev_desc->serial_no, + dev_desc->serial_no_len, + GFP_KERNEL); + if (ufshcd_rpmb_dev_ops.dev_id) + ufshcd_rpmb_dev_ops.dev_id_len = dev_desc->serial_no_len; + + ret = scsi_device_get(hba->sdev_ufs_rpmb); + if (ret) + goto out_put_dev; + + rdev = rpmb_dev_register(hba->dev, &ufshcd_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(hba->dev, "%s: cannot register to rpmb %ld\n", + dev_name(hba->dev), PTR_ERR(rdev)); + goto out_put_dev; 
+ } + + return; + +out_put_dev: + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; +} + +static inline void ufshcd_rpmb_remove(struct ufs_hba *hba) +{ + unsigned long flags; + + if (!hba->sdev_ufs_rpmb) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + + rpmb_dev_unregister(hba->dev); + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; + + kfree(ufshcd_rpmb_dev_ops.dev_id); + ufshcd_rpmb_dev_ops.dev_id = NULL; + + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + /** * ufshcd_scsi_add_wlus - Adds required W-LUs * @hba: per-adapter instance @@ -6012,7 +6242,10 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) ret = PTR_ERR(sdev_rpmb); goto remove_sdev_boot; } + hba->sdev_ufs_rpmb = sdev_rpmb; + scsi_device_put(sdev_rpmb); + goto out; remove_sdev_boot: @@ -6026,15 +6259,17 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) static int ufs_get_device_desc(struct ufs_hba *hba, struct ufs_dev_desc *dev_desc) { - int err; - u8 model_index; - u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0}; - u8 desc_buf[hba->desc_size.dev_desc]; + int ret; + u8 *desc_buf; - err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); - if (err) { + desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + + ret = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + if (ret) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", - __func__, err); + __func__, ret); goto out; } @@ -6045,26 +6280,33 @@ static int ufs_get_device_desc(struct ufs_hba *hba, dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; - model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, - QUERY_DESC_MAX_SIZE, ASCII_STD); - if (err) { + ret = ufshcd_read_string_desc(hba, + desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME], + &dev_desc->model, SD_ASCII_STD); + if (ret < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", - __func__, err); + __func__, ret); goto out; } - str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); - - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; + ret = ufshcd_read_string_desc(hba, + desc_buf[DEVICE_DESC_PARAM_SN], + &dev_desc->serial_no, SD_RAW); + if (ret < 0) { + dev_err(hba->dev, "%s: Failed reading Serial No. err = %d\n", + __func__, ret); + goto out; + } out: - return err; + kfree(desc_buf); + return ret; +} + +static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc) +{ + kfree(dev_desc->model); + dev_desc->model = NULL; } static void ufs_fixup_device_setup(struct ufs_hba *hba, @@ -6344,13 +6586,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_init_desc_sizes(hba); ret = ufs_get_device_desc(hba, &card); - if (ret) { + if (ret < 0) { dev_err(hba->dev, "%s: Failed getting device info. 
err = %d\n", __func__, ret); goto out; } ufs_fixup_device_setup(hba, &card); + ufshcd_tune_unipro_params(hba); ret = ufshcd_set_vccq_rail_unused(hba, @@ -6399,6 +6642,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ufshcd_scsi_add_wlus(hba)) goto out; + ufshcd_rpmb_add(hba, &card); + /* Initialize devfreq after UFS device is detected */ if (ufshcd_is_clkscaling_supported(hba)) { memcpy(&hba->clk_scaling.saved_pwr_info.info, @@ -6428,6 +6673,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) hba->is_init_prefetch = true; out: + + ufs_put_device_desc(&card); /* * If we failed to initialize the device or the device is not * present, turn off the power/clocks etc. @@ -6555,12 +6802,15 @@ static int ufshcd_config_vreg(struct device *dev, struct ufs_vreg *vreg, bool on) { int ret = 0; - struct regulator *reg = vreg->reg; - const char *name = vreg->name; + struct regulator *reg; + const char *name; int min_uV, uA_load; BUG_ON(!vreg); + reg = vreg->reg; + name = vreg->name; + if (regulator_count_voltages(reg) > 0) { min_uV = on ? vreg->min_uV : 0; ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); @@ -7726,6 +7976,8 @@ int ufshcd_shutdown(struct ufs_hba *hba) goto out; } + ufshcd_rpmb_remove(hba); + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); out: if (ret) @@ -7742,7 +7994,10 @@ EXPORT_SYMBOL(ufshcd_shutdown); */ void ufshcd_remove(struct ufs_hba *hba) { + ufshcd_rpmb_remove(hba); + ufshcd_remove_sysfs_nodes(hba); + scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index cdc8bd05f7df..f414297e8869 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -454,6 +454,7 @@ struct ufs_stats { * @utmrdl_dma_addr: UTMRDL DMA address * @host: Scsi_Host instance of the driver * @dev: device handle + * @sdev_ufs_rpmb: reference to RPMB device W-LU * @lrb: local reference block * @lrb_in_use: lrb in use * @outstanding_tasks: Bits representing outstanding task requests @@ -517,6 +518,7 @@ struct ufs_hba { * "UFS device" W-LU. */ struct scsi_device *sdev_ufs_device; + struct scsi_device *sdev_ufs_rpmb; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; @@ -983,4 +985,7 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) hba->vops->dbg_register_dump(hba); } +int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix); + #endif /* End of Header */ diff --git a/drivers/sdw/Kconfig b/drivers/sdw/Kconfig new file mode 100644 index 000000000000..660188bd2c02 --- /dev/null +++ b/drivers/sdw/Kconfig @@ -0,0 +1,19 @@ +menuconfig SDW + tristate "SoundWire bus support" + depends on CRC8 + depends on X86 + help + SoundWire interface is typically used for transporting data + related to audio functions. 
+menuconfig SDW_CNL + tristate "Intel SoundWire master controller support" + depends on SDW && X86 + help + Intel SoundWire master controller driver +menuconfig SDW_MAXIM_SLAVE + bool "SoundWire Slave for the Intel CNL FPGA" + depends on SDW && X86 + help + SoundWire Slave on FPGA platform for Intel CNL IP + Mostly N for all the cases other than CNL Slave FPGA + diff --git a/drivers/sdw/Makefile b/drivers/sdw/Makefile new file mode 100644 index 000000000000..e2ba440f4ef2 --- /dev/null +++ b/drivers/sdw/Makefile @@ -0,0 +1,5 @@ +sdw_bus-objs := sdw.o sdw_bwcalc.o sdw_utils.o + +obj-$(CONFIG_SDW) += sdw_bus.o +obj-$(CONFIG_SDW_CNL) += sdw_cnl.o +obj-$(CONFIG_SDW_MAXIM_SLAVE) += sdw_maxim.o diff --git a/drivers/sdw/sdw.c b/drivers/sdw/sdw.c new file mode 100644 index 000000000000..55f34fc6b89a --- /dev/null +++ b/drivers/sdw/sdw.c @@ -0,0 +1,3448 @@ +/* + * sdw.c - SoundWire Bus driver implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_priv.h" + +#define sdw_slave_attr_gr NULL +#define sdw_mstr_attr_gr NULL + +#define CREATE_TRACE_POINTS +#include + +/* Global instance handling all the SoundWire buses */ +struct sdw_core sdw_core; + +static void sdw_slave_release(struct device *dev) +{ + kfree(to_sdw_slave(dev)); +} + +static void sdw_mstr_release(struct device *dev) +{ + struct sdw_master *mstr = to_sdw_master(dev); + + complete(&mstr->slv_released); +} + +static struct device_type sdw_slv_type = { + .groups = sdw_slave_attr_gr, + .release = sdw_slave_release, +}; + +static struct device_type sdw_mstr_type = { + .groups = sdw_mstr_attr_gr, + .release = sdw_mstr_release, +}; +/** + * sdw_slave_verify - return parameter as sdw_slave, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slave. + */ +struct sdw_slave *sdw_slave_verify(struct device *dev) +{ + return (dev->type == &sdw_slv_type) + ? to_sdw_slave(dev) + : NULL; +} + +/** + * sdw_mstr_verify - return parameter as sdw_master, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slave. + */ +struct sdw_master *sdw_mstr_verify(struct device *dev) +{ + return (dev->type == &sdw_mstr_type) + ? 
to_sdw_master(dev) + : NULL; +} + +static const struct sdw_slave_id *sdw_match_slave(const struct sdw_slave_id *id, + const struct sdw_slave *sdw_slv) +{ + while (id->name[0]) { + if (strncmp(sdw_slv->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static const struct sdw_master_id *sdw_match_master( + const struct sdw_master_id *id, + const struct sdw_master *sdw_mstr) +{ + if (!id) + return NULL; + while (id->name[0]) { + if (strncmp(sdw_mstr->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static int sdw_slv_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slave *sdw_slv; + struct sdw_slave_driver *drv = to_sdw_slave_driver(driver); + int ret = 0; + + /* Check if driver is slave type or not, both master and slave + * driver has first field as driver_type, so if driver is not + * of slave type return + */ + if (drv->driver_type != SDW_DRIVER_TYPE_SLAVE) + return ret; + + sdw_slv = to_sdw_slave(dev); + + if (drv->id_table) + ret = (sdw_match_slave(drv->id_table, sdw_slv) != NULL); + + if (driver->name && !ret) + ret = (strncmp(sdw_slv->name, driver->name, SOUNDWIRE_NAME_SIZE) + == 0); + if (ret) + sdw_slv->driver = drv; + return ret; +} +static int sdw_mstr_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_master *sdw_mstr; + struct sdw_mstr_driver *drv = to_sdw_mstr_driver(driver); + int ret = 0; + + /* Check if driver is slave type or not, both master and slave + * driver has first field as driver_type, so if driver is not + * of slave type return + */ + if (drv->driver_type != SDW_DRIVER_TYPE_MASTER) + return ret; + + sdw_mstr = to_sdw_master(dev); + + if (drv->id_table) + ret = (sdw_match_master(drv->id_table, sdw_mstr) != NULL); + + if (driver->name) + ret = (strncmp(sdw_mstr->name, driver->name, + SOUNDWIRE_NAME_SIZE) == 0); + if (ret) + sdw_mstr->driver = drv; + + return ret; +} + +static int sdw_mstr_probe(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(mstr, sdw_match_master(sdrv->id_table, mstr)); + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + +static int sdw_slv_probe(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + struct sdw_slave *sdwslv = to_sdw_slave(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(sdwslv, sdw_match_slave(sdrv->id_table, + sdwslv)); + return 0; + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + + +int sdw_slave_get_bus_params(struct sdw_slave *sdw_slv, + struct sdw_bus_params *params) +{ + struct sdw_bus *bus; + struct sdw_master *mstr = sdw_slv->mstr; + + list_for_each_entry(bus, &sdw_core.bus_list, bus_node) { + if (bus->mstr == mstr) + break; + } + if (!bus) + return -EFAULT; + + params->num_rows = bus->row; + params->num_cols = bus->col; + params->bus_clk_freq = bus->clk_freq >> 1; + params->bank = bus->active_bank; + + return 0; +} +EXPORT_SYMBOL(sdw_slave_get_bus_params); + +static int sdw_mstr_remove(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_master(dev)); + else + return -ENODEV; + + 
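+	/* undo the PM domain attach done in sdw_mstr_probe() */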
dev_pm_domain_detach(dev, true); + return ret; + +} + +static int sdw_slv_remove(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_slave(dev)); + else + return -ENODEV; + + dev_pm_domain_detach(dev, true); + return ret; +} + +static void sdw_slv_shutdown(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + + if (sdrv->shutdown) + sdrv->shutdown(to_sdw_slave(dev)); +} + +static void sdw_mstr_shutdown(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + + if (sdrv->shutdown) + sdrv->shutdown(mstr); +} + +static void sdw_shutdown(struct device *dev) +{ + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + sdw_slv_shutdown(dev); + else if (sdw_mstr) + sdw_mstr_shutdown(dev); +} + +static int sdw_remove(struct device *dev) +{ + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_remove(dev); + else if (sdw_mstr) + return sdw_mstr_remove(dev); + + return 0; +} + +static int sdw_probe(struct device *dev) +{ + + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_probe(dev); + else if (sdw_mstr) + return sdw_mstr_probe(dev); + + return -ENODEV; + +} + +static int sdw_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_match(dev, driver); + else if (sdw_mstr) + return sdw_mstr_match(dev, driver); + return 0; + +} + +#ifdef CONFIG_PM_SLEEP +static int sdw_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct sdw_slave *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->suspend) + return 0; + + return driver->suspend(sdw_slv, mesg); +} + +static int sdw_legacy_resume(struct device *dev) +{ + struct sdw_slave *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->resume) + return 0; + + return driver->resume(sdw_slv); +} + +static int sdw_pm_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_suspend(dev); + else + return sdw_legacy_suspend(dev, PMSG_SUSPEND); +} + +static int sdw_pm_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + + if (pm) + return pm_generic_resume(dev); + else + return sdw_legacy_resume(dev); +} + +#else +#define sdw_pm_suspend NULL +#define sdw_pm_resume NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops soundwire_pm = { + .suspend = sdw_pm_suspend, + .resume = sdw_pm_resume, +#ifdef CONFIG_PM + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, +#endif +}; + +struct bus_type sdw_bus_type = { + .name = "soundwire", + .match = sdw_match, + .probe = sdw_probe, + .remove = sdw_remove, + .shutdown = sdw_shutdown, + .pm = &soundwire_pm, +}; +EXPORT_SYMBOL_GPL(sdw_bus_type); + +struct device sdw_slv = { + .init_name = "soundwire", +}; + +static struct static_key sdw_trace_msg = STATIC_KEY_INIT_FALSE; + +int sdw_transfer_trace_reg(void) +{ + static_key_slow_inc(&sdw_trace_msg); + + return 0; +} + +void sdw_transfer_trace_unreg(void) +{ + static_key_slow_dec(&sdw_trace_msg); +} + +/** + * sdw_lock_mstr - Get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_lock_mstr(struct sdw_master *mstr) +{ + rt_mutex_lock(&mstr->bus_lock); +} + +/** + * sdw_trylock_mstr - Try to get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +int sdw_trylock_mstr(struct sdw_master *mstr) +{ + return rt_mutex_trylock(&mstr->bus_lock); +} + + +/** + * sdw_unlock_mstr - Release exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_unlock_mstr(struct sdw_master *mstr) +{ + rt_mutex_unlock(&mstr->bus_lock); +} + + +static int sdw_assign_slv_number(struct sdw_master *mstr, + struct sdw_msg *msg) +{ + int i, j, ret = -1; + + sdw_lock_mstr(mstr); + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned == true) + continue; + mstr->sdw_addr[i].assigned = true; + for (j = 0; j < 6; j++) + mstr->sdw_addr[i].dev_id[j] = msg->buf[j]; + ret = i; + break; + } + sdw_unlock_mstr(mstr); + return ret; +} + +static int sdw_program_slv_address(struct sdw_master *mstr, + u8 slave_addr) +{ + struct sdw_msg msg; + u8 buf[1] = {0}; + int ret; + + buf[0] = slave_addr; + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_DEVNUMBER; + msg.len = 1; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "Program Slave address change\n"); + return ret; + } + return 0; +} + +static int sdw_find_slave(struct sdw_master *mstr, struct sdw_msg + *msg, bool *found) +{ + struct sdw_slv_addr *sdw_addr; + int ret = 0, i, comparison; + *found = false; + + sdw_lock_mstr(mstr); + sdw_addr = mstr->sdw_addr; + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + comparison = memcmp(sdw_addr[i].dev_id, msg->buf, + SDW_NUM_DEV_ID_REGISTERS); + if ((!comparison) && (sdw_addr[i].assigned == true)) { + *found = true; + break; + } + } + sdw_unlock_mstr(mstr); + if (*found == true) + ret = sdw_program_slv_address(mstr, sdw_addr[i].slv_number); + return ret; +} + +static void sdw_free_slv_number(struct sdw_master *mstr, + int slv_number) +{ + int i; + + sdw_lock_mstr(mstr); + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (slv_number == mstr->sdw_addr[i].slv_number) { + mstr->sdw_addr[slv_number].assigned = false; + memset(&mstr->sdw_addr[slv_number].dev_id[0], 0x0, 6); + } + } + sdw_unlock_mstr(mstr); +} + + +int count; +static int sdw_register_slave(struct sdw_master *mstr) +{ + int ret = 0, i, ports; + struct sdw_msg msg; + u8 
buf[6] = {0}; + struct sdw_slave *sdw_slave; + int slv_number = -1; + bool found = false; + + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.addr = SDW_SCP_DEVID_0; + msg.len = 6; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + while ((ret = (sdw_slave_transfer(mstr, &msg, 1)) == 1)) { + ret = sdw_find_slave(mstr, &msg, &found); + if (found && !ret) { + dev_info(&mstr->dev, "Slave already registered\n"); + continue; + /* Even if slave registering fails we continue for other + * slave status, but we flag error + */ + } else if (ret) { + dev_err(&mstr->dev, "Re-registering slave failed"); + continue; + } + slv_number = sdw_assign_slv_number(mstr, &msg); + if (slv_number <= 0) { + dev_err(&mstr->dev, "Failed to assign slv_number\n"); + ret = -EINVAL; + goto slv_number_assign_fail; + } + sdw_slave = kzalloc(sizeof(struct sdw_slave), GFP_KERNEL); + if (!sdw_slave) { + ret = -ENOMEM; + goto mem_alloc_failed; + } + sdw_slave->mstr = mstr; + sdw_slave->dev.parent = &sdw_slave->mstr->dev; + sdw_slave->dev.bus = &sdw_bus_type; + sdw_slave->dev.type = &sdw_slv_type; + sdw_slave->slv_addr = &mstr->sdw_addr[slv_number]; + sdw_slave->slv_addr->slave = sdw_slave; + /* We have assigned new slave number, so its not present + * till it again attaches to bus with this new + * slave address + */ + sdw_slave->slv_addr->status = SDW_SLAVE_STAT_NOT_PRESENT; + for (i = 0; i < 6; i++) + sdw_slave->dev_id[i] = msg.buf[i]; + dev_dbg(&mstr->dev, "SDW slave slave id found with values\n"); + dev_dbg(&mstr->dev, "dev_id0 to dev_id5: %x:%x:%x:%x:%x:%x\n", + msg.buf[0], msg.buf[1], msg.buf[2], + msg.buf[3], msg.buf[4], msg.buf[5]); + dev_dbg(&mstr->dev, "Slave number assigned is %x\n", slv_number); + /* TODO: Fill the sdw_slave structre from ACPI */ + ports = sdw_slave->sdw_slv_cap.num_of_sdw_ports; + /* Add 1 for port 0 for simplicity */ + ports++; + sdw_slave->port_ready = + kzalloc((sizeof(struct completion) * ports), + GFP_KERNEL); + if (!sdw_slave->port_ready) { + ret = -ENOMEM; + goto port_alloc_mem_failed; + } + for (i = 0; i < ports; i++) + init_completion(&sdw_slave->port_ready[i]); + + dev_set_name(&sdw_slave->dev, "sdw-slave%d-%02x:%02x:%02x:%02x:%02x:%02x", + sdw_master_id(mstr), + sdw_slave->dev_id[0], + sdw_slave->dev_id[1], + sdw_slave->dev_id[2], + sdw_slave->dev_id[3], + sdw_slave->dev_id[4], + sdw_slave->dev_id[5] + mstr->nr); + /* Set name based on dev_id. 
This will be + * compared to load driver + */ + sprintf(sdw_slave->name, "%02x:%02x:%02x:%02x:%02x:%02x", + sdw_slave->dev_id[0], + sdw_slave->dev_id[1], + sdw_slave->dev_id[2], + sdw_slave->dev_id[3], + sdw_slave->dev_id[4], + sdw_slave->dev_id[5] + mstr->nr); + ret = device_register(&sdw_slave->dev); + if (ret) { + dev_err(&mstr->dev, "Register slave failed\n"); + goto reg_slv_failed; + } + ret = sdw_program_slv_address(mstr, slv_number); + if (ret) { + dev_err(&mstr->dev, "Programming slave address failed\n"); + goto program_slv_failed; + } + dev_dbg(&mstr->dev, "Slave registered with bus id %s\n", + dev_name(&sdw_slave->dev)); + sdw_slave->slv_number = slv_number; + mstr->num_slv++; + sdw_lock_mstr(mstr); + list_add_tail(&sdw_slave->node, &mstr->slv_list); + sdw_unlock_mstr(mstr); + + } + count++; + return 0; +program_slv_failed: + device_unregister(&sdw_slave->dev); +port_alloc_mem_failed: +reg_slv_failed: + kfree(sdw_slave); +mem_alloc_failed: + sdw_free_slv_number(mstr, slv_number); +slv_number_assign_fail: + return ret; + +} + +/** + * __sdw_transfer - unlocked flavor of sdw_slave_transfer + * @mstr: Handle to SDW bus + * @msg: One or more messages to execute before STOP is issued to + * terminate the operation; each message begins with a START. + * @num: Number of messages to be executed. + * + * Returns negative errno, else the number of messages executed. + * + * Adapter lock must be held when calling this function. No debug logging + * takes place. mstr->algo->master_xfer existence isn't checked. + */ +int __sdw_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num, + struct sdw_async_xfer_data *async_data) +{ + unsigned long orig_jiffies; + int ret = 0, try, i; + struct sdw_slv_capabilities *slv_cap; + int program_scp_addr_page; + int addr = msg->slave_addr; + + /* sdw_trace_msg gets enabled when tracepoint sdw_slave_transfer gets + * enabled. This is an efficient way of keeping the for-loop from + * being executed when not needed. + */ + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < num; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_read(mstr, &msg[i], i); + else + trace_sdw_write(mstr, &msg[i], i); + } + orig_jiffies = jiffies; + for (i = 0; i < num; i++) { + for (ret = 0, try = 0; try <= mstr->retries; try++) { + if (msg->slave_addr == 0) + /* If we are enumerating slave address 0, + * we dont program scp, it should be set + * default to 0 + */ + program_scp_addr_page = 0; + else if (msg->slave_addr == 15) + /* If we are broadcasting, we need to program + * the SCP address as some slaves will be + * supporting it while some wont be. 
+ * So it should be programmed + */ + program_scp_addr_page = 1; + + else { + slv_cap = + &mstr->sdw_addr[addr].slave->sdw_slv_cap; + program_scp_addr_page = + slv_cap->paging_supported; + } + /* Call async or sync handler based on call */ + if (!async_data) + ret = mstr->driver->mstr_ops->xfer_msg(mstr, + msg, program_scp_addr_page); + /* Async transfer is not mandatory to support + * It requires only if stream is split across the + * masters, where bus driver need to send the commands + * for bank switch individually and wait for them + * to complete out side of the master context + */ + else if (mstr->driver->mstr_ops->xfer_msg_async && + async_data) + ret = mstr->driver->mstr_ops->xfer_msg_async( + mstr, msg, + program_scp_addr_page, + async_data); + else + return -ENOTSUPP; + if (ret != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + mstr->timeout)) + break; + } + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < msg->len; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_reply(mstr, &msg[i], i); + trace_sdw_result(mstr, i, ret); + } + if (!ret) + return i; + return ret; +} +EXPORT_SYMBOL_GPL(__sdw_transfer); + +/* NO PM version of slave transfer. Called from power management APIs + * to avoid dead locks. + */ +static int sdw_slave_transfer_nopm(struct sdw_master *mstr, struct sdw_msg *msg, + int num) +{ + int ret; + + if (mstr->driver->mstr_ops->xfer_msg) { + ret = __sdw_transfer(mstr, msg, num, NULL); + return ret; + } + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; +} + +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data) +{ + int ret; + /* Currently we support only message asynchronously, This is mainly + * used to do bank switch for multiple controllers + */ + if (num != 1) + return -EINVAL; + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + ret = __sdw_transfer(mstr, msg, num, async_data); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} + +/** + * sdw_slave_transfer: Transfer message between slave and mstr on the bus. + * @mstr: mstr master which will transfer the message + * @msg: Array of messages to be transferred. + * @num: Number of messages to be transferred, messages include read and write + * messages, but not the ping messages. + */ +int sdw_slave_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num) +{ + int ret; + + /* REVISIT the fault reporting model here is weak: + * + * - When we get an error after receiving N bytes from a slave, + * there is no way to report "N". + * + * - When we get a NAK after transmitting N bytes to a slave, + * there is no way to report "N" ... or to let the mstr + * continue executing the rest of this combined message, if + * that's the appropriate response. + * + * - When for example "num" is two and we successfully complete + * the first message but get an error part way through the + * second, it's unclear whether that should be reported as + * one (discarding status on the second message) or errno + * (discarding status on the first one). 
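+	 *
+	 * On success this returns the number of messages executed, otherwise
+	 * a negative errno (mirroring __sdw_transfer()).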
+ */ + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr); + if (!ret) { + /* SDW activity is ongoing. */ + ret = -EAGAIN; + goto out; + } + } else { + sdw_lock_mstr(mstr); + } + ret = __sdw_transfer(mstr, msg, num, NULL); + sdw_unlock_mstr(mstr); +out: + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_transfer); + +static int sdw_handle_dp0_interrupts(struct sdw_master *mstr, + struct sdw_slave *sdw_slv, u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + int impl_def_mask = 0; + u8 rbuf[1] = {0}, wbuf[1] = {0}; + + /* Create message for clearing the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DP0_INTCLEAR; + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DP0_INTSTAT; + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Interrupt status read failed for slave %x\n", sdw_slv->slv_number); + goto out; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %d port 0\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[0]); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_PORT_READY_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTMASK_BRA_FAILURE_MASK) { + /* TODO: Handle BRA failure */ + dev_err(&mstr->dev, "BRA failed for slave %d\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_BRA_FAILURE_MASK; + } + impl_def_mask = SDW_DP0_INTSTAT_IMPDEF1_MASK | + SDW_DP0_INTSTAT_IMPDEF2_MASK | + SDW_DP0_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} + +static int sdw_handle_port_interrupt(struct sdw_master *mstr, + struct sdw_slave *sdw_slv, int port_num, + u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + u8 rbuf[1], wbuf[1]; + int impl_def_mask = 0; + +/* + * Handle the Data port0 interrupt separately since the interrupt + * mask and stat register is different than other DPn registers + */ + if (port_num == 0 && sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return sdw_handle_dp0_interrupts(mstr, sdw_slv, status); + + /* Create message for reading the port interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DPN_INTCLEAR + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DPN_INTSTAT + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = 
sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Port Status read failed for slv %x port %x\n", + sdw_slv->slv_number, port_num); + goto out; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %x port %x\n", + sdw_slv->slv_number, port_num); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[port_num]); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_PORT_READY_MASK; + } + impl_def_mask = SDW_DPN_INTSTAT_IMPDEF1_MASK | + SDW_DPN_INTSTAT_IMPDEF2_MASK | + SDW_DPN_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + /* TODO: Handle implementation defined mask ready */ + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + /* Clear and Ack the interrupt */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} +static int sdw_handle_slave_alerts(struct sdw_master *mstr, + struct sdw_slave *sdw_slv) +{ + struct sdw_msg rd_msg[3], wr_msg; + u8 rbuf[3], wbuf[1]; + int i, ret = 0; + int cs_port_mask, cs_port_register, cs_port_start, cs_ports; + struct sdw_impl_def_intr_stat *intr_status; + struct sdw_portn_intr_stat *portn_stat; + u8 port_status[15] = {0}; + u8 control_port_stat = 0; + + + /* Read Instat 1, Instat 2 and Instat 3 registers */ + rd_msg[0].ssp_tag = 0x0; + rd_msg[0].flag = SDW_MSG_FLAG_READ; + rd_msg[0].addr = SDW_SCP_INTSTAT_1; + rd_msg[0].len = 1; + rd_msg[0].buf = &rbuf[0]; + rd_msg[0].slave_addr = sdw_slv->slv_number; + rd_msg[0].addr_page1 = 0x0; + rd_msg[0].addr_page2 = 0x0; + + rd_msg[1].ssp_tag = 0x0; + rd_msg[1].flag = SDW_MSG_FLAG_READ; + rd_msg[1].addr = SDW_SCP_INTSTAT2; + rd_msg[1].len = 1; + rd_msg[1].buf = &rbuf[1]; + rd_msg[1].slave_addr = sdw_slv->slv_number; + rd_msg[1].addr_page1 = 0x0; + rd_msg[1].addr_page2 = 0x0; + + rd_msg[2].ssp_tag = 0x0; + rd_msg[2].flag = SDW_MSG_FLAG_READ; + rd_msg[2].addr = SDW_SCP_INTSTAT3; + rd_msg[2].len = 1; + rd_msg[2].buf = &rbuf[2]; + rd_msg[2].slave_addr = sdw_slv->slv_number; + rd_msg[2].addr_page1 = 0x0; + rd_msg[2].addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_SCP_INTCLEAR1; + wr_msg.len = 1; + wr_msg.buf = &wbuf[0]; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, rd_msg, 3); + if (ret != 3) { + ret = -EINVAL; + dev_err(&mstr->dev, "Reading of register failed\n"); + goto out; + } + /* First handle parity and bus clash interrupts */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_PARITY_MASK) { + dev_err(&mstr->dev, "Parity error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_PARITY_MASK; + } + /* Handle bus errors */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_BUS_CLASH_MASK) { + dev_err(&mstr->dev, "Bus clash error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_BUS_CLASH_MASK; + } + /* Handle implementation defined mask */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_IMPL_DEF_MASK) { + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_IMPL_DEF_MASK; + control_port_stat = (rd_msg[0].buf[0] & + SDW_SCP_INTSTAT1_IMPL_DEF_MASK); + } + + /* Handle Cascaded Port interrupts from Instat_1 registers */ + + /* Number of port status bits in this register */ + cs_ports = 4; + /* Port number starts at in this register */ + 
cs_port_start = 0; + /* Bit mask for the starting port intr status */ + cs_port_mask = 0x08; + /* Bit mask for the starting port intr status */ + cs_port_register = 0; + + /* Look for cascaded port interrupts, if found handle port + * interrupts. Do this for all the Int_stat registers. + */ + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded interrupts from instat_2 register, + * if no cascaded interrupt from SCP2 cascade move to SCP3 + */ + if (!(rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_SCP2_CASCADE_MASK)) + goto handle_instat_3_register; + + + cs_ports = 7; + cs_port_start = 4; + cs_port_mask = 0x1; + cs_port_register = 1; + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded interrupts from instat_2 register, + * if no cascaded interrupt from SCP2 cascade move to impl_def intrs + */ +handle_instat_3_register: + if (!(rd_msg[1].buf[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE_MASK)) + goto handle_impl_def_interrupts; + + cs_ports = 4; + cs_port_start = 11; + cs_port_mask = 0x1; + cs_port_register = 2; + + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + +handle_impl_def_interrupts: + + /* + * If slave has not registered for implementation defined + * interrupts, dont read it. 
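+	 * (i.e. the Slave driver did not provide a handle_impl_def_interrupts
+	 * callback, which is checked just below).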
+ */ + if (!sdw_slv->driver->handle_impl_def_interrupts) + goto ack_interrupts; + + intr_status = kzalloc(sizeof(*intr_status), GFP_KERNEL); + + portn_stat = kzalloc((sizeof(*portn_stat)) * + sdw_slv->sdw_slv_cap.num_of_sdw_ports, + GFP_KERNEL); + + intr_status->portn_stat = portn_stat; + intr_status->control_port_stat = control_port_stat; + + /* Update the implementation defined status to Slave */ + for (i = 1; i < sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + intr_status->portn_stat[i].status = port_status[i]; + intr_status->portn_stat[i].num = i; + } + + intr_status->port0_stat = port_status[0]; + intr_status->control_port_stat = wr_msg.buf[0]; + + ret = sdw_slv->driver->handle_impl_def_interrupts(sdw_slv, + intr_status); + if (ret) + dev_err(&mstr->dev, "Implementation defined interrupt handling failed\n"); + + kfree(portn_stat); + kfree(intr_status); + +ack_interrupts: + /* Ack the interrupts */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + } +out: + return 0; +} + +int sdw_en_intr(struct sdw_slave *sdw_slv, int port_num, int mask) +{ + + struct sdw_msg rd_msg, wr_msg; + u8 buf; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + + rd_msg.addr = wr_msg.addr = SDW_DPN_INTMASK + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DPN_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DPN_INTSTAT_PORT_READY_MASK; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_en_scp_intr(struct sdw_slave *sdw_slv, int mask) +{ + struct sdw_msg rd_msg, wr_msg; + u8 buf = 0; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + u16 reg_addr; + + reg_addr = SDW_SCP_INTMASK1; + + rd_msg.addr = wr_msg.addr = reg_addr; + + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. 
*/ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_SCP_INTMASK1_BUS_CLASH_MASK; + buf |= SDW_SCP_INTMASK1_PARITY_MASK; + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Return if DP0 is not present */ + if (!sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return 0; + + + reg_addr = SDW_DP0_INTMASK; + rd_msg.addr = wr_msg.addr = reg_addr; + mask = sdw_slv->sdw_slv_cap.sdw_dp0_cap->imp_def_intr_mask; + buf = 0; + + /* Create message for reading the interrupt mask */ + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. */ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DP0_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DP0_INTSTAT_PORT_READY_MASK; + buf |= SDW_DP0_INTSTAT_BRA_FAILURE_MASK; + + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_prog_slv(struct sdw_slave *sdw_slv) +{ + + struct sdw_slv_capabilities *cap; + int ret, i; + struct sdw_slv_dpn_capabilities *dpn_cap; + struct sdw_master *mstr = sdw_slv->mstr; + + if (!sdw_slv->slave_cap_updated) + return 0; + cap = &sdw_slv->sdw_slv_cap; + + /* Enable DP0 and SCP interrupts */ + ret = sdw_en_scp_intr(sdw_slv, cap->scp_impl_def_intr_mask); + + /* Failure should never happen, even if it happens we continue */ + if (ret) + dev_err(&mstr->dev, "SCP program failed\n"); + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + ret = sdw_en_intr(sdw_slv, (i + 1), + dpn_cap->imp_def_intr_mask); + + if (ret) + break; + } + return ret; +} + + +static void sdw_send_slave_status(struct sdw_slave *slave, + enum sdw_slave_status *status) +{ + struct sdw_slave_driver *slv_drv = slave->driver; + + if (slv_drv && slv_drv->update_slv_status) + slv_drv->update_slv_status(slave, status); +} + +static int sdw_wait_for_deprepare(struct sdw_slave *slave) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + struct sdw_master *mstr = slave->mstr; + + /* Create message to read clock stop status, its broadcast message. 
*/ + buf[0] = 0xFF; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + /* + * Read the ClockStopNotFinished bit from the SCP_Stat register + * of particular Slave to make sure that clock stop prepare is done + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Clk stp deprepare failed for slave %d\n", + slave->slv_number); + + return -EINVAL; +} + +static void sdw_prep_slave_for_clk_stp(struct sdw_master *mstr, + struct sdw_slave *slave, + enum sdw_clk_stop_mode clock_stop_mode, + bool prep) +{ + bool wake_en; + struct sdw_slv_capabilities *cap; + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + + cap = &slave->sdw_slv_cap; + + /* Set the wakeup enable based on Slave capability */ + wake_en = !cap->wake_up_unavailable; + + if (prep) { + /* Even if its simplified clock stop prepare, + * setting prepare bit wont harm + */ + buf[0] |= (1 << SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_SHIFT); + buf[0] |= clock_stop_mode << + SDW_SCP_SYSTEMCTRL_CLK_STP_MODE_SHIFT; + buf[0] |= wake_en << SDW_SCP_SYSTEMCTRL_WAKE_UP_EN_SHIFT; + } else + buf[0] = 0; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + /* + * We are calling NOPM version of the transfer API, because + * Master controllers calls this from the suspend handler, + * so if we call the normal transfer API, it tries to resume + * controller, which result in deadlock + */ + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) + WARN_ONCE(1, "Clock Stop prepare failed for slave %d\n", + slave->slv_number); +} + +static int sdw_check_for_prep_bit(struct sdw_slave *slave) +{ + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + struct sdw_master *mstr = slave->mstr; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) { + dev_err(&mstr->dev, "SCP_SystemCtrl read failed for Slave %d\n", + slave->slv_number); + return -EINVAL; + + } + return (buf[0] & SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_MASK); + +} + +static int sdw_slv_deprepare_clk_stp1(struct sdw_slave *slave) +{ + struct sdw_slv_capabilities *cap; + int ret; + struct sdw_master *mstr = slave->mstr; + + cap = &slave->sdw_slv_cap; + + /* + * Slave might have enumerated 1st time or from clock stop mode 1 
+ * return if Slave doesn't require deprepare + */ + if (!cap->clk_stp1_deprep_required) + return 0; + + /* + * If Slave requires de-prepare after exiting from Clock Stop + * mode 1, than check for ClockStopPrepare bit in SystemCtrl register + * if its 1, de-prepare Slave from clock stop prepare, else + * return + */ + ret = sdw_check_for_prep_bit(slave); + /* If prepare bit is not set, return without error */ + if (!ret) + return 0; + + /* If error in reading register, return with error */ + if (ret < 0) + return ret; + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + if (ret) { + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + return ret; + } + } + + sdw_prep_slave_for_clk_stp(slave->mstr, slave, + cap->clock_stop1_mode_supported, false); + + /* Make sure NF = 0 for deprepare to complete */ + ret = sdw_wait_for_deprepare(slave); + + /* Return in de-prepare unsuccessful */ + if (ret) + return ret; + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + + return ret; +} + +static void handle_slave_status(struct kthread_work *work) +{ + int i, ret = 0; + struct sdw_slv_status *status, *__status__; + struct sdw_bus *bus = + container_of(work, struct sdw_bus, kwork); + struct sdw_master *mstr = bus->mstr; + unsigned long flags; + bool slave_present = 0; + + /* Handle the new attached slaves to the bus. Register new slave + * to the bus. + */ + list_for_each_entry_safe(status, __status__, &bus->status_list, node) { + if (status->status[0] == SDW_SLAVE_STAT_ATTACHED_OK) { + ret += sdw_register_slave(mstr); + if (ret) + /* Even if adding new slave fails, we will + * continue. + */ + dev_err(&mstr->dev, "Registering new slave failed\n"); + } + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_present = false; + if (status->status[i] == SDW_SLAVE_STAT_NOT_PRESENT && + mstr->sdw_addr[i].assigned == true) { + /* Logical address was assigned to slave, but + * now its down, so mark it as not present + */ + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + slave_present = true; + } + + else if (status->status[i] == SDW_SLAVE_STAT_ALERT && + mstr->sdw_addr[i].assigned == true) { + ret = 0; + /* Handle slave alerts */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_ALERT; + ret = sdw_handle_slave_alerts(mstr, + mstr->sdw_addr[i].slave); + if (ret) + dev_err(&mstr->dev, "Handle slave alert failed for Slave %d\n", i); + + slave_present = true; + + + } else if (status->status[i] == + SDW_SLAVE_STAT_ATTACHED_OK && + mstr->sdw_addr[i].assigned == true) { + + sdw_prog_slv(mstr->sdw_addr[i].slave); + + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_ATTACHED_OK; + ret = sdw_slv_deprepare_clk_stp1( + mstr->sdw_addr[i].slave); + + /* + * If depreparing Slave fails, no need to + * reprogram Slave, this should never happen + * in ideal case. 
+ */ + if (ret) + continue; + slave_present = true; + } + + if (!slave_present) + continue; + + sdw_send_slave_status(mstr->sdw_addr[i].slave, + &mstr->sdw_addr[i].status); + } + spin_lock_irqsave(&bus->spinlock, flags); + list_del(&status->node); + spin_unlock_irqrestore(&bus->spinlock, flags); + kfree(status); + } +} + +static int sdw_register_master(struct sdw_master *mstr) +{ + int ret = 0; + int i; + struct sdw_bus *sdw_bus; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdw_bus_type.p))) { + ret = -EAGAIN; + goto bus_init_not_done; + } + /* Sanity checks */ + if (unlikely(mstr->name[0] == '\0')) { + pr_err("sdw-core: Attempt to register an master with no name!\n"); + ret = -EINVAL; + goto mstr_no_name; + } + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) + mstr->sdw_addr[i].slv_number = i; + + rt_mutex_init(&mstr->bus_lock); + INIT_LIST_HEAD(&mstr->slv_list); + INIT_LIST_HEAD(&mstr->mstr_rt_list); + + sdw_bus = kzalloc(sizeof(struct sdw_bus), GFP_KERNEL); + if (!sdw_bus) + goto bus_alloc_failed; + sdw_bus->mstr = mstr; + init_completion(&sdw_bus->async_data.xfer_complete); + + mutex_lock(&sdw_core.core_lock); + list_add_tail(&sdw_bus->bus_node, &sdw_core.bus_list); + mutex_unlock(&sdw_core.core_lock); + + dev_set_name(&mstr->dev, "sdw-%d", mstr->nr); + mstr->dev.bus = &sdw_bus_type; + mstr->dev.type = &sdw_mstr_type; + + ret = device_register(&mstr->dev); + if (ret) + goto out_list; + kthread_init_worker(&sdw_bus->kworker); + sdw_bus->status_thread = kthread_run(kthread_worker_fn, + &sdw_bus->kworker, "%s", + dev_name(&mstr->dev)); + if (IS_ERR(sdw_bus->status_thread)) { + dev_err(&mstr->dev, "error: failed to create status message task\n"); + ret = PTR_ERR(sdw_bus->status_thread); + goto task_failed; + } + kthread_init_work(&sdw_bus->kwork, handle_slave_status); + INIT_LIST_HEAD(&sdw_bus->status_list); + spin_lock_init(&sdw_bus->spinlock); + ret = sdw_mstr_bw_init(sdw_bus); + if (ret) { + dev_err(&mstr->dev, "error: Failed to init mstr bw\n"); + goto mstr_bw_init_failed; + } + dev_dbg(&mstr->dev, "master [%s] registered\n", mstr->name); + + return 0; + +mstr_bw_init_failed: +task_failed: + device_unregister(&mstr->dev); +out_list: + mutex_lock(&sdw_core.core_lock); + list_del(&sdw_bus->bus_node); + mutex_unlock(&sdw_core.core_lock); + kfree(sdw_bus); +bus_alloc_failed: +mstr_no_name: +bus_init_not_done: + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + return ret; +} + +/** + * sdw_master_update_slv_status: Report the status of slave to the bus driver. + * master calls this function based on the + * interrupt it gets once the slave changes its + * state. + * @mstr: Master handle for which status is reported. + * @status: Array of status of each slave. 
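+ *
+ * The reported status is copied and queued under the bus spinlock, then
+ * handled later in the bus kthread worker (handle_slave_status()), so
+ * this may be called from atomic context.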
+ */ +int sdw_master_update_slv_status(struct sdw_master *mstr, + struct sdw_status *status) +{ + struct sdw_bus *bus = NULL; + struct sdw_slv_status *slv_status; + unsigned long flags; + + list_for_each_entry(bus, &sdw_core.bus_list, bus_node) { + if (bus->mstr == mstr) + break; + } + /* This is master is not registered with bus driver */ + if (!bus) { + dev_info(&mstr->dev, "Master not registered with bus\n"); + return 0; + } + slv_status = kzalloc(sizeof(struct sdw_slv_status), GFP_ATOMIC); + memcpy(slv_status->status, status, sizeof(struct sdw_status)); + + spin_lock_irqsave(&bus->spinlock, flags); + list_add_tail(&slv_status->node, &bus->status_list); + spin_unlock_irqrestore(&bus->spinlock, flags); + + kthread_queue_work(&bus->kworker, &bus->kwork); + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_update_slv_status); + +/** + * sdw_add_master_controller - declare sdw master, use dynamic bus number + * @master: the master to add + * Context: can sleep + * + * This routine is used to declare an sdw master when its bus number + * doesn't matter or when its bus number is specified by an dt alias. + * Examples of bases when the bus number doesn't matter: sdw masters + * dynamically added by USB links or PCI plugin cards. + * + * When this returns zero, a new bus number was allocated and stored + * in mstr->nr, and the specified master became available for slaves. + * Otherwise, a negative errno value is returned. + */ +int sdw_add_master_controller(struct sdw_master *mstr) +{ + int id; + + mutex_lock(&sdw_core.core_lock); + + id = idr_alloc(&sdw_core.idr, mstr, + sdw_core.first_dynamic_bus_num, 0, GFP_KERNEL); + mutex_unlock(&sdw_core.core_lock); + if (id < 0) + return id; + + mstr->nr = id; + + return sdw_register_master(mstr); +} +EXPORT_SYMBOL_GPL(sdw_add_master_controller); + +static void sdw_unregister_slave(struct sdw_slave *sdw_slv) +{ + + struct sdw_master *mstr; + + mstr = sdw_slv->mstr; + sdw_lock_mstr(mstr); + list_del(&sdw_slv->node); + sdw_unlock_mstr(mstr); + mstr->sdw_addr[sdw_slv->slv_number].assigned = false; + memset(mstr->sdw_addr[sdw_slv->slv_number].dev_id, 0x0, 6); + device_unregister(&sdw_slv->dev); + kfree(sdw_slv); +} + +static int __unregister_slave(struct device *dev, void *dummy) +{ + struct sdw_slave *slave = sdw_slave_verify(dev); + + if (slave && strcmp(slave->name, "dummy")) + sdw_unregister_slave(slave); + return 0; +} + +/** + * sdw_del_master_controller - unregister SDW master + * @mstr: the master being unregistered + * Context: can sleep + * + * This unregisters an SDW master which was previously registered + * by @sdw_add_master_controller or @sdw_add_master_controller. + */ +void sdw_del_master_controller(struct sdw_master *mstr) +{ + struct sdw_master *found; + + /* First make sure that this master was ever added */ + mutex_lock(&sdw_core.core_lock); + found = idr_find(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + + if (found != mstr) { + pr_debug("sdw-core: attempting to delete unregistered master [%s]\n", mstr->name); + return; + } + /* Detach any active slaves. This can't fail, thus we do not + * check the returned value. 
+ */
+	device_for_each_child(&mstr->dev, NULL, __unregister_slave);
+
+	/* device name is gone after device_unregister */
+	dev_dbg(&mstr->dev, "master [%s] unregistered\n", mstr->name);
+
+	/* wait until all references to the device are gone
+	 *
+	 * FIXME: This is old code and should ideally be replaced by an
+	 * alternative which results in decoupling the lifetime of the struct
+	 * device from the sdw_master, like spi or netdev do. Any solution
+	 * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled!
+	 */
+	init_completion(&mstr->slv_released);
+	device_unregister(&mstr->dev);
+	wait_for_completion(&mstr->slv_released);
+
+	/* free bus id */
+	mutex_lock(&sdw_core.core_lock);
+	idr_remove(&sdw_core.idr, mstr->nr);
+	mutex_unlock(&sdw_core.core_lock);
+
+	/* Clear the device structure in case this master is ever going to be
+	 * added again
+	 */
+	memset(&mstr->dev, 0, sizeof(mstr->dev));
+}
+EXPORT_SYMBOL_GPL(sdw_del_master_controller);
+
+/**
+ * sdw_slave_xfer_bra_block: Transfer a data block using the BTP/BRA
+ *				protocol.
+ * @mstr: SoundWire Master handle
+ * @block: Data block to be transferred.
+ */
+int sdw_slave_xfer_bra_block(struct sdw_master *mstr,
+			struct sdw_bra_block *block)
+{
+	struct sdw_bus *sdw_mstr_bs = NULL;
+	struct sdw_mstr_driver *ops = NULL;
+	int ret;
+
+	/*
+	 * This API is called by a Slave/codec when it needs to transfer
+	 * firmware to its memory or perform bulk register reads/writes.
+	 */
+
+	/*
+	 * Acquire the core lock.
+	 * TODO: Acquire the Master lock inside the core lock, similar to
+	 * the way it is done upstream; currently only the core lock is
+	 * taken.
+	 */
+	mutex_lock(&sdw_core.core_lock);
+
+	/* Get master data structure */
+	list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) {
+		/* Match master structure pointer */
+		if (sdw_mstr_bs->mstr != mstr)
+			continue;
+
+		break;
+	}
+
+	/*
+	 * The assumption here is that the complete SoundWire bandwidth is
+	 * used by BRA, so the bus returns -EBUSY if any active stream is
+	 * running on the given Master.
+	 * TODO: In the final implementation extra bandwidth will always be
+	 * allocated for BRA. In that case all the computation of clock,
+	 * frame shape and transport parameters for DP0 will be done
+	 * considering the BRA feature.
+	 */
+	if (!list_empty(&mstr->mstr_rt_list)) {
+
+		/*
+		 * BRA is currently not allowed while any stream is active
+		 * on the Master, so return -EBUSY.
+		 */
+
+		/* Release lock */
+		mutex_unlock(&sdw_core.core_lock);
+		return -EBUSY;
+	}
+
+	/* Get master driver ops */
+	ops = sdw_mstr_bs->mstr->driver;
+
+	/*
+	 * Check whether the Master supports bulk transfer. If not, the bus
+	 * should use the alternate method of performing the BRA request
+	 * via the normal register read/write API.
+	 * TODO: Currently the bus returns an error if the Master does not
+	 * support BRA transfers. Extend the bus driver to fall back to
+	 * normal register reads/writes as the alternate method.
+ */ + if (!ops->mstr_ops->xfer_bulk) + return -EINVAL; + + /* Data port Programming (ON) */ + ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, true); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Xport parameter config failed ret=%d\n", ret); + goto error; + } + + /* Bulk Setup */ + ret = ops->mstr_ops->xfer_bulk(mstr, block); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Transfer failed ret=%d\n", ret); + goto error; + } + + /* Data port Programming (OFF) */ + ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, false); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Xport parameter de-config failed ret=%d\n", ret); + goto error; + } + +error: + /* Release lock */ + mutex_unlock(&sdw_core.core_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_xfer_bra_block); + +/* + * An sdw_driver is used with one or more sdw_slave (slave) nodes to access + * sdw slave chips, on a bus instance associated with some sdw_master. + */ +int __sdw_mstr_driver_register(struct module *owner, + struct sdw_mstr_driver *driver) +{ + int res; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdw_bus_type.p))) + return -EAGAIN; + + /* add the driver to the list of sdw drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &sdw_bus_type; + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound slaves. + */ + res = driver_register(&driver->driver); + if (res) + return res; + + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_mstr_driver_register); + +void sdw_mstr_driver_unregister(struct sdw_mstr_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(sdw_mstr_driver_unregister); + +void sdw_slave_driver_unregister(struct sdw_slave_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(sdw_slave_driver_unregister); + +/* + * An sdw_driver is used with one or more sdw_slave (slave) nodes to access + * sdw slave chips, on a bus instance associated with some sdw_master. + */ +int __sdw_slave_driver_register(struct module *owner, + struct sdw_slave_driver *driver) +{ + int res; + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdw_bus_type.p))) + return -EAGAIN; + + /* add the driver to the list of sdw drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &sdw_bus_type; + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound slaves. 
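+ *
+ * For illustration, a minimal Slave driver registration looks roughly
+ * like the sketch below (foo_* names are hypothetical; a real driver
+ * would also provide its probe/remove and, optionally, the clock stop
+ * callbacks):
+ *
+ *	static struct sdw_slave_driver foo_sdw_driver = {
+ *		.driver = {
+ *			.name = "foo-codec",
+ *		},
+ *	};
+ *
+ *	ret = __sdw_slave_driver_register(THIS_MODULE, &foo_sdw_driver);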
+ */ + res = driver_register(&driver->driver); + if (res) + return res; + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_slave_driver_register); + +int sdw_register_slave_capabilities(struct sdw_slave *sdw, + struct sdw_slv_capabilities *cap) +{ + struct sdw_slv_capabilities *slv_cap; + struct sdw_slv_dpn_capabilities *slv_dpn_cap, *dpn_cap; + struct port_audio_mode_properties *prop, *slv_prop; + int i, j; + int ret = 0; + + slv_cap = &sdw->sdw_slv_cap; + + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->test_mode_supported = cap->test_mode_supported; + slv_cap->clock_stop1_mode_supported = cap->clock_stop1_mode_supported; + slv_cap->simplified_clock_stop_prepare = + cap->simplified_clock_stop_prepare; + slv_cap->scp_impl_def_intr_mask = cap->scp_impl_def_intr_mask; + + slv_cap->highphy_capable = cap->highphy_capable; + slv_cap->paging_supported = cap->paging_supported; + slv_cap->bank_delay_support = cap->bank_delay_support; + slv_cap->port_15_read_behavior = cap->port_15_read_behavior; + slv_cap->sdw_dp0_supported = cap->sdw_dp0_supported; + slv_cap->num_of_sdw_ports = cap->num_of_sdw_ports; + slv_cap->sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap->num_of_sdw_ports), GFP_KERNEL); + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + slv_dpn_cap = &slv_cap->sdw_dpn_cap[i]; + slv_dpn_cap->port_direction = dpn_cap->port_direction; + slv_dpn_cap->port_number = dpn_cap->port_number; + slv_dpn_cap->max_word_length = dpn_cap->max_word_length; + slv_dpn_cap->min_word_length = dpn_cap->min_word_length; + slv_dpn_cap->num_word_length = dpn_cap->num_word_length; + if (NULL == dpn_cap->word_length_buffer) + slv_dpn_cap->word_length_buffer = + dpn_cap->word_length_buffer; + else { + slv_dpn_cap->word_length_buffer = + devm_kzalloc(&sdw->dev, + dpn_cap->num_word_length * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->word_length_buffer) + return -ENOMEM; + memcpy(slv_dpn_cap->word_length_buffer, + dpn_cap->word_length_buffer, + dpn_cap->num_word_length * + (sizeof(unsigned int))); + } + slv_dpn_cap->dpn_type = dpn_cap->dpn_type; + slv_dpn_cap->dpn_grouping = dpn_cap->dpn_grouping; + slv_dpn_cap->prepare_ch = dpn_cap->prepare_ch; + slv_dpn_cap->imp_def_intr_mask = dpn_cap->imp_def_intr_mask; + slv_dpn_cap->min_ch_num = dpn_cap->min_ch_num; + slv_dpn_cap->max_ch_num = dpn_cap->max_ch_num; + slv_dpn_cap->num_ch_supported = dpn_cap->num_ch_supported; + if (NULL == slv_dpn_cap->ch_supported) + slv_dpn_cap->ch_supported = dpn_cap->ch_supported; + else { + slv_dpn_cap->ch_supported = + devm_kzalloc(&sdw->dev, + dpn_cap->num_ch_supported * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->ch_supported) + return -ENOMEM; + memcpy(slv_dpn_cap->ch_supported, + dpn_cap->ch_supported, + dpn_cap->num_ch_supported * + (sizeof(unsigned int))); + } + slv_dpn_cap->port_flow_mode_mask = + dpn_cap->port_flow_mode_mask; + slv_dpn_cap->block_packing_mode_mask = + dpn_cap->block_packing_mode_mask; + slv_dpn_cap->port_encoding_type_mask = + dpn_cap->port_encoding_type_mask; + slv_dpn_cap->num_audio_modes = dpn_cap->num_audio_modes; + + slv_dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + for (j = 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + slv_prop = 
&slv_dpn_cap->mode_properties[j]; + slv_prop->max_frequency = prop->max_frequency; + slv_prop->min_frequency = prop->min_frequency; + slv_prop->num_freq_configs = prop->num_freq_configs; + if (NULL == slv_prop->freq_supported) + slv_prop->freq_supported = + prop->freq_supported; + else { + slv_prop->freq_supported = + devm_kzalloc(&sdw->dev, + prop->num_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->freq_supported) + return -ENOMEM; + memcpy(slv_prop->freq_supported, + prop->freq_supported, + prop->num_freq_configs * + (sizeof(unsigned int))); + } + slv_prop->glitchless_transitions_mask + = prop->glitchless_transitions_mask; + slv_prop->max_sampling_frequency = + prop->max_sampling_frequency; + slv_prop->min_sampling_frequency = + prop->min_sampling_frequency; + slv_prop->num_sampling_freq_configs = + prop->num_sampling_freq_configs; + if (NULL == prop->sampling_freq_config) + slv_prop->sampling_freq_config = + prop->sampling_freq_config; + else { + slv_prop->sampling_freq_config = + devm_kzalloc(&sdw->dev, + prop->num_sampling_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->sampling_freq_config) + return -ENOMEM; + memcpy(slv_prop->sampling_freq_config, + prop->sampling_freq_config, + prop->num_sampling_freq_configs * + (sizeof(unsigned int))); + } + + slv_prop->ch_prepare_behavior = + prop->ch_prepare_behavior; + } + } + ret = sdw_prog_slv(sdw); + if (ret) + return ret; + sdw->slave_cap_updated = true; + return 0; +} +EXPORT_SYMBOL_GPL(sdw_register_slave_capabilities); + +static int sdw_get_stream_tag(char *key, int *stream_tag) +{ + int i; + int ret = -EINVAL; + struct sdw_runtime *sdw_rt; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + /* If stream tag is already allocated return that after incrementing + * reference count. This is only possible if key is provided. + */ + mutex_lock(&sdw_core.core_lock); + if (!key) + goto key_check_not_required; + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (!(strcmp(stream_tags[i].key, key))) { + stream_tags[i].ref_count++; + *stream_tag = stream_tags[i].stream_tag; + mutex_unlock(&sdw_core.core_lock); + return 0; + } + } +key_check_not_required: + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (!stream_tags[i].ref_count) { + stream_tags[i].ref_count++; + *stream_tag = stream_tags[i].stream_tag; + mutex_init(&stream_tags[i].stream_lock); + sdw_rt = kzalloc(sizeof(struct sdw_runtime), + GFP_KERNEL); + INIT_LIST_HEAD(&sdw_rt->slv_rt_list); + INIT_LIST_HEAD(&sdw_rt->mstr_rt_list); + sdw_rt->stream_state = SDW_STATE_INIT_STREAM_TAG; + stream_tags[i].sdw_rt = sdw_rt; + if (!stream_tags[i].sdw_rt) { + stream_tags[i].ref_count--; + ret = -ENOMEM; + goto out; + } + if (key) + strlcpy(stream_tags[i].key, key, + SDW_MAX_STREAM_TAG_KEY_SIZE); + mutex_unlock(&sdw_core.core_lock); + return 0; + } + } + mutex_unlock(&sdw_core.core_lock); +out: + return ret; +} + +void sdw_release_stream_tag(int stream_tag) +{ + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + mutex_lock(&sdw_core.core_lock); + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream_tags[i].ref_count--; + if (stream_tags[i].ref_count == 0) { + kfree(stream_tags[i].sdw_rt); + memset(stream_tags[i].key, 0x0, + SDW_MAX_STREAM_TAG_KEY_SIZE); + } + } + } + mutex_unlock(&sdw_core.core_lock); +} +EXPORT_SYMBOL_GPL(sdw_release_stream_tag); + +/** + * sdw_alloc_stream_tag: Assign the stream tag for the unique streams + * between master and slave device. 
+ * Normally master master will request for the + * stream tag for the stream between master + * and slave device. It programs the same stream + * tag to the slave device. Stream tag is unique + * for all the streams between masters and slave + * across SoCs. + * @guid: Group of the device port. All the ports of the device with + * part of same stream will have same guid. + * + * @stream:tag: Stream tag returned by bus driver. + */ +int sdw_alloc_stream_tag(char *guid, int *stream_tag) +{ + int ret = 0; + + ret = sdw_get_stream_tag(guid, stream_tag); + if (ret) { + pr_err("Stream tag assignment failed\n"); + goto out; + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(sdw_alloc_stream_tag); + +static struct sdw_mstr_runtime *sdw_get_mstr_rt(struct sdw_runtime *sdw_rt, + struct sdw_master *mstr) { + + struct sdw_mstr_runtime *mstr_rt; + int ret = 0; + + list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (mstr_rt->mstr == mstr) + return mstr_rt; + } + + /* Allocate sdw_mstr_runtime structure */ + mstr_rt = kzalloc(sizeof(struct sdw_mstr_runtime), GFP_KERNEL); + if (!mstr_rt) { + ret = -ENOMEM; + goto out; + } + + /* Initialize sdw_mstr_runtime structure */ + INIT_LIST_HEAD(&mstr_rt->port_rt_list); + INIT_LIST_HEAD(&mstr_rt->slv_rt_list); + list_add_tail(&mstr_rt->mstr_sdw_node, &sdw_rt->mstr_rt_list); + list_add_tail(&mstr_rt->mstr_node, &mstr->mstr_rt_list); + mstr_rt->rt_state = SDW_STATE_INIT_RT; + mstr_rt->mstr = mstr; +out: + return mstr_rt; +} + +static struct sdw_slave_runtime *sdw_config_slave_stream( + struct sdw_slave *slave, + struct sdw_stream_config *stream_config, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt; + int ret = 0; + struct sdw_stream_params *str_p; + + slv_rt = kzalloc(sizeof(struct sdw_slave_runtime), GFP_KERNEL); + if (!slv_rt) { + ret = -ENOMEM; + goto out; + } + slv_rt->slave = slave; + str_p = &slv_rt->stream_params; + slv_rt->direction = stream_config->direction; + slv_rt->rt_state = SDW_STATE_CONFIG_RT; + str_p->rate = stream_config->frame_rate; + str_p->channel_count = stream_config->channel_count; + str_p->bps = stream_config->bps; + INIT_LIST_HEAD(&slv_rt->port_rt_list); +out: + return slv_rt; +} + +static void sdw_release_mstr_stream(struct sdw_master *mstr, + struct sdw_runtime *sdw_rt) +{ + struct sdw_mstr_runtime *mstr_rt, *__mstr_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(mstr_rt, __mstr_rt, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + if (mstr_rt->mstr == mstr) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&mstr_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Master port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &mstr_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&mstr_rt->mstr_sdw_node); + if (mstr_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + list_del(&mstr_rt->mstr_node); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + kfree(mstr_rt); + } + } +} + +static void sdw_release_slave_stream(struct sdw_slave *slave, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt, *__slv_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(slv_rt, __slv_rt, &sdw_rt->slv_rt_list, + slave_sdw_node) { + if (slv_rt->slave == slave) { + + /* Get first runtime node from port list */ 
+			first_port_rt = list_first_entry(&slv_rt->port_rt_list,
+					struct sdw_port_runtime,
+					port_node);
+
+			/* Release Slave port resources */
+			list_for_each_entry_safe(port_rt, __port_rt,
+					&slv_rt->port_rt_list, port_node)
+				list_del(&port_rt->port_node);
+
+			kfree(first_port_rt);
+			list_del(&slv_rt->slave_sdw_node);
+			if (slv_rt->direction == SDW_DATA_DIR_OUT)
+				sdw_rt->tx_ref_count--;
+			else
+				sdw_rt->rx_ref_count--;
+			pm_runtime_mark_last_busy(&slave->dev);
+			pm_runtime_put_sync_autosuspend(&slave->dev);
+			kfree(slv_rt);
+		}
+	}
+}
+
+/**
+ * sdw_release_stream: De-allocates the bandwidth allocated to the
+ *			stream. This is reference counted, so the
+ *			bandwidth is de-allocated only when the last
+ *			reference to the stream is released. Normally
+ *			this is called as part of hw_free.
+ *
+ * @mstr: Master handle
+ * @slave: SoundWire Slave handle.
+ * @stream_tag: Unique stream tag identifier across the SoC for all
+ *		soundwire busses, one for each audio stream between
+ *		Slaves and Masters. It is allocated by the Master
+ *		driver for every stream that gets opened.
+ */
+int sdw_release_stream(struct sdw_master *mstr,
+	struct sdw_slave *slave,
+	unsigned int stream_tag)
+{
+	int i;
+	struct sdw_runtime *sdw_rt = NULL;
+	struct sdw_stream_tag *stream_tags = sdw_core.stream_tags;
+
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) {
+		if (stream_tags[i].stream_tag == stream_tag) {
+			sdw_rt = stream_tags[i].sdw_rt;
+			break;
+		}
+	}
+	if (!sdw_rt) {
+		dev_err(&mstr->dev, "Invalid stream tag\n");
+		return -EINVAL;
+	}
+	if (!slave)
+		sdw_release_mstr_stream(mstr, sdw_rt);
+	else
+		sdw_release_slave_stream(slave, sdw_rt);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_release_stream);
+
+/**
+ * sdw_config_stream: Allocates the bandwidth on the soundwire bus for
+ *			transferring data between Slave and Master.
+ *			This configures a single stream of data. It is
+ *			called by the Slave, and the Slave stream
+ *			configuration should match the Master stream
+ *			configuration. Normally the Slave calls this as
+ *			part of hw_params.
+ *
+ * @mstr: Master handle
+ * @slave: SoundWire Slave handle.
+ * @stream_config: Stream configuration for the soundwire audio stream.
+ * @stream_tag: Unique stream tag identifier across the soundwire bus
+ *		for each audio stream between Slaves and Master.
+ *		This is similar to the stream tag in the HDA protocol,
+ *		but here it is virtual rather than being embedded in the
+ *		protocol. The same stream tag is also valid across
+ *		Masters if some ports of another Master participate in
+ *		stream aggregation. These are input parameters to the
+ *		function.
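+ *
+ * An illustrative call sequence from a codec (Slave) driver, typically
+ * in hw_params (local names and values are hypothetical; error handling
+ * and the stream type field are omitted for brevity):
+ *
+ *	struct sdw_stream_config scfg;
+ *	int stream_tag, ret;
+ *
+ *	ret = sdw_alloc_stream_tag(NULL, &stream_tag);
+ *
+ *	scfg.direction = SDW_DATA_DIR_OUT;
+ *	scfg.frame_rate = 48000;
+ *	scfg.channel_count = 2;
+ *	scfg.bps = 24;
+ *	ret = sdw_config_stream(mstr, slave, &scfg, stream_tag);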
+ */ +int sdw_config_stream(struct sdw_master *mstr, + struct sdw_slave *slave, + struct sdw_stream_config *stream_config, + unsigned int stream_tag) +{ + int i; + int ret = 0; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_mstr_runtime *mstr_rt = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + if (!sdw_rt) { + dev_err(&mstr->dev, "Valid stream tag not found\n"); + ret = -EINVAL; + goto out; + } + if (static_key_false(&sdw_trace_msg)) + trace_sdw_config_stream(mstr, slave, stream_config, + stream_tag); + + mutex_lock(&stream->stream_lock); + + mstr_rt = sdw_get_mstr_rt(sdw_rt, mstr); + if (!mstr_rt) { + dev_err(&mstr->dev, "master runtime configuration failed\n"); + ret = -EINVAL; + goto out; + } + + if (!slave) { + mstr_rt->direction = stream_config->direction; + mstr_rt->rt_state = SDW_STATE_CONFIG_RT; + sdw_rt->xport_state = SDW_STATE_ONLY_XPORT_STREAM; + + mstr_rt->stream_params.rate = stream_config->frame_rate; + mstr_rt->stream_params.channel_count = + stream_config->channel_count; + mstr_rt->stream_params.bps = stream_config->bps; + + } else + slv_rt = sdw_config_slave_stream(slave, + stream_config, sdw_rt); + /* Stream params will be stored based on Tx only, since there can + * be only one Tx and muliple Rx, There can be muliple Tx if + * there is aggregation on Tx. That is handled by adding the channels + * to stream_params for each aggregated Tx slaves + */ + if (!sdw_rt->tx_ref_count && stream_config->direction == + SDW_DATA_DIR_OUT) { + sdw_rt->stream_params.rate = stream_config->frame_rate; + sdw_rt->stream_params.channel_count = + stream_config->channel_count; + sdw_rt->stream_params.bps = stream_config->bps; + sdw_rt->tx_ref_count++; + } + + + /* Normally there will be only one Tx in system, multiple Tx + * can only be there if we support aggregation. In that case + * there may be multiple slave or masters handing different + * channels of same Tx stream. + */ + else if (sdw_rt->tx_ref_count && stream_config->direction == + SDW_DATA_DIR_OUT) { + if (sdw_rt->stream_params.rate != + stream_config->frame_rate) { + dev_err(&mstr->dev, "Frame rate for aggregated devices not matching\n"); + ret = -EINVAL; + goto free_mem; + } + if (sdw_rt->stream_params.bps != stream_config->bps) { + dev_err(&mstr->dev, "bps for aggregated devices not matching\n"); + ret = -EINVAL; + goto free_mem; + } + /* Number of channels gets added, since both devices will + * be supporting different channels. Like one Codec + * supporting L and other supporting R channel. + */ + sdw_rt->stream_params.channel_count += + stream_config->channel_count; + sdw_rt->tx_ref_count++; + } else + sdw_rt->rx_ref_count++; + + sdw_rt->type = stream_config->type; + sdw_rt->stream_state = SDW_STATE_CONFIG_STREAM; + + /* Slaves are added to two list, This is because BW is calculated + * for two masters individually, while Ports are enabled of all + * the aggregated masters and slaves part of the same stream tag + * simultaneously. 
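+ *
+ * As a concrete illustration of the Tx reference counting above: if two
+ * codecs each render one channel of the same aggregated Tx stream, each
+ * calls sdw_config_stream() with channel_count = 1 and identical
+ * frame_rate/bps; the bus-level stream then ends up with
+ * stream_params.channel_count = 2 and tx_ref_count = 2, while the rate
+ * and bps are validated to match across the aggregated devices.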
+ */
+	if (slave) {
+		list_add_tail(&slv_rt->slave_sdw_node, &sdw_rt->slv_rt_list);
+		list_add_tail(&slv_rt->slave_node, &mstr_rt->slv_rt_list);
+	}
+	mutex_unlock(&stream->stream_lock);
+	if (slave)
+		pm_runtime_get_sync(&slave->dev);
+	else
+		pm_runtime_get_sync(&mstr->dev);
+	return ret;
+
+free_mem:
+	mutex_unlock(&stream->stream_lock);
+	kfree(mstr_rt);
+	kfree(slv_rt);
+out:
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(sdw_config_stream);
+
+/**
+ * sdw_chk_slv_dpn_caps - Returns 0 on success,
+ *			-EINVAL in case of error.
+ *
+ * This function checks the Slave port capabilities against the given
+ * stream parameters. If any of the parameters is not supported by the
+ * port capabilities, it returns an error.
+ */
+int sdw_chk_slv_dpn_caps(struct sdw_slv_dpn_capabilities *dpn_cap,
+	struct sdw_stream_params *strm_prms)
+{
+	struct port_audio_mode_properties *mode_prop =
+		dpn_cap->mode_properties;
+	int ret = 0, i, value;
+
+	/* Check sampling frequency */
+	if (mode_prop->num_sampling_freq_configs) {
+		for (i = 0; i < mode_prop->num_sampling_freq_configs; i++) {
+
+			value = mode_prop->sampling_freq_config[i];
+			if (strm_prms->rate == value)
+				break;
+		}
+
+		if (i == mode_prop->num_sampling_freq_configs)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->rate < mode_prop->min_sampling_frequency)
+			|| (strm_prms->rate >
+				mode_prop->max_sampling_frequency))
+			return -EINVAL;
+	}
+
+	/* Check word length (bits per sample) */
+	if (dpn_cap->num_word_length) {
+		for (i = 0; i < dpn_cap->num_word_length; i++) {
+
+			value = dpn_cap->word_length_buffer[i];
+			if (strm_prms->bps == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_word_length)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->bps < dpn_cap->min_word_length)
+			|| (strm_prms->bps > dpn_cap->max_word_length))
+			return -EINVAL;
+	}
+
+	/* Check number of channels */
+	if (dpn_cap->num_ch_supported) {
+		for (i = 0; i < dpn_cap->num_ch_supported; i++) {
+
+			value = dpn_cap->ch_supported[i];
+			if (strm_prms->channel_count == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_ch_supported)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->channel_count < dpn_cap->min_ch_num)
+			|| (strm_prms->channel_count > dpn_cap->max_ch_num))
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_chk_mstr_dpn_caps - Returns 0 on success,
+ *			-EINVAL in case of error.
+ *
+ * This function checks the Master port capabilities against the given
+ * stream parameters. If any of the parameters is not supported by the
+ * port capabilities, it returns an error.
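+ *
+ * For example, a port reporting word_length_buffer = {16, 24, 32}
+ * accepts a stream with bps = 24 but rejects bps = 20, while a port
+ * with num_word_length == 0 falls back to the min_word_length/
+ * max_word_length range check. The channel count is validated in the
+ * same way against ch_supported[] or the min_ch_num/max_ch_num range.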
+ */
+int sdw_chk_mstr_dpn_caps(struct sdw_mstr_dpn_capabilities *dpn_cap,
+	struct sdw_stream_params *strm_prms)
+{
+	int ret = 0, i, value;
+
+	/* Check word length (bits per sample) */
+	if (dpn_cap->num_word_length) {
+		for (i = 0; i < dpn_cap->num_word_length; i++) {
+
+			value = dpn_cap->word_length_buffer[i];
+			if (strm_prms->bps == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_word_length)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->bps < dpn_cap->min_word_length)
+			|| (strm_prms->bps > dpn_cap->max_word_length))
+			return -EINVAL;
+	}
+
+	/* Check number of channels */
+	if (dpn_cap->num_ch_supported) {
+		for (i = 0; i < dpn_cap->num_ch_supported; i++) {
+
+			value = dpn_cap->ch_supported[i];
+			if (strm_prms->channel_count == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_ch_supported)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->channel_count < dpn_cap->min_ch_num)
+			|| (strm_prms->channel_count > dpn_cap->max_ch_num))
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int sdw_mstr_port_configuration(struct sdw_master *mstr,
+			struct sdw_runtime *sdw_rt,
+			struct sdw_port_config *port_config)
+{
+	struct sdw_mstr_runtime *mstr_rt;
+	struct sdw_port_runtime *port_rt;
+	int found = 0;
+	int i;
+	int ret = 0, pn = 0;
+	struct sdw_mstr_dpn_capabilities *dpn_cap =
+		mstr->mstr_capabilities.sdw_dpn_cap;
+
+	list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+		if (mstr_rt->mstr == mstr) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		dev_err(&mstr->dev, "Master not found for this port\n");
+		return -EINVAL;
+	}
+
+	if (!dpn_cap)
+		return -EINVAL;
+
+	port_rt = kzalloc((sizeof(struct sdw_port_runtime)) *
+			port_config->num_ports, GFP_KERNEL);
+	if (!port_rt)
+		return -ENOMEM;
+
+	/*
+	 * Note: the assumption here is that no configuration is
+	 * received for port 0.
+ */ + for (i = 0; i < port_config->num_ports; i++) { + port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask; + port_rt[i].port_num = pn = port_config->port_cfg[i].port_num; + + /* Perform capability check for master port */ + ret = sdw_chk_mstr_dpn_caps(&dpn_cap[pn], + &mstr_rt->stream_params); + if (ret < 0) { + dev_err(&mstr->dev, + "Master capabilities check failed\n"); + return -EINVAL; + } + + list_add_tail(&port_rt[i].port_node, &mstr_rt->port_rt_list); + } + + return ret; +} + +static int sdw_slv_port_configuration(struct sdw_slave *slave, + struct sdw_runtime *sdw_rt, + struct sdw_port_config *port_config) +{ + struct sdw_slave_runtime *slv_rt; + struct sdw_port_runtime *port_rt; + struct sdw_slv_dpn_capabilities *dpn_cap = + slave->sdw_slv_cap.sdw_dpn_cap; + int found = 0, ret = 0; + int i, pn; + + list_for_each_entry(slv_rt, &sdw_rt->slv_rt_list, slave_sdw_node) { + if (slv_rt->slave == slave) { + found = 1; + break; + } + } + if (!found) { + dev_err(&slave->mstr->dev, "Slave not found for this port\n"); + return -EINVAL; + } + + if (!slave->slave_cap_updated) { + dev_err(&slave->mstr->dev, "Slave capabilities not updated\n"); + return -EINVAL; + } + + port_rt = kzalloc((sizeof(struct sdw_port_runtime)) * + port_config->num_ports, GFP_KERNEL); + if (!port_rt) + return -EINVAL; + + for (i = 0; i < port_config->num_ports; i++) { + port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask; + port_rt[i].port_num = pn = port_config->port_cfg[i].port_num; + + /* Perform capability check for master port */ + ret = sdw_chk_slv_dpn_caps(&dpn_cap[pn], + &slv_rt->stream_params); + if (ret < 0) { + dev_err(&slave->mstr->dev, + "Slave capabilities check failed\n"); + return -EINVAL; + } + + list_add_tail(&port_rt[i].port_node, &slv_rt->port_rt_list); + } + + return ret; +} + +/** + * sdw_config_port: Port configuration for the SoundWire. Multiple + * soundWire ports may form single stream. Like two + * ports each transferring/receiving mono channels + * forms single stream with stereo channels. + * There will be single ASoC DAI representing + * the both ports. So stream configuration will be + * stereo, but both of the ports will be configured + * for mono channels, each with different channel + * mask. This is used to program port w.r.t to stream. + * params. So no need to de-configure, since these + * are automatically destroyed once stream gets + * destroyed. + * @mstr: Master handle where the slave is connected. + * @slave: Slave handle. + * @port_config: Port configuration for each port of soundwire slave. + * @stream_tag: Stream tag, where this port is connected. 
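+ *
+ * Illustrative usage (local names are hypothetical): the caller fills
+ * pcfg.num_ports and, for each entry i, pcfg.port_cfg[i].port_num and
+ * pcfg.port_cfg[i].ch_mask, then calls:
+ *
+ *	ret = sdw_config_port(mstr, slave, &pcfg, stream_tag);
+ *
+ * For example, a stereo stream carried on a single Slave port 2 would
+ * use port_num = 2 and ch_mask = 0x3.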
+ * + */ +int sdw_config_port(struct sdw_master *mstr, + struct sdw_slave *slave, + struct sdw_port_config *port_config, + unsigned int stream_tag) +{ + int ret = 0; + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + + if (!sdw_rt) { + dev_err(&mstr->dev, "Invalid stream tag\n"); + return -EINVAL; + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < port_config->num_ports; i++) { + trace_sdw_config_port(mstr, slave, + &port_config->port_cfg[i], stream_tag); + } + } + + mutex_lock(&stream->stream_lock); + + if (!slave) + ret = sdw_mstr_port_configuration(mstr, sdw_rt, port_config); + else + ret = sdw_slv_port_configuration(slave, sdw_rt, port_config); + + mutex_unlock(&stream->stream_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_config_port); + +int sdw_prepare_and_enable(int stream_tag, bool enable) +{ + + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw(&stream_tags[i], enable); + if (ret) + pr_err("Bandwidth allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_prepare_and_enable); + +int sdw_disable_and_unprepare(int stream_tag, bool unprepare) +{ + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw_dis(&stream_tags[i], unprepare); + if (ret) + pr_err("Bandwidth de-allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_disable_and_unprepare); + +int sdw_stop_clock(struct sdw_master *mstr, enum sdw_clk_stop_mode mode) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + int slave_present = 0; + + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned && + mstr->sdw_addr[i].status != + SDW_SLAVE_STAT_NOT_PRESENT) + slave_present = 1; + } + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now + */ + msg.ssp_tag = 1; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_CTRL; + msg.len = 1; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + msg.buf = buf; + msg.slave_addr = 15; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + if (ret != 1 && slave_present) { + dev_err(&mstr->dev, "Failed to stop clk\n"); + return -EBUSY; + } + /* If we are entering clock stop mode1, mark all the slaves un-attached. 
+ */ + if (mode == SDW_CLOCK_STOP_MODE_1) { + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned) + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_stop_clock); + +int sdw_wait_for_slave_enumeration(struct sdw_master *mstr, + struct sdw_slave *slave) +{ + int timeout = 0; + + /* Wait till device gets enumerated. Wait for 2Secs before + * giving up + */ + do { + msleep(100); + timeout++; + } while ((slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) && + timeout < 20); + + if (slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) + return -EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(sdw_wait_for_slave_enumeration); + +static enum sdw_clk_stop_mode sdw_get_clk_stp_mode(struct sdw_slave *slave) +{ + enum sdw_clk_stop_mode clock_stop_mode = SDW_CLOCK_STOP_MODE_0; + struct sdw_slv_capabilities *cap = &slave->sdw_slv_cap; + + if (!slave->driver) + return clock_stop_mode; + /* + * Get the dynamic value of clock stop from Slave driver + * if supported, else use the static value from + * capabilities register. Update the capabilities also + * if we have new dynamic value. + */ + if (slave->driver->get_dyn_clk_stp_mod) { + clock_stop_mode = slave->driver->get_dyn_clk_stp_mod(slave); + + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + cap->clock_stop1_mode_supported = true; + else + cap->clock_stop1_mode_supported = false; + } else + clock_stop_mode = cap->clock_stop1_mode_supported; + + return clock_stop_mode; +} + +/** + * sdw_master_stop_clock: Stop the clock. This function broadcasts the SCP_CTRL + * register with clock_stop_now bit set. + * + * @mstr: Master handle for which clock has to be stopped. + * + * Returns 0 on success, appropriate error code on failure. + */ +int sdw_master_stop_clock(struct sdw_master *mstr) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + enum sdw_clk_stop_mode mode; + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now. If none of the Slaves are attached, then there + * may not be ACK, flag the error about ACK not recevied but + * clock will be still stopped. 
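+ *
+ * For illustration, a Master driver's suspend path is expected to be
+ * roughly:
+ *
+ *	sdw_master_prep_for_clk_stop(mstr);
+ *	sdw_master_stop_clock(mstr);
+ *
+ * and on resume, once the hardware clock is running again, it calls
+ * sdw_mstr_deprep_after_clk_start(mstr). Slaves that entered clock stop
+ * mode 1 are de-prepared only after they re-enumerate (see
+ * sdw_wait_for_slave_enumeration()).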
+ */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_CTRL; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + /* Even if broadcast fails, we stop the clock and flag error */ + if (ret != 1) + dev_err(&mstr->dev, "ClockStopNow Broadcast message failed\n"); + + /* + * Mark all Slaves as un-attached which are entering clock stop + * mode1 + */ + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + + if (!mstr->sdw_addr[i].assigned) + continue; + + /* Get clock stop mode for all Slaves */ + mode = sdw_get_clk_stp_mode(mstr->sdw_addr[i].slave); + if (mode == SDW_CLOCK_STOP_MODE_0) + continue; + + /* If clock stop mode 1, mark Slave as not present */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_NOT_PRESENT; + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_stop_clock); + +static struct sdw_slave *get_slave_for_prep_deprep(struct sdw_master *mstr, + int *slave_index) +{ + int i; + + for (i = *slave_index; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned != true) + continue; + + if (mstr->sdw_addr[i].status == SDW_SLAVE_STAT_NOT_PRESENT) + continue; + + *slave_index = i + 1; + return mstr->sdw_addr[i].slave; + } + return NULL; +} + +/* + * Wait till clock stop prepare/deprepare is finished. Prepare for all + * mode, De-prepare only for the Slaves resuming from clock stop mode 0 + */ +static void sdw_wait_for_clk_prep(struct sdw_master *mstr) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + + /* Create message to read clock stop status, its broadcast message. */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + buf[0] = 0xFF; + /* + * Once all the Slaves are written with prepare bit, + * we go ahead and broadcast the read message for the + * SCP_STAT register to read the ClockStopNotFinished bit + * Read till we get this a 0. Currently we have timeout of 1sec + * before giving up. Even if its not read as 0 after timeout, + * controller can stop the clock after warning. + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Some Slaves prepare un-successful\n"); +} + +/** + * sdw_master_prep_for_clk_stop: Prepare all the Slaves for clock stop. + * Iterate through each of the enumerated Slave. + * Prepare each Slave according to the clock stop + * mode supported by Slave. Use dynamic value from + * Slave callback if registered, else use static values + * from Slave capabilities registered. + * 1. Get clock stop mode for each Slave. + * 2. 
Call pre_prepare callback of each Slave if + * registered. + * 3. Prepare each Slave for clock stop + * 4. Broadcast the Read message to make sure + * all Slaves are prepared for clock stop. + * 5. Call post_prepare callback of each Slave if + * registered. + * + * @mstr: Master handle for which clock state has to be changed. + * + * Returns 0 + */ +int sdw_master_prep_for_clk_stop(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slave *slave = NULL; + int slv_index = 1; + + /* + * Get all the Slaves registered to the master driver for preparing + * for clock stop. Start from Slave with logical address as 1. + */ + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, true); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre prepare failed for Slave %d\n", + slave->slv_number); + } + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, true); + } + + /* Wait till prepare for all Slaves is finished */ + /* + * We should continue even if the prepare fails. Clock stop + * prepare failure on Slaves, should not impact the broadcasting + * of ClockStopNow. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + true); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. + */ + if (ret) + dev_err(&mstr->dev, "Post prepare failed for Slave %d\n", + slave->slv_number); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_prep_for_clk_stop); + +/** + * sdw_mstr_deprep_after_clk_start: De-prepare all the Slaves + * exiting clock stop mode 0 after clock resumes. Clock + * is already resumed before this. De-prepare all the Slaves + * which were earlier in ClockStop mode0. De-prepare for the + * Slaves which were there in ClockStop mode1 is done after + * they enumerated back. Its not done here as part of master + * getting resumed. + * 1. Get clock stop mode for each Slave its exiting from + * 2. Call pre_prepare callback of each Slave exiting from + * clock stop mode 0. + * 3. De-Prepare each Slave exiting from Clock Stop mode0 + * 4. Broadcast the Read message to make sure + * all Slaves are de-prepared for clock stop. + * 5. 
Call post_prepare callback of each Slave exiting from + * clock stop mode0 + * + * + * @mstr: Master handle + * + * Returns 0 + */ +int sdw_mstr_deprep_after_clk_start(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slave *slave = NULL; + /* We are preparing for stop */ + bool stop = false; + int slv_index = 1; + + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + /* Get the clock stop mode from which Slave is exiting */ + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. So nothing to be done + * here. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, false); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, false); + } + + /* + * Wait till prepare is finished for all the Slaves. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + + if (slave->driver && slave->driver->post_clk_stop_prep) + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + stop); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. 
+ */ + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_mstr_deprep_after_clk_start); + + +struct sdw_master *sdw_get_master(int nr) +{ + struct sdw_master *master; + + mutex_lock(&sdw_core.core_lock); + master = idr_find(&sdw_core.idr, nr); + if (master && !try_module_get(master->owner)) + master = NULL; + mutex_unlock(&sdw_core.core_lock); + + return master; +} +EXPORT_SYMBOL_GPL(sdw_get_master); + +void sdw_put_master(struct sdw_master *mstr) +{ + if (mstr) + module_put(mstr->owner); +} +EXPORT_SYMBOL_GPL(sdw_put_master); + +static void sdw_exit(void) +{ + device_unregister(&sdw_slv); + bus_unregister(&sdw_bus_type); +} + +static int sdw_init(void) +{ + int retval; + int i; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) + sdw_core.stream_tags[i].stream_tag = i; + mutex_init(&sdw_core.core_lock); + INIT_LIST_HEAD(&sdw_core.bus_list); + idr_init(&sdw_core.idr); + retval = bus_register(&sdw_bus_type); + + if (!retval) + retval = device_register(&sdw_slv); + + + if (retval) + bus_unregister(&sdw_bus_type); + + retval = sdw_bus_bw_init(); + if (retval) { + device_unregister(&sdw_slv); + bus_unregister(&sdw_bus_type); + } + + return retval; +} +postcore_initcall(sdw_init); +module_exit(sdw_exit); + +MODULE_AUTHOR("Hardik Shah "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("SoundWire bus driver"); +MODULE_ALIAS("platform:soundwire"); diff --git a/drivers/sdw/sdw_bwcalc.c b/drivers/sdw/sdw_bwcalc.c new file mode 100644 index 000000000000..90ae86143e57 --- /dev/null +++ b/drivers/sdw/sdw_bwcalc.c @@ -0,0 +1,3079 @@ +/* + * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Sanyog Kale + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include +#include +#include "sdw_priv.h" +#include +#include + + + +#ifndef CONFIG_SND_SOC_SVFPGA /* Original */ +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +int rows[MAX_NUM_ROWS] = {48, 50, 60, 64, 72, 75, 80, 90, + 96, 125, 144, 147, 100, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; +#define SDW_DEFAULT_SSP 50 +#else +int rows[MAX_NUM_ROWS] = {125, 64, 48, 50, 60, 72, 75, 80, 90, + 96, 144, 147, 100, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; +#define SDW_DEFAULT_SSP 24 +#endif /* IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) */ + +int cols[MAX_NUM_COLS] = {2, 4, 6, 8, 10, 12, 14, 16}; + +#else +/* For PDM Capture, frameshape used is 50x10 */ +int rows[MAX_NUM_ROWS] = {50, 100, 48, 60, 64, 72, 75, 80, 90, + 96, 125, 144, 147, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; + +int cols[MAX_NUM_COLS] = {10, 2, 4, 6, 8, 12, 14, 16}; +#define SDW_DEFAULT_SSP 50 +#endif + +/* + * TBD: Get supported clock frequency from ACPI and store + * it in master data structure. 
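+ *
+ * Until then, only the nominal (undivided) clock is tried. For
+ * reference, the arithmetic used when picking a clock and frame shape
+ * (sdw_get_clock_frmshp()) works out as follows, assuming for
+ * illustration a 9.6 MHz base clock (19.2 MHz bus clock after the
+ * doubling in sdw_mstr_bw_init()) and a 48 x 2 frame:
+ *
+ *	frame size      = 48 * 2 = 96 bits
+ *	frame frequency = 19200000 / 96 = 200000 frames/s
+ *	control bits    = 48 per frame -> 48 * 200000 = 9.6 Mbps
+ *	payload         = 19.2 Mbps - 9.6 Mbps = 9.6 Mbps
+ *
+ * The selected row/column combination must leave enough payload to
+ * cover the accumulated stream bandwidth on the bus.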
+ */ +#define MAXCLOCKDIVS 1 +int clock_div[MAXCLOCKDIVS] = {1}; + +struct sdw_num_to_col sdw_num_col_mapping[MAX_NUM_COLS] = { + {0, 2}, {1, 4}, {2, 6}, {3, 8}, {4, 10}, {5, 12}, {6, 14}, {7, 16}, +}; + +struct sdw_num_to_row sdw_num_row_mapping[MAX_NUM_ROWS] = { + {0, 48}, {1, 50}, {2, 60}, {3, 64}, {4, 75}, {5, 80}, {6, 125}, + {7, 147}, {8, 96}, {9, 100}, {10, 120}, {11, 128}, {12, 150}, + {13, 160}, {14, 250}, {16, 192}, {17, 200}, {18, 240}, {19, 256}, + {20, 72}, {21, 144}, {22, 90}, {23, 180}, +}; + +/** + * sdw_bus_bw_init - returns Success + * + * + * This function is called from sdw_init function when bus driver + * gets intitalized. This function performs all the generic + * intializations required for BW control. + */ +int sdw_bus_bw_init(void) +{ + int r, c, rowcolcount = 0; + int control_bits = 48; + + for (c = 0; c < MAX_NUM_COLS; c++) { + + for (r = 0; r < MAX_NUM_ROWS; r++) { + sdw_core.rowcolcomb[rowcolcount].col = cols[c]; + sdw_core.rowcolcomb[rowcolcount].row = rows[r]; + sdw_core.rowcolcomb[rowcolcount].control_bits = + control_bits; + sdw_core.rowcolcomb[rowcolcount].data_bits = + (cols[c] * rows[r]) - control_bits; + rowcolcount++; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_bw_init); + + +/** + * sdw_mstr_bw_init - returns Success + * + * + * This function is called from sdw_register_master function + * for each master controller gets register. This function performs + * all the intializations per master controller required for BW control. + */ +int sdw_mstr_bw_init(struct sdw_bus *sdw_bs) +{ + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + + /* Initialize required parameters in bus structure */ + sdw_bs->bandwidth = 0; + sdw_bs->system_interval = 0; + sdw_bs->frame_freq = 0; + sdw_bs->clk_state = SDW_CLK_STATE_ON; + sdw_mstr_cap = &sdw_bs->mstr->mstr_capabilities; + sdw_bs->clk_freq = (sdw_mstr_cap->base_clk_freq * 2); + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_mstr_bw_init); + + +/** + * sdw_get_col_to_num + * + * Returns column number from the mapping. + */ +int sdw_get_col_to_num(int col) +{ + int i; + + for (i = 0; i <= MAX_NUM_COLS; i++) { + if (sdw_num_col_mapping[i].col == col) + return sdw_num_col_mapping[i].num; + } + + return 0; /* Lowest Column number = 2 */ +} + + +/** + * sdw_get_row_to_num + * + * Returns row number from the mapping. + */ +int sdw_get_row_to_num(int row) +{ + int i; + + for (i = 0; i <= MAX_NUM_ROWS; i++) { + if (sdw_num_row_mapping[i].row == row) + return sdw_num_row_mapping[i].num; + } + + return 0; /* Lowest Row number = 48 */ +} + +/* + * sdw_lcm - returns LCM of two numbers + * + * + * This function is called BW calculation function to find LCM + * of two numbers. + */ +int sdw_lcm(int num1, int num2) +{ + int max; + + /* maximum value is stored in variable max */ + max = (num1 > num2) ? num1 : num2; + + while (1) { + if (max%num1 == 0 && max%num2 == 0) + break; + ++max; + } + + return max; +} + + +/* + * sdw_cfg_slv_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures slave registers for + * transport and port parameters. 
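+ *
+ * Concretely, the transport parameters are written to the Slave's
+ * alternate (currently inactive) bank: DPN_BlockCtrl2, DPN_SampleCtrl1/2,
+ * DPN_OffsetCtrl1/2, DPN_HCtrl, DPN_BlockCtrl3 and DPN_LaneCtrl. The
+ * port parameters go to DPN_PortCtrl (a read-modify-write of the flow
+ * and data mode fields) and to the word length register that follows it.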
+ */ +int sdw_cfg_slv_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_slv_params, + struct sdw_port_params *p_slv_params, int slv_number) +{ + struct sdw_msg wr_msg, wr_msg1, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + u8 wbuf1[2] = {0, 0}; + u8 rbuf[1] = {0}; + + +#ifdef CONFIG_SND_SOC_SVFPGA + /* + * The below hardcoding is required + * for running PDM capture with SV conora card + * because the transport params of card is not + * same as master parameters. Also not all + * standard registers are valid. + */ + t_slv_params->blockgroupcontrol_valid = false; + t_slv_params->sample_interval = 50; + t_slv_params->offset1 = 0; + t_slv_params->offset2 = 0; + t_slv_params->hstart = 1; + t_slv_params->hstop = 6; + p_slv_params->word_length = 30; +#endif + + /* Program slave alternate bank with all transport parameters */ + /* DPN_BlockCtrl2 */ + wbuf[0] = t_slv_params->blockgroupcontrol; + /* DPN_SampleCtrl1 */ + wbuf[1] = (t_slv_params->sample_interval - 1) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; + wbuf[2] = ((t_slv_params->sample_interval - 1) >> 8) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; /* DPN_SampleCtrl2 */ + wbuf[3] = t_slv_params->offset1; /* DPN_OffsetCtrl1 */ + wbuf[4] = t_slv_params->offset2; /* DPN_OffsetCtrl2 */ + /* DPN_HCtrl */ + wbuf[5] = (t_slv_params->hstop | (t_slv_params->hstart << 4)); + wbuf[6] = t_slv_params->blockpackingmode; /* DPN_BlockCtrl3 */ + wbuf[7] = t_slv_params->lanecontrol; /* DPN_LaneCtrl */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + /* Program slave alternate bank with all port parameters */ + rd_msg.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + wbuf1[0] = (p_slv_params->port_flow_mode | + (p_slv_params->port_data_mode << + SDW_DPN_PORTCTRL_PORTDATAMODE_SHIFT) | + (rbuf[0])); + + wbuf1[1] = (p_slv_params->word_length - 1); + + /* Check whether address computed is correct for both cases */ + wr_msg.addr = ((SDW_DPN_BLOCKCTRL2 + + (1 * (!t_slv_params->blockgroupcontrol_valid)) + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num)); + + wr_msg1.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; +#ifdef CONFIG_SND_SOC_SVFPGA + wr_msg.len = (5 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#else + wr_msg.len = (7 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#endif + + wr_msg.slave_addr = slv_number; + wr_msg.buf = &wbuf[0 + (1 * (!t_slv_params->blockgroupcontrol_valid))]; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + wr_msg1.ssp_tag = 0x0; + wr_msg1.flag = SDW_MSG_FLAG_WRITE; + wr_msg1.len = 2; + + wr_msg1.slave_addr = slv_number; + wr_msg1.buf = &wbuf1[0]; + wr_msg1.addr_page1 = 0x0; + wr_msg1.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, 
"Register transfer failed\n"); + goto out; + } +out: + + return ret; +} + + +/* + * sdw_cfg_mstr_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures master registers for + * transport and port parameters. + */ +int sdw_cfg_mstr_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_mstr_params, + struct sdw_port_params *p_mstr_params) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + int banktouse, ret = 0; + + /* 1. Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 2. Set Master Xport Params */ + if (ops->mstr_port_ops->dpn_set_port_transport_params) { + ret = ops->mstr_port_ops->dpn_set_port_transport_params + (mstr_bs->mstr, t_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Set Master Port Params */ + if (ops->mstr_port_ops->dpn_set_port_params) { + ret = ops->mstr_port_ops->dpn_set_port_params + (mstr_bs->mstr, p_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_cfg_params_mstr_slv - returns Success + * + * This function copies/configure master/slave transport & + * port params. + * + */ +int sdw_cfg_params_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + bool state_check) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + struct sdw_transport_params *t_params, *t_slv_params; + struct sdw_port_params *p_params, *p_slv_params; + int ret = 0; + + list_for_each_entry(slv_rt, + &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + /* configure transport params based on state */ + if ((state_check) && + (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + continue; + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + /* Fill in port params here */ + port_slv_rt->port_params.num = port_slv_rt->port_num; + port_slv_rt->port_params.word_length = + slv_rt->stream_params.bps; + /* Normal/Isochronous Mode */ + port_slv_rt->port_params.port_flow_mode = 0x0; + /* Normal Mode */ + port_slv_rt->port_params.port_data_mode = 0x0; + t_slv_params = &port_slv_rt->transport_params; + p_slv_params = &port_slv_rt->port_params; + + /* Configure xport & port params for slave */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_slv_params, + p_slv_params, slv_rt->slave->slv_number); + if (ret < 0) + return ret; + + } + } + + if ((state_check) && + (sdw_mstr_bs_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + return 0; + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + /* Transport and port parameters */ + t_params = &port_rt->transport_params; + p_params = &port_rt->port_params; + + + p_params->num = port_rt->port_num; + p_params->word_length = sdw_mstr_bs_rt->stream_params.bps; + p_params->port_flow_mode = 0x0; /* Normal/Isochronous Mode */ + p_params->port_data_mode = 0x0; /* Normal Mode */ + + /* Configure xport params and port params for master */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) + return ret; + + } + + return 0; +} + + +/* + * sdw_cfg_slv_enable_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable slave port channels. 
+ */ +int sdw_cfg_slv_enable_disable(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * + port_slv_strm->port_num)); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + + if (chn_en->is_activate) { + + /* + * 1. slave port enable_ch_pre + * --> callback + * --> no callback available + */ + + /* 2. slave port enable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + /* + * 3. slave port enable post pre + * --> callback + * --> no callback available + */ + slv_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + + } else { + + /* + * 1. slave port enable_ch_unpre + * --> callback + * --> no callback available + */ + + /* 2. slave port disable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask)); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * 3. slave port enable post unpre + * --> callback + * --> no callback available + */ + if (!chn_en->is_bank_sw) + slv_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + } +out: + return ret; + +} + + +/* + * sdw_cfg_mstr_activate_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable master port channels. 
+ */ +int sdw_cfg_mstr_activate_disable(struct sdw_bus *mstr_bs, + struct sdw_mstr_runtime *mstr_rt_strm, + struct sdw_port_runtime *port_mstr_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = port_mstr_strm->port_num; + activate_ch.ch_mask = port_mstr_strm->channel_mask; + activate_ch.activate = chn_en->is_activate; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + /* 2. Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + if (chn_en->is_activate) + mstr_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + else if (!chn_en->is_bank_sw) + mstr_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's. + */ +int sdw_en_dis_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_runtime *sdw_rt, bool is_act) +{ + struct sdw_slave_runtime *slv_rt_strm = NULL; + struct sdw_port_runtime *port_slv_strm, *port_mstr_strm; + struct sdw_mstr_runtime *mstr_rt_strm = NULL; + struct port_chn_en_state chn_en; + int ret = 0; + + if (is_act) + chn_en.is_bank_sw = true; + else + chn_en.is_bank_sw = false; + + chn_en.is_activate = is_act; + + list_for_each_entry(slv_rt_strm, &sdw_rt->slv_rt_list, slave_sdw_node) { + + if (slv_rt_strm->slave == NULL) + break; + + list_for_each_entry(port_slv_strm, + &slv_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt_strm, + port_slv_strm, &chn_en); + if (ret < 0) + return ret; + + } + + break; + + } + + list_for_each_entry(mstr_rt_strm, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + if (mstr_rt_strm->mstr == NULL) + break; + + list_for_each_entry(port_mstr_strm, + &mstr_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, mstr_rt_strm, + port_mstr_strm, &chn_en); + if (ret < 0) + return ret; + + } + + } + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv_state - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's based on runtime state. + */ +int sdw_en_dis_mstr_slv_state(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + struct port_chn_en_state *chn_en) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt, *port_rt; + int ret = 0; + + list_for_each_entry(slv_rt, &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt, + port_slv_rt, chn_en); + if (ret < 0) + return ret; + + } + } + } + + if (sdw_mstr_bs_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, sdw_mstr_bs_rt, port_rt, chn_en); + if (ret < 0) + return ret; + + } + } + + return 0; +} + + +/* + * sdw_get_clock_frmshp - returns Success + * -EINVAL - In case of error. 
+ * + * + * This function computes clock and frame shape based on + * clock frequency. + */ +int sdw_get_clock_frmshp(struct sdw_bus *sdw_mstr_bs, int *frame_int, + struct sdw_mstr_runtime *sdw_mstr_rt) +{ + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = NULL; + struct port_audio_mode_properties *mode_prop = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt = NULL; + int i, j, rc; + int clock_reqd = 0, frame_interval = 0, frame_frequency = 0; + int sel_row = 0, sel_col = 0, pn = 0; + int value; + bool clock_ok = false; + + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + + /* + * Find nearest clock frequency needed by master for + * given bandwidth + */ + for (i = 0; i < MAXCLOCKDIVS; i++) { + + /* TBD: Check why 3000 */ + if ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) <= + sdw_mstr_bs->bandwidth) || + ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) + % 3000) != 0)) + continue; + + clock_reqd = ((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]); + + /* + * Check all the slave device capabilities + * here and find whether given frequency is + * supported by all slaves + */ + list_for_each_entry(slv_rt, &sdw_mstr_rt->slv_rt_list, + slave_node) { + + /* check for valid slave */ + if (slv_rt->slave == NULL) + break; + + /* check clock req for each port */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + pn = port_slv_rt->port_num; + + + sdw_slv_dpn_cap = + &slv_rt->slave->sdw_slv_cap.sdw_dpn_cap[pn]; + mode_prop = sdw_slv_dpn_cap->mode_properties; + + /* + * TBD: Indentation to be fixed, + * code refactoring to be considered. + */ + if (mode_prop->num_freq_configs) { + for (j = 0; j < + mode_prop->num_freq_configs; j++) { + value = + mode_prop->freq_supported[j]; + if (clock_reqd == value) { + clock_ok = true; + break; + } + if (j == + mode_prop->num_freq_configs) { + clock_ok = false; + break; + } + + } + + } else { + if ((clock_reqd < + mode_prop->min_frequency) || + (clock_reqd > + mode_prop->max_frequency)) { + clock_ok = false; + } else + clock_ok = true; + } + + /* Go for next clock frequency */ + if (!clock_ok) + break; + } + + /* + * Dont check next slave, go for next clock + * frequency + */ + if (!clock_ok) + break; + } + + /* check for next clock divider */ + if (!clock_ok) + continue; + + /* Find frame shape based on bandwidth per controller */ + for (rc = 0; rc < MAX_NUM_ROW_COLS; rc++) { + frame_interval = + sdw_core.rowcolcomb[rc].row * + sdw_core.rowcolcomb[rc].col; + frame_frequency = clock_reqd/frame_interval; + + if ((clock_reqd - + (frame_frequency * + sdw_core.rowcolcomb[rc]. + control_bits)) < + sdw_mstr_bs->bandwidth) + continue; + + break; + } + + /* Valid frameshape not found, check for next clock freq */ + if (rc == MAX_NUM_ROW_COLS) + continue; + + sel_row = sdw_core.rowcolcomb[rc].row; + sel_col = sdw_core.rowcolcomb[rc].col; + sdw_mstr_bs->frame_freq = frame_frequency; + sdw_mstr_bs->clk_freq = clock_reqd; + sdw_mstr_bs->clk_div = clock_div[i]; + clock_ok = false; + *frame_int = frame_interval; + sdw_mstr_bs->col = sel_col; + sdw_mstr_bs->row = sel_row; + + return 0; + } + + /* None of clock frequency matches, return error */ + if (i == MAXCLOCKDIVS) + return -EINVAL; + + return 0; +} + +/* + * sdw_compute_sys_interval - returns Success + * -EINVAL - In case of error. + * + * + * This function computes system interval. 
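+ *
+ * Worked example (numbers assumed for illustration only): with
+ * clk_freq = 9.6 MHz, a 48 kHz stream gives
+ *   sample_interval = 9600000 / 48000 = 200
+ * stream_interval is the LCM of the sample intervals of all active
+ * streams, and
+ *   system_interval = (base_clk_freq * 2 / clk_freq) *
+ *                     LCM(stream_interval, frame_interval)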
+ */ +int sdw_compute_sys_interval(struct sdw_bus *sdw_mstr_bs, + struct sdw_master_capabilities *sdw_mstr_cap, + int frame_interval) +{ + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_rt = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + int lcmnum1 = 0, lcmnum2 = 0, div = 0, lcm = 0; + int sample_interval; + + /* + * once you got bandwidth frame shape for bus, + * run a loop for all the active streams running + * on bus and compute stream interval & sample_interval. + */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* + * Calculate sample interval for stream + * running on given master. + */ + if (sdw_mstr_rt->stream_params.rate) + sample_interval = (sdw_mstr_bs->clk_freq/ + sdw_mstr_rt->stream_params.rate); + else + return -EINVAL; + + /* Run port loop to assign sample interval per port */ + list_for_each_entry(port_rt, + &sdw_mstr_rt->port_rt_list, port_node) { + + t_params = &port_rt->transport_params; + + /* + * Assign sample interval each port transport + * properties. Assumption is that sample interval + * per port for given master will be same. + */ + t_params->sample_interval = sample_interval; + } + + /* Calculate LCM */ + lcmnum2 = sample_interval; + if (!lcmnum1) + lcmnum1 = sdw_lcm(lcmnum2, lcmnum2); + else + lcmnum1 = sdw_lcm(lcmnum1, lcmnum2); + + /* Run loop for slave per master runtime */ + list_for_each_entry(slv_rt, + &sdw_mstr_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + /* Assign sample interval for each port of slave */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + t_slv_params = &port_slv_rt->transport_params; + + /* Assign sample interval each port */ + t_slv_params->sample_interval = sample_interval; + } + } + } + + /* + * If system interval already calculated + * In pause/resume, underrun scenario + */ + if (sdw_mstr_bs->system_interval) + return 0; + + /* Assign frame stream interval */ + sdw_mstr_bs->stream_interval = lcmnum1; + + /* 6. compute system_interval */ + if ((sdw_mstr_cap) && (sdw_mstr_bs->clk_freq)) { + + div = ((sdw_mstr_cap->base_clk_freq * 2) / + sdw_mstr_bs->clk_freq); + + if ((lcmnum1) && (frame_interval)) + lcm = sdw_lcm(lcmnum1, frame_interval); + else + return -EINVAL; + + sdw_mstr_bs->system_interval = (div * lcm); + + } + + /* + * Something went wrong, may be sdw_lcm value may be 0, + * return error accordingly + */ + if (!sdw_mstr_bs->system_interval) + return -EINVAL; + + + return 0; +} + +/** + * sdw_chk_first_node - returns True or false + * + * This function returns true in case of first node + * else returns false. + */ +bool sdw_chk_first_node(struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_master *sdw_mstr) +{ + struct sdw_mstr_runtime *first_rt = NULL; + + first_rt = list_first_entry(&sdw_mstr->mstr_rt_list, + struct sdw_mstr_runtime, mstr_node); + if (sdw_mstr_rt == first_rt) + return true; + else + return false; + +} + +/* + * sdw_compute_hstart_hstop - returns Success + * -EINVAL - In case of error. + * + * + * This function computes hstart and hstop for running + * streams per master & slaves. 
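+ *
+ * Sketch of the math used below (values per rate group):
+ *   full_bw    = clk_freq / rate
+ *   payload_bw = sum of bps * channel_count over the group's streams
+ *   hwidth     = DIV_ROUND_UP(sel_col * payload_bw, full_bw)
+ * Columns are allocated from the right, so the first group gets
+ *   hstop = sel_col - 1, hstart = hstop - hwidth + 1
+ * and the next group continues at hstop - hwidth.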
+ */ +int sdw_compute_hstart_hstop(struct sdw_bus *sdw_mstr_bs) +{ + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_rt; + struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + int hstart = 0, hstop = 0; + int column_needed = 0; + int sel_col = sdw_mstr_bs->col; + int group_count = 0, no_of_channels = 0; + struct temp_elements *temp, *element; + int rates[10]; + int num, ch_mask, block_offset, i, port_block_offset; + + /* Run loop for all master runtimes for given master */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* should not compute any transport params */ + if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + /* Perform grouping of streams based on stream rate */ + if (sdw_mstr_rt == list_first_entry(&sdw_mstr->mstr_rt_list, + struct sdw_mstr_runtime, mstr_node)) + rates[group_count++] = sdw_mstr_rt->stream_params.rate; + else { + num = group_count; + for (i = 0; i < num; i++) { + if (sdw_mstr_rt->stream_params.rate == rates[i]) + break; + + if (i == num) + rates[group_count++] = + sdw_mstr_rt->stream_params.rate; + } + } + } + + /* check for number of streams and number of group count */ + if (group_count == 0) + return 0; + + /* Allocate temporary memory holding temp variables */ + temp = kzalloc((sizeof(struct temp_elements) * group_count), + GFP_KERNEL); + if (!temp) + return -ENOMEM; + + /* Calculate full bandwidth per group */ + for (i = 0; i < group_count; i++) { + element = &temp[i]; + element->rate = rates[i]; + element->full_bw = sdw_mstr_bs->clk_freq/element->rate; + } + + /* Calculate payload bandwidth per group */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* should not compute any transport params */ + if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + for (i = 0; i < group_count; i++) { + element = &temp[i]; + if (sdw_mstr_rt->stream_params.rate == element->rate) { + element->payload_bw += + sdw_mstr_rt->stream_params.bps * + sdw_mstr_rt->stream_params.channel_count; + } + + /* Any of stream rate should match */ + if (i == group_count) + return -EINVAL; + } + } + + /* Calculate hwidth per group and total column needed per master */ + for (i = 0; i < group_count; i++) { + element = &temp[i]; + element->hwidth = + (sel_col * element->payload_bw + + element->full_bw - 1)/element->full_bw; + column_needed += element->hwidth; + } + + /* Check column required should not be greater than selected columns*/ + if (column_needed > sel_col - 1) + return -EINVAL; + + /* Compute hstop */ + hstop = sel_col - 1; + + /* Run loop for all groups to compute transport parameters */ + for (i = 0; i < group_count; i++) { + port_block_offset = block_offset = 1; + element = &temp[i]; + + /* Find streams associated with each group */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* should not compute any transport params */ + if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + if (sdw_mstr_rt->stream_params.rate != element->rate) + continue; + + /* Compute hstart */ + sdw_mstr_rt->hstart = hstart = + hstop - element->hwidth + 1; + sdw_mstr_rt->hstop = hstop; + + /* Assign hstart, hstop, block offset for each port */ + list_for_each_entry(port_rt, + 
&sdw_mstr_rt->port_rt_list, port_node) { + + t_params = &port_rt->transport_params; + t_params->num = port_rt->port_num; + t_params->hstart = hstart; + t_params->hstop = hstop; + t_params->offset1 = port_block_offset; + t_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_params->blockgroupcontrol_valid = true; + t_params->blockgroupcontrol = 0x0; + t_params->lanecontrol = 0x0; + /* Copy parameters if first node */ + if (port_rt == list_first_entry + (&sdw_mstr_rt->port_rt_list, + struct sdw_port_runtime, port_node)) { + + sdw_mstr_rt->hstart = hstart; + sdw_mstr_rt->hstop = hstop; + + sdw_mstr_rt->block_offset = + port_block_offset; + + } + + /* Get no. of channels running on curr. port */ + ch_mask = port_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + + port_block_offset += + sdw_mstr_rt->stream_params.bps * + no_of_channels; + } + + /* Compute block offset */ + block_offset += sdw_mstr_rt->stream_params.bps * + sdw_mstr_rt->stream_params.channel_count; + + /* + * Re-assign port_block_offset for next stream + * under same group + */ + port_block_offset = block_offset; + } + + /* Compute hstop for next group */ + hstop = hstop - element->hwidth; + } + + /* Compute transport params for slave */ + + /* Run loop for master runtime streams running on master */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + /* Get block offset from master runtime */ + port_block_offset = sdw_mstr_rt->block_offset; + + /* Run loop for slave per master runtime */ + list_for_each_entry(slv_rt, + &sdw_mstr_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + /* Run loop for each port of slave */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + t_slv_params = &port_slv_rt->transport_params; + t_slv_params->num = port_slv_rt->port_num; + + /* Assign transport parameters */ + t_slv_params->hstart = sdw_mstr_rt->hstart; + t_slv_params->hstop = sdw_mstr_rt->hstop; + t_slv_params->offset1 = port_block_offset; + t_slv_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_slv_params->blockgroupcontrol_valid = true; + t_slv_params->blockgroupcontrol = 0x0; + t_slv_params->lanecontrol = 0x0; + + /* Get no. of channels running on curr. port */ + ch_mask = port_slv_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + /* Increment block offset for next port/slave */ + port_block_offset += slv_rt->stream_params.bps * + no_of_channels; + } + } + } + + kfree(temp); + + return 0; +} + +/* + * sdw_cfg_frmshp_bnkswtch - returns Success + * -EINVAL - In case of error. + * -ENOMEM - In case of memory alloc failure. + * -EAGAIN - In case of activity ongoing. + * + * + * This function broadcast frameshape on framectrl + * register and performs bank switch. 
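+ *
+ * Sketch of the broadcast done below (encoded row/column indices come
+ * from sdw_get_row_to_num()/sdw_get_col_to_num(), values assumed):
+ *   wbuf[0] = numcol | (numrow << 3);
+ * written to SCP_FrameCtrl of the inactive bank at the broadcast
+ * address 0xF; the active bank is toggled once the write completes.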
+ */ +int sdw_cfg_frmshp_bnkswtch(struct sdw_bus *mstr_bs, bool is_wait) +{ + struct sdw_msg *wr_msg; + int ret = 0; + int banktouse, numcol, numrow; + u8 *wbuf; + + wr_msg = kzalloc(sizeof(struct sdw_msg), GFP_KERNEL); + if (!wr_msg) + return -ENOMEM; + + mstr_bs->async_data.msg = wr_msg; + + wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL); + if (!wbuf) + return -ENOMEM; + + numcol = sdw_get_col_to_num(mstr_bs->col); + numrow = sdw_get_row_to_num(mstr_bs->row); + + wbuf[0] = numcol | (numrow << 3); + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + if (banktouse) { + wr_msg->addr = (SDW_SCP_FRAMECTRL + SDW_BANK1_REGISTER_OFFSET) + + (SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */ + } else { + + wr_msg->addr = SDW_SCP_FRAMECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */ + } + + wr_msg->ssp_tag = 0x1; + wr_msg->flag = SDW_MSG_FLAG_WRITE; + wr_msg->len = 1; + wr_msg->slave_addr = 0xF; /* Broadcast address*/ + wr_msg->buf = wbuf; + wr_msg->addr_page1 = 0x0; + wr_msg->addr_page2 = 0x0; + + if (is_wait) { + + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr_bs->mstr); + if (!ret) { + /* SDW activity is ongoing. */ + ret = -EAGAIN; + goto out; + } + } else + sdw_lock_mstr(mstr_bs->mstr); + + ret = sdw_slave_transfer_async(mstr_bs->mstr, wr_msg, + 1, &mstr_bs->async_data); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + } else { + ret = sdw_slave_transfer(mstr_bs->mstr, wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + } + + msleep(100); /* TBD: Remove this */ + + /* + * TBD: check whether we need to poll on + * mcp active bank bit to switch bank + */ + mstr_bs->active_bank = banktouse; + + if (!is_wait) { + kfree(mstr_bs->async_data.msg->buf); + kfree(mstr_bs->async_data.msg); + } + + +out: + + return ret; +} + +/* + * sdw_cfg_frmshp_bnkswtch_wait - returns Success + * -ETIMEDOUT - In case of timeout + * + * This function waits on completion of + * bank switch. + */ +int sdw_cfg_frmshp_bnkswtch_wait(struct sdw_bus *mstr_bs) +{ + unsigned long time_left; + struct sdw_master *mstr = mstr_bs->mstr; + + time_left = wait_for_completion_timeout( + &mstr_bs->async_data.xfer_complete, + 3000); + if (!time_left) { + dev_err(&mstr->dev, "Controller Timed out\n"); + sdw_unlock_mstr(mstr); + return -ETIMEDOUT; + } + kfree(mstr_bs->async_data.msg->buf); + kfree(mstr_bs->async_data.msg); + sdw_unlock_mstr(mstr); + return 0; +} + +/* + * sdw_config_bs_prms - returns Success + * -EINVAL - In case of error. + * + * + * This function performs master/slave transport + * params config, set SSP interval, set Clock + * frequency, enable channel. This API is called + * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API. + * + */ +int sdw_config_bs_prms(struct sdw_bus *sdw_mstr_bs, bool state_check) +{ + struct port_chn_en_state chn_en; + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL; + struct sdw_mstr_driver *ops; + int banktouse, ret = 0; + + list_for_each_entry(sdw_mstr_bs_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_bs_rt->mstr == NULL) + continue; + + /* + * Configure transport and port params + * for master and slave ports. 
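+ * The per-master sequence that follows is: transport/port params,
+ * SSP interval (currently the hard-coded SDW_DEFAULT_SSP), clock
+ * divider, then channel enable on the alternate bank.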
+ */ + ret = sdw_cfg_params_mstr_slv(sdw_mstr_bs, + sdw_mstr_bs_rt, state_check); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr_bs->mstr->dev, + "slave/master config params failed\n"); + return ret; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* Configure SSP */ + banktouse = sdw_mstr_bs->active_bank; + banktouse = !banktouse; + + /* + * TBD: Currently harcoded SSP interval, + * computed value to be taken from system_interval in + * bus data structure. + * Add error check. + */ + if (ops->mstr_ops->set_ssp_interval) + ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr, + SDW_DEFAULT_SSP, banktouse); + + /* + * Configure Clock + * TBD: Add error check + */ + if (ops->mstr_ops->set_clock_freq) + ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr, + sdw_mstr_bs->clk_div, banktouse); + + /* Enable channel on alternate bank for running streams */ + chn_en.is_activate = true; + chn_en.is_bank_sw = true; + ret = sdw_en_dis_mstr_slv_state + (sdw_mstr_bs, sdw_mstr_bs_rt, &chn_en); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr_bs->mstr->dev, + "Channel enable failed\n"); + return ret; + } + + } + + return 0; +} + +/* + * sdw_dis_chan - returns Success + * -EINVAL - In case of error. + * + * + * This function disables channel on alternate + * bank. This API is called from sdw_bus_calc_bw + * & sdw_bus_calc_bw_dis when channel on current + * bank is enabled. + * + */ +int sdw_dis_chan(struct sdw_bus *sdw_mstr_bs) +{ + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL; + struct port_chn_en_state chn_en; + int ret = 0; + + list_for_each_entry(sdw_mstr_bs_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_bs_rt->mstr == NULL) + continue; + + chn_en.is_activate = false; + chn_en.is_bank_sw = true; + ret = sdw_en_dis_mstr_slv_state(sdw_mstr_bs, + sdw_mstr_bs_rt, &chn_en); + if (ret < 0) + return ret; + } + + return 0; +} + + +/* + * sdw_cfg_slv_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare slave ports. 
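+ *
+ * Prepare sequence sketched below (simplified): read DPN_PrepareCtrl,
+ * update the channel bits and write it back, then read
+ * DPN_PrepareStatus, e.g.
+ *   prepare:   wbuf[0] = rbuf[0] | channel_mask;
+ *   unprepare: wbuf[0] = rbuf[0] & ~channel_mask;
+ * Slaves with a simplified channel-prepare state machine
+ * (prepare_ch not set in their DPn capabilities) skip these writes.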
+ */ +int sdw_cfg_slv_prep_unprep(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + bool prep) +{ + struct sdw_slave_driver *slv_ops = slv_rt_strm->slave->driver; + struct sdw_slv_capabilities *slv_cap = + &slv_rt_strm->slave->sdw_slv_cap; + struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = + slv_cap->sdw_dpn_cap; + + struct sdw_msg wr_msg, rd_msg, rd_msg1; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + u8 rbuf1[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* Read SDW_DPN_PREPARECTRL register */ + rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + rd_msg1.ssp_tag = 0x0; + rd_msg1.flag = SDW_MSG_FLAG_READ; + rd_msg1.len = 1; + rd_msg1.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg1.buf = rbuf1; + rd_msg1.addr_page1 = 0x0; + rd_msg1.addr_page2 = 0x0; + + + rd_msg1.addr = SDW_DPN_PREPARESTATUS + + (SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + if (prep) { /* PREPARE */ + + /* + * 1. slave port prepare_ch_pre + * --> callback + * --> handle_pre_port_prepare + */ + if (slv_ops->handle_pre_port_prepare) { + slv_ops->handle_pre_port_prepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + /* 2. slave port prepare --> to write */ + if (sdw_slv_dpn_cap->prepare_ch) { + + /* NON SIMPLIFIED CM, prepare required */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + /* + * TBD: poll for prepare interrupt bit + * before calling post_prepare + * 2. check capabilities if simplified + * CM no need to prepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * TBD: check on port ready, + * ideally we should check on prepare + * status for port_ready + */ + + /* wait for completion on port ready*/ + msleep(100); /* TBD: Remove this */ + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + } + + /* + * 3. slave port post pre + * --> callback + * --> handle_post_port_prepare + */ + if (slv_ops->handle_post_port_prepare) { + slv_ops->handle_post_port_prepare + (slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, banktouse); + } + + slv_rt_strm->rt_state = SDW_STATE_PREPARE_RT; + + } else { + /* UNPREPARE */ + /* + * 1. 
slave port unprepare_ch_pre + * --> callback + * --> handle_pre_port_prepare + */ + if (slv_ops->handle_pre_port_unprepare) { + slv_ops->handle_pre_port_unprepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + /* 2. slave port unprepare --> to write */ + if (sdw_slv_dpn_cap->prepare_ch) { + + /* NON SIMPLIFIED CM, unprepare required */ + + /* Read SDW_DPN_PREPARECTRL register */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask)); + + /* + * TBD: poll for prepare interrupt bit before + * calling post_prepare + * Does it apply for unprepare aswell? + * 2. check capabilities if simplified CM + * no need to unprepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + } + + /* + * 3. slave port post unpre + * --> callback + * --> handle_post_port_unprepare + */ + if (slv_ops->handle_post_port_unprepare) { + slv_ops->handle_post_port_unprepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + slv_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT; + } +out: + return ret; + +} + + +/* + * sdw_cfg_mstr_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare master ports. + */ +int sdw_cfg_mstr_prep_unprep(struct sdw_bus *mstr_bs, + struct sdw_mstr_runtime *mstr_rt_strm, + struct sdw_port_runtime *port_mstr_strm, + bool prep) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_prepare_ch prep_ch; + int ret = 0; + + prep_ch.num = port_mstr_strm->port_num; + prep_ch.ch_mask = port_mstr_strm->channel_mask; + prep_ch.prepare = prep; /* Prepare/Unprepare */ + + /* TBD: Bank configuration */ + + /* 1. Master port prepare_ch_pre */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 2. Master port prepare */ + if (ops->mstr_port_ops->dpn_port_prepare_ch) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 3. Master port prepare_ch_post */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_post) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_post + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + if (prep) + mstr_rt_strm->rt_state = SDW_STATE_PREPARE_RT; + else + mstr_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT; + + return 0; +} + + +/* + * sdw_prep_unprep_mstr_slv - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave prepare/unprepare + * port configuration API's, called from sdw_bus_calc_bw + * & sdw_bus_calc_bw_dis API's. 
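+ *
+ * Slave ports are prepared (or unprepared) first, then the master
+ * ports, mirroring the order of the two loops below.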
+ */ +int sdw_prep_unprep_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_runtime *sdw_rt, bool is_prep) +{ + struct sdw_slave_runtime *slv_rt_strm = NULL; + struct sdw_port_runtime *port_slv_strm, *port_mstr_strm; + struct sdw_mstr_runtime *mstr_rt_strm = NULL; + int ret = 0; + + list_for_each_entry(slv_rt_strm, + &sdw_rt->slv_rt_list, slave_sdw_node) { + + if (slv_rt_strm->slave == NULL) + break; + + list_for_each_entry(port_slv_strm, + &slv_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_slv_prep_unprep(sdw_mstr_bs, + slv_rt_strm, port_slv_strm, is_prep); + if (ret < 0) + return ret; + } + + } + + list_for_each_entry(mstr_rt_strm, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + if (mstr_rt_strm->mstr == NULL) + break; + + list_for_each_entry(port_mstr_strm, + &mstr_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_prep_unprep(sdw_mstr_bs, + mstr_rt_strm, port_mstr_strm, is_prep); + if (ret < 0) + return ret; + } + } + + return 0; +} + +struct sdw_bus *master_to_bus(struct sdw_master *mstr) +{ + struct sdw_bus *sdw_mstr_bs = NULL; + + list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) { + /* Match master structure pointer */ + if (sdw_mstr_bs->mstr != mstr) + continue; + return sdw_mstr_bs; + } + /* This should never happen, added to suppress warning */ + WARN_ON(1); + + return NULL; +} + +/* + * sdw_chk_strm_prms - returns Success + * -EINVAL - In case of error. + * + * + * This function performs all the required + * check such as isynchronous mode support, + * stream rates etc. This API is called + * from sdw_bus_calc_bw API. + * + */ +int sdw_chk_strm_prms(struct sdw_master_capabilities *sdw_mstr_cap, + struct sdw_stream_params *mstr_params, + struct sdw_stream_params *stream_params) +{ + /* Asynchronous mode not supported, return Error */ + if (((sdw_mstr_cap->base_clk_freq * 2) % mstr_params->rate) != 0) + return -EINVAL; + + /* Check for sampling frequency */ + if (stream_params->rate != mstr_params->rate) + return -EINVAL; + + return 0; +} + +/* + * sdw_compute_bs_prms - returns Success + * -EINVAL - In case of error. + * + * + * This function performs master/slave transport + * params computation. This API is called + * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API. + * + */ +int sdw_compute_bs_prms(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt) +{ + + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + int ret = 0, frame_interval = 0; + + sdw_mstr_cap = &sdw_mstr->mstr_capabilities; + + ret = sdw_get_clock_frmshp(sdw_mstr_bs, &frame_interval, + sdw_mstr_rt); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "clock/frameshape config failed\n"); + return ret; + } + + /* + * TBD: find right place to run sorting on + * master rt_list. Below sorting is done based on + * bps from low to high, that means PDM streams + * will be placed before PCM. + */ + + /* + * TBD Should we also perform sorting based on rate + * for PCM stream check. if yes then how?? + * creating two different list. 
+ */ + + /* Compute system interval */ + ret = sdw_compute_sys_interval(sdw_mstr_bs, sdw_mstr_cap, + frame_interval); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "compute system interval failed\n"); + return ret; + } + + /* Compute hstart/hstop */ + ret = sdw_compute_hstart_hstop(sdw_mstr_bs); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "compute hstart/hstop failed\n"); + return ret; + } + + return 0; +} + +/* + * sdw_bs_pre_bnkswtch_post - returns Success + * -EINVAL or ret value - In case of error. + * + * This API performs on of the following operation + * based on bs_state value: + * pre-activate port + * bank switch operation + * post-activate port + * bankswitch wait operation + * disable channel operation + */ +int sdw_bs_pre_bnkswtch_post(struct sdw_runtime *sdw_rt, int bs_state) +{ + struct sdw_mstr_runtime *mstr_rt_act = NULL; + struct sdw_bus *mstr_bs_act = NULL; + struct sdw_master_port_ops *ops; + int ret = 0; + + list_for_each_entry(mstr_rt_act, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + + if (mstr_rt_act->mstr == NULL) + break; + + /* Get bus structure for master */ + mstr_bs_act = master_to_bus(mstr_rt_act->mstr); + ops = mstr_bs_act->mstr->driver->mstr_port_ops; + + /* + * Note that current all the operations + * of pre->bankswitch->post->wait->disable + * are performed sequentially.The switch case + * is kept in order for code to scale where + * pre->bankswitch->post->wait->disable are + * not sequential and called from different + * instances. + */ + switch (bs_state) { + + case SDW_UPDATE_BS_PRE: + /* Pre activate ports */ + if (ops->dpn_port_activate_ch_pre) { + ret = ops->dpn_port_activate_ch_pre + (mstr_bs_act->mstr, NULL, 0); + if (ret < 0) + return ret; + } + break; + case SDW_UPDATE_BS_BNKSWTCH: + /* Configure Frame Shape/Switch Bank */ + ret = sdw_cfg_frmshp_bnkswtch(mstr_bs_act, true); + if (ret < 0) + return ret; + break; + case SDW_UPDATE_BS_POST: + /* Post activate ports */ + if (ops->dpn_port_activate_ch_post) { + ret = ops->dpn_port_activate_ch_post + (mstr_bs_act->mstr, NULL, 0); + if (ret < 0) + return ret; + } + break; + case SDW_UPDATE_BS_BNKSWTCH_WAIT: + /* Post Bankswitch wait operation */ + ret = sdw_cfg_frmshp_bnkswtch_wait(mstr_bs_act); + if (ret < 0) + return ret; + break; + case SDW_UPDATE_BS_DIS_CHN: + /* Disable channel on previous bank */ + ret = sdw_dis_chan(mstr_bs_act); + if (ret < 0) + return ret; + break; + default: + return -EINVAL; + break; + } + } + + return ret; + +} + +/* + * sdw_update_bs_prms - returns Success + * -EINVAL - In case of error. + * + * Once all the parameters are configured + * for ports, this function performs bankswitch + * where all the new configured parameters + * gets in effect. This function is called + * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API. + * This function also disables all the channels + * enabled on previous bank after bankswitch. + */ +int sdw_update_bs_prms(struct sdw_bus *sdw_mstr_bs, + struct sdw_runtime *sdw_rt, + int last_node) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + int ret = 0; + + /* + * Optimization scope. + * Check whether we can assign function pointer + * link sync value is 1, and call that function + * if its not NULL. 
+ */ + if ((last_node) && (sdw_mstr->link_sync_mask)) { + + /* Perform pre-activate ports */ + ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_PRE); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Pre-activate port failed\n"); + return ret; + } + + /* Perform bankswitch operation*/ + ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_BNKSWTCH); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Bank Switch operation failed\n"); + return ret; + } + + /* Perform post-activate ports */ + ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_POST); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Pre-activate port failed\n"); + return ret; + } + + /* Perform bankswitch post wait opearation */ + ret = sdw_bs_pre_bnkswtch_post(sdw_rt, + SDW_UPDATE_BS_BNKSWTCH_WAIT); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "BnkSwtch wait op failed\n"); + return ret; + } + + /* Disable channels on previous bank */ + ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_DIS_CHN); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Channel disabled failed\n"); + return ret; + } + + } + + if (!sdw_mstr->link_sync_mask) { + + /* Configure Frame Shape/Switch Bank */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "bank switch failed\n"); + return ret; + } + + /* Disable all channels enabled on previous bank */ + ret = sdw_dis_chan(sdw_mstr_bs); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel disabled failed\n"); + return ret; + } + } + + return ret; +} + +/** + * sdw_chk_last_node - returns True or false + * + * This function returns true in case of last node + * else returns false. + */ +bool sdw_chk_last_node(struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + struct sdw_mstr_runtime *last_rt = NULL; + + last_rt = list_last_entry(&sdw_rt->mstr_rt_list, + struct sdw_mstr_runtime, mstr_sdw_node); + if (sdw_mstr_rt == last_rt) + return true; + else + return false; + +} + +/** + * sdw_unprepare_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to unprepare ports and does recomputation of + * bus parameters. + */ +int sdw_unprepare_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_stream_params *mstr_params; + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + mstr_params = &sdw_mstr_rt->stream_params; + + /* 1. 
Un-prepare master and slave port */ + ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs, + sdw_rt, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Ch unprep failed\n"); + return ret; + } + + /* change stream state to unprepare */ + if (last_node) + sdw_rt->stream_state = + SDW_STATE_UNPREPARE_STREAM; + + /* + * Calculate new bandwidth, frame size + * and total BW required for master controller + */ + sdw_mstr_rt->stream_bw = mstr_params->rate * + mstr_params->channel_count * mstr_params->bps; + sdw_mstr_bs->bandwidth -= sdw_mstr_rt->stream_bw; + + /* Something went wrong in bandwidth calulation */ + if (sdw_mstr_bs->bandwidth < 0) { + dev_err(&sdw_mstr->dev, "BW calculation failed\n"); + return -EINVAL; + } + + if (!sdw_mstr_bs->bandwidth) { + /* + * Last stream on master should + * return successfully + */ + sdw_mstr_bs->system_interval = 0; + sdw_mstr_bs->stream_interval = 0; + sdw_mstr_bs->frame_freq = 0; + sdw_mstr_bs->row = 0; + sdw_mstr_bs->col = 0; + return 0; + } + + /* Compute transport params */ + ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Params computation failed\n"); + return -EINVAL; + } + + /* Configure bus params */ + ret = sdw_config_bs_prms(sdw_mstr_bs, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** + * sdw_disable_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to disable ports. + */ +int sdw_disable_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_stream_params *mstr_params; + bool last_node = false; + int ret = 0; + + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + mstr_params = &sdw_mstr_rt->stream_params; + + /* Lets do disabling of port for stream to be freed */ + ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Ch dis failed\n"); + return ret; + } + + /* Change stream state to disable */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_DISABLE_STREAM; + + ret = sdw_config_bs_prms(sdw_mstr_bs, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** + * sdw_enable_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to enable ports. 
+ */ +int sdw_enable_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + + ret = sdw_config_bs_prms(sdw_mstr_bs, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* Enable new port for master and slave */ + ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel enable failed\n"); + return ret; + } + + /* change stream state to enable */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_ENABLE_STREAM; + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** + * sdw_prepare_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to prepare ports and does computation of + * bus parameters. + */ +int sdw_prepare_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + struct sdw_stream_params *stream_params = &sdw_rt->stream_params; + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_stream_params *mstr_params; + + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + mstr_params = &sdw_mstr_rt->stream_params; + + /* + * check all the stream parameters received + * Check for isochronous mode, sample rate etc + */ + ret = sdw_chk_strm_prms(sdw_mstr_cap, mstr_params, + stream_params); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Stream param check failed\n"); + return -EINVAL; + } + + /* + * Calculate stream bandwidth, frame size and + * total BW required for master controller + */ + sdw_mstr_rt->stream_bw = mstr_params->rate * + mstr_params->channel_count * mstr_params->bps; + sdw_mstr_bs->bandwidth += sdw_mstr_rt->stream_bw; + + /* Compute transport params */ + ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Params computation failed\n"); + return -EINVAL; + } + + /* Configure bus parameters */ + ret = sdw_config_bs_prms(sdw_mstr_bs, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport param config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + /* Prepare new port for master and slave */ + ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs, sdw_rt, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel prepare failed\n"); + return ret; + } + + /* change stream state to prepare */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_PREPARE_STREAM; + + + return ret; +} + +/** + * sdw_pre_en_dis_unprep_op - returns Success + * -EINVAL - In case of error. + * + * This function is called by sdw_bus_calc_bw + * and sdw_bus_calc_bw_dis to prepare, enable, + * unprepare and disable ports. 
Based on state + * value, individual APIs are called. + */ +int sdw_pre_en_dis_unprep_op(struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt, int state) +{ + struct sdw_master *sdw_mstr = NULL; + struct sdw_bus *sdw_mstr_bs = NULL; + int ret = 0; + + /* Get bus structure for master */ + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + sdw_mstr = sdw_mstr_bs->mstr; + + /* + * All data structures required available, + * lets calculate BW for master controller + */ + + switch (state) { + + case SDW_STATE_PREPARE_STREAM: /* Prepare */ + ret = sdw_prepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + case SDW_STATE_ENABLE_STREAM: /* Enable */ + ret = sdw_enable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + case SDW_STATE_DISABLE_STREAM: /* Disable */ + ret = sdw_disable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + case SDW_STATE_UNPREPARE_STREAM: /* UnPrepare */ + ret = sdw_unprepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt); + break; + default: + ret = -EINVAL; + break; + + } + + return ret; +} + +/** + * sdw_bus_calc_bw - returns Success + * -EINVAL - In case of error. + * + * + * This function is called from sdw_prepare_and_enable + * whenever new stream is processed. The function based + * on the stream associated with controller calculates + * required bandwidth, clock, frameshape, computes + * all transport params for a given port, enable channel + * & perform bankswitch. + */ +int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable) +{ + + struct sdw_runtime *sdw_rt = stream_tag->sdw_rt; + struct sdw_mstr_runtime *sdw_mstr_rt = NULL; + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_master *sdw_mstr = NULL; + int ret = 0; + + + /* + * TBD: check for mstr_rt is in configured state or not + * If yes, then configure masters as well + * If no, then do not configure/enable master related parameters + */ + + /* BW calulation for active master controller for given stream tag */ + list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + if ((sdw_rt->stream_state != SDW_STATE_CONFIG_STREAM) && + (sdw_rt->stream_state != SDW_STATE_UNPREPARE_STREAM)) + goto enable_stream; + + /* Get bus structure for master */ + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + sdw_mstr = sdw_mstr_bs->mstr; + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_PREPARE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Prepare Operation failed\n"); + return -EINVAL; + } + } + +enable_stream: + + list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + + if (sdw_mstr_rt->mstr == NULL) + break; + + if ((!enable) || + (sdw_rt->stream_state != SDW_STATE_PREPARE_STREAM)) + return 0; + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + sdw_mstr = sdw_mstr_bs->mstr; + + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_ENABLE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Enable Operation failed\n"); + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_calc_bw); + +/** + * sdw_bus_calc_bw_dis - returns Success + * -EINVAL - In case of error. + * + * + * This function is called from sdw_disable_and_unprepare + * whenever stream is ended. The function based disables/ + * unprepare port/channel of associated stream and computes + * required bandwidth, clock, frameshape, computes + * all transport params for a given port, enable channel + * & perform bankswitch for remaining streams on given + * controller. 
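+ *
+ * Typical call order (usage note): sdw_bus_calc_bw() runs on the
+ * prepare/enable path and this function on the disable/unprepare
+ * path, so a stream normally moves through
+ * PREPARE -> ENABLE -> DISABLE -> UNPREPARE.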
+ */ +int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare) +{ + struct sdw_runtime *sdw_rt = stream_tag->sdw_rt; + struct sdw_mstr_runtime *sdw_mstr_rt = NULL; + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_master *sdw_mstr = NULL; + int ret = 0; + + + /* BW calulation for active master controller for given stream tag */ + list_for_each_entry(sdw_mstr_rt, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + + if (sdw_mstr_rt->mstr == NULL) + break; + + if (sdw_rt->stream_state != SDW_STATE_ENABLE_STREAM) + goto unprepare_stream; + + /* Get bus structure for master */ + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + sdw_mstr = sdw_mstr_bs->mstr; + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_DISABLE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Disable Operation failed\n"); + return -EINVAL; + } + } + +unprepare_stream: + list_for_each_entry(sdw_mstr_rt, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (sdw_mstr_rt->mstr == NULL) + break; + + if ((!unprepare) || + (sdw_rt->stream_state != SDW_STATE_DISABLE_STREAM)) + return 0; + + sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr); + sdw_mstr = sdw_mstr_bs->mstr; + ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt, + SDW_STATE_UNPREPARE_STREAM); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Unprepare Operation failed\n"); + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_calc_bw_dis); + +/* + * sdw_slv_dp0_en_dis - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable Slave DP0 channels. + */ +int sdw_slv_dp0_en_dis(struct sdw_bus *mstr_bs, + bool is_enable, u8 slv_number) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * + 0x0)); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + if (is_enable) + wbuf[0] = (rbuf[0] | 0x1); + else + wbuf[0] = (rbuf[0] & ~(0x1)); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + /* This is just status read, can be removed later */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + +out: + return ret; + +} + + +/* + * sdw_mstr_dp0_act_dis - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable Master DP0 channels. 
+ */ +int sdw_mstr_dp0_act_dis(struct sdw_bus *mstr_bs, bool is_enable) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = 0; + activate_ch.ch_mask = 0x1; + activate_ch.activate = is_enable; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 1. Master port enable_ch_pre */ + if (ops->mstr_port_ops->dpn_port_activate_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_activate_ch_pre + (mstr_bs->mstr, &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + /* 2. Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Master port enable_ch_post */ + if (ops->mstr_port_ops->dpn_port_activate_ch_post) { + ret = ops->mstr_port_ops->dpn_port_activate_ch_post + (mstr_bs->mstr, &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_slv_dp0_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare Slave DP0. + */ +int sdw_slv_dp0_prep_unprep(struct sdw_bus *mstr_bs, + u8 slv_number, bool prepare) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* Read SDW_DPN_PREPARECTRL register */ + rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * 0x0); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + if (prepare) + wbuf[0] = (rbuf[0] | 0x1); + else + wbuf[0] = (rbuf[0] & ~(0x1)); + + /* + * TBD: poll for prepare interrupt bit + * before calling post_prepare + * 2. check capabilities if simplified + * CM no need to prepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * Sleep for 100ms. + * TODO: check on check on prepare status for port_ready + */ + msleep(100); + +out: + return ret; + +} + +/* + * sdw_mstr_dp0_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare Master DP0. + */ +int sdw_mstr_dp0_prep_unprep(struct sdw_bus *mstr_bs, + bool prep) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_prepare_ch prep_ch; + int ret = 0; + + prep_ch.num = 0x0; + prep_ch.ch_mask = 0x1; + prep_ch.prepare = prep; /* Prepare/Unprepare */ + + /* 1. Master port prepare_ch_pre */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 2. 
Master port prepare */ + if (ops->mstr_port_ops->dpn_port_prepare_ch) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 3. Master port prepare_ch_post */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_post) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_post + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + return 0; +} + +static int sdw_bra_config_ops(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + struct sdw_mstr_driver *ops; + int ret, banktouse; + + /* configure Master transport params */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master xport params config failed\n"); + return ret; + } + + /* configure Slave transport params */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_params, + p_params, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave xport params config failed\n"); + return ret; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* Configure SSP */ + banktouse = sdw_mstr_bs->active_bank; + banktouse = !banktouse; + + if (ops->mstr_ops->set_ssp_interval) { + ret = ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr, + 24, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: SSP interval config failed\n"); + return ret; + } + } + + /* Configure Clock */ + if (ops->mstr_ops->set_clock_freq) { + ret = ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr, + sdw_mstr_bs->clk_div, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Clock config failed\n"); + return ret; + } + } + + return 0; +} + +static int sdw_bra_xport_config_enable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + int ret; + + /* Prepare sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + * 3. Try to use existing prep_unprep API both for master + * and slave. 
+ */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, true); + + /* Prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master prepare failed\n"); + return ret; + } + + /* Enable sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Enable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, true, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 enable failed\n"); + return ret; + } + + /* Enable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 enable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + return 0; +} + +static int sdw_bra_xport_config_disable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block) +{ + int ret; + + /* Disable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, false, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 disable failed\n"); + return ret; + } + + /* Disable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 disable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * de-prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support de-prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, false); + + /* De-prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master de-prepare failed\n"); + return ret; + } + + return 0; +} + +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable) +{ + struct sdw_transport_params t_params; + struct sdw_port_params p_params; + int ret; + + /* TODO: + * compute transport parameters based on current clock and + * frameshape. need to check how algorithm should be designed + * for BRA for computing clock, frameshape, SSP and transport params. 
+ */ + + /* Transport Parameters */ + t_params.num = 0x0; /* DP 0 */ + t_params.blockpackingmode = 0x0; + t_params.blockgroupcontrol_valid = false; + t_params.blockgroupcontrol = 0x0; + t_params.lanecontrol = 0; + t_params.sample_interval = 10; + + t_params.hstart = 7; + t_params.hstop = 9; + t_params.offset1 = 0; + t_params.offset2 = 0; + + /* Port Parameters */ + p_params.num = 0x0; /* DP 0 */ + + /* Isochronous Mode */ + p_params.port_flow_mode = 0x0; + + /* Normal Mode */ + p_params.port_data_mode = 0x0; + + /* Word length */ + p_params.word_length = 3; + + /* Frameshape and clock params */ + sdw_mstr_bs->clk_div = 1; + sdw_mstr_bs->col = 10; + sdw_mstr_bs->row = 80; + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) + sdw_mstr_bs->bandwidth = 9.6 * 1000 * 1000; +#else + sdw_mstr_bs->bandwidth = 12 * 1000 * 1000; +#endif + + if (enable) { + ret = sdw_bra_xport_config_enable(sdw_mstr_bs, block, + &t_params, &p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params config failed\n"); + return ret; + } + + } else { + ret = sdw_bra_xport_config_disable(sdw_mstr_bs, block); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params de-config failed\n"); + return ret; + } + } + + return 0; +} diff --git a/drivers/sdw/sdw_cnl.c b/drivers/sdw/sdw_cnl.c new file mode 100644 index 000000000000..2e9773571b9b --- /dev/null +++ b/drivers/sdw/sdw_cnl.c @@ -0,0 +1,2482 @@ +/* + * sdw_cnl.c - Intel SoundWire master controller driver implementation. + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_cnl_priv.h" + +static inline int cnl_sdw_reg_readl(void __iomem *base, int offset) +{ + int value; + + value = readl(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writel(void __iomem *base, int offset, int value) +{ + writel(value, base + offset); +} + +static inline u16 cnl_sdw_reg_readw(void __iomem *base, int offset) +{ + int value; + + value = readw(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writew(void __iomem *base, int offset, u16 value) +{ + writew(value, base + offset); +} + +static inline int cnl_sdw_port_reg_readl(void __iomem *base, int offset, + int port_num) +{ + return cnl_sdw_reg_readl(base, offset + port_num * 128); +} + +static inline void cnl_sdw_port_reg_writel(u32 __iomem *base, int offset, + int port_num, int value) +{ + return cnl_sdw_reg_writel(base, offset + port_num * 128, value); +} + +struct cnl_sdw_async_msg { + struct completion *async_xfer_complete; + struct sdw_msg *msg; + int length; +}; + +struct cnl_sdw { + struct cnl_sdw_data data; + struct sdw_master *mstr; + irqreturn_t (*thread)(int irq, void *context); + void *thread_context; + struct completion tx_complete; + struct cnl_sdw_port port[CNL_SDW_MAX_PORTS]; + int num_pcm_streams; + struct cnl_sdw_pdi_stream *pcm_streams; + int num_in_pcm_streams; + struct cnl_sdw_pdi_stream *in_pcm_streams; + int num_out_pcm_streams; + struct cnl_sdw_pdi_stream *out_pcm_streams; + int num_pdm_streams; + struct cnl_sdw_pdi_stream *pdm_streams; + int num_in_pdm_streams; + struct cnl_sdw_pdi_stream *in_pdm_streams; + int num_out_pdm_streams; + struct cnl_sdw_pdi_stream *out_pdm_streams; + struct mutex stream_lock; + spinlock_t ctrl_lock; + struct cnl_sdw_async_msg async_msg; + u32 response_buf[0x80]; + bool sdw_link_status; + +}; + +static int sdw_power_up_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Try 10 times before timing out */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = (CNL_LCTL_SPA_MASK << (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control |= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Up\n", + data->inst_id); + sdw->sdw_link_status = 1; + return 0; + } + dev_err(&mstr->dev, "Failed to Power Up the SDW ctrl %d\n", + data->inst_id); + return -EIO; +} + +static void sdw_power_down_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Retry 10 times before giving up */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = ~(CNL_LCTL_SPA_MASK 
<< (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control &= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) + break; + timeout--; + /* Wait for 20ms before each retry */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Down\n", + data->inst_id); + sdw->sdw_link_status = 0; + return; + } + dev_err(&mstr->dev, "Failed to Power Down the SDW ctrl %d\n", + data->inst_id); +} + +static void sdw_init_phyctrl(struct cnl_sdw *sdw) +{ + /* TODO: Initialize based on hardware requirement */ + +} + +static void sdw_switch_to_mip(struct cnl_sdw *sdw) +{ + u16 ioctl; + u16 act = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + int act_offset = SDW_CNL_CTMCTL + (data->inst_id * + SDW_CNL_CTMCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + + ioctl &= ~(CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT); + ioctl &= ~(CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT); + + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + act |= 0x1 << CNL_CTMCTL_DOAIS_SHIFT; + act |= CNL_CTMCTL_DACTQE_MASK << CNL_CTMCTL_DACTQE_SHIFT; + act |= CNL_CTMCTL_DODS_MASK << CNL_CTMCTL_DODS_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, act_offset, act); +} + +static void sdw_switch_to_glue(struct cnl_sdw *sdw) +{ + u16 ioctl; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + ioctl |= CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static void sdw_init_shim(struct cnl_sdw *sdw) +{ + u16 ioctl = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_WPDD_MASK << CNL_IOCTL_WPDD_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static int sdw_config_update(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + struct sdw_master *mstr = sdw->mstr; + int sync_reg, syncgo_mask; + volatile int config_update = 0; + volatile int sync_update = 0; + /* Try 10 times before giving up on configuration update */ + int timeout = 10; + int config_updated = 0; + + config_update |= 
MCP_CONFIGUPDATE_CONFIGUPDATE_MASK << + MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT; + /* Bit is self-cleared when configuration gets updated. */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIGUPDATE, + config_update); + + /* + * Set SYNCGO bit for Master(s) running in aggregated mode + * (MMModeEN = 1). This action causes all gSyncs of all Master IPs + * to be unmasked and asserted at the currently active gSync rate. + * The initialization-pending Master IP SoundWire bus clock will + * start up synchronizing to gSync, leading to bus reset entry, + * subsequent exit, and 1st Frame generation aligning to gSync. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + syncgo_mask = (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, + SDW_CNL_SYNC); + if ((sync_update & syncgo_mask) == 0) + break; + + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & syncgo_mask) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + + /* Reset timeout */ + timeout = 10; + } + + /* Wait for config update bit to be self cleared */ + do { + config_update = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONFIGUPDATE); + if ((config_update & + MCP_CONFIGUPDATE_CONFIGUPDATE_MASK) == 0) { + config_updated = 1; + break; + } + timeout--; + /* Wait for 20ms between each try */ + msleep(20); + + } while (timeout != 0); + if (!config_updated) { + dev_err(&mstr->dev, "SoundWire update failed\n"); + return -EIO; + } + return 0; +} + +static void sdw_enable_interrupt(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int int_mask = 0; + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK0, + MCP_SLAVEINTMASK0_MASK); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK1, + MCP_SLAVEINTMASK1_MASK); + /* Enable slave interrupt mask */ + int_mask |= MCP_INTMASK_SLAVERESERVED_MASK << + MCP_INTMASK_SLAVERESERVED_SHIFT; + int_mask |= MCP_INTMASK_SLAVEALERT_MASK << + MCP_INTMASK_SLAVEALERT_SHIFT; + int_mask |= MCP_INTMASK_SLAVEATTACHED_MASK << + MCP_INTMASK_SLAVEATTACHED_SHIFT; + int_mask |= MCP_INTMASK_SLAVENOTATTACHED_MASK << + MCP_INTMASK_SLAVENOTATTACHED_SHIFT; + int_mask |= MCP_INTMASK_CONTROLBUSCLASH_MASK << + MCP_INTMASK_CONTROLBUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_DATABUSCLASH_MASK << + MCP_INTMASK_DATABUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_RXWL_MASK << + MCP_INTMASK_RXWL_SHIFT; + int_mask |= MCP_INTMASK_IRQEN_MASK << + MCP_INTMASK_IRQEN_SHIFT; + int_mask |= MCP_INTMASK_DPPDIINT_MASK << + MCP_INTMASK_DPPDIINT_SHIFT; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTMASK, int_mask); +} + +static int sdw_pcm_pdi_init(struct cnl_sdw *sdw) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pcm_cap; + int pcm_cap_offset = SDW_CNL_PCMSCAP + (data->inst_id * + SDW_CNL_PCMSCAP_REG_OFFSET); + int ch_cnt_offset; + int i; + + pcm_cap = cnl_sdw_reg_readw(data->sdw_shim, pcm_cap_offset); + sdw->num_pcm_streams = (pcm_cap >> CNL_PCMSCAP_BSS_SHIFT) & + CNL_PCMSCAP_BSS_MASK; + dev_info(&mstr->dev, "Number of Bidirectional PCM stream = %d\n", + sdw->num_pcm_streams); + sdw->pcm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pcm_streams * sizeof(struct cnl_sdw_pdi_stream), + 
GFP_KERNEL); + if (!sdw->pcm_streams) + return -ENOMEM; + /* Two of the PCM streams are reserved for bulk transfers */ + sdw->pcm_streams -= SDW_CNL_PCM_PDI_NUM_OFFSET; + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < sdw->num_pcm_streams; i++) { + ch_cnt_offset = SDW_CNL_PCMSCHC + + (data->inst_id * SDW_CNL_PCMSCHC_REG_OFFSET) + + ((i + SDW_CNL_PCM_PDI_NUM_OFFSET) * 0x2); + + sdw->pcm_streams[i].ch_cnt = cnl_sdw_reg_readw(data->sdw_shim, + ch_cnt_offset); + /* Zero based value in register */ + sdw->pcm_streams[i].ch_cnt++; + sdw->pcm_streams[i].pdi_num = i; + sdw->pcm_streams[i].allocated = false; + dev_info(&mstr->dev, "CH Count for stream %d is %d\n", + i, sdw->pcm_streams[i].ch_cnt); + } + return 0; +} + +static int sdw_pdm_pdi_init(struct cnl_sdw *sdw) +{ + int i; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pdm_cap, pdm_ch_count, total_pdm_streams; + int pdm_cap_offset = SDW_CNL_PDMSCAP + + (data->inst_id * SDW_CNL_PDMSCAP_REG_OFFSET); + pdm_cap = cnl_sdw_reg_readw(data->sdw_shim, pdm_cap_offset); + sdw->num_pdm_streams = (pdm_cap >> CNL_PDMSCAP_BSS_SHIFT) & + CNL_PDMSCAP_BSS_MASK; + + sdw->pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->pdm_streams) + return -ENOMEM; + + sdw->num_in_pdm_streams = (pdm_cap >> CNL_PDMSCAP_ISS_SHIFT) & + CNL_PDMSCAP_ISS_MASK; + + sdw->in_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_in_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + + if (!sdw->in_pdm_streams) + return -ENOMEM; + + sdw->num_out_pdm_streams = (pdm_cap >> CNL_PDMSCAP_OSS_SHIFT) & + CNL_PDMSCAP_OSS_MASK; + /* Zero based value in register */ + sdw->out_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_out_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->out_pdm_streams) + return -ENOMEM; + + total_pdm_streams = sdw->num_pdm_streams + + sdw->num_in_pdm_streams + + sdw->num_out_pdm_streams; + + pdm_ch_count = (pdm_cap >> CNL_PDMSCAP_CPSS_SHIFT) & + CNL_PDMSCAP_CPSS_MASK; + for (i = 0; i < sdw->num_pdm_streams; i++) { + sdw->pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_in_pdm_streams; i++) { + sdw->in_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->in_pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->in_pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_out_pdm_streams; i++) { + sdw->out_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->out_pdm_streams[i].pdi_num = + i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->out_pdm_streams[i].allocated = false; + } + return 0; +} + +static int sdw_port_pdi_init(struct cnl_sdw *sdw) +{ + int i, ret = 0; + + for (i = 0; i < CNL_SDW_MAX_PORTS; i++) { + sdw->port[i].port_num = i; + sdw->port[i].allocated = false; + } + ret = sdw_pcm_pdi_init(sdw); + if (ret) + return ret; + ret = sdw_pdm_pdi_init(sdw); + + return ret; +} + +static int sdw_init(struct cnl_sdw *sdw, bool is_first_init) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int mcp_config, mcp_control, sync_reg, mcp_clockctrl; + volatile int sync_update = 0; + int timeout = 10; /* Try 10 times before timing out */ + int ret = 0; + + /* Power up the link controller */ + ret = sdw_power_up_link(sdw); + if (ret) + return ret; + + /* Initialize the IO control registers */ + sdw_init_shim(sdw); + + /* Switch the ownership to Master IP from glue logic */ + 
sdw_switch_to_mip(sdw); + + /* Set SyncPRD period */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (SDW_CNL_DEFAULT_SYNC_PERIOD << CNL_SYNC_SYNCPRD_SHIFT); + + /* Set SyncPU bit */ + sync_reg |= (0x1 << CNL_SYNC_SYNCCPU_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) == 0) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) != 0) { + dev_err(&mstr->dev, "Fail to set sync period\n"); + return -EINVAL; + } + + /* + * Set CMDSYNC bit based on Master ID + * Note that this bit is set only for the Master which will be + * running in aggregated mode (MMModeEN = 1). By doing + * this the gSync to Master IP to be masked inactive. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + /* Set clock divider to default value in default bank */ + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + mcp_clockctrl |= SDW_CNL_DEFAULT_CLK_DIVIDER; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CLOCKCTRL0, + mcp_clockctrl); + + /* Set the Frame shape init to default value */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FRAMESHAPEINIT, + SDW_CNL_DEFAULT_FRAME_SHAPE); + + + /* Set the SSP interval to default value for both banks */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL0, + SDW_CNL_DEFAULT_SSP_INTERVAL); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL1, + SDW_CNL_DEFAULT_SSP_INTERVAL); + + /* Set command acceptance mode. This is required because when + * Master broadcasts the clock_stop command to slaves, slaves + * might be already suspended, so this return NO ACK, in that + * case also master should go to clock stop mode. + */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + mcp_control |= (MCP_CONTROL_CMDACCEPTMODE_MASK << + MCP_CONTROL_CMDACCEPTMODE_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + /* Set Max cmd retry to 15 times */ + mcp_config |= (CNL_SDW_MAX_CMD_RETRIES << + MCP_CONFIG_MAXCMDRETRY_SHIFT); + + /* Set Ping request to ping delay to 15 frames. 
+ * Spec supports 32 max frames + */ + mcp_config |= (CNL_SDW_MAX_PREQ_DELAY << + MCP_CONFIG_MAXPREQDELAY_SHIFT); + + /* If master is synchronized to some other master set Multimode */ + if (mstr->link_sync_mask) { + mcp_config |= (MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config |= (MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } else { + mcp_config &= ~(MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config &= ~(MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } + + /* Disable automatic bus release */ + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + /* Disable sniffer mode now */ + mcp_config &= ~(MCP_CONFIG_SNIFFEREN_MASK << + MCP_CONFIG_SNIFFEREN_SHIFT); + + /* Set the command mode for Tx and Rx command */ + mcp_config &= ~(MCP_CONFIG_CMDMODE_MASK << + MCP_CONFIG_CMDMODE_SHIFT); + + /* Set operation mode to normal */ + mcp_config &= ~(MCP_CONFIG_OPERATIONMODE_MASK << + MCP_CONFIG_OPERATIONMODE_SHIFT); + mcp_config |= ((MCP_CONFIG_OPERATIONMODE_NORMAL & + MCP_CONFIG_OPERATIONMODE_MASK) << + MCP_CONFIG_OPERATIONMODE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + + /* Initialize the phy control registers. */ + sdw_init_phyctrl(sdw); + + if (is_first_init) { + /* Initlaize the ports */ + ret = sdw_port_pdi_init(sdw); + if (ret) { + dev_err(&mstr->dev, "SoundWire controller init failed %d\n", + data->inst_id); + sdw_power_down_link(sdw); + return ret; + } + } + + /* Lastly enable interrupts */ + sdw_enable_interrupt(sdw); + + /* Update soundwire configuration */ + return sdw_config_update(sdw); +} + +static int sdw_alloc_pcm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, + enum sdw_data_direction direction) +{ + int num_pcm_streams, pdi_ch_map = 0, stream_id; + struct cnl_sdw_pdi_stream *stream, *pdi_stream; + unsigned int i; + unsigned int ch_map_offset, port_ctrl_offset, pdi_config_offset; + struct sdw_master *mstr = sdw->mstr; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + unsigned int stream_config; + + /* Currently PCM supports only bi-directional streams only */ + num_pcm_streams = sdw->num_pcm_streams; + stream = sdw->pcm_streams; + + mutex_lock(&sdw->stream_lock); + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < num_pcm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) { + dev_err(&mstr->dev, "Unable to allocate stream for PCM\n"); + return -EINVAL; + } + pdi_stream = port->pdi_stream; + /* We didnt get enough PDI streams, so free the allocated + * PDI streams. 
Free the port as well and return with error + */ + pdi_stream->l_ch_num = 0; + pdi_stream->h_ch_num = ch_cnt - 1; + ch_map_offset = SDW_CNL_PCMSCHM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr) + + (SDW_PCM_STRM_START_INDEX * pdi_stream->pdi_num); + if (port->direction == SDW_DATA_DIR_IN) + pdi_ch_map |= (CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + else + pdi_ch_map &= ~(CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + /* TODO: Remove this hardcoding */ + stream_id = mstr->nr * 16 + pdi_stream->pdi_num + 5; + pdi_stream->sdw_pdi_num = stream_id; + pdi_ch_map |= (stream_id & CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + pdi_ch_map |= (pdi_stream->l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + pdi_ch_map |= (0xF & CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + cnl_sdw_reg_writew(sdw->data.sdw_shim, ch_map_offset, + pdi_ch_map); + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (pdi_stream->pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET)); + stream_config |= (CNL_STRMZCFG_DMAT_VAL & CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + stream_config |= ((ch_cnt - 1) & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + cnl_sdw_reg_writel(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET), + stream_config); + return 0; +} + +static int sdw_alloc_pdm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, int direction) +{ + int num_pdm_streams; + struct cnl_sdw_pdi_stream *stream; + int i; + unsigned int port_ctrl_offset, pdi_config_offset; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + + /* Currently PDM supports either Input or Output Streams */ + if (direction == SDW_DATA_DIR_IN) { + num_pdm_streams = sdw->num_in_pdm_streams; + stream = sdw->in_pdm_streams; + } else { + num_pdm_streams = sdw->num_out_pdm_streams; + stream = sdw->out_pdm_streams; + } + mutex_lock(&sdw->stream_lock); + for (i = 0; i < num_pdm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) + return -EINVAL; + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 
1;
+	pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT);
+	/* TODO: Remove below hardcodings */
+	pdi_config_offset = (SDW_CNL_PDINCONFIG0 + (stream[i].pdi_num * 16));
+	cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config);
+
+	return 0;
+}
+
+struct cnl_sdw_port *cnl_sdw_alloc_port(struct sdw_master *mstr, int ch_count,
+				enum sdw_data_direction direction,
+				enum cnl_sdw_pdi_stream_type stream_type)
+{
+	struct cnl_sdw *sdw;
+	struct cnl_sdw_port *port = NULL;
+	int i, ret = 0;
+
+	sdw = sdw_master_get_drvdata(mstr);
+
+	mutex_lock(&sdw->stream_lock);
+	for (i = 1; i < CNL_SDW_MAX_PORTS; i++) {
+		if (sdw->port[i].allocated == false) {
+			port = &sdw->port[i];
+			port->allocated = true;
+			port->direction = direction;
+			port->ch_cnt = ch_count;
+			break;
+		}
+	}
+	mutex_unlock(&sdw->stream_lock);
+	if (!port) {
+		dev_err(&mstr->dev, "Unable to allocate port\n");
+		return NULL;
+	}
+	port->pdi_stream = NULL;
+	if (stream_type == CNL_SDW_PDI_TYPE_PDM)
+		ret = sdw_alloc_pdm_stream(sdw, port, ch_count, direction);
+	else
+		ret = sdw_alloc_pcm_stream(sdw, port, ch_count, direction);
+	if (!ret)
+		return port;
+
+	dev_err(&mstr->dev, "Unable to allocate stream\n");
+	mutex_lock(&sdw->stream_lock);
+	port->allocated = false;
+	mutex_unlock(&sdw->stream_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(cnl_sdw_alloc_port);
+
+void cnl_sdw_free_port(struct sdw_master *mstr, int port_num)
+{
+	int i;
+	struct cnl_sdw *sdw;
+	struct cnl_sdw_port *port = NULL;
+
+	sdw = sdw_master_get_drvdata(mstr);
+	for (i = 1; i < CNL_SDW_MAX_PORTS; i++) {
+		if (sdw->port[i].port_num == port_num) {
+			port = &sdw->port[i];
+			break;
+		}
+	}
+	if (!port)
+		return;
+	mutex_lock(&sdw->stream_lock);
+	port->pdi_stream->allocated = false;
+	port->pdi_stream = NULL;
+	port->allocated = false;
+	mutex_unlock(&sdw->stream_lock);
+}
+EXPORT_SYMBOL_GPL(cnl_sdw_free_port);
+
+static int cnl_sdw_update_slave_status(struct cnl_sdw *sdw, int slave_intstat0,
+			int slave_intstat1)
+{
+	int i;
+	struct sdw_status slave_status;
+	u64 slaves_stat, slave_stat;
+	int ret = 0;
+
+	memset(&slave_status, 0x0, sizeof(slave_status));
+	slaves_stat = (u64) slave_intstat1 <<
+			SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT;
+	slaves_stat |= slave_intstat0;
+	for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) {
+		slave_stat = slaves_stat >> (i * SDW_CNL_SLAVE_STATUS_BITS);
+		if (slave_stat & MCP_SLAVEINTSTAT_NOT_PRESENT_MASK)
+			slave_status.status[i] = SDW_SLAVE_STAT_NOT_PRESENT;
+		else if (slave_stat & MCP_SLAVEINTSTAT_ATTACHED_MASK)
+			slave_status.status[i] = SDW_SLAVE_STAT_ATTACHED_OK;
+		else if (slave_stat & MCP_SLAVEINTSTAT_ALERT_MASK)
+			slave_status.status[i] = SDW_SLAVE_STAT_ALERT;
+		else if (slave_stat & MCP_SLAVEINTSTAT_RESERVED_MASK)
+			slave_status.status[i] = SDW_SLAVE_STAT_RESERVED;
+	}
+	ret = sdw_master_update_slv_status(sdw->mstr, &slave_status);
+	return ret;
+}
+
+static void cnl_sdw_read_response(struct cnl_sdw *sdw)
+{
+	struct cnl_sdw_data *data = &sdw->data;
+	int num_res = 0, i;
+	u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE;
+
+	num_res = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_FIFOSTAT);
+	num_res &= MCP_RX_FIFO_AVAIL_MASK;
+	for (i = 0; i < num_res; i++) {
+		sdw->response_buf[i] = cnl_sdw_reg_readl(data->sdw_regs,
+				cmd_base);
+		cmd_base += SDW_CNL_CMD_WORD_LEN;
+	}
+}
+
+static enum sdw_command_response sdw_fill_message_response(
+			struct sdw_master *mstr,
+			struct sdw_msg *msg,
+			int count, int offset)
+{
+	int i, j;
+	int no_ack = 0, nack = 0;
+	struct cnl_sdw *sdw =
sdw_master_get_drvdata(mstr); + + for (i = 0; i < count; i++) { + if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) { + no_ack = 1; + dev_err(&mstr->dev, "Ack not recevied\n"); + if ((MCP_RESPONSE_NACK_MASK & + sdw->response_buf[i])) { + nack = 1; + dev_err(&mstr->dev, "NACK recevied\n"); + } + } + break; + } + if (nack) { + dev_err(&mstr->dev, "Nack detected for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } else if (no_ack) { + dev_err(&mstr->dev, "Command ignored for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } + if (msg->flag == SDW_MSG_FLAG_WRITE) + return 0; + /* Response and Command has same base address */ + for (j = 0; j < count; j++) + msg->buf[j + offset] = + (sdw->response_buf[j] >> MCP_RESPONSE_RDATA_SHIFT); + return 0; +} + + +irqreturn_t cnl_sdw_irq_handler(int irq, void *context) +{ + struct cnl_sdw *sdw = context; + volatile int int_status, status, wake_sts; + + struct cnl_sdw_data *data = &sdw->data; + volatile int slave_intstat0 = 0, slave_intstat1 = 0; + struct sdw_master *mstr = sdw->mstr; + + /* + * Return if IP is in power down state. Interrupt can still come + * since its shared irq. + */ + if (!sdw->sdw_link_status) + return IRQ_NONE; + + int_status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_INTSTAT); + status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_STAT); + slave_intstat0 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT0); + slave_intstat1 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT1); + wake_sts = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKESTS_REG_OFFSET); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET, + wake_sts); + + if (!(int_status & (MCP_INTSTAT_IRQ_MASK << MCP_INTSTAT_IRQ_SHIFT))) + return IRQ_NONE; + + if (int_status & (MCP_INTSTAT_RXWL_MASK << MCP_INTSTAT_RXWL_SHIFT)) { + cnl_sdw_read_response(sdw); + if (sdw->async_msg.async_xfer_complete) { + sdw_fill_message_response(mstr, sdw->async_msg.msg, + sdw->async_msg.length, 0); + complete(sdw->async_msg.async_xfer_complete); + sdw->async_msg.async_xfer_complete = NULL; + sdw->async_msg.msg = NULL; + } else + complete(&sdw->tx_complete); + } + if (int_status & (MCP_INTSTAT_CONTROLBUSCLASH_MASK << + MCP_INTSTAT_CONTROLBUSCLASH_SHIFT)) { + /* Some slave is behaving badly, where its driving + * data line during control word bits. + */ + dev_err_ratelimited(&mstr->dev, "Bus clash detected for control word\n"); + WARN_ONCE(1, "Bus clash detected for control word\n"); + } + if (int_status & (MCP_INTSTAT_DATABUSCLASH_MASK << + MCP_INTSTAT_DATABUSCLASH_SHIFT)) { + /* More than 1 slave is trying to drive bus. There is + * some problem with ownership of bus data bits, + * or either of the + * slave is behaving badly. 
+		 */
+		dev_err_ratelimited(&mstr->dev, "Bus clash detected for data word\n");
+		WARN_ONCE(1, "Bus clash detected for data word\n");
+	}
+
+	if (int_status & (MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK <<
+			MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT)) {
+		dev_info(&mstr->dev, "Slave status change\n");
+		cnl_sdw_update_slave_status(sdw, slave_intstat0,
+				slave_intstat1);
+	}
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT0,
+					slave_intstat0);
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT1,
+					slave_intstat1);
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTSTAT, int_status);
+	return IRQ_HANDLED;
+}
+
+static enum sdw_command_response cnl_program_scp_addr(struct sdw_master *mstr,
+					struct sdw_msg *msg)
+{
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+	struct cnl_sdw_data *data = &sdw->data;
+	u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE;
+	u32 cmd_data[2] = {0, 0};
+	unsigned long time_left;
+	int no_ack = 0, nack = 0;
+	int i;
+
+	/* Since we are programming 2 commands, program the
+	 * RX watermark level at 2
+	 */
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, 2);
+	/* Program device address */
+	cmd_data[0] |= (msg->slave_addr & MCP_COMMAND_DEV_ADDR_MASK) <<
+			MCP_COMMAND_DEV_ADDR_SHIFT;
+	/* Write command to program the scp_addr1 register */
+	cmd_data[0] |= (0x3 << MCP_COMMAND_COMMAND_SHIFT);
+	cmd_data[1] = cmd_data[0];
+	/* scp_addr1 register address */
+	cmd_data[0] |= (SDW_SCP_ADDRPAGE1 << MCP_COMMAND_REG_ADDR_L_SHIFT);
+	cmd_data[1] |= (SDW_SCP_ADDRPAGE2 << MCP_COMMAND_REG_ADDR_L_SHIFT);
+	cmd_data[0] |= msg->addr_page1;
+	cmd_data[1] |= msg->addr_page2;
+
+	cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[0]);
+	cmd_base += SDW_CNL_CMD_WORD_LEN;
+	cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[1]);
+
+	time_left = wait_for_completion_timeout(&sdw->tx_complete,
+						3000);
+	if (!time_left) {
+		dev_err(&mstr->dev, "Controller Timed out\n");
+		msg->len = 0;
+		return -ETIMEDOUT;
+	}
+
+	for (i = 0; i < CNL_SDW_SCP_ADDR_REGS; i++) {
+		if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) {
+			no_ack = 1;
+			dev_err(&mstr->dev, "Ack not received\n");
+			if ((MCP_RESPONSE_NACK_MASK & sdw->response_buf[i])) {
+				nack = 1;
+				dev_err(&mstr->dev, "NACK received\n");
+			}
+		}
+	}
+	/* We don't return an error if a NACK or missing ACK is detected for
+	 * the broadcast address, because some slaves may support the SCP
+	 * address page registers while others may not. This is not ideal:
+	 * we cannot tell whether a NACK means the slave does not support
+	 * SCP_addrpage or whether it is a genuine NACK caused by bus errors.
+	 * The spec is also not clear on whether slaves report NACK or no ACK
+	 * for scp_addrpage programming when they do not support it.
+ * This needs to be thought through + */ + if (nack & (msg->slave_addr != 15)) { + dev_err(&mstr->dev, "SCP_addrpage write NACKed for slave %d\n", msg->slave_addr); + return -EREMOTEIO; + } else if (no_ack && (msg->slave_addr != 15)) { + dev_err(&mstr->dev, "SCP_addrpage write ignored for slave %d\n", msg->slave_addr); + return -EREMOTEIO; + } else + return 0; + +} + +static enum sdw_command_response sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, int cmd, int offset, int count, bool async) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int j; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + u32 cmd_data = 0; + unsigned long time_left; + u16 addr = msg->addr; + + /* Program the watermark level upto number of count */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, count); + + cmd_base = SDW_CNL_MCP_COMMAND_BASE; + for (j = 0; j < count; j++) { + /* Program device address */ + cmd_data = 0; + cmd_data |= (msg->slave_addr & + MCP_COMMAND_DEV_ADDR_MASK) << + MCP_COMMAND_DEV_ADDR_SHIFT; + /* Program read/write command */ + cmd_data |= (cmd << MCP_COMMAND_COMMAND_SHIFT); + /* program incrementing address register */ + cmd_data |= (addr++ << MCP_COMMAND_REG_ADDR_L_SHIFT); + /* Program the data if write command */ + if (msg->flag == SDW_MSG_FLAG_WRITE) + cmd_data |= + msg->buf[j + offset]; + + cmd_data |= ((msg->ssp_tag & + MCP_COMMAND_SSP_TAG_MASK) << + MCP_COMMAND_SSP_TAG_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, + cmd_base, cmd_data); + cmd_base += SDW_CNL_CMD_WORD_LEN; + } + + /* If Async dont wait for completion */ + if (async) + return 0; + /* Wait for 3 second for timeout */ + time_left = wait_for_completion_timeout(&sdw->tx_complete, 3 * HZ); + if (!time_left) { + dev_err(&mstr->dev, "Controller timedout\n"); + msg->len = 0; + return -ETIMEDOUT; + } + return sdw_fill_message_response(mstr, msg, count, offset); +} + +static enum sdw_command_response cnl_sdw_xfer_msg_async(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page, + struct sdw_async_xfer_data *data) +{ + int ret = 0, cmd; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + + /* Only 1 message can be handled in Async fashion. This is used + * only for Bank switching where during aggregation it is required + * to synchronously switch the bank on more than 1 controller + */ + if (msg->len > 1) { + ret = -EINVAL; + goto error; + } + /* If scp addr programming fails goto error */ + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + if (ret) + goto error; + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + sdw->async_msg.async_xfer_complete = &data->xfer_complete; + sdw->async_msg.msg = msg; + sdw->async_msg.length = msg->len; + /* Dont wait for reply, calling function will wait for reply. 
*/ + ret = sdw_xfer_msg(mstr, msg, cmd, 0, msg->len, true); + return ret; +error: + msg->len = 0; + complete(&data->xfer_complete); + return -EINVAL; + +} + +static enum sdw_command_response cnl_sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page) +{ + int i, ret = 0, cmd; + + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + + if (ret) { + msg->len = 0; + return ret; + } + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + for (i = 0; i < msg->len / SDW_CNL_MCP_COMMAND_LENGTH; i++) { + ret = sdw_xfer_msg(mstr, msg, + cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + break; + } + if (!(msg->len % SDW_CNL_MCP_COMMAND_LENGTH)) + return ret; + ret = sdw_xfer_msg(mstr, msg, cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + msg->len % SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + return -EINVAL; + return ret; +} + +static void cnl_sdw_bra_prep_crc(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int addr = addr_offset; + + txdata_buf[addr++] = sdw_bus_compute_crc8((block->values + data_offset), + block->num_bytes); + txdata_buf[addr++] = 0x0; + txdata_buf[addr++] = 0x0; + txdata_buf[addr] |= ((0x2 & SDW_BRA_SOP_EOP_PDI_MASK) + << SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_prep_data(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int i; + int addr = addr_offset; + + for (i = 0; i < block->num_bytes; i += 2) { + + txdata_buf[addr++] = block->values[i + data_offset]; + if ((block->num_bytes - 1) - i) + txdata_buf[addr++] = block->values[i + data_offset + 1]; + else + txdata_buf[addr++] = 0; + + txdata_buf[addr++] = 0; + txdata_buf[addr++] = 0; + } +} + +static void cnl_sdw_bra_prep_hdr(u8 *txdata_buf, + struct sdw_bra_block *block, int rolling_id, int offset) +{ + + u8 tmp_hdr[6] = {0, 0, 0, 0, 0, 0}; + u8 temp = 0x0; + + /* + * 6 bytes header + * 1st byte: b11001010 + * b11: Header is active + * b0010: Device number 2 is selected + * b1: Write operation + * b0: MSB of BRA_NumBytes is 0 + * 2nd byte: LSB of number of bytes + * 3rd byte to 6th byte: Slave register offset + */ + temp |= (SDW_BRA_HDR_ACTIVE & SDW_BRA_HDR_ACTIVE_MASK) << + SDW_BRA_HDR_ACTIVE_SHIFT; + temp |= (block->slave_addr & SDW_BRA_HDR_SLV_ADDR_MASK) << + SDW_BRA_HDR_SLV_ADDR_SHIFT; + temp |= (block->cmd & SDW_BRA_HDR_RD_WR_MASK) << + SDW_BRA_HDR_RD_WR_SHIFT; + + if (block->num_bytes > SDW_BRA_HDR_MSB_BYTE_CHK) + temp |= (SDW_BRA_HDR_MSB_BYTE_SET & SDW_BRA_HDR_MSB_BYTE_MASK); + else + temp |= (SDW_BRA_HDR_MSB_BYTE_UNSET & + SDW_BRA_HDR_MSB_BYTE_MASK); + + txdata_buf[offset + 0] = tmp_hdr[0] = temp; + txdata_buf[offset + 1] = tmp_hdr[1] = block->num_bytes; + txdata_buf[offset + 3] |= ((SDW_BRA_SOP_EOP_PDI_STRT_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); + + txdata_buf[offset + 3] |= ((rolling_id & SDW_BRA_ROLLINGID_PDI_MASK) + << SDW_BRA_ROLLINGID_PDI_SHIFT); + + txdata_buf[offset + 4] = tmp_hdr[2] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK24) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT24); + + txdata_buf[offset + 5] = tmp_hdr[3] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK16) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT16); + + txdata_buf[offset + 8] = tmp_hdr[4] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK8) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT8); + + 
txdata_buf[offset + 9] = tmp_hdr[5] = (block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK0); + + /* CRC check */ + txdata_buf[offset + 0xc] = sdw_bus_compute_crc8(tmp_hdr, + SDW_BRA_HEADER_SIZE); + + if (!block->cmd) + txdata_buf[offset + 0xf] = ((SDW_BRA_SOP_EOP_PDI_END_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_pdi_tx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + struct cnl_sdw_pdi_stream tx_pdi_stream; + unsigned int tx_ch_map_offset, port_ctrl_offset, tx_pdi_config_offset; + unsigned int port_ctrl = 0, tx_pdi_config = 0, tx_stream_config; + int tx_pdi_ch_map = 0; + + if (enable) { + /* DP0 PORT CTRL REG */ + port_ctrl_offset = SDW_CNL_PORTCTRL + (SDW_BRA_PORT_ID * + SDW_CNL_PORT_REG_OFFSET); + + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl |= ((SDW_BRA_BULK_ENABLE & SDW_BRA_BLK_EN_MASK) << + SDW_BRA_BLK_EN_SHIFT); + + port_ctrl |= ((SDW_BRA_BPT_PAYLOAD_TYPE & + SDW_BRA_BPT_PYLD_TY_MASK) << + SDW_BRA_BPT_PYLD_TY_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, + port_ctrl); + + /* PDI0 Programming */ + tx_pdi_stream.l_ch_num = 0; + tx_pdi_stream.h_ch_num = 0xF; + tx_pdi_stream.pdi_num = SDW_BRA_PDI_TX_ID; + /* TODO: Remove hardcoding */ + tx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + tx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS2CM SHIM REG */ + tx_ch_map_offset = SDW_CNL_CTLS2CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + tx_pdi_ch_map |= (tx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, tx_ch_map_offset, + tx_pdi_ch_map); + + /* TX PDI0 CONFIG REG BANK 0 */ + tx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (tx_pdi_stream.pdi_num * 16)); + + tx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + tx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + tx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + tx_pdi_config_offset, tx_pdi_config); + + /* ALH STRMzCFG REG */ + tx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + tx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + tx_stream_config |= (0x0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + tx_stream_config); + + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. 
+ */ + + } +} + +static void cnl_sdw_bra_pdi_rx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + + struct cnl_sdw_pdi_stream rx_pdi_stream; + unsigned int rx_ch_map_offset, rx_pdi_config_offset, rx_stream_config; + unsigned int rx_pdi_config = 0; + int rx_pdi_ch_map = 0; + + if (enable) { + + /* RX PDI1 Configuration */ + rx_pdi_stream.l_ch_num = 0; + rx_pdi_stream.h_ch_num = 0xF; + rx_pdi_stream.pdi_num = SDW_BRA_PDI_RX_ID; + rx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + rx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS3CM SHIM REG */ + rx_ch_map_offset = SDW_CNL_CTLS3CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + rx_pdi_ch_map |= (rx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, rx_ch_map_offset, + rx_pdi_ch_map); + + /* RX PDI1 CONFIG REG */ + rx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (rx_pdi_stream.pdi_num * 16)); + + rx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + rx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + rx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + rx_pdi_config_offset, rx_pdi_config); + + + /* ALH STRMzCFG REG */ + rx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + rx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + rx_stream_config |= (0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + rx_stream_config); + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. 
+		 */
+
+	}
+}
+
+static void cnl_sdw_bra_pdi_config(struct sdw_master *mstr, bool enable)
+{
+	struct cnl_sdw *sdw;
+
+	/* Get driver data for master */
+	sdw = sdw_master_get_drvdata(mstr);
+
+	/* PDI0 configuration */
+	cnl_sdw_bra_pdi_tx_config(mstr, sdw, enable);
+
+	/* PDI1 configuration */
+	cnl_sdw_bra_pdi_rx_config(mstr, sdw, enable);
+}
+
+static int cnl_sdw_bra_verify_footer(u8 *rx_buf, int offset)
+{
+	int ret = 0;
+	u8 ftr_response;
+	u8 ack_nack = 0;
+	u8 ftr_result = 0;
+
+	ftr_response = rx_buf[offset];
+
+	/*
+	 * ACK/NACK check
+	 * NACK+ACK value from target:
+	 * 00 -> Ignored
+	 * 01 -> OK
+	 * 10 -> Failed (Header CRC check failed)
+	 * 11 -> Reserved
+	 * NACK+ACK values at Target or initiator
+	 * 00 -> Ignored
+	 * 01 -> OK
+	 * 10 -> Abort (Header cannot be trusted)
+	 * 11 -> Abort (Header cannot be trusted)
+	 */
+	ack_nack = ((ftr_response >> SDW_BRA_FTR_RESP_ACK_SHIFT) &
+			SDW_BRA_FTR_RESP_ACK_MASK);
+	if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) {
+		pr_info("BRA: Packet Ignored\n");
+		ret = -EINVAL;
+	} else if (ack_nack == SDW_BRA_ACK_NAK_OK)
+		pr_info("BRA: Packet OK\n");
+	else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) {
+		pr_info("BRA: Packet Failed/Reserved\n");
+		return -EINVAL;
+	} else if (ack_nack == SDW_BRA_ACK_NAK_RSVD_ABORT) {
+		pr_info("BRA: Packet Reserved/Abort\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * BRA footer result check
+	 * Writes:
+	 * 0 -> Good. Target accepted write payload
+	 * 1 -> Bad. Target did not accept write payload
+	 * Reads:
+	 * 0 -> Good. Target completed read operation successfully
+	 * 1 -> Bad. Target failed to complete read operation successfully
+	 */
+	ftr_result = (ftr_response >> SDW_BRA_FTR_RESP_RES_SHIFT) &
+			SDW_BRA_FTR_RESP_RES_MASK;
+	if (ftr_result == SDW_BRA_FTR_RESULT_BAD) {
+		pr_info("BRA: Read/Write operation failed on target side\n");
+		/* Error scenario */
+		return -EINVAL;
+	}
+
+	pr_info("BRA: Read/Write operation complete on target side\n");
+
+	return ret;
+}
+
+static int cnl_sdw_bra_verify_hdr(u8 *rx_buf, int offset, bool *chk_footer,
+						int roll_id)
+{
+	int ret = 0;
+	u8 hdr_response, rolling_id;
+	u8 ack_nack = 0;
+	u8 not_ready = 0;
+
+	/* Match rolling ID */
+	hdr_response = rx_buf[offset];
+	rolling_id = rx_buf[offset + SDW_BRA_ROLLINGID_PDI_INDX];
+
+	rolling_id = (rolling_id & SDW_BRA_ROLLINGID_PDI_MASK);
+	if (roll_id != rolling_id) {
+		pr_info("BRA: Rolling ID doesn't match, returning error\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * ACK/NACK check
+	 * NACK+ACK value from target:
+	 * 00 -> Ignored
+	 * 01 -> OK
+	 * 10 -> Failed (Header CRC check failed)
+	 * 11 -> Reserved
+	 * NACK+ACK values at Target or initiator
+	 * 00 -> Ignored
+	 * 01 -> OK
+	 * 10 -> Abort (Header cannot be trusted)
+	 * 11 -> Abort (Header cannot be trusted)
+	 */
+	ack_nack = ((hdr_response >> SDW_BRA_HDR_RESP_ACK_SHIFT) &
+			SDW_BRA_HDR_RESP_ACK_MASK);
+	if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) {
+		pr_info("BRA: Packet Ignored rolling_id:%d\n", rolling_id);
+		ret = -EINVAL;
+	} else if (ack_nack == SDW_BRA_ACK_NAK_OK)
+		pr_info("BRA: Packet OK rolling_id:%d\n", rolling_id);
+	else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) {
+		pr_info("BRA: Packet Failed/Abort rolling_id:%d\n", rolling_id);
+		return -EINVAL;
+	} else if (ack_nack == SDW_BRA_ACK_NAK_RSVD_ABORT) {
+		pr_info("BRA: Packet Reserved/Abort rolling_id:%d\n", rolling_id);
+		return -EINVAL;
+	}
+
+	/* BRA not ready check */
+	not_ready = (hdr_response >> SDW_BRA_HDR_RESP_NRDY_SHIFT) &
+			SDW_BRA_HDR_RESP_NRDY_MASK;
+	if (not_ready == SDW_BRA_TARGET_NOT_READY) {
+		pr_info("BRA: Target not ready 
for read/write operation rolling_id:%d\n", + rolling_id); + chk_footer = false; + return -EBUSY; + } + + pr_info("BRA: Target ready for read/write operation rolling_id:%d\n", rolling_id); + return ret; +} + +static void cnl_sdw_bra_remove_data_padding(u8 *src_buf, u8 *dst_buf, + u8 size) { + + int i; + + for (i = 0; i < size/2; i++) { + + *dst_buf++ = *src_buf++; + *dst_buf++ = *src_buf++; + src_buf++; + src_buf++; + } +} + + +static int cnl_sdw_bra_check_data(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) { + + int offset = 0, rolling_id = 0, tmp_offset = 0; + int rx_crc_comp = 0, rx_crc_rvd = 0; + int i, ret; + bool chk_footer = true; + int rx_buf_size = info->rx_block_size; + u8 *rx_buf = info->rx_ptr; + u8 *tmp_buf = NULL; + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA RX DATA:", DUMP_PREFIX_OFFSET, 8, 4, + rx_buf, rx_buf_size, false); + + /* Allocate temporary buffer in case of read request */ + if (!block->cmd) { + tmp_buf = kzalloc(block->num_bytes, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto error; + } + } + + /* + * TODO: From the response header and footer there is no mention of + * read or write packet so controller needs to keep transmit packet + * information in order to verify rx packet. Also the current + * approach used for error mechanism is any of the packet response + * is not success, just report the whole transfer failed to Slave. + */ + + /* + * Verification of response packet for one known + * hardcoded configuration. This needs to be extended + * once we have dynamic algorithm integrated. + */ + + /* 2 valid read response */ + for (i = 0; i < info->valid_packets; i++) { + + + pr_info("BRA: Verifying packet number:%d with rolling id:%d\n", + info->packet_info[i].packet_num, + rolling_id); + chk_footer = true; + ret = cnl_sdw_bra_verify_hdr(rx_buf, offset, &chk_footer, + rolling_id); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Header verification failed for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + /* Increment offset for header response */ + offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI; + + if (!block->cmd) { + + /* Remove PDI padding for data */ + cnl_sdw_bra_remove_data_padding(&rx_buf[offset], + &tmp_buf[tmp_offset], + info->packet_info[i].num_data_bytes); + + /* Increment offset for consumed data */ + offset = offset + + (info->packet_info[i].num_data_bytes * 2); + + rx_crc_comp = sdw_bus_compute_crc8(&tmp_buf[tmp_offset], + info->packet_info[i].num_data_bytes); + + /* Match Data CRC */ + rx_crc_rvd = rx_buf[offset]; + if (rx_crc_comp != rx_crc_rvd) { + ret = -EINVAL; + dev_err(&mstr->dev, "BRA: Data CRC doesn't match for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + /* Increment destination buffer with copied data */ + tmp_offset = tmp_offset + + info->packet_info[i].num_data_bytes; + + /* Increment offset for CRC */ + offset = offset + SDW_BRA_DATA_CRC_SIZE_PDI; + } + + if (chk_footer) { + ret = cnl_sdw_bra_verify_footer(rx_buf, offset); + if (ret < 0) { + ret = -EINVAL; + dev_err(&mstr->dev, "BRA: Footer verification failed for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + } + + /* Increment offset for footer response */ + offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI; + + /* Increment rolling id for next packet */ + rolling_id++; + if (rolling_id > 0xF) + rolling_id = 0; + } + + /* + * No need to check for dummy responses from codec + * Assumption made here is that dummy packets 
are + * added in 1ms buffer only after valid packets. + */ + + /* Copy data to codec buffer in case of read request */ + if (!block->cmd) + memcpy(block->values, tmp_buf, block->num_bytes); + +error: + /* Free up temp buffer allocated in case of read request */ + if (!block->cmd) + kfree(tmp_buf); + + /* Free up buffer allocated in cnl_sdw_bra_data_ops */ + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_bra_data_ops(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) +{ + + struct sdw_bra_block tmp_block; + int i; + int tx_buf_size = 384, rx_buf_size = 1152; + u8 *tx_buf = NULL, *rx_buf = NULL; + int rolling_id = 0, total_bytes = 0, offset = 0, reg_offset = 0; + int dummy_read = 0x0000; + int ret; + + /* + * TODO: Run an algorithm here to identify the buffer size + * for TX and RX buffers + number of dummy packets (read + * or write) to be added for to align buffers. + */ + + info->tx_block_size = tx_buf_size; + info->tx_ptr = tx_buf = kzalloc(tx_buf_size, GFP_KERNEL); + if (!tx_buf) { + ret = -ENOMEM; + goto error; + } + + info->rx_block_size = rx_buf_size; + info->rx_ptr = rx_buf = kzalloc(rx_buf_size, GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto error; + } + + /* Fill valid packets transferred per millisecond buffer */ + info->valid_packets = 2; + info->packet_info = kcalloc(info->valid_packets, + sizeof(*info->packet_info), + GFP_KERNEL); + if (!info->packet_info) { + ret = -ENOMEM; + goto error; + } + + /* + * Below code performs packet preparation for one known + * configuration. + * 1. 2 Valid Read request with 18 bytes each. + * 2. 22 dummy read packets with 18 bytes each. + */ + for (i = 0; i < info->valid_packets; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = block->cmd; /* Read Request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = block->reg_offset + reg_offset; + tmp_block.values = NULL; + reg_offset += tmp_block.num_bytes; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + /* Total Header size: Header + Header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + if (block->cmd) { + /* + * PDI data preparation in case of write request + * Assumption made here is data size from codec will + * be always an even number. 
+ */ + cnl_sdw_bra_prep_data(tx_buf, &tmp_block, + total_bytes, offset); + offset += tmp_block.num_bytes * 2; + + /* Data CRC */ + cnl_sdw_bra_prep_crc(tx_buf, &tmp_block, + total_bytes, offset); + offset += SDW_BRA_DATA_CRC_SIZE_PDI; + } + + total_bytes += tmp_block.num_bytes; + rolling_id++; + + /* Fill packet info data structure */ + info->packet_info[i].packet_num = i + 1; + info->packet_info[i].num_data_bytes = tmp_block.num_bytes; + } + + /* Prepare dummy packets */ + for (i = 0; i < 22; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = 0; /* Read request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = dummy_read++; + tmp_block.values = NULL; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + + /* Total Header size: RD header + RD header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + total_bytes += tmp_block.num_bytes; + rolling_id++; + } + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA PDI VALID TX DATA:", + DUMP_PREFIX_OFFSET, 8, 4, tx_buf, tx_buf_size, false); + + return 0; + +error: + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_xfer_bulk(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct cnl_sdw *sdw = sdw_master_get_platdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + struct cnl_bra_operation *ops = data->bra_data->bra_ops; + struct bra_info info; + int ret; + + /* + * 1. PDI Configuration + * 2. Prepare BRA packets including CRC calculation. + * 3. Configure TX and RX DMA in one shot mode. + * 4. Configure TX and RX Pipeline. + * 5. Run TX and RX DMA. + * 6. Run TX and RX pipelines. + * 7. Wait on completion for RX buffer. + * 8. Match TX and RX buffer packets and check for errors. 
+ */ + + /* Memset bra_info data structure */ + memset(&info, 0x0, sizeof(info)); + + /* Fill master number in bra info data structure */ + info.mstr_num = mstr->nr; + + /* PDI Configuration (ON) */ + cnl_sdw_bra_pdi_config(mstr, true); + + /* Prepare TX buffer */ + ret = cnl_sdw_bra_data_ops(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Request packet(s) creation failed\n"); + goto out; + } + + /* Pipeline Setup (ON) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline setup failed\n"); + goto out; + } + + /* Trigger START host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline start failed\n"); + goto out; + } + + /* Trigger STOP host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline stop failed\n"); + goto out; + } + + /* Pipeline Setup (OFF) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline de-setup failed\n"); + goto out; + } + + /* Verify RX buffer */ + ret = cnl_sdw_bra_check_data(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Response packet(s) incorrect\n"); + goto out; + } + + /* PDI Configuration (OFF) */ + cnl_sdw_bra_pdi_config(mstr, false); + +out: + return ret; +} + +static int cnl_sdw_mon_handover(struct sdw_master *mstr, + bool enable) +{ + int mcp_config; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + if (enable) + mcp_config |= MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT; + else + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + return 0; +} + +static int cnl_sdw_set_ssp_interval(struct sdw_master *mstr, + int ssp_interval, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int sspctrl_offset, check; + + if (bank) + sspctrl_offset = SDW_CNL_MCP_SSPCTRL1; + else + sspctrl_offset = SDW_CNL_MCP_SSPCTRL0; + + cnl_sdw_reg_writel(data->sdw_regs, sspctrl_offset, ssp_interval); + + check = cnl_sdw_reg_readl(data->sdw_regs, sspctrl_offset); + + return 0; +} + +static int cnl_sdw_set_clock_freq(struct sdw_master *mstr, + int cur_clk_div, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int mcp_clockctrl_offset, mcp_clockctrl; + + + /* TODO: Retrieve divider value or get value directly from calling + * function + */ + int divider = (cur_clk_div - 1); + + if (bank) { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL1; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL1); + + } else { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL0; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + } + + mcp_clockctrl |= divider; + + /* Write value here */ + cnl_sdw_reg_writel(data->sdw_regs, mcp_clockctrl_offset, + mcp_clockctrl); + + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + mcp_clockctrl_offset); + return 0; +} + +static int cnl_sdw_set_port_params(struct sdw_master *mstr, + struct sdw_port_params *params, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = 
&sdw->data; + int dpn_config = 0, dpn_config_offset; + + if (bank) + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + else + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_config |= (((params->word_length - 1) & DPN_CONFIG_WL_MASK) << + DPN_CONFIG_WL_SHIFT); + dpn_config |= ((params->port_flow_mode & DPN_CONFIG_PF_MODE_MASK) << + DPN_CONFIG_PF_MODE_SHIFT); + dpn_config |= ((params->port_data_mode & DPN_CONFIG_PD_MODE_MASK) << + DPN_CONFIG_PD_MODE_SHIFT); + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + return 0; +} + +static int cnl_sdw_set_port_transport_params(struct sdw_master *mstr, + struct sdw_transport_params *params, int bank) +{ +struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + int dpn_config = 0, dpn_config_offset; + int dpn_samplectrl_offset; + int dpn_offsetctrl = 0, dpn_offsetctrl_offset; + int dpn_hctrl = 0, dpn_hctrl_offset; + + if (bank) { + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL1; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL1; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL1; + } else { + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL0; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL0; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL0; + } + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + dpn_config |= ((params->blockgroupcontrol & DPN_CONFIG_BGC_MASK) << + DPN_CONFIG_BGC_SHIFT); + dpn_config |= ((params->blockpackingmode & DPN_CONFIG_BPM_MASK) << + DPN_CONFIG_BPM_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_offsetctrl |= ((params->offset1 & DPN_OFFSETCTRL0_OF1_MASK) << + DPN_OFFSETCTRL0_OF1_SHIFT); + + dpn_offsetctrl |= ((params->offset2 & DPN_OFFSETCTRL0_OF2_MASK) << + DPN_OFFSETCTRL0_OF2_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_offsetctrl_offset, params->num, dpn_offsetctrl); + + + dpn_hctrl |= ((params->hstart & DPN_HCTRL_HSTART_MASK) << + DPN_HCTRL_HSTART_SHIFT); + dpn_hctrl |= ((params->hstop & DPN_HCTRL_HSTOP_MASK) << + DPN_HCTRL_HSTOP_SHIFT); + dpn_hctrl |= ((params->lanecontrol & DPN_HCTRL_LCONTROL_MASK) << + DPN_HCTRL_LCONTROL_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_hctrl_offset, params->num, dpn_hctrl); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_samplectrl_offset, params->num, + (params->sample_interval - 1)); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_hctrl_offset, params->num); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_samplectrl_offset, params->num); + + return 0; +} + +static int cnl_sdw_port_activate_ch(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int dpn_channelen_offset; + int ch_mask; + + if (bank) + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN1; + else + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN0; + + if (activate_ch->activate) + ch_mask = activate_ch->ch_mask; + else + ch_mask = 0; + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_channelen_offset, activate_ch->num, + ch_mask); + + return 0; +} + +static int cnl_sdw_port_activate_ch_pre(struct sdw_master *mstr, 
+ struct sdw_activate_ch *activate_ch, int bank) +{ + int sync_reg; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + if (mstr->link_sync_mask) { + /* Check if this link is synchronized with some other link */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + /* If link is synchronized with other link than + * Need to make sure that command doesnt go till + * ssync is applied + */ + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + return 0; +} +static int cnl_sdw_port_activate_ch_post(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + int sync_reg; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + volatile int sync_update = 0; + int timeout = 10; + + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + /* If waiting for synchronization set the go bit, else return */ + if (!(sync_reg & SDW_CMDSYNC_SET_MASK)) + return 0; + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & + (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) == 0) + break; + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & + (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + return 0; +} + +static int cnl_sdw_probe(struct sdw_master *mstr, + const struct sdw_master_id *sdw_id) +{ + struct cnl_sdw *sdw; + int ret = 0; + struct cnl_sdw_data *data = mstr->dev.platform_data; + + sdw = devm_kzalloc(&mstr->dev, sizeof(*sdw), GFP_KERNEL); + if (!sdw) { + ret = -ENOMEM; + return ret; + } + dev_info(&mstr->dev, + "Controller Resources ctrl_base = %p shim=%p irq=%d inst_id=%d\n", + data->sdw_regs, data->sdw_shim, data->irq, data->inst_id); + sdw->data.sdw_regs = data->sdw_regs; + sdw->data.sdw_shim = data->sdw_shim; + sdw->data.irq = data->irq; + sdw->data.inst_id = data->inst_id; + sdw->data.alh_base = data->alh_base; + sdw->mstr = mstr; + spin_lock_init(&sdw->ctrl_lock); + sdw_master_set_drvdata(mstr, sdw); + init_completion(&sdw->tx_complete); + mutex_init(&sdw->stream_lock); + ret = sdw_init(sdw, true); + if (ret) { + dev_err(&mstr->dev, "SoundWire controller init failed %d\n", + data->inst_id); + return ret; + } + ret = devm_request_irq(&mstr->dev, + sdw->data.irq, cnl_sdw_irq_handler, IRQF_SHARED, "SDW", sdw); + if (ret) { + dev_err(&mstr->dev, "unable to grab IRQ %d, disabling device\n", + sdw->data.irq); + sdw_power_down_link(sdw); + return ret; + } + pm_runtime_set_autosuspend_delay(&mstr->dev, 3000); + pm_runtime_use_autosuspend(&mstr->dev); + pm_runtime_enable(&mstr->dev); + pm_runtime_get_sync(&mstr->dev); + /* Resuming the device, since its already ON, function will simply + * return doing nothing + */ + pm_runtime_mark_last_busy(&mstr->dev); + /* Suspending the device after 3 secs, by the time + * all the slave would have enumerated. Initial + * clock freq is 9.6MHz and frame shape is 48X2, so + * there are 200000 frames in second, total there are + * minimum 600000 frames before device suspends. Soundwire + * spec says slave should get attached to bus in 4096 + * error free frames after reset. So this should be + * enough to make sure device gets attached to bus. 
+ */ + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} + +static int cnl_sdw_remove(struct sdw_master *mstr) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + + sdw_power_down_link(sdw); + + return 0; +} + +#ifdef CONFIG_PM +static int cnl_sdw_runtime_suspend(struct device *dev) +{ + int volatile mcp_stat; + int mcp_control; + int timeout = 0; + int ret = 0; + + struct cnl_sdw *sdw = dev_get_drvdata(dev); + struct cnl_sdw_data *data = &sdw->data; + + /* If its suspended return */ + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT)) { + dev_info(dev, "Clock is already stopped\n"); + return 0; + } + + /* Write the MCP Control register to prevent block wakeup */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + mcp_control |= (MCP_CONTROL_BLOCKWAKEUP_MASK << + MCP_CONTROL_BLOCKWAKEUP_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + /* Prepare all the slaves for clock stop */ + ret = sdw_master_prep_for_clk_stop(sdw->mstr); + if (ret) + return ret; + + /* Call bus function to broadcast the clock stop now */ + ret = sdw_master_stop_clock(sdw->mstr); + if (ret) + return ret; + /* Wait for clock to be stopped, we are waiting at max 1sec now */ + while (timeout != 10) { + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT)) + break; + msleep(100); + timeout++; + } + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT))) { + dev_err(dev, "Clock Stop failed\n"); + ret = -EBUSY; + goto out; + } + /* Switch control from master IP to glue */ + sdw_switch_to_glue(sdw); + + sdw_power_down_link(sdw); + + /* Enable the wakeup */ + cnl_sdw_reg_writew(data->sdw_shim, + SDW_CNL_SNDWWAKEEN_REG_OFFSET, + (0x1 << data->inst_id)); +out: + return ret; +} + +static int cnl_sdw_clock_stop_exit(struct cnl_sdw *sdw) +{ + u16 wake_en, wake_sts; + int ret; + struct cnl_sdw_data *data = &sdw->data; + + /* Disable the wake up interrupt */ + wake_en = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKEEN_REG_OFFSET); + wake_en &= ~(0x1 << data->inst_id); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKEEN_REG_OFFSET, + wake_en); + + /* Clear wake status. This may be set if Slave requested wakeup has + * happened, or may not be if it master requested. But in any case + * this wont make any harm + */ + wake_sts = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKESTS_REG_OFFSET); + wake_sts |= (0x1 << data->inst_id); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET, + wake_sts); + ret = sdw_init(sdw, false); + if (ret < 0) { + pr_err("sdw_init fail: %d\n", ret); + return ret; + } + + dev_info(&sdw->mstr->dev, "Exit from clock stop successful\n"); + return 0; + +} + +static int cnl_sdw_runtime_resume(struct device *dev) +{ + struct cnl_sdw *sdw = dev_get_drvdata(dev); + struct cnl_sdw_data *data = &sdw->data; + int volatile mcp_stat; + struct sdw_master *mstr; + int ret = 0; + + mstr = sdw->mstr; + /* + * If already resumed, do nothing. This can happen because of + * wakeup enable. 
+ */ + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT))) { + dev_info(dev, "Clock is already running\n"); + return 0; + } + dev_info(dev, "%s %d Clock is stopped\n", __func__, __LINE__); + + ret = cnl_sdw_clock_stop_exit(sdw); + if (ret) + return ret; + dev_info(&mstr->dev, "Exit from clock stop successful\n"); + + /* Prepare all the slaves to comeout of clock stop */ + ret = sdw_mstr_deprep_after_clk_start(sdw->mstr); + if (ret) + return ret; + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cnl_sdw_sleep_resume(struct device *dev) +{ + return cnl_sdw_runtime_resume(dev); +} +static int cnl_sdw_sleep_suspend(struct device *dev) +{ + return cnl_sdw_runtime_suspend(dev); +} +#else +#define cnl_sdw_sleep_suspend NULL +#define cnl_sdw_sleep_resume NULL +#endif /* CONFIG_PM_SLEEP */ +#else +#define cnl_sdw_runtime_suspend NULL +#define cnl_sdw_runtime_resume NULL +#endif /* CONFIG_PM */ + + +static const struct dev_pm_ops cnl_sdw_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cnl_sdw_sleep_suspend, cnl_sdw_sleep_resume) + SET_RUNTIME_PM_OPS(cnl_sdw_runtime_suspend, + cnl_sdw_runtime_resume, NULL) +}; + +static struct sdw_master_ops cnl_sdw_master_ops = { + .xfer_msg_async = cnl_sdw_xfer_msg_async, + .xfer_msg = cnl_sdw_xfer_msg, + .xfer_bulk = cnl_sdw_xfer_bulk, + .monitor_handover = cnl_sdw_mon_handover, + .set_ssp_interval = cnl_sdw_set_ssp_interval, + .set_clock_freq = cnl_sdw_set_clock_freq, + .set_frame_shape = NULL, +}; + +static struct sdw_master_port_ops cnl_sdw_master_port_ops = { + .dpn_set_port_params = cnl_sdw_set_port_params, + .dpn_set_port_transport_params = cnl_sdw_set_port_transport_params, + .dpn_port_activate_ch = cnl_sdw_port_activate_ch, + .dpn_port_activate_ch_pre = cnl_sdw_port_activate_ch_pre, + .dpn_port_activate_ch_post = cnl_sdw_port_activate_ch_post, + .dpn_port_prepare_ch = NULL, + .dpn_port_prepare_ch_pre = NULL, + .dpn_port_prepare_ch_post = NULL, + +}; + +static struct sdw_mstr_driver cnl_sdw_mstr_driver = { + .driver_type = SDW_DRIVER_TYPE_MASTER, + .driver = { + .name = "cnl_sdw_mstr", + .pm = &cnl_sdw_pm_ops, + }, + .probe = cnl_sdw_probe, + .remove = cnl_sdw_remove, + .mstr_ops = &cnl_sdw_master_ops, + .mstr_port_ops = &cnl_sdw_master_port_ops, +}; + +static int __init cnl_sdw_init(void) +{ + return sdw_mstr_driver_register(&cnl_sdw_mstr_driver); +} +module_init(cnl_sdw_init); + +static void cnl_sdw_exit(void) +{ + sdw_mstr_driver_unregister(&cnl_sdw_mstr_driver); +} +module_exit(cnl_sdw_exit); + +MODULE_DESCRIPTION("Intel SoundWire Master Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hardik Shah "); diff --git a/drivers/sdw/sdw_cnl_priv.h b/drivers/sdw/sdw_cnl_priv.h new file mode 100644 index 000000000000..b7f44e1f9d6f --- /dev/null +++ b/drivers/sdw/sdw_cnl_priv.h @@ -0,0 +1,385 @@ +/* + * sdw_cnl_priv.h - Private definition for intel master controller driver. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_CNL_PRIV_H +#define _LINUX_SDW_CNL_PRIV_H + +#define SDW_CNL_PM_TIMEOUT 3000 /* ms */ +#define SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT 32 +#define SDW_CNL_SLAVE_STATUS_BITS 4 +#define SDW_CNL_CMD_WORD_LEN 4 +#define SDW_CNL_DEFAULT_SSP_INTERVAL 0x18 +#define SDW_CNL_DEFAULT_CLK_DIVIDER 0 + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x257F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x48 +#else +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x176F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x30 +#endif + +#define SDW_CNL_PORT_REG_OFFSET 0x80 +#define CNL_SDW_SCP_ADDR_REGS 0x2 +#define SDW_CNL_PCM_PDI_NUM_OFFSET 0x2 +#define SDW_CNL_PDM_PDI_NUM_OFFSET 0x6 + +#define SDW_CNL_CTMCTL_REG_OFFSET 0x60 +#define SDW_CNL_IOCTL_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHC_REG_OFFSET 0x60 +#define SDW_CNL_PDMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHM_REG_OFFSET 0x60 +#define SDW_CNL_SNDWWAKEEN_REG_OFFSET 0x190 +#define SDW_CNL_SNDWWAKESTS_REG_OFFSET 0x192 + + +#define SDW_CNL_MCP_CONFIG 0x0 +#define MCP_CONFIG_BRELENABLE_MASK 0x1 +#define MCP_CONFIG_BRELENABLE_SHIFT 0x6 +#define MCP_CONFIG_MAXCMDRETRY_SHIFT 24 +#define MCP_CONFIG_MAXCMDRETRY_MASK 0xF +#define MCP_CONFIG_MAXPREQDELAY_SHIFT 16 +#define MCP_CONFIG_MAXPREQDELAY_MASK 0x1F +#define MCP_CONFIG_MMMODEEN_SHIFT 0x7 +#define MCP_CONFIG_MMMODEEN_MASK 0x1 +#define MCP_CONFIG_SNIFFEREN_SHIFT 0x5 +#define MCP_CONFIG_SNIFFEREN_MASK 0x1 +#define MCP_CONFIG_SSPMODE_SHIFT 0x4 +#define MCP_CONFIG_SSPMODE_MASK 0x1 +#define MCP_CONFIG_CMDMODE_SHIFT 0x3 +#define MCP_CONFIG_CMDMODE_MASK 0x1 + +#define MCP_CONFIG_OPERATIONMODE_MASK 0x7 +#define MCP_CONFIG_OPERATIONMODE_SHIFT 0x0 +#define MCP_CONFIG_OPERATIONMODE_NORMAL 0x0 + +#define SDW_CNL_MCP_CONTROL 0x4 +#define MCP_CONTROL_RESETDELAY_SHIFT 0x8 +#define MCP_CONTROL_CMDRST_SHIFT 0x7 +#define MCP_CONTROL_CMDRST_MASK 0x1 +#define MCP_CONTROL_SOFTRST_SHIFT 0x6 +#define MCP_CONTROL_SOFTCTRLBUSRST_SHIFT 0x5 +#define MCP_CONTROL_HARDCTRLBUSRST_MASK 0x1 +#define MCP_CONTROL_HARDCTRLBUSRST_SHIFT 0x4 +#define MCP_CONTROL_CLOCKPAUSEREQ_SHIFT 0x3 +#define MCP_CONTROL_CLOCKSTOPCLEAR_SHIFT 0x2 +#define MCP_CONTROL_CLOCKSTOPCLEAR_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_SHIFT 0x1 +#define MCP_CONTROL_BLOCKWAKEUP_SHIFT 0x0 +#define MCP_CONTROL_BLOCKWAKEUP_MASK 0x1 + + +#define MCP_SLAVEINTMASK0_MASK 0xFFFFFFFF +#define MCP_SLAVEINTMASK1_MASK 0x0000FFFF + +#define SDW_CNL_MCP_CMDCTRL 0x8 +#define SDW_CNL_MCP_SSPSTAT 0xC +#define SDW_CNL_MCP_FRAMESHAPE 0x10 +#define SDW_CNL_MCP_FRAMESHAPEINIT 0x14 +#define SDW_CNL_MCP_CONFIGUPDATE 0x18 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT 0x0 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_MASK 0x1 + +#define SDW_CNL_MCP_PHYCTRL 0x1C +#define SDW_CNL_MCP_SSPCTRL0 0x20 +#define SDW_CNL_MCP_SSPCTRL1 0x28 +#define SDW_CNL_MCP_CLOCKCTRL0 0x30 +#define SDW_CNL_MCP_CLOCKCTRL1 0x38 +#define SDW_CNL_MCP_STAT 0x40 +#define SDW_CNL_MCP_INTSTAT 0x44 +#define MCP_INTSTAT_IRQ_SHIFT 31 +#define MCP_INTSTAT_IRQ_MASK 1 +#define MCP_INTSTAT_WAKEUP_SHIFT 16 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT 12 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK 0xF +#define MCP_INTSTAT_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTSTAT_SLAVEATTACHED_SHIFT 13 +#define MCP_INTSTAT_SLAVEALERT_SHIFT 14 +#define MCP_INTSTAT_SLAVERESERVED_SHIFT 15 + 
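
The MCP MASK/SHIFT pairs above are consumed with a plain read-modify-write pattern throughout sdw_cnl.c (compare cnl_sdw_mon_handover() earlier in this patch). A minimal sketch of that pattern, assuming the driver's cnl_sdw_reg_readl()/cnl_sdw_reg_writel() accessors and struct cnl_sdw_data as used elsewhere in the file; the helper name is hypothetical and the block is illustrative only, not part of the patch:

/* Illustrative only: set/clear the BRELENABLE field of MCP_CONFIG. */
static void cnl_sdw_set_brel_example(struct cnl_sdw_data *data, bool enable)
{
	int mcp_config;

	/* Read the current MCP_CONFIG value */
	mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG);

	/* Set or clear the field using its MASK/SHIFT pair */
	if (enable)
		mcp_config |= MCP_CONFIG_BRELENABLE_MASK <<
				MCP_CONFIG_BRELENABLE_SHIFT;
	else
		mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK <<
				MCP_CONFIG_BRELENABLE_SHIFT);

	/* Write the updated value back */
	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config);
}
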
+#define MCP_INTSTAT_DPPDIINT_SHIFT 11 +#define MCP_INTSTAT_DPPDIINTMASK 0x1 +#define MCP_INTSTAT_CONTROLBUSCLASH_SHIFT 10 +#define MCP_INTSTAT_CONTROLBUSCLASH_MASK 0x1 +#define MCP_INTSTAT_DATABUSCLASH_SHIFT 9 +#define MCP_INTSTAT_DATABUSCLASH_MASK 0x1 +#define MCP_INTSTAT_CMDERR_SHIFT 7 +#define MCP_INTSTAT_CMDERR_MASK 0x1 +#define MCP_INTSTAT_TXE_SHIFT 1 +#define MCP_INTSTAT_TXE_MASK 0x1 +#define MCP_INTSTAT_RXWL_SHIFT 2 +#define MCP_INTSTAT_RXWL_MASK 1 + +#define SDW_CNL_MCP_INTMASK 0x48 +#define MCP_INTMASK_IRQEN_SHIFT 31 +#define MCP_INTMASK_IRQEN_MASK 0x1 +#define MCP_INTMASK_WAKEUP_SHIFT 16 +#define MCP_INTMASK_WAKEUP_MASK 0x1 +#define MCP_INTMASK_SLAVERESERVED_SHIFT 15 +#define MCP_INTMASK_SLAVERESERVED_MASK 0x1 +#define MCP_INTMASK_SLAVEALERT_SHIFT 14 +#define MCP_INTMASK_SLAVEALERT_MASK 0x1 +#define MCP_INTMASK_SLAVEATTACHED_SHIFT 13 +#define MCP_INTMASK_SLAVEATTACHED_MASK 0x1 +#define MCP_INTMASK_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTMASK_SLAVENOTATTACHED_MASK 0x1 +#define MCP_INTMASK_DPPDIINT_SHIFT 11 +#define MCP_INTMASK_DPPDIINT_MASK 0x1 +#define MCP_INTMASK_CONTROLBUSCLASH_SHIFT 10 +#define MCP_INTMASK_CONTROLBUSCLASH_MASK 1 +#define MCP_INTMASK_DATABUSCLASH_SHIFT 9 +#define MCP_INTMASK_DATABUSCLASH_MASK 1 +#define MCP_INTMASK_CMDERR_SHIFT 7 +#define MCP_INTMASK_CMDERR_MASK 0x1 +#define MCP_INTMASK_TXE_SHIFT 1 +#define MCP_INTMASK_TXE_MASK 0x1 +#define MCP_INTMASK_RXWL_SHIFT 2 +#define MCP_INTMASK_RXWL_MASK 0x1 + +#define SDW_CNL_MCP_INTSET 0x4C +#define SDW_CNL_MCP_STAT 0x40 +#define MCP_STAT_ACTIVE_BANK_MASK 0x1 +#define MCP_STAT_ACTIVE_BANK_SHIT 20 +#define MCP_STAT_CLOCKSTOPPED_MASK 0x1 +#define MCP_STAT_CLOCKSTOPPED_SHIFT 16 + +#define SDW_CNL_MCP_SLAVESTAT 0x50 +#define MCP_SLAVESTAT_MASK 0x3 + +#define SDW_CNL_MCP_SLAVEINTSTAT0 0x54 +#define MCP_SLAVEINTSTAT_NOT_PRESENT_MASK 0x1 +#define MCP_SLAVEINTSTAT_ATTACHED_MASK 0x2 +#define MCP_SLAVEINTSTAT_ALERT_MASK 0x4 +#define MCP_SLAVEINTSTAT_RESERVED_MASK 0x8 + +#define SDW_CNL_MCP_SLAVEINTSTAT1 0x58 +#define SDW_CNL_MCP_SLAVEINTMASK0 0x5C +#define SDW_CNL_MCP_SLAVEINTMASK1 0x60 +#define SDW_CNL_MCP_PORTINTSTAT 0x64 +#define SDW_CNL_MCP_PDISTAT 0x6C + +#define SDW_CNL_MCP_FIFOLEVEL 0x78 +#define SDW_CNL_MCP_FIFOSTAT 0x7C +#define MCP_RX_FIFO_AVAIL_MASK 0x3F +#define SDW_CNL_MCP_COMMAND_BASE 0x80 +#define SDW_CNL_MCP_RESPONSE_BASE 0x80 +#define SDW_CNL_MCP_COMMAND_LENGTH 0x20 + +#define MCP_COMMAND_SSP_TAG_MASK 0x1 +#define MCP_COMMAND_SSP_TAG_SHIFT 31 +#define MCP_COMMAND_COMMAND_MASK 0x7 +#define MCP_COMMAND_COMMAND_SHIFT 28 +#define MCP_COMMAND_DEV_ADDR_MASK 0xF +#define MCP_COMMAND_DEV_ADDR_SHIFT 24 +#define MCP_COMMAND_REG_ADDR_H_MASK 0x7 +#define MCP_COMMAND_REG_ADDR_H_SHIFT 16 +#define MCP_COMMAND_REG_ADDR_L_MASK 0xFF +#define MCP_COMMAND_REG_ADDR_L_SHIFT 8 +#define MCP_COMMAND_REG_DATA_MASK 0xFF +#define MCP_COMMAND_REG_DATA_SHIFT 0x0 + +#define MCP_RESPONSE_RDATA_MASK 0xFF +#define MCP_RESPONSE_RDATA_SHIFT 8 +#define MCP_RESPONSE_ACK_MASK 0x1 +#define MCP_RESPONSE_ACK_SHIFT 0 +#define MCP_RESPONSE_NACK_MASK 0x2 + +#define SDW_CNL_DPN_CONFIG0 0x100 +#define SDW_CNL_DPN_CHANNELEN0 0x104 +#define SDW_CNL_DPN_SAMPLECTRL0 0x108 +#define SDW_CNL_DPN_OFFSETCTRL0 0x10C +#define SDW_CNL_DPN_HCTRL0 0x110 +#define SDW_CNL_DPN_ASYNCCTRL0 0x114 + +#define SDW_CNL_DPN_CONFIG1 0x118 +#define SDW_CNL_DPN_CHANNELEN1 0x11C +#define SDW_CNL_DPN_SAMPLECTRL1 0x120 +#define SDW_CNL_DPN_OFFSETCTRL1 0x124 +#define SDW_CNL_DPN_HCTRL1 0x128 + +#define SDW_CNL_PORTCTRL 0x130 +#define PORTCTRL_PORT_DIRECTION_SHIFT 0x7 +#define 
PORTCTRL_PORT_DIRECTION_MASK 0x1 +#define PORTCTRL_BANK_INVERT_SHIFT 0x8 +#define PORTCTRL_BANK_INVERT_MASK 0x1 + +#define SDW_CNL_PDINCONFIG0 0x1100 +#define SDW_CNL_PDINCONFIG1 0x1108 +#define PDINCONFIG_CHANNEL_MASK_SHIFT 0x8 +#define PDINCONFIG_CHANNEL_MASK_MASK 0xFF +#define PDINCONFIG_PORT_NUMBER_SHIFT 0x0 +#define PDINCONFIG_PORT_NUMBER_MASK 0x1F +#define PDINCONFIG_PORT_SOFT_RESET_SHIFT 0x18 +#define PDINCONFIG_PORT_SOFT_RESET 0x1F + +#define DPN_CONFIG_WL_SHIFT 0x8 +#define DPN_CONFIG_WL_MASK 0x1F +#define DPN_CONFIG_PF_MODE_SHIFT 0x0 +#define DPN_CONFIG_PF_MODE_MASK 0x3 +#define DPN_CONFIG_PD_MODE_SHIFT 0x2 +#define DPN_CONFIG_PD_MODE_MASK 0x3 +#define DPN_CONFIG_BPM_MASK 0x1 +#define DPN_CONFIG_BPM_SHIFT 0x12 +#define DPN_CONFIG_BGC_MASK 0x3 +#define DPN_CONFIG_BGC_SHIFT 0x10 + +#define DPN_SAMPLECTRL_SI_MASK 0xFFFF +#define DPN_SAMPLECTRL_SI_SHIFT 0x0 + +#define DPN_OFFSETCTRL0_OF1_MASK 0xFF +#define DPN_OFFSETCTRL0_OF1_SHIFT 0x0 +#define DPN_OFFSETCTRL0_OF2_MASK 0xFF +#define DPN_OFFSETCTRL0_OF2_SHIFT 0x8 + +#define DPN_HCTRL_HSTOP_MASK 0xF +#define DPN_HCTRL_HSTOP_SHIFT 0x0 +#define DPN_HCTRL_HSTART_MASK 0xF +#define DPN_HCTRL_HSTART_SHIFT 0x4 +#define DPN_HCTRL_LCONTROL_MASK 0x7 +#define DPN_HCTRL_LCONTROL_SHIFT 0x8 + +/* SoundWire Shim registers */ +#define SDW_CNL_LCAP 0x0 +#define SDW_CNL_LCTL 0x4 +#define CNL_LCTL_CPA_SHIFT 8 +#define CNL_LCTL_SPA_SHIFT 0 +#define CNL_LCTL_CPA_MASK 0x1 +#define CNL_LCTL_SPA_MASK 0x1 + +#define SDW_CMDSYNC_SET_MASK 0xF0000 +#define SDW_CNL_IPPTR 0x8 +#define SDW_CNL_SYNC 0xC +#define CNL_SYNC_CMDSYNC_MASK 0x1 +#define CNL_SYNC_CMDSYNC_SHIFT 16 +#define CNL_SYNC_SYNCGO_MASK 0x1 +#define CNL_SYNC_SYNCGO_SHIFT 0x18 +#define CNL_SYNC_SYNCPRD_MASK 0x7FFF +#define CNL_SYNC_SYNCPRD_SHIFT 0x0 +#define CNL_SYNC_SYNCCPU_MASK 0x8000 +#define CNL_SYNC_SYNCCPU_SHIFT 0xF + +#define SDW_CNL_CTLSCAP 0x10 +#define SDW_CNL_CTLS0CM 0x12 +#define SDW_CNL_CTLS1CM 0x14 +#define SDW_CNL_CTLS2CM 0x16 +#define SDW_CNL_CTLS3CM 0x18 + +#define SDW_CNL_PCMSCAP 0x20 +#define CNL_PCMSCAP_BSS_SHIFT 8 +#define CNL_PCMSCAP_BSS_MASK 0x1F +#define CNL_PCMSCAP_OSS_SHIFT 4 +#define CNL_PCMSCAP_OSS_MASK 0xF +#define CNL_PCMSCAP_ISS_SHIFT 0 +#define CNL_PCMSCAP_ISS_MASK 0xF + +#define SDW_CNL_PCMSCHM 0x22 +#define CNL_PCMSYCM_DIR_SHIFT 15 +#define CNL_PCMSYCM_DIR_MASK 0x1 +#define CNL_PCMSYCM_STREAM_SHIFT 8 +#define CNL_PCMSYCM_STREAM_MASK 0x3F +#define CNL_PCMSYCM_HCHAN_SHIFT 4 +#define CNL_PCMSYCM_HCHAN_MASK 0xF +#define CNL_PCMSYCM_LCHAN_SHIFT 0 +#define CNL_PCMSYCM_LCHAN_MASK 0xF + +#define SDW_CNL_PCMSCHC 0x42 + +#define SDW_CNL_PDMSCAP 0x62 +#define CNL_PDMSCAP_BSS_SHIFT 8 +#define CNL_PDMSCAP_BSS_MASK 0x1F +#define CNL_PDMSCAP_OSS_SHIFT 4 +#define CNL_PDMSCAP_OSS_MASK 0xF +#define CNL_PDMSCAP_ISS_SHIFT 0 +#define CNL_PDMSCAP_ISS_MASK 0xF +#define CNL_PDMSCAP_CPSS_SHIFT 13 +#define CNL_PDMSCAP_CPSS_MASK 0x7 +#define SDW_CNL_PDMSCM + +#define SDW_CNL_IOCTL 0x6C +#define CNL_IOCTL_MIF_SHIFT 0x0 +#define CNL_IOCTL_MIF_MASK 0x1 +#define CNL_IOCTL_CO_SHIFT 0x1 +#define CNL_IOCTL_CO_MASK 0x1 +#define CNL_IOCTL_COE_SHIFT 0x2 +#define CNL_IOCTL_COE_MASK 0x1 +#define CNL_IOCTL_DO_SHIFT 0x3 +#define CNL_IOCTL_DO_MASK 0x1 +#define CNL_IOCTL_DOE_SHIFT 0x4 +#define CNL_IOCTL_DOE_MASK 0x1 +#define CNL_IOCTL_BKE_SHIFT 0x5 +#define CNL_IOCTL_BKE_MASK 0x1 +#define CNL_IOCTL_WPDD_SHIFT 0x6 +#define CNL_IOCTL_WPDD_MASK 0x1 +#define CNL_IOCTL_CIBD_SHIFT 0x8 +#define CNL_IOCTL_CIBD_MASK 0x1 +#define CNL_IOCTL_DIBD_SHIFT 0x9 +#define CNL_IOCTL_DIBD_MASK 0x1 + +#define SDW_CNL_CTMCTL_OFFSET 
0x60 +#define SDW_CNL_CTMCTL 0x6E +#define CNL_CTMCTL_DACTQE_SHIFT 0x0 +#define CNL_CTMCTL_DACTQE_MASK 0x1 +#define CNL_CTMCTL_DODS_SHIFT 0x1 +#define CNL_CTMCTL_DODS_MASK 0x1 +#define CNL_CTMCTL_DOAIS_SHIFT 0x3 +#define CNL_CTMCTL_DOAIS_MASK 0x3 + +#define ALH_CNL_STRMZCFG_BASE 0x4 +#define ALH_CNL_STRMZCFG_OFFSET 0x4 +#define CNL_STRMZCFG_DMAT_SHIFT 0x0 +#define CNL_STRMZCFG_DMAT_MASK 0xFF +#define CNL_STRMZCFG_DMAT_VAL 0x3 +#define CNL_STRMZCFG_CHAN_SHIFT 16 +#define CNL_STRMZCFG_CHAN_MASK 0xF + +#define SDW_BRA_HEADER_SIZE_PDI 12 /* In bytes */ +#define SDW_BRA_HEADER_CRC_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_DATA_CRC_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_HEADER_RESP_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_FOOTER_RESP_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_PADDING_SZ_PDI 4 /* In bytes */ +#define SDW_BRA_HEADER_TOTAL_SZ_PDI 16 /* In bytes */ + +#define SDW_BRA_SOP_EOP_PDI_STRT_VALUE 0x4 +#define SDW_BRA_SOP_EOP_PDI_END_VALUE 0x2 +#define SDW_BRA_SOP_EOP_PDI_MASK 0x1F +#define SDW_BRA_SOP_EOP_PDI_SHIFT 5 + +#define SDW_BRA_STRM_ID_BLK_OUT 3 +#define SDW_BRA_STRM_ID_BLK_IN 4 + +#define SDW_BRA_PDI_TX_ID 0 +#define SDW_BRA_PDI_RX_ID 1 + +#define SDW_BRA_SOFT_RESET 0x1 +#define SDW_BRA_BULK_ENABLE 1 +#define SDW_BRA_BLK_EN_MASK 0xFFFEFFFF +#define SDW_BRA_BLK_EN_SHIFT 16 + +#define SDW_BRA_ROLLINGID_PDI_INDX 3 +#define SDW_BRA_ROLLINGID_PDI_MASK 0xF +#define SDW_BRA_ROLLINGID_PDI_SHIFT 0 + +#define SDW_PCM_STRM_START_INDEX 0x2 + +#endif /* _LINUX_SDW_CNL_H */ diff --git a/drivers/sdw/sdw_maxim.c b/drivers/sdw/sdw_maxim.c new file mode 100644 index 000000000000..7f2844a7c9c1 --- /dev/null +++ b/drivers/sdw/sdw_maxim.c @@ -0,0 +1,146 @@ +/* + * sdw_maxim.c -- Maxim SoundWire slave device driver. Dummy driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include + + +static int maxim_register_sdw_capabilties(struct sdw_slave *sdw, + const struct sdw_slave_id *sdw_id) +{ + struct sdw_slv_capabilities cap; + struct sdw_slv_dpn_capabilities *dpn_cap = NULL; + struct port_audio_mode_properties *prop = NULL; + int i, j; + + cap.wake_up_unavailable = true; + cap.test_mode_supported = false; + cap.clock_stop1_mode_supported = false; + cap.simplified_clock_stop_prepare = false; + cap.highphy_capable = true; + cap.paging_supported = false; + cap.bank_delay_support = false; + cap.port_15_read_behavior = 0; + cap.sdw_dp0_supported = false; + cap.num_of_sdw_ports = 3; + cap.sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap.num_of_sdw_ports), GFP_KERNEL); + for (i = 0; i < cap.num_of_sdw_ports; i++) { + dpn_cap = &cap.sdw_dpn_cap[i]; + if (i == 0 || i == 2) + dpn_cap->port_direction = SDW_PORT_SOURCE; + else + dpn_cap->port_direction = SDW_PORT_SINK; + + dpn_cap->port_number = i+1; + dpn_cap->max_word_length = 24; + dpn_cap->min_word_length = 16; + dpn_cap->num_word_length = 0; + dpn_cap->word_length_buffer = NULL; + dpn_cap->dpn_type = SDW_FULL_DP; + dpn_cap->dpn_grouping = SDW_BLOCKGROUPCOUNT_1; + dpn_cap->prepare_ch = SDW_CP_SM; + dpn_cap->imp_def_intr_mask = 0x0; + dpn_cap->min_ch_num = 1; + dpn_cap->max_ch_num = 2; + dpn_cap->num_ch_supported = 0; + dpn_cap->ch_supported = NULL; + dpn_cap->port_flow_mode_mask = SDW_PORT_FLOW_MODE_ISOCHRONOUS; + dpn_cap->block_packing_mode_mask = + SDW_PORT_BLK_PKG_MODE_BLK_PER_PORT_MASK | + SDW_PORT_BLK_PKG_MODE_BLK_PER_CH_MASK; + dpn_cap->port_encoding_type_mask = + SDW_PORT_ENCODING_TYPE_TWOS_CMPLMNT | + SDW_PORT_ENCODING_TYPE_SIGN_MAGNITUDE | + SDW_PORT_ENCODING_TYPE_IEEE_32_FLOAT; + dpn_cap->num_audio_modes = 1; + + dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + for (j = 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + prop->max_frequency = 16000000; + prop->min_frequency = 1000000; + prop->num_freq_configs = 0; + prop->freq_supported = NULL; + prop->glitchless_transitions_mask = 0x1; + prop->max_sampling_frequency = 192000; + prop->min_sampling_frequency = 8000; + prop->num_sampling_freq_configs = 0; + prop->sampling_freq_config = NULL; + prop->ch_prepare_behavior = SDW_CH_PREP_ANY_TIME; + } + } + return sdw_register_slave_capabilities(sdw, &cap); + +} +static int maxim_sdw_probe(struct sdw_slave *sdw, + const struct sdw_slave_id *sdw_id) +{ + dev_info(&sdw->dev, "Maxim SoundWire Slave Registered %lx\n", sdw_id->driver_data); + return maxim_register_sdw_capabilties(sdw, sdw_id); +} + +static int maxim_sdw_remove(struct sdw_slave *sdw) +{ + dev_info(&sdw->dev, "Maxim SoundWire Slave un-Registered\n"); + return 0; +} + +static const struct sdw_slave_id maxim_id[] = { + {"03:01:9f:79:00:00", 0}, + {"09:01:9f:79:00:00", 1}, + {"04:01:9f:79:00:00", 2}, + {"0a:01:9f:79:00:00", 3}, + {"04:01:9f:79:00:00", 4}, + {"0a:01:9f:79:00:00", 5}, + {"05:01:9f:79:00:00", 6}, + {"06:01:9f:79:00:00", 7}, + {"05:01:9f:79:00:00", 8}, + {"00:01:9f:79:00:00", 9}, + {"06:01:9f:79:00:00", 10}, + {"07:01:9f:79:00:00", 11}, + {"00:01:9f:79:00:00", 12}, + {"06:01:9f:79:00:00", 13}, + {"01:01:9f:79:00:00", 14}, + {"07:01:9f:79:00:00", 15}, + {"08:01:9f:79:00:00", 16}, + {"01:01:9f:79:00:00", 17}, + {"07:01:9f:79:00:00", 18}, + {"02:01:9f:79:00:00", 19}, + {"08:01:9f:79:00:00", 20}, + {"09:01:9f:79:00:00", 21}, + {"02:01:9f:79:00:00", 22}, 
+ {"08:01:9f:79:00:00", 23}, + {"03:01:9f:79:00:00", 24}, + {"09:01:9f:79:00:00", 25}, + {"0a:01:9f:79:00:00", 26}, + {}, +}; + +MODULE_DEVICE_TABLE(sdw, maxim_id); + +static struct sdw_slave_driver maxim_sdw_driver = { + .driver_type = SDW_DRIVER_TYPE_SLAVE, + .driver = { + .name = "maxim", + }, + .probe = maxim_sdw_probe, + .remove = maxim_sdw_remove, + .id_table = maxim_id, +}; + +module_sdw_slave_driver(maxim_sdw_driver); + +MODULE_DESCRIPTION("SoundWire Maxim Slave Driver"); +MODULE_AUTHOR("Hardik Shah, "); +MODULE_LICENSE("GPL"); diff --git a/drivers/sdw/sdw_priv.h b/drivers/sdw/sdw_priv.h new file mode 100644 index 000000000000..b165d7262c7a --- /dev/null +++ b/drivers/sdw/sdw_priv.h @@ -0,0 +1,280 @@ +/* + * sdw_priv.h - Private definition for sdw bus interface. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_PRIV_H +#define _LINUX_SDW_PRIV_H + +#include /* For kthread */ +#include + +#define SDW_MAX_STREAM_TAG_KEY_SIZE 80 +#define SDW_NUM_STREAM_TAGS 100 +#define MAX_NUM_ROWS 23 /* As per SDW Spec */ +#define MAX_NUM_COLS 8/* As per SDW Spec */ +#define MAX_NUM_ROW_COLS (MAX_NUM_ROWS * MAX_NUM_COLS) + +#define SDW_STATE_INIT_STREAM_TAG 0x1 +#define SDW_STATE_ALLOC_STREAM 0x2 +#define SDW_STATE_CONFIG_STREAM 0x3 +#define SDW_STATE_PREPARE_STREAM 0x4 +#define SDW_STATE_ENABLE_STREAM 0x5 +#define SDW_STATE_DISABLE_STREAM 0x6 +#define SDW_STATE_UNPREPARE_STREAM 0x7 +#define SDW_STATE_RELEASE_STREAM 0x8 +#define SDW_STATE_FREE_STREAM 0x9 +#define SDW_STATE_FREE_STREAM_TAG 0xA +#define SDW_STATE_ONLY_XPORT_STREAM 0xB + +#define SDW_STATE_INIT_RT 0x1 +#define SDW_STATE_CONFIG_RT 0x2 +#define SDW_STATE_PREPARE_RT 0x3 +#define SDW_STATE_ENABLE_RT 0x4 +#define SDW_STATE_DISABLE_RT 0x5 +#define SDW_STATE_UNPREPARE_RT 0x6 +#define SDW_STATE_RELEASE_RT 0x7 + +#define SDW_SLAVE_BDCAST_ADDR 15 + +struct sdw_runtime; +/* Defined in sdw.c, used by multiple files of module */ +extern struct sdw_core sdw_core; + +enum sdw_port_state { + SDW_PORT_STATE_CH_READY, + SDW_PORT_STATE_CH_STOPPED, + SDW_PORT_STATE_CH_PREPARING, + SDW_PORT_STATE_CH_DEPREPARING, +}; + +enum sdw_stream_state { + SDW_STREAM_ALLOCATED, + SDW_STREAM_FREE, + SDW_STREAM_ACTIVE, + SDW_STREAM_INACTIVE, +}; + +enum sdw_clk_state { + SDW_CLK_STATE_OFF = 0, + SDW_CLK_STATE_ON = 1, +}; + +enum sdw_update_bs_state { + SDW_UPDATE_BS_PRE, + SDW_UPDATE_BS_BNKSWTCH, + SDW_UPDATE_BS_POST, + SDW_UPDATE_BS_BNKSWTCH_WAIT, + SDW_UPDATE_BS_DIS_CHN, +}; + +enum sdw_port_en_state { + SDW_PORT_STATE_PREPARE, + SDW_PORT_STATE_ENABLE, + SDW_PORT_STATE_DISABLE, + SDW_PORT_STATE_UNPREPARE, +}; + +struct port_chn_en_state { + bool is_activate; + bool is_bank_sw; +}; + +struct temp_elements { + int rate; + int full_bw; + int payload_bw; + int hwidth; +}; + +struct sdw_stream_tag { + int stream_tag; + struct mutex stream_lock; + int ref_count; + enum sdw_stream_state stream_state; + char 
key[SDW_MAX_STREAM_TAG_KEY_SIZE]; + struct sdw_runtime *sdw_rt; +}; + +struct sdw_stream_params { + unsigned int rate; + unsigned int channel_count; + unsigned int bps; +}; + +struct sdw_port_runtime { + int port_num; + enum sdw_port_state port_state; + int channel_mask; + /* Frame params and stream params are per port based + * Single stream of audio may be split + * into mutliple port each handling + * subset of channels, channels should + * be contiguous in subset + */ + struct sdw_transport_params transport_params; + struct sdw_port_params port_params; + struct list_head port_node; +}; + +struct sdw_slave_runtime { + /* Simplified port or full port, there cannot be both types of + * data port for single stream, so data structure is kept per + * slave runtime, not per port + */ + enum sdw_dpn_type type; + struct sdw_slave *slave; + int direction; + /* Stream may be split into multiple slaves, so this is for + * this particular slave + */ + struct sdw_stream_params stream_params; + struct list_head port_rt_list; + struct list_head slave_sdw_node; + struct list_head slave_node; + int rt_state; /* State of runtime structure */ + +}; + + +struct sdw_mstr_runtime { + struct sdw_master *mstr; + int direction; + /* Stream may be split between multiple masters so this + * is for invidual master, if stream is split into multiple + * streams. For calculating the bandwidth on the particular bus + * stream params of master is taken into account. + */ + struct sdw_stream_params stream_params; + struct list_head port_rt_list; + /* Two nodes are required because BW calculation is based on master + * while stream enabling is based on stream_tag, where multiple + * masters may be involved + */ + struct list_head mstr_sdw_node; /* This is to add mstr_rt in sdw_rt */ + struct list_head mstr_node; /* This is to add mstr_rt in mstr */ + + struct list_head slv_rt_list; + /* Individual stream bandwidth on given master */ + unsigned int stream_bw; + /* State of runtime structure */ + int rt_state; + int hstart; + int hstop; + int block_offset; + int sub_block_offset; +}; + +struct sdw_runtime { + int tx_ref_count; + int rx_ref_count; + /* This is stream params for whole stream + * but stream may be split between two + * masters, or two slaves. + */ + struct sdw_stream_params stream_params; + struct list_head slv_rt_list; + struct list_head mstr_rt_list; + enum sdw_stream_type type; + int stream_state; + int xport_state; + +}; + +struct sdw_slv_status { + struct list_head node; + enum sdw_slave_status status[SOUNDWIRE_MAX_DEVICES]; +}; + +/** Bus structure which handles bus related information */ +struct sdw_bus { + struct list_head bus_node; + struct sdw_master *mstr; + unsigned int port_grp_mask[2]; + unsigned int slave_grp_mask[2]; + unsigned int clk_state; + unsigned int active_bank; + unsigned int clk_freq; + unsigned int clk_div; + /* Bus total Bandwidth. Initialize and reset to zero */ + unsigned int bandwidth; + unsigned int stream_interval; /* Stream Interval */ + unsigned int system_interval; /* Bus System Interval */ + unsigned int frame_freq; + unsigned int col; + unsigned int row; + struct task_struct *status_thread; + struct kthread_worker kworker; + struct kthread_work kwork; + struct list_head status_list; + spinlock_t spinlock; + struct sdw_async_xfer_data async_data; +}; + +/** Holds supported Row-Column combination related information */ +struct sdw_rowcol { + int row; + int col; + int control_bits; + int data_bits; +}; + +/** + * Global soundwire structure. 
It handles all the streams spawned + * across masters and has list of bus structure per every master + * registered + */ +struct sdw_core { + struct sdw_stream_tag stream_tags[SDW_NUM_STREAM_TAGS]; + struct sdw_rowcol rowcolcomb[MAX_NUM_ROW_COLS]; + struct list_head bus_list; + struct mutex core_lock; + struct idr idr; + int first_dynamic_bus_num; +}; + +/* Structure holding mapping of numbers to cols */ +struct sdw_num_to_col { + int num; + int col; +}; + +/* Structure holding mapping of numbers to rows */ +struct sdw_num_to_row { + int num; + int row; +}; + +int sdw_slave_port_config_port_params(struct sdw_slave_runtime *slv_rt); +int sdw_slave_port_prepare(struct sdw_slave_runtime, bool prepare); +int sdw_bus_bw_init(void); +int sdw_mstr_bw_init(struct sdw_bus *sdw_bs); +int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable); +int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare); +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable); +int sdw_chn_enable(void); +void sdw_unlock_mstr(struct sdw_master *mstr); +int sdw_trylock_mstr(struct sdw_master *mstr); +void sdw_lock_mstr(struct sdw_master *mstr); +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data); + +#endif /* _LINUX_SDW_PRIV_H */ diff --git a/drivers/sdw/sdw_utils.c b/drivers/sdw/sdw_utils.c new file mode 100644 index 000000000000..724323d01993 --- /dev/null +++ b/drivers/sdw/sdw_utils.c @@ -0,0 +1,49 @@ +/* + * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Sanyog Kale + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include + + + +/** + * sdw_bus_compute_crc8: SoundWire bus helper function to compute crc8. + * This API uses crc8 helper functions internally. + * + * @values: Data buffer. + * @num_bytes: Number of bytes. 
+ */ +u8 sdw_bus_compute_crc8(u8 *values, u8 num_bytes) +{ + u8 table[256]; + u8 poly = 0x4D; /* polynomial = x^8 + x^6 + x^3 + x^2 + 1 */ + u8 crc = CRC8_INIT_VALUE; /* Initialize 8 bit to 11111111 */ + + /* Populate MSB */ + crc8_populate_msb(table, poly); + + /* CRC computation */ + crc = crc8(table, values, num_bytes, crc); + + return crc; +} +EXPORT_SYMBOL(sdw_bus_compute_crc8); diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index 89f4cf507be6..f2d8c3c53ea4 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -20,8 +20,8 @@ #define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8 #define SOCINFO_MAJOR GENMASK(31, 24) -#define SOCINFO_MINOR GENMASK(23, 16) -#define SOCINFO_PACK GENMASK(15, 8) +#define SOCINFO_PACK GENMASK(23, 16) +#define SOCINFO_MINOR GENMASK(15, 8) #define SOCINFO_MISC GENMASK(7, 0) static const struct meson_gx_soc_id { diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 18eefc3f1abe..0c6065dba48a 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2414,39 +2414,21 @@ struct cgr_comp { struct completion completion; }; -static int qman_delete_cgr_thread(void *p) +static void qman_delete_cgr_smp_call(void *p) { - struct cgr_comp *cgr_comp = (struct cgr_comp *)p; - int ret; - - ret = qman_delete_cgr(cgr_comp->cgr); - complete(&cgr_comp->completion); - - return ret; + qman_delete_cgr((struct qman_cgr *)p); } void qman_delete_cgr_safe(struct qman_cgr *cgr) { - struct task_struct *thread; - struct cgr_comp cgr_comp; - preempt_disable(); if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { - init_completion(&cgr_comp.completion); - cgr_comp.cgr = cgr; - thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, - "cgr_del"); - - if (IS_ERR(thread)) - goto out; - - kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); - wake_up_process(thread); - wait_for_completion(&cgr_comp.completion); + smp_call_function_single(qman_cgr_cpus[cgr->cgrid], + qman_delete_cgr_smp_call, cgr, true); preempt_enable(); return; } -out: + qman_delete_cgr(cgr); preempt_enable(); } diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index c2048382830f..e3df1e96b141 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -522,7 +522,7 @@ struct pmic_wrapper_type { u32 int_en_all; u32 spi_w; u32 wdt_src; - int has_bridge:1; + unsigned int has_bridge:1; int (*init_reg_clock)(struct pmic_wrapper *wrp); int (*init_soc_specific)(struct pmic_wrapper *wrp); }; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index 403bea9d546b..50214b620865 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -496,8 +496,10 @@ static int qcom_smsm_probe(struct platform_device *pdev) if (!smsm->hosts) return -ENOMEM; - local_node = of_find_node_with_property(of_node_get(pdev->dev.of_node), - "#qcom,smem-state-cells"); + for_each_child_of_node(pdev->dev.of_node, local_node) { + if (of_find_property(local_node, "#qcom,smem-state-cells", NULL)) + break; + } if (!local_node) { dev_err(&pdev->dev, "no state entry\n"); return -EINVAL; diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 568e1c65aa82..fe3fa1e8517a 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -79,6 +79,7 @@ #define A3700_SPI_BYTE_LEN BIT(5) #define A3700_SPI_CLK_PRESCALE BIT(0) #define A3700_SPI_CLK_PRESCALE_MASK (0x1f) +#define A3700_SPI_CLK_EVEN_OFFS (0x10) 
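
The A3700_SPI_CLK_EVEN_OFFS offset added just above is used by the prescaler change in the a3700_spi_clock_set() hunk that follows. A small sketch of the resulting encoding, assuming the kernel's DIV_ROUND_UP() macro and masking with A3700_SPI_CLK_PRESCALE_MASK as done by the driver; the helper name is hypothetical and the block is illustrative only, not part of the patch:

/*
 * For requested prescalers above 15 only even dividers can be programmed:
 * the field value becomes A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(n, 2),
 * e.g. 16 -> 0x18, 20 -> 0x1A, 24 -> 0x1C (odd requests round up to the
 * next even divider).
 */
static unsigned int a3700_spi_prescale_field_example(unsigned int prescale)
{
	if (prescale > 15)
		prescale = A3700_SPI_CLK_EVEN_OFFS +
			   DIV_ROUND_UP(prescale, 2);

	return prescale & A3700_SPI_CLK_PRESCALE_MASK;
}
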
#define A3700_SPI_WFIFO_THRS_BIT 28 #define A3700_SPI_RFIFO_THRS_BIT 24 @@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz); + /* For prescaler values over 15, we can only set it by steps of 2. + * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to + * 30. We only use this range from 16 to 30. + */ + if (prescale > 15) + prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2); + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); val = val & ~A3700_SPI_CLK_PRESCALE_MASK; diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index f95da364c283..669470971023 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); /* reset the hardware and block queue progress */ - spin_lock_irq(&as->lock); if (as->use_dma) { atmel_spi_stop_dma(master); atmel_spi_release_dma(master); } + spin_lock_irq(&as->lock); spi_writel(as, CR, SPI_BIT(SWRST)); spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ spi_readl(as, SR); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 6ab4c7700228..68cfc351b47f 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev) static int spi_engine_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct spi_engine *spi_engine = spi_master_get_devdata(master); int irq = platform_get_irq(pdev, 0); @@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev) free_irq(irq, master); + spi_master_put(master); + writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index babb15f07995..d51ca243a028 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1496,12 +1496,23 @@ static int spi_imx_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct spi_imx_data *spi_imx = spi_master_get_devdata(master); + int ret; spi_bitbang_stop(&spi_imx->bitbang); + ret = clk_enable(spi_imx->clk_per); + if (ret) + return ret; + + ret = clk_enable(spi_imx->clk_ipg); + if (ret) { + clk_disable(spi_imx->clk_per); + return ret; + } + writel(0, spi_imx->base + MXC_CSPICTRL); - clk_unprepare(spi_imx->clk_ipg); - clk_unprepare(spi_imx->clk_per); + clk_disable_unprepare(spi_imx->clk_ipg); + clk_disable_unprepare(spi_imx->clk_per); spi_imx_sdma_exit(spi_imx); spi_master_put(master); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0eb1e9583485..2770fbd4ce49 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -55,6 +55,8 @@ struct sh_msiof_spi_priv { void *rx_dma_page; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; + bool native_cs_inited; + bool native_cs_high; bool slave_aborted; }; @@ -528,8 +530,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) { struct device_node *np = spi->master->dev.of_node; struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); - - pm_runtime_get_sync(&p->pdev->dev); + u32 clr, set, tmp; if (!np) { /* @@ -539,19 +540,31 @@ 
static int sh_msiof_spi_setup(struct spi_device *spi) spi->cs_gpio = (uintptr_t)spi->controller_data; } - /* Configure pins before deasserting CS */ - sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL), - !!(spi->mode & SPI_CPHA), - !!(spi->mode & SPI_3WIRE), - !!(spi->mode & SPI_LSB_FIRST), - !!(spi->mode & SPI_CS_HIGH)); - - if (spi->cs_gpio >= 0) + if (spi->cs_gpio >= 0) { gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); + return 0; + } + if (spi_controller_is_slave(p->master)) + return 0; - pm_runtime_put(&p->pdev->dev); + if (p->native_cs_inited && + (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH))) + return 0; + /* Configure native chip select mode/polarity early */ + clr = MDR1_SYNCMD_MASK; + set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI; + if (spi->mode & SPI_CS_HIGH) + clr |= BIT(MDR1_SYNCAC_SHIFT); + else + set |= BIT(MDR1_SYNCAC_SHIFT); + pm_runtime_get_sync(&p->pdev->dev); + tmp = sh_msiof_read(p, TMDR1) & ~clr; + sh_msiof_write(p, TMDR1, tmp | set); + pm_runtime_put(&p->pdev->dev); + p->native_cs_high = spi->mode & SPI_CS_HIGH; + p->native_cs_inited = true; return 0; } @@ -784,11 +797,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, goto stop_dma; } - /* wait for tx fifo to be emptied / rx fifo to be filled */ + /* wait for tx/rx DMA completion */ ret = sh_msiof_wait_for_completion(p); if (ret) goto stop_reset; + if (!rx) { + reinit_completion(&p->done); + sh_msiof_write(p, IER, IER_TEOFE); + + /* wait for tx fifo to be emptied */ + ret = sh_msiof_wait_for_completion(p); + if (ret) + goto stop_reset; + } + /* clear status bits */ sh_msiof_reset_str(p); @@ -900,7 +923,7 @@ static int sh_msiof_transfer_one(struct spi_master *master, break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 1) + if (l & 3) break; copy32 = copy_wswap32; } else { diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index c5cd635c28f3..41410031f8e9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -525,7 +525,7 @@ static int sun4i_spi_probe(struct platform_device *pdev) static int sun4i_spi_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); return 0; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index fb38234249a8..8533f4edd00a 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -541,7 +541,7 @@ static int sun6i_spi_probe(struct platform_device *pdev) static int sun6i_spi_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); return 0; } diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index bc7100b93dfc..e0b9fe1d0e37 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) while (remaining_words) { int n_words, tx_words, rx_words; u32 sr; + int stalled; n_words = min(remaining_words, xspi->buffer_size); @@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) /* Read out all the data from the Rx FIFO */ rx_words = n_words; + stalled = 10; while (rx_words) { + if (rx_words == n_words && !(stalled--) && + !(sr & XSPI_SR_TX_EMPTY_MASK) && + (sr & XSPI_SR_RX_EMPTY_MASK)) { + dev_err(&spi->dev, + "Detected stall. 
Check C_SPI_MODE and C_SPI_MEMORY\n"); + xspi_init_hw(xspi); + return -EIO; + } + if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) { xilinx_spi_rx(xspi); rx_words--; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e8b5a5e21b2e..3ff0ee88c467 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2245,11 +2245,12 @@ static int __unregister(struct device *dev, void *null) void spi_unregister_controller(struct spi_controller *ctlr) { struct spi_controller *found; + int id = ctlr->bus_num; int dummy; /* First make sure that this controller was ever added */ mutex_lock(&board_lock); - found = idr_find(&spi_master_idr, ctlr->bus_num); + found = idr_find(&spi_master_idr, id); mutex_unlock(&board_lock); if (found != ctlr) { dev_dbg(&ctlr->dev, @@ -2269,7 +2270,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_unregister(&ctlr->dev); /* free bus id */ mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); + idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 71a50b99caff..7d32d119d3dc 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -14,7 +14,54 @@ config ASHMEM It is, in theory, a good memory allocator for low-memory devices, because it can discard shared memory units when under memory pressure. +config ANDROID_LOW_MEMORY_KILLER + bool "Android Low Memory Killer" + ---help--- + Registers processes to be killed when low memory conditions, this is useful + as there is no particular swap space on android. + + The registered process will kill according to the priorities in android init + scripts (/init.rc), and it defines priority values with minimum free memory size + for each priority. + +config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES + bool "Android Low Memory Killer: detect oom_adj values" + depends on ANDROID_LOW_MEMORY_KILLER + default y + ---help--- + Detect oom_adj values written to + /sys/module/lowmemorykiller/parameters/adj and convert them + to oom_score_adj values. + +config SYNC + bool "Synchronization framework" + default n + ---help--- + This option enables the framework for synchronization between multiple + drivers. Sync implementations can take advantage of hardware + synchronization built into devices like GPUs. + +config ANDROID_FWDATA + tristate "Parser for Android-specific firmware data" + depends on ACPI + default n + ---help--- + This driver parses Android-specific data (e.g. fstab configuration) + stored in firmware (e.g. ACPI tables), and present it to user space + via sysfs. Android Oreo (8.0) and later requires some essential boot- + time configuration to be available in a directory structure organized + in Device Tree style, e.g. /proc/device-tree/firmware/android/ on + platforms that enable DT. Platforms that use ACPI instead of DT, such + as Goldfish (Ranchu) x86/x86_64, should enable this driver to ensure + the required information can be found in sysfs with the expected + layout. 
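
For completeness, a minimal config fragment enabling the Android options introduced in this Kconfig hunk; illustrative only, with option names taken from the entries above and CONFIG_ACPI assumed enabled for ANDROID_FWDATA (which is tristate, so =m also works):

# Illustrative defconfig fragment (not part of the patch)
CONFIG_ANDROID=y
CONFIG_ACPI=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
CONFIG_ANDROID_FWDATA=y
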
+ source "drivers/staging/android/ion/Kconfig" +source "drivers/staging/android/abl/Kconfig" +source "drivers/staging/android/sbl/Kconfig" +source "drivers/staging/android/vsbl/Kconfig" + +source "drivers/staging/android/fiq_debugger/Kconfig" endif # if ANDROID diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 7cf1564a49a5..41a5e152c3d0 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,5 +1,11 @@ ccflags-y += -I$(src) # needed for trace events obj-y += ion/ +obj-$(CONFIG_ABL_BOOTLOADER_CONTROL) += abl/ +obj-$(CONFIG_SBL_BOOTLOADER_CONTROL) += sbl/ +obj-$(CONFIG_VSBL_BOOTLOADER_CONTROL) += vsbl/ +obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/ obj-$(CONFIG_ASHMEM) += ashmem.o +obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o +obj-$(CONFIG_ANDROID_FWDATA) += fwdata.o diff --git a/drivers/staging/android/abl/Kconfig b/drivers/staging/android/abl/Kconfig new file mode 100644 index 000000000000..a89ac5b5efb1 --- /dev/null +++ b/drivers/staging/android/abl/Kconfig @@ -0,0 +1,8 @@ +config ABL_BOOTLOADER_CONTROL + tristate "ABL Bootloader Control module" + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL Action + is written in CMOS data, in order to be processed by ABL on + reboot. diff --git a/drivers/staging/android/abl/Makefile b/drivers/staging/android/abl/Makefile new file mode 100644 index 000000000000..b70a05a2af6d --- /dev/null +++ b/drivers/staging/android/abl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ABL_BOOTLOADER_CONTROL) += ablbc.o diff --git a/drivers/staging/android/abl/ablbc.c b/drivers/staging/android/abl/ablbc.c new file mode 100644 index 000000000000..7b15fa39a43d --- /dev/null +++ b/drivers/staging/android/abl/ablbc.c @@ -0,0 +1,411 @@ +/* + * ablbc: control ABL bootloaders + * Copyright (c) 2013-2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "ablbc" + +/* RTC read and write */ +static inline unsigned char cmos_read_ext_bank(u8 addr) +{ + outb(addr, RTC_PORT(4)); + return inb(RTC_PORT(5)); +} +#define CMOS_READ_EXT(a) cmos_read_ext_bank(a) + +static inline void cmos_write_ext_bank(u8 val, u8 addr) +{ + outb(addr, RTC_PORT(4)); + outb(val, RTC_PORT(5)); +} +#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a) + +/* ABL Conventions */ +#define NVRAM_START_ADDRESS 0x10 + +#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f)) +#define USERCMD_END _USERCMD_(0, 0) +#define USERCMD_ACTION _USERCMD_(7, 1) +#define USERCMD_UPDATE_IFWI(len) _USERCMD_(2, len) + +#define CDATA_TAG_USER_CMD 0x4d +#define NVRAM_VALID_FLAG 0x12 + +#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */ + +static bool capsule_request; + +union _cdata_header { + uint32_t data; + struct { + unsigned ncond : 2; + unsigned length : 10; + unsigned flags : 4; + unsigned version: 4; + unsigned tag : 12; + }; +}; + +struct nvram_capsule_cmd { + char action; + char device; + char partition; + char file_name[1]; +} __packed; + +struct nvram_reboot_cmd { + char action; + char target; + char end; + char padding; +} __packed; + +struct name2id { + const char *name; + int id; +}; + +struct nvram_msg { + char magic; + char size; + union _cdata_header cdata_header; + char *cdata_payload; + size_t cdata_payload_size; + uint32_t crc; +} __packed; + +static const struct name2id NAME2ID[] = { + { "main", 0x00 }, + { "android", 0x00 }, + { "bootloader", 0x01 }, + { "fastboot", 0x01 }, + { "elk", 0x02 }, + { "recovery", 0x03 }, + { "crashmode", 0x04 }, + { "dnx", 0x05 }, + { "cli", 0x10 }, +}; + +static size_t offset; /* memorize offset between each call */ + +static size_t write_data_to_nvram(char *data, size_t size) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + for (i = 0; i < size; i++) + CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i); + + offset += size; + spin_unlock_irqrestore(&rtc_lock, flags); + + return i; +} + +static void write_msg_to_nvram(struct nvram_msg *nvram_msg) +{ + /* Ensure to start from top : only one command expected */ + offset = 0; + write_data_to_nvram((void*)nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + write_data_to_nvram((void*)(nvram_msg->cdata_payload), + nvram_msg->cdata_payload_size); + write_data_to_nvram((void*)&(nvram_msg->crc), sizeof(nvram_msg->crc)); +} + +/* Compute CRC for one byte (shift register-based: one bit at a time). */ +static uint32_t crc32c_byte(uint32_t crc, unsigned byte) +{ + int i; + uint32_t c; + + for (i = 0 ; i < 8 ; i += 1) { + c = (crc ^ byte) & 1; + if (c) + crc = (crc >> 1) ^ CRC32C_POLYNOMIAL; + else + crc = (crc >> 1); + byte >>= 1; + } + + return crc; +} + +/* Compute CRC for a given buffer. 
*/ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static struct kobject *capsule_kobject; + +static ssize_t is_capsule_requested(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", capsule_request); +} + +enum capsule_device_type { + EMMC = 2, + SDCARD = 4 +}; + +static ssize_t capsule_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nvram_msg msg; + struct nvram_capsule_cmd *capsule_cmd; + char name[32], partition; + enum capsule_device_type device; + int ret, padding; + unsigned char size; + union _cdata_header cdh; + + device = (buf[0] == 'm' ? EMMC : SDCARD); + partition = buf[1] - '0'; + ret = sscanf(buf+3, "%s", name); + pr_info(MODULE_NAME " capsule parameters (%d): DEVICE=%d PARTITION=%d NAME=%s\n", + ret, device, partition, name); + + cdh.data = 0; + cdh.tag = CDATA_TAG_USER_CMD; + + /* padding of filename on next dword */ + padding = (4 - (3 + strlen(name))%4)%4; + size = 2 + sizeof(cdh) + 3 + strlen(name) + padding + 4; + cdh.length = 1 + (3 + strlen(name) + padding) / 4; + + msg.magic = NVRAM_VALID_FLAG; + msg.size = size; + msg.cdata_header.data = cdh.data; + + capsule_cmd = kmalloc(size, GFP_KERNEL); + if (!capsule_cmd) + return -ENOMEM; + + capsule_cmd->action = USERCMD_UPDATE_IFWI(strlen(name) + 2); + capsule_cmd->device = device; + capsule_cmd->partition = partition; + strncpy(capsule_cmd->file_name, name, strlen(name)); + msg.cdata_payload = (char *)capsule_cmd; + msg.cdata_payload_size = 3 + strlen(name) + padding; + msg.crc = crc32c_msg(&msg); + write_msg_to_nvram(&msg); + capsule_request = true; + + kfree(capsule_cmd); + + return count; +} + +static struct kobj_attribute capsule_name_attribute = + __ATTR(capsule_name, 0600, NULL, capsule_store); + +static struct kobj_attribute capsule_requested_attribute = + __ATTR(capsule_requested, 0400, is_capsule_requested, NULL); + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static const unsigned int 
DEFAULT_TARGET_INDEX; + +static const char * const cold_reset_capsule[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#05025555555555", + NULL}; +static const char * const suppress_heartbeat[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#01035555555555", + NULL}; +static const char * const reboot_request[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#03015555555555", + NULL}; + +static int execute_slcan_command(const char *cmd[]) +{ + struct subprocess_info *sub_info; + int ret = -1; + + sub_info = call_usermodehelper_setup((char *)cmd[0], + (char **)cmd,(char **) NULL, GFP_KERNEL, + (void *)NULL, (void*)NULL, (void*)NULL); + + if (sub_info) { + ret = call_usermodehelper_exec(sub_info, + UMH_WAIT_PROC); + pr_info("Exec cmd=%s ret=%d\n", cmd[0], ret); + } + + if (ret) + pr_err("Failure on cmd=%s ret=%d\n", cmd[0], ret); + + return ret; +} + +static int ablbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + + ret = execute_slcan_command((const char **)suppress_heartbeat); + if (ret) + goto done; + + ret = execute_slcan_command((const char **)reboot_request); + if (ret) + goto done; + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + if (capsule_request) + ret = execute_slcan_command((const char **)cold_reset_capsule); + +done: + return NOTIFY_DONE; +} + +static struct notifier_block ablbc_reboot_notifier = { + .notifier_call = ablbc_reboot_notifier_call, +}; + +static int __init ablbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&ablbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + capsule_kobject = kobject_create_and_add("capsule", kernel_kobj); + if (!capsule_kobject) + return -ENOMEM; + + ret = sysfs_create_file(capsule_kobject, + &capsule_name_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_name\n"); + goto err; + } + + ret = sysfs_create_file(capsule_kobject, + &capsule_requested_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_requested\n"); + goto err; + } + + return 0; + +err: + kobject_put(capsule_kobject); + return ret; +} + +module_init(ablbc_init); + +static void __exit ablbc_exit(void) +{ + unregister_reboot_notifier(&ablbc_reboot_notifier); + kobject_put(capsule_kobject); +} +module_exit(ablbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Automotive Bootloader boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 0f695df14c9d..69df278e9aa4 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -334,24 +334,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) mutex_lock(&ashmem_mutex); if (asma->size == 0) { - ret = -EINVAL; - goto out; + mutex_unlock(&ashmem_mutex); + return -EINVAL; } if (!asma->file) { - ret = -EBADF; - goto out; + mutex_unlock(&ashmem_mutex); + return -EBADF; } + mutex_unlock(&ashmem_mutex); + ret = vfs_llseek(asma->file, offset, origin); if (ret < 0) - goto out; + return ret; /** Copy f_pos from backing file, since f_ops->llseek() sets it */ file->f_pos = asma->file->f_pos; - -out: - mutex_unlock(&ashmem_mutex); return ret; } @@ -401,22 +400,14 
@@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } get_file(asma->file); - /* - * XXX - Reworked to use shmem_zero_setup() instead of - * shmem_set_file while we're in staging. -jstultz - */ - if (vma->vm_flags & VM_SHARED) { - ret = shmem_zero_setup(vma); - if (ret) { - fput(asma->file); - goto out; - } + if (vma->vm_flags & VM_SHARED) + shmem_set_file(vma, asma->file); + else { + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = asma->file; } - if (vma->vm_file) - fput(vma->vm_file); - vma->vm_file = asma->file; - out: mutex_unlock(&ashmem_mutex); return ret; @@ -453,9 +444,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; - vfs_fallocate(range->asma->file, - FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, end - start); + range->asma->file->f_op->fallocate(range->asma->file, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + start, end - start); range->purged = ASHMEM_WAS_PURGED; lru_del(range); @@ -710,30 +701,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, size_t pgstart, pgend; int ret = -EINVAL; - if (unlikely(!asma->file)) - return -EINVAL; - if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) return -EFAULT; + mutex_lock(&ashmem_mutex); + + if (unlikely(!asma->file)) + goto out_unlock; + /* per custom, you can pass zero for len to mean "everything onward" */ if (!pin.len) pin.len = PAGE_ALIGN(asma->size) - pin.offset; if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) - return -EINVAL; + goto out_unlock; if (unlikely(((__u32)-1) - pin.offset < pin.len)) - return -EINVAL; + goto out_unlock; if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) - return -EINVAL; + goto out_unlock; pgstart = pin.offset / PAGE_SIZE; pgend = pgstart + (pin.len / PAGE_SIZE) - 1; - mutex_lock(&ashmem_mutex); - switch (cmd) { case ASHMEM_PIN: ret = ashmem_pin(asma, pgstart, pgend); @@ -746,6 +737,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, break; } +out_unlock: mutex_unlock(&ashmem_mutex); return ret; @@ -765,10 +757,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case ASHMEM_SET_SIZE: ret = -EINVAL; + mutex_lock(&ashmem_mutex); if (!asma->file) { ret = 0; asma->size = (size_t)arg; } + mutex_unlock(&ashmem_mutex); break; case ASHMEM_GET_SIZE: ret = asma->size; diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig new file mode 100644 index 000000000000..60fc224d4efc --- /dev/null +++ b/drivers/staging/android/fiq_debugger/Kconfig @@ -0,0 +1,58 @@ +config FIQ_DEBUGGER + bool "FIQ Mode Serial Debugger" + default n + depends on ARM || ARM64 + help + The FIQ serial debugger can accept commands even when the + kernel is unresponsive due to being stuck with interrupts + disabled. + +config FIQ_DEBUGGER_NO_SLEEP + bool "Keep serial debugger active" + depends on FIQ_DEBUGGER + default n + help + Enables the serial debugger at boot. Passing + fiq_debugger.no_sleep on the kernel commandline will + override this config option. + +config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON + bool "Don't disable wakeup IRQ when debugger is active" + depends on FIQ_DEBUGGER + default n + help + Don't disable the wakeup irq when enabling the uart clock. This will + cause extra interrupts, but it makes the serial debugger usable with + on some MSM radio builds that ignore the uart clock request in power + collapse. 
+ +config FIQ_DEBUGGER_CONSOLE + bool "Console on FIQ Serial Debugger port" + depends on FIQ_DEBUGGER + default n + help + Enables a console so that printk messages are displayed on + the debugger serial port as the occur. + +config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE + bool "Put the FIQ debugger into console mode by default" + depends on FIQ_DEBUGGER_CONSOLE + default n + help + If enabled, this puts the fiq debugger into console mode by default. + Otherwise, the fiq debugger will start out in debug mode. + +config FIQ_DEBUGGER_UART_OVERLAY + bool "Install uart DT overlay" + depends on FIQ_DEBUGGER + select OF_OVERLAY + default n + help + If enabled, fiq debugger is calling fiq_debugger_uart_overlay() + that will apply overlay uart_overlay@0 to disable proper uart. + +config FIQ_WATCHDOG + bool + select FIQ_DEBUGGER + select PSTORE_RAM + default n diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile new file mode 100644 index 000000000000..a7ca4871cad3 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/Makefile @@ -0,0 +1,4 @@ +obj-y += fiq_debugger.o +obj-$(CONFIG_ARM) += fiq_debugger_arm.o +obj-$(CONFIG_ARM64) += fiq_debugger_arm64.o +obj-$(CONFIG_FIQ_WATCHDOG) += fiq_watchdog.o diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c new file mode 100644 index 000000000000..f6a806219f84 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c @@ -0,0 +1,1246 @@ +/* + * drivers/staging/android/fiq_debugger.c + * + * Serial Debugger Interface accessed through an FIQ interrupt. + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_FIQ_GLUE +#include +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_UART_OVERLAY +#include +#endif + +#include + +#include "fiq_debugger.h" +#include "fiq_debugger_priv.h" +#include "fiq_debugger_ringbuf.h" + +#define DEBUG_MAX 64 +#define MAX_UNHANDLED_FIQ_COUNT 1000000 + +#define MAX_FIQ_DEBUGGER_PORTS 4 + +struct fiq_debugger_state { +#ifdef CONFIG_FIQ_GLUE + struct fiq_glue_handler handler; +#endif + struct fiq_debugger_output output; + + int fiq; + int uart_irq; + int signal_irq; + int wakeup_irq; + bool wakeup_irq_no_set_wake; + struct clk *clk; + struct fiq_debugger_pdata *pdata; + struct platform_device *pdev; + + char debug_cmd[DEBUG_MAX]; + int debug_busy; + int debug_abort; + + char debug_buf[DEBUG_MAX]; + int debug_count; + + bool no_sleep; + bool debug_enable; + bool ignore_next_wakeup_irq; + struct timer_list sleep_timer; + spinlock_t sleep_timer_lock; + bool uart_enabled; + struct wakeup_source debugger_wake_src; + bool console_enable; + int current_cpu; + atomic_t unhandled_fiq_count; + bool in_fiq; + + struct work_struct work; + spinlock_t work_lock; + char work_cmd[DEBUG_MAX]; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + spinlock_t console_lock; + struct console console; + struct tty_port tty_port; + struct fiq_debugger_ringbuf *tty_rbuf; + bool syslog_dumping; +#endif + + unsigned int last_irqs[NR_IRQS]; + unsigned int last_local_timer_irqs[NR_CPUS]; +}; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +struct tty_driver *fiq_tty_driver; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP +static bool initial_no_sleep = true; +#else +static bool initial_no_sleep; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE +static bool initial_debug_enable = true; +static bool initial_console_enable = true; +#else +static bool initial_debug_enable; +static bool initial_console_enable; +#endif + +static bool fiq_kgdb_enable; +static bool fiq_debugger_disable; + +module_param_named(no_sleep, initial_no_sleep, bool, 0644); +module_param_named(debug_enable, initial_debug_enable, bool, 0644); +module_param_named(console_enable, initial_console_enable, bool, 0644); +module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644); +module_param_named(disable, fiq_debugger_disable, bool, 0644); + +#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON +static inline +void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {} +static inline +void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {} +#else +static inline +void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + enable_irq(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + enable_irq_wake(state->wakeup_irq); +} +static inline +void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + disable_irq_nosync(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + disable_irq_wake(state->wakeup_irq); +} +#endif + +static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state) +{ + return (state->fiq >= 0); +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_force_irq(struct fiq_debugger_state *state) +{ + unsigned int irq = state->signal_irq; + + if (WARN_ON(!fiq_debugger_have_fiq(state))) + return; + if (state->pdata->force_irq) { + state->pdata->force_irq(state->pdev, irq); + } else { + 
struct irq_chip *chip = irq_get_chip(irq); + if (chip && chip->irq_retrigger) + chip->irq_retrigger(irq_get_irq_data(irq)); + } +} +#endif + +static void fiq_debugger_uart_enable(struct fiq_debugger_state *state) +{ + if (state->clk) + clk_enable(state->clk); + if (state->pdata->uart_enable) + state->pdata->uart_enable(state->pdev); +} + +static void fiq_debugger_uart_disable(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_disable) + state->pdata->uart_disable(state->pdev); + if (state->clk) + clk_disable(state->clk); +} + +static void fiq_debugger_uart_flush(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_flush) + state->pdata->uart_flush(state->pdev); +} + +static void fiq_debugger_putc(struct fiq_debugger_state *state, char c) +{ + state->pdata->uart_putc(state->pdev, c); +} + +static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s) +{ + unsigned c; + while ((c = *s++)) { + if (c == '\n') + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, c); + } +} + +static void fiq_debugger_prompt(struct fiq_debugger_state *state) +{ + fiq_debugger_puts(state, "debug> "); +} + +static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state) +{ + char buf[512]; + size_t len; + struct kmsg_dumper dumper = { .active = true }; + + + kmsg_dump_rewind_nolock(&dumper); + while (kmsg_dump_get_line_nolock(&dumper, true, buf, + sizeof(buf) - 1, &len)) { + buf[len] = 0; + fiq_debugger_puts(state, buf); + } +} + +static void fiq_debugger_printf(struct fiq_debugger_output *output, + const char *fmt, ...) +{ + struct fiq_debugger_state *state; + char buf[256]; + va_list ap; + + state = container_of(output, struct fiq_debugger_state, output); + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + fiq_debugger_puts(state, buf); +} + +/* Safe outside fiq context */ +static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...) +{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + unsigned long irq_flags; + + va_start(ap, fmt); + vsnprintf(buf, 128, fmt, ap); + va_end(ap); + + local_irq_save(irq_flags); + fiq_debugger_puts(state, buf); + fiq_debugger_uart_flush(state); + local_irq_restore(irq_flags); + return state->debug_abort; +} + +static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state) +{ + int n; + struct irq_desc *desc; + + fiq_debugger_printf(&state->output, + "irqnr total since-last status name\n"); + for_each_irq_desc(n, desc) { + struct irqaction *act = desc->action; + if (!act && !kstat_irqs(n)) + continue; + fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x %s\n", n, + kstat_irqs(n), + kstat_irqs(n) - state->last_irqs[n], + desc->status_use_accessors, + (act && act->name) ? act->name : "???"); + state->last_irqs[n] = kstat_irqs(n); + } +} + +static void fiq_debugger_do_ps(struct fiq_debugger_state *state) +{ + struct task_struct *g; + struct task_struct *p; + unsigned task_state; + static const char stat_nam[] = "RSDTtZX"; + + fiq_debugger_printf(&state->output, "pid ppid prio task pc\n"); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + task_state = p->state ? __ffs(p->state) + 1 : 0; + fiq_debugger_printf(&state->output, + "%5d %5d %4d ", p->pid, p->parent->pid, p->prio); + fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm, + task_state >= sizeof(stat_nam) ? '?' 
: stat_nam[task_state]); + if (task_state == TASK_RUNNING) + fiq_debugger_printf(&state->output, " running\n"); + else + fiq_debugger_printf(&state->output, " %08lx\n", + thread_saved_pc(p)); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state) +{ + state->syslog_dumping = true; +} + +static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state) +{ + state->syslog_dumping = false; +} +#else +extern int do_syslog(int type, char __user *bug, int count); +static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state) +{ + do_syslog(5 /* clear */, NULL, 0); +} + +static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state) +{ + fiq_debugger_dump_kernel_log(state); +} +#endif + +static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq) +{ + if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) { + fiq_debugger_printf(&state->output, "sysrq-g blocked\n"); + return; + } + fiq_debugger_begin_syslog_dump(state); + handle_sysrq(rq); + fiq_debugger_end_syslog_dump(state); +} + +#ifdef CONFIG_KGDB +static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state) +{ + if (!fiq_kgdb_enable) { + fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n"); + return; + } + + fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n"); + state->console_enable = true; + handle_sysrq('g'); +} +#endif + +static void fiq_debugger_schedule_work(struct fiq_debugger_state *state, + char *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&state->work_lock, flags); + if (state->work_cmd[0] != '\0') { + fiq_debugger_printf(&state->output, "work command processor busy\n"); + spin_unlock_irqrestore(&state->work_lock, flags); + return; + } + + strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd)); + spin_unlock_irqrestore(&state->work_lock, flags); + + schedule_work(&state->work); +} + +static void fiq_debugger_work(struct work_struct *work) +{ + struct fiq_debugger_state *state; + char work_cmd[DEBUG_MAX]; + char *cmd; + unsigned long flags; + + state = container_of(work, struct fiq_debugger_state, work); + + spin_lock_irqsave(&state->work_lock, flags); + + strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd)); + state->work_cmd[0] = '\0'; + + spin_unlock_irqrestore(&state->work_lock, flags); + + cmd = work_cmd; + if (!strncmp(cmd, "reboot", 6)) { + cmd += 6; + while (*cmd == ' ') + cmd++; + if (*cmd != '\0') + kernel_restart(cmd); + else + kernel_restart(NULL); + } else { + fiq_debugger_printf(&state->output, "unknown work command '%s'\n", + work_cmd); + } +} + +/* This function CANNOT be called in FIQ context */ +static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd) +{ + if (!strcmp(cmd, "ps")) + fiq_debugger_do_ps(state); + if (!strcmp(cmd, "sysrq")) + fiq_debugger_do_sysrq(state, 'h'); + if (!strncmp(cmd, "sysrq ", 6)) + fiq_debugger_do_sysrq(state, cmd[6]); +#ifdef CONFIG_KGDB + if (!strcmp(cmd, "kgdb")) + fiq_debugger_do_kgdb(state); +#endif + if (!strncmp(cmd, "reboot", 6)) + fiq_debugger_schedule_work(state, cmd); +} + +static void fiq_debugger_help(struct fiq_debugger_state *state) +{ + fiq_debugger_printf(&state->output, + "FIQ Debugger commands:\n" + " pc PC status\n" + " regs Register dump\n" + " allregs Extended Register dump\n" + " bt Stack trace\n" + " reboot [] Reboot with command \n" + " reset [] Hard reset with command \n" + " irqs Interupt status\n" + " 
kmsg Kernel log\n" + " version Kernel version\n"); + fiq_debugger_printf(&state->output, + " sleep Allow sleep while in FIQ\n" + " nosleep Disable sleep while in FIQ\n" + " console Switch terminal to console\n" + " cpu Current CPU\n" + " cpu Switch to CPU\n"); + fiq_debugger_printf(&state->output, + " ps Process list\n" + " sysrq sysrq options\n" + " sysrq Execute sysrq with \n"); +#ifdef CONFIG_KGDB + fiq_debugger_printf(&state->output, + " kgdb Enter kernel debugger\n"); +#endif +} + +static void fiq_debugger_take_affinity(void *info) +{ + struct fiq_debugger_state *state = info; + struct cpumask cpumask; + + cpumask_clear(&cpumask); + cpumask_set_cpu(get_cpu(), &cpumask); + + irq_set_affinity(state->uart_irq, &cpumask); +} + +static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu) +{ + if (!fiq_debugger_have_fiq(state)) + smp_call_function_single(cpu, fiq_debugger_take_affinity, state, + false); + state->current_cpu = cpu; +} + +static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state, + const char *cmd, const struct pt_regs *regs, + void *svc_sp) +{ + bool signal_helper = false; + + if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) { + fiq_debugger_help(state); + } else if (!strcmp(cmd, "pc")) { + fiq_debugger_dump_pc(&state->output, regs); + } else if (!strcmp(cmd, "regs")) { + fiq_debugger_dump_regs(&state->output, regs); + } else if (!strcmp(cmd, "allregs")) { + fiq_debugger_dump_allregs(&state->output, regs); + } else if (!strcmp(cmd, "bt")) { + fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp); + } else if (!strncmp(cmd, "reset", 5)) { + cmd += 5; + while (*cmd == ' ') + cmd++; + if (*cmd) { + char tmp_cmd[32]; + strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd)); + machine_restart(tmp_cmd); + } else { + machine_restart(NULL); + } + } else if (!strcmp(cmd, "irqs")) { + fiq_debugger_dump_irqs(state); + } else if (!strcmp(cmd, "kmsg")) { + fiq_debugger_dump_kernel_log(state); + } else if (!strcmp(cmd, "version")) { + fiq_debugger_printf(&state->output, "%s\n", linux_banner); + } else if (!strcmp(cmd, "sleep")) { + state->no_sleep = false; + fiq_debugger_printf(&state->output, "enabling sleep\n"); + } else if (!strcmp(cmd, "nosleep")) { + state->no_sleep = true; + fiq_debugger_printf(&state->output, "disabling sleep\n"); + } else if (!strcmp(cmd, "console")) { + fiq_debugger_printf(&state->output, "console mode\n"); + fiq_debugger_uart_flush(state); + state->console_enable = true; + } else if (!strcmp(cmd, "cpu")) { + fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu); + } else if (!strncmp(cmd, "cpu ", 4)) { + unsigned long cpu = 0; + if (kstrtoul(cmd + 4, 10, &cpu) == 0) + fiq_debugger_switch_cpu(state, cpu); + else + fiq_debugger_printf(&state->output, "invalid cpu\n"); + fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu); + } else { + if (state->debug_busy) { + fiq_debugger_printf(&state->output, + "command processor busy. 
trying to abort.\n"); + state->debug_abort = -1; + } else { + strcpy(state->debug_cmd, cmd); + state->debug_busy = 1; + } + + return true; + } + if (!state->console_enable) + fiq_debugger_prompt(state); + + return signal_helper; +} + +static void fiq_debugger_sleep_timer_expired(unsigned long data) +{ + struct fiq_debugger_state *state = (struct fiq_debugger_state *)data; + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->uart_enabled && !state->no_sleep) { + if (state->debug_enable && !state->console_enable) { + state->debug_enable = false; + fiq_debugger_printf_nfiq(state, + "suspending fiq debugger\n"); + } + state->ignore_next_wakeup_irq = true; + fiq_debugger_uart_disable(state); + state->uart_enabled = false; + fiq_debugger_enable_wakeup_irq(state); + } + __pm_relax(&state->debugger_wake_src); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state) +{ + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) { + state->ignore_next_wakeup_irq = false; + } else if (!state->uart_enabled) { + __pm_stay_awake(&state->debugger_wake_src); + fiq_debugger_uart_enable(state); + state->uart_enabled = true; + fiq_debugger_disable_wakeup_irq(state); + mod_timer(&state->sleep_timer, jiffies + HZ / 2); + } + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (!state->no_sleep) + fiq_debugger_puts(state, "WAKEUP\n"); + fiq_debugger_handle_wakeup(state); + + return IRQ_HANDLED; +} + +static +void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state) +{ +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + if (state->tty_port.ops) { + int i; + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + for (i = 0; i < count; i++) { + int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL); + if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1)) + pr_warn("fiq tty failed to consume byte\n"); + } + tty_flip_buffer_push(&state->tty_port); + } +#endif +} + +static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state) +{ + if (!state->no_sleep) { + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + __pm_stay_awake(&state->debugger_wake_src); + mod_timer(&state->sleep_timer, jiffies + HZ * 5); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); + } + fiq_debugger_handle_console_irq_context(state); + if (state->debug_busy) { + fiq_debugger_irq_exec(state, state->debug_cmd); + if (!state->console_enable) + fiq_debugger_prompt(state); + state->debug_busy = 0; + } +} + +static int fiq_debugger_getc(struct fiq_debugger_state *state) +{ + return state->pdata->uart_getc(state->pdev); +} + +static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state, + int this_cpu, const struct pt_regs *regs, void *svc_sp) +{ + int c; + static int last_c; + int count = 0; + bool signal_helper = false; + + if (this_cpu != state->current_cpu) { + if (state->in_fiq) + return false; + + if (atomic_inc_return(&state->unhandled_fiq_count) != + MAX_UNHANDLED_FIQ_COUNT) + return false; + + fiq_debugger_printf(&state->output, + "fiq_debugger: cpu %d not responding, " + "reverting to cpu %d\n", state->current_cpu, + this_cpu); + + atomic_set(&state->unhandled_fiq_count, 
0); + fiq_debugger_switch_cpu(state, this_cpu); + return false; + } + + state->in_fiq = true; + + while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) { + count++; + if (!state->debug_enable) { + if ((c == 13) || (c == 10)) { + state->debug_enable = true; + state->debug_count = 0; + fiq_debugger_prompt(state); + } + } else if (c == FIQ_DEBUGGER_BREAK) { + state->console_enable = false; + fiq_debugger_puts(state, "fiq debugger mode\n"); + state->debug_count = 0; + fiq_debugger_prompt(state); +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + } else if (state->console_enable && state->tty_rbuf) { + fiq_debugger_ringbuf_push(state->tty_rbuf, c); + signal_helper = true; +#endif + } else if ((c >= ' ') && (c < 127)) { + if (state->debug_count < (DEBUG_MAX - 1)) { + state->debug_buf[state->debug_count++] = c; + fiq_debugger_putc(state, c); + } + } else if ((c == 8) || (c == 127)) { + if (state->debug_count > 0) { + state->debug_count--; + fiq_debugger_putc(state, 8); + fiq_debugger_putc(state, ' '); + fiq_debugger_putc(state, 8); + } + } else if ((c == 13) || (c == 10)) { + if (c == '\r' || (c == '\n' && last_c != '\r')) { + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, '\n'); + } + if (state->debug_count) { + state->debug_buf[state->debug_count] = 0; + state->debug_count = 0; + signal_helper |= + fiq_debugger_fiq_exec(state, + state->debug_buf, + regs, svc_sp); + } else { + fiq_debugger_prompt(state); + } + } + last_c = c; + } + if (!state->console_enable) + fiq_debugger_uart_flush(state); + if (state->pdata->fiq_ack) + state->pdata->fiq_ack(state->pdev, state->fiq); + + /* poke sleep timer if necessary */ + if (state->debug_enable && !state->no_sleep) + signal_helper = true; + + atomic_set(&state->unhandled_fiq_count, 0); + state->in_fiq = false; + + return signal_helper; +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_fiq(struct fiq_glue_handler *h, + const struct pt_regs *regs, void *svc_sp) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu; + bool need_irq; + + need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs, + svc_sp); + if (need_irq) + fiq_debugger_force_irq(state); +} +#endif + +/* + * When not using FIQs, we only use this single interrupt as an entry point. + * This just effectively takes over the UART interrupt and does all the work + * in this context. + */ +static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + bool not_done; + + fiq_debugger_handle_wakeup(state); + + /* handle the debugger irq in regular context */ + not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(), + get_irq_regs(), + current_thread_info()); + if (not_done) + fiq_debugger_handle_irq_context(state); + + return IRQ_HANDLED; +} + +/* + * If FIQs are used, not everything can happen in fiq context. + * FIQ handler does what it can and then signals this interrupt to finish the + * job in irq context. 
+ */ +static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (state->pdata->force_irq_ack) + state->pdata->force_irq_ack(state->pdev, state->signal_irq); + + fiq_debugger_handle_irq_context(state); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_resume(struct fiq_glue_handler *h) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + if (state->pdata->uart_resume) + state->pdata->uart_resume(state->pdev); +} +#endif + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) +struct tty_driver *fiq_debugger_console_device(struct console *co, int *index) +{ + *index = co->index; + return fiq_tty_driver; +} + +static void fiq_debugger_console_write(struct console *co, + const char *s, unsigned int count) +{ + struct fiq_debugger_state *state; + unsigned long flags; + + state = container_of(co, struct fiq_debugger_state, console); + + if (!state->console_enable && !state->syslog_dumping) + return; + + fiq_debugger_uart_enable(state); + spin_lock_irqsave(&state->console_lock, flags); + while (count--) { + if (*s == '\n') + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, *s++); + } + fiq_debugger_uart_flush(state); + spin_unlock_irqrestore(&state->console_lock, flags); + fiq_debugger_uart_disable(state); +} + +static struct console fiq_debugger_console = { + .name = "ttyFIQ", + .device = fiq_debugger_console_device, + .write = fiq_debugger_console_write, + .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, +}; + +int fiq_tty_open(struct tty_struct *tty, struct file *filp) +{ + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + + return tty_port_open(&state->tty_port, tty, filp); +} + +void fiq_tty_close(struct tty_struct *tty, struct file *filp) +{ + tty_port_close(tty->port, tty, filp); +} + +int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int i; + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + + if (!state->console_enable) + return count; + + fiq_debugger_uart_enable(state); + spin_lock_irq(&state->console_lock); + for (i = 0; i < count; i++) + fiq_debugger_putc(state, *buf++); + spin_unlock_irq(&state->console_lock); + fiq_debugger_uart_disable(state); + + return count; +} + +int fiq_tty_write_room(struct tty_struct *tty) +{ + return 16; +} + +#ifdef CONFIG_CONSOLE_POLL +static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options) +{ + return 0; +} + +static int fiq_tty_poll_get_char(struct tty_driver *driver, int line) +{ + struct fiq_debugger_state **states = driver->driver_state; + struct fiq_debugger_state *state = states[line]; + int c = NO_POLL_CHAR; + + fiq_debugger_uart_enable(state); + if (fiq_debugger_have_fiq(state)) { + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + if (count > 0) { + c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + fiq_debugger_ringbuf_consume(state->tty_rbuf, 1); + } + } else { + c = fiq_debugger_getc(state); + if (c == FIQ_DEBUGGER_NO_CHAR) + c = NO_POLL_CHAR; + } + fiq_debugger_uart_disable(state); + + return c; +} + +static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch) +{ + struct fiq_debugger_state **states = driver->driver_state; + struct fiq_debugger_state *state = states[line]; + fiq_debugger_uart_enable(state); + fiq_debugger_putc(state, ch); + 
fiq_debugger_uart_disable(state); +} +#endif + +static const struct tty_port_operations fiq_tty_port_ops; + +static const struct tty_operations fiq_tty_driver_ops = { + .write = fiq_tty_write, + .write_room = fiq_tty_write_room, + .open = fiq_tty_open, + .close = fiq_tty_close, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = fiq_tty_poll_init, + .poll_get_char = fiq_tty_poll_get_char, + .poll_put_char = fiq_tty_poll_put_char, +#endif +}; + +static int fiq_debugger_tty_init(void) +{ + int ret; + struct fiq_debugger_state **states = NULL; + + states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL); + if (!states) { + pr_err("Failed to allocate fiq debugger state structres\n"); + return -ENOMEM; + } + + fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS); + if (!fiq_tty_driver) { + pr_err("Failed to allocate fiq debugger tty\n"); + ret = -ENOMEM; + goto err_free_state; + } + + fiq_tty_driver->owner = THIS_MODULE; + fiq_tty_driver->driver_name = "fiq-debugger"; + fiq_tty_driver->name = "ttyFIQ"; + fiq_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + fiq_tty_driver->subtype = SERIAL_TYPE_NORMAL; + fiq_tty_driver->init_termios = tty_std_termios; + fiq_tty_driver->flags = TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV; + fiq_tty_driver->driver_state = states; + + fiq_tty_driver->init_termios.c_cflag = + B115200 | CS8 | CREAD | HUPCL | CLOCAL; + fiq_tty_driver->init_termios.c_ispeed = 115200; + fiq_tty_driver->init_termios.c_ospeed = 115200; + + tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops); + + ret = tty_register_driver(fiq_tty_driver); + if (ret) { + pr_err("Failed to register fiq tty: %d\n", ret); + goto err_free_tty; + } + + pr_info("Registered FIQ tty driver\n"); + return 0; + +err_free_tty: + put_tty_driver(fiq_tty_driver); + fiq_tty_driver = NULL; +err_free_state: + kfree(states); + return ret; +} + +static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state) +{ + int ret; + struct device *tty_dev; + struct fiq_debugger_state **states = fiq_tty_driver->driver_state; + + states[state->pdev->id] = state; + + state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024); + if (!state->tty_rbuf) { + pr_err("Failed to allocate fiq debugger ringbuf\n"); + ret = -ENOMEM; + goto err; + } + + tty_port_init(&state->tty_port); + state->tty_port.ops = &fiq_tty_port_ops; + + tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver, + state->pdev->id, &state->pdev->dev); + if (IS_ERR(tty_dev)) { + pr_err("Failed to register fiq debugger tty device\n"); + ret = PTR_ERR(tty_dev); + goto err; + } + + device_set_wakeup_capable(tty_dev, 1); + + pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id); + + return 0; + +err: + fiq_debugger_ringbuf_free(state->tty_rbuf); + state->tty_rbuf = NULL; + return ret; +} +#endif + +static int fiq_debugger_dev_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fiq_debugger_state *state = platform_get_drvdata(pdev); + + if (state->pdata->uart_dev_suspend) + return state->pdata->uart_dev_suspend(pdev); + return 0; +} + +static int fiq_debugger_dev_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fiq_debugger_state *state = platform_get_drvdata(pdev); + + if (state->pdata->uart_dev_resume) + return state->pdata->uart_dev_resume(pdev); + return 0; +} + +static int fiq_debugger_probe(struct platform_device *pdev) +{ + int ret; + struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev); + struct fiq_debugger_state *state; + int fiq; + int 
uart_irq; + + if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS) + return -EINVAL; + + if (!pdata->uart_getc || !pdata->uart_putc) + return -EINVAL; + if ((pdata->uart_enable && !pdata->uart_disable) || + (!pdata->uart_enable && pdata->uart_disable)) + return -EINVAL; + + fiq = platform_get_irq_byname(pdev, "fiq"); + uart_irq = platform_get_irq_byname(pdev, "uart_irq"); + + /* uart_irq mode and fiq mode are mutually exclusive, but one of them + * is required */ + if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0)) + return -EINVAL; + if (fiq >= 0 && !pdata->fiq_enable) + return -EINVAL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + state->output.printf = fiq_debugger_printf; + setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired, + (unsigned long)state); + state->pdata = pdata; + state->pdev = pdev; + state->no_sleep = initial_no_sleep; + state->debug_enable = initial_debug_enable; + state->console_enable = initial_console_enable; + + state->fiq = fiq; + state->uart_irq = uart_irq; + state->signal_irq = platform_get_irq_byname(pdev, "signal"); + state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup"); + + INIT_WORK(&state->work, fiq_debugger_work); + spin_lock_init(&state->work_lock); + + platform_set_drvdata(pdev, state); + + spin_lock_init(&state->sleep_timer_lock); + + if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state)) + state->no_sleep = true; + state->ignore_next_wakeup_irq = !state->no_sleep; + + wakeup_source_init(&state->debugger_wake_src, "serial-debug"); + + state->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(state->clk)) + state->clk = NULL; + + /* do not call pdata->uart_enable here since uart_init may still + * need to do some initialization before uart_enable can work. + * So, only try to manage the clock during init. + */ + if (state->clk) + clk_enable(state->clk); + + if (pdata->uart_init) { + ret = pdata->uart_init(pdev); + if (ret) + goto err_uart_init; + } + + fiq_debugger_printf_nfiq(state, + "\n", + state->no_sleep ? "" : "twice "); + +#ifdef CONFIG_FIQ_GLUE + if (fiq_debugger_have_fiq(state)) { + state->handler.fiq = fiq_debugger_fiq; + state->handler.resume = fiq_debugger_resume; + ret = fiq_glue_register_handler(&state->handler); + if (ret) { + pr_err("%s: could not install fiq handler\n", __func__); + goto err_register_irq; + } + + pdata->fiq_enable(pdev, state->fiq, 1); + } else +#endif + { + ret = request_irq(state->uart_irq, fiq_debugger_uart_irq, + IRQF_NO_SUSPEND, "debug", state); + if (ret) { + pr_err("%s: could not install irq handler\n", __func__); + goto err_register_irq; + } + + /* for irq-only mode, we want this irq to wake us up, if it + * can. 
+ */ + enable_irq_wake(state->uart_irq); + } + + if (state->clk) + clk_disable(state->clk); + + if (state->signal_irq >= 0) { + ret = request_irq(state->signal_irq, fiq_debugger_signal_irq, + IRQF_TRIGGER_RISING, "debug-signal", state); + if (ret) + pr_err("serial_debugger: could not install signal_irq"); + } + + if (state->wakeup_irq >= 0) { + ret = request_irq(state->wakeup_irq, + fiq_debugger_wakeup_irq_handler, + IRQF_TRIGGER_FALLING, + "debug-wakeup", state); + if (ret) { + pr_err("serial_debugger: " + "could not install wakeup irq\n"); + state->wakeup_irq = -1; + } else { + ret = enable_irq_wake(state->wakeup_irq); + if (ret) { + pr_err("serial_debugger: " + "could not enable wakeup\n"); + state->wakeup_irq_no_set_wake = true; + } + } + } + if (state->no_sleep) + fiq_debugger_handle_wakeup(state); + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + spin_lock_init(&state->console_lock); + state->console = fiq_debugger_console; + state->console.index = pdev->id; + if (!console_set_on_cmdline) + add_preferred_console(state->console.name, + state->console.index, NULL); + register_console(&state->console); + fiq_debugger_tty_init_one(state); +#endif + return 0; + +err_register_irq: + if (pdata->uart_free) + pdata->uart_free(pdev); +err_uart_init: + if (state->clk) + clk_disable(state->clk); + if (state->clk) + clk_put(state->clk); + wakeup_source_trash(&state->debugger_wake_src); + platform_set_drvdata(pdev, NULL); + kfree(state); + return ret; +} + +static const struct dev_pm_ops fiq_debugger_dev_pm_ops = { + .suspend = fiq_debugger_dev_suspend, + .resume = fiq_debugger_dev_resume, +}; + +static struct platform_driver fiq_debugger_driver = { + .probe = fiq_debugger_probe, + .driver = { + .name = "fiq_debugger", + .pm = &fiq_debugger_dev_pm_ops, + }, +}; + +#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY) +int fiq_debugger_uart_overlay(void) +{ + struct device_node *onp = of_find_node_by_path("/uart_overlay@0"); + int ret; + + if (!onp) { + pr_err("serial_debugger: uart overlay not found\n"); + return -ENODEV; + } + + ret = of_overlay_create(onp); + if (ret < 0) { + pr_err("serial_debugger: fail to create overlay: %d\n", ret); + of_node_put(onp); + return ret; + } + + pr_info("serial_debugger: uart overlay applied\n"); + return 0; +} +#endif + +static int __init fiq_debugger_init(void) +{ + if (fiq_debugger_disable) { + pr_err("serial_debugger: disabled\n"); + return -ENODEV; + } +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + fiq_debugger_tty_init(); +#endif +#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY) + fiq_debugger_uart_overlay(); +#endif + return platform_driver_register(&fiq_debugger_driver); +} + +postcore_initcall(fiq_debugger_init); diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h new file mode 100644 index 000000000000..c9ec4f8db086 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.h @@ -0,0 +1,64 @@ +/* + * drivers/staging/android/fiq_debugger/fiq_debugger.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ +#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ + +#include + +#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR +#define FIQ_DEBUGGER_BREAK 0x00ff0100 + +#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq" +#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal" +#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup" + +/** + * struct fiq_debugger_pdata - fiq debugger platform data + * @uart_resume: used to restore uart state right before enabling + * the fiq. + * @uart_enable: Do the work necessary to communicate with the uart + * hw (enable clocks, etc.). This must be ref-counted. + * @uart_disable: Do the work necessary to disable the uart hw + * (disable clocks, etc.). This must be ref-counted. + * @uart_dev_suspend: called during PM suspend, generally not needed + * for real fiq mode debugger. + * @uart_dev_resume: called during PM resume, generally not needed + * for real fiq mode debugger. + */ +struct fiq_debugger_pdata { + int (*uart_init)(struct platform_device *pdev); + void (*uart_free)(struct platform_device *pdev); + int (*uart_resume)(struct platform_device *pdev); + int (*uart_getc)(struct platform_device *pdev); + void (*uart_putc)(struct platform_device *pdev, unsigned int c); + void (*uart_flush)(struct platform_device *pdev); + void (*uart_enable)(struct platform_device *pdev); + void (*uart_disable)(struct platform_device *pdev); + + int (*uart_dev_suspend)(struct platform_device *pdev); + int (*uart_dev_resume)(struct platform_device *pdev); + + void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq, + bool enable); + void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq); + + void (*force_irq)(struct platform_device *pdev, unsigned int irq); + void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq); +}; + +#endif diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c new file mode 100644 index 000000000000..8b3e0137be1a --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include + +#include "fiq_debugger_priv.h" + +static char *mode_name(unsigned cpsr) +{ + switch (cpsr & MODE_MASK) { + case USR_MODE: return "USR"; + case FIQ_MODE: return "FIQ"; + case IRQ_MODE: return "IRQ"; + case SVC_MODE: return "SVC"; + case ABT_MODE: return "ABT"; + case UND_MODE: return "UND"; + case SYSTEM_MODE: return "SYS"; + default: return "???"; + } +} + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " pc %08x cpsr %08x mode %s\n", + regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr)); +} + +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, + " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); + output->printf(output, + " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7); + output->printf(output, + " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", + regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp, + mode_name(regs->ARM_cpsr)); + output->printf(output, + " ip %08x sp %08x lr %08x pc %08x cpsr %08x\n", + regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc, + regs->ARM_cpsr); +} + +struct mode_regs { + unsigned long sp_svc; + unsigned long lr_svc; + unsigned long spsr_svc; + + unsigned long sp_abt; + unsigned long lr_abt; + unsigned long spsr_abt; + + unsigned long sp_und; + unsigned long lr_und; + unsigned long spsr_und; + + unsigned long sp_irq; + unsigned long lr_irq; + unsigned long spsr_irq; + + unsigned long r8_fiq; + unsigned long r9_fiq; + unsigned long r10_fiq; + unsigned long r11_fiq; + unsigned long r12_fiq; + unsigned long sp_fiq; + unsigned long lr_fiq; + unsigned long spsr_fiq; +}; + +static void __naked get_mode_regs(struct mode_regs *regs) +{ + asm volatile ( + "mrs r1, cpsr\n" + "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r8 - r14}\n" + "mrs r2, spsr\n" + "stmia r0!, {r2}\n" + "msr cpsr_c, r1\n" + "bx lr\n"); +} + + +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + struct mode_regs mode_regs; + unsigned long mode = regs->ARM_cpsr & MODE_MASK; + + fiq_debugger_dump_regs(output, regs); + get_mode_regs(&mode_regs); + + output->printf(output, + "%csvc: sp %08x lr %08x spsr %08x\n", + mode == SVC_MODE ? '*' : ' ', + mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); + output->printf(output, + "%cabt: sp %08x lr %08x spsr %08x\n", + mode == ABT_MODE ? '*' : ' ', + mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); + output->printf(output, + "%cund: sp %08x lr %08x spsr %08x\n", + mode == UND_MODE ? '*' : ' ', + mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); + output->printf(output, + "%cirq: sp %08x lr %08x spsr %08x\n", + mode == IRQ_MODE ? '*' : ' ', + mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); + output->printf(output, + "%cfiq: r8 %08x r9 %08x r10 %08x r11 %08x r12 %08x\n", + mode == FIQ_MODE ? 
'*' : ' ', + mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, + mode_regs.r11_fiq, mode_regs.r12_fiq); + output->printf(output, + " fiq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); +} + +struct stacktrace_state { + struct fiq_debugger_output *output; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + sts->output->printf(sts->output, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + frame->pc, frame->pc, frame->lr, frame->lr, + frame->sp, frame->fp); + sts->depth--; + return 0; + } + sts->output->printf(sts->output, " ...\n"); + + return sts->depth == 0; +} + +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +static struct frame_tail *user_backtrace(struct fiq_debugger_output *output, + struct frame_tail *tail) +{ + struct frame_tail buftail[2]; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { + output->printf(output, " invalid frame pointer %p\n", + tail); + return NULL; + } + if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { + output->printf(output, + " failed to copy frame pointer %p\n", tail); + return NULL; + } + + output->printf(output, " %p\n", buftail[0].lr); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (tail >= buftail[0].fp) + return NULL; + + return buftail[0].fp-1; +} + +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp) +{ + struct frame_tail *tail; + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.output = output; + *current_thread_info() = *real_thread_info; + + if (!current) + output->printf(output, "current NULL\n"); + else + output->printf(output, "pid: %d comm: %s\n", + current->pid, current->comm); + fiq_debugger_dump_regs(output, regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + output->printf(output, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr, + regs->ARM_sp, regs->ARM_fp); + walk_stackframe(&frame, report_trace, &sts); + return; + } + + tail = ((struct frame_tail *) regs->ARM_fp) - 1; + while (depth-- && tail && !((unsigned long) tail & 3)) + tail = user_backtrace(output, tail); +} diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c new file mode 100644 index 000000000000..c53f4980bab9 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include "fiq_debugger_priv.h" + +static char *mode_name(const struct pt_regs *regs) +{ + if (compat_user_mode(regs)) { + return "USR"; + } else { + switch (processor_mode(regs)) { + case PSR_MODE_EL0t: return "EL0t"; + case PSR_MODE_EL1t: return "EL1t"; + case PSR_MODE_EL1h: return "EL1h"; + case PSR_MODE_EL2t: return "EL2t"; + case PSR_MODE_EL2h: return "EL2h"; + default: return "???"; + } + } +} + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " pc %016lx cpsr %08lx mode %s\n", + regs->pc, regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs->compat_usr(0), regs->compat_usr(1), + regs->compat_usr(2), regs->compat_usr(3)); + output->printf(output, " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs->compat_usr(4), regs->compat_usr(5), + regs->compat_usr(6), regs->compat_usr(7)); + output->printf(output, " r8 %08x r9 %08x r10 %08x r11 %08x\n", + regs->compat_usr(8), regs->compat_usr(9), + regs->compat_usr(10), regs->compat_usr(11)); + output->printf(output, " ip %08x sp %08x lr %08x pc %08x\n", + regs->compat_usr(12), regs->compat_sp, + regs->compat_lr, regs->pc); + output->printf(output, " cpsr %08x (%s)\n", + regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + + output->printf(output, " x0 %016lx x1 %016lx\n", + regs->regs[0], regs->regs[1]); + output->printf(output, " x2 %016lx x3 %016lx\n", + regs->regs[2], regs->regs[3]); + output->printf(output, " x4 %016lx x5 %016lx\n", + regs->regs[4], regs->regs[5]); + output->printf(output, " x6 %016lx x7 %016lx\n", + regs->regs[6], regs->regs[7]); + output->printf(output, " x8 %016lx x9 %016lx\n", + regs->regs[8], regs->regs[9]); + output->printf(output, " x10 %016lx x11 %016lx\n", + regs->regs[10], regs->regs[11]); + output->printf(output, " x12 %016lx x13 %016lx\n", + regs->regs[12], regs->regs[13]); + output->printf(output, " x14 %016lx x15 %016lx\n", + regs->regs[14], regs->regs[15]); + output->printf(output, " x16 %016lx x17 %016lx\n", + regs->regs[16], regs->regs[17]); + output->printf(output, " x18 %016lx x19 %016lx\n", + regs->regs[18], regs->regs[19]); + output->printf(output, " x20 %016lx x21 %016lx\n", + regs->regs[20], regs->regs[21]); + output->printf(output, " x22 %016lx x23 %016lx\n", + regs->regs[22], regs->regs[23]); + output->printf(output, " x24 %016lx x25 %016lx\n", + regs->regs[24], regs->regs[25]); + output->printf(output, " x26 %016lx x27 %016lx\n", + regs->regs[26], regs->regs[27]); + output->printf(output, " x28 %016lx x29 %016lx\n", + regs->regs[28], regs->regs[29]); + output->printf(output, " x30 %016lx sp %016lx\n", + regs->regs[30], regs->sp); + output->printf(output, " pc %016lx cpsr %08x (%s)\n", + regs->pc, regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + if (compat_user_mode(regs)) + fiq_debugger_dump_regs_aarch32(output, regs); + else + fiq_debugger_dump_regs_aarch64(output, regs); +} + +#define READ_SPECIAL_REG(x) ({ \ + u64 val; \ + asm volatile ("mrs %0, " # x : "=r"(val)); \ + val; \ +}) + +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + u32 pstate = READ_SPECIAL_REG(CurrentEl); + bool in_el2 = (pstate & PSR_MODE_MASK) 
>= PSR_MODE_EL2t; + + fiq_debugger_dump_regs(output, regs); + + output->printf(output, " sp_el0 %016lx\n", + READ_SPECIAL_REG(sp_el0)); + + if (in_el2) + output->printf(output, " sp_el1 %016lx\n", + READ_SPECIAL_REG(sp_el1)); + + output->printf(output, " elr_el1 %016lx\n", + READ_SPECIAL_REG(elr_el1)); + + output->printf(output, " spsr_el1 %08lx\n", + READ_SPECIAL_REG(spsr_el1)); + + if (in_el2) { + output->printf(output, " spsr_irq %08lx\n", + READ_SPECIAL_REG(spsr_irq)); + output->printf(output, " spsr_abt %08lx\n", + READ_SPECIAL_REG(spsr_abt)); + output->printf(output, " spsr_und %08lx\n", + READ_SPECIAL_REG(spsr_und)); + output->printf(output, " spsr_fiq %08lx\n", + READ_SPECIAL_REG(spsr_fiq)); + output->printf(output, " spsr_el2 %08lx\n", + READ_SPECIAL_REG(elr_el2)); + output->printf(output, " spsr_el2 %08lx\n", + READ_SPECIAL_REG(spsr_el2)); + } +} + +struct stacktrace_state { + struct fiq_debugger_output *output; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + sts->output->printf(sts->output, "%pF:\n", frame->pc); + sts->output->printf(sts->output, + " pc %016lx fp %016lx\n", + frame->pc, frame->fp); + sts->depth--; + return 0; + } + sts->output->printf(sts->output, " ...\n"); + + return sts->depth == 0; +} + +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp) +{ + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.output = output; + *current_thread_info() = *real_thread_info; + + if (!current) + output->printf(output, "current NULL\n"); + else + output->printf(output, "pid: %d comm: %s\n", + current->pid, current->comm); + fiq_debugger_dump_regs(output, regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->regs[29]; + frame.pc = regs->pc; + output->printf(output, "\n"); + walk_stackframe(current, &frame, report_trace, &sts); + } +} diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h new file mode 100644 index 000000000000..d5d051f727a8 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _FIQ_DEBUGGER_PRIV_H_ +#define _FIQ_DEBUGGER_PRIV_H_ + +#define THREAD_INFO(sp) ((struct thread_info *) \ + ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) + +struct fiq_debugger_output { + void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...); +}; + +struct pt_regs; + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp); + +#endif diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h new file mode 100644 index 000000000000..10c3c5d09098 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h @@ -0,0 +1,94 @@ +/* + * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h + * + * simple lockless ringbuffer + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +struct fiq_debugger_ringbuf { + int len; + int head; + int tail; + u8 buf[]; +}; + + +static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len) +{ + struct fiq_debugger_ringbuf *rbuf; + + rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL); + if (rbuf == NULL) + return NULL; + + rbuf->len = len; + rbuf->head = 0; + rbuf->tail = 0; + smp_mb(); + + return rbuf; +} + +static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf) +{ + kfree(rbuf); +} + +static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) +{ + int level = rbuf->head - rbuf->tail; + + if (level < 0) + level = rbuf->len + level; + + return level; +} + +static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf) +{ + return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1; +} + +static inline u8 +fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i) +{ + return rbuf->buf[(rbuf->tail + i) % rbuf->len]; +} + +static inline int +fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) +{ + count = min(count, fiq_debugger_ringbuf_level(rbuf)); + + rbuf->tail = (rbuf->tail + count) % rbuf->len; + smp_mb(); + + return count; +} + +static inline int +fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum) +{ + if (fiq_debugger_ringbuf_room(rbuf) == 0) + return 0; + + rbuf->buf[rbuf->head] = datum; + smp_mb(); + rbuf->head = (rbuf->head + 1) % rbuf->len; + smp_mb(); + + return 1; +} diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c new file mode 100644 index 000000000000..194b54138417 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "fiq_watchdog.h" +#include "fiq_debugger_priv.h" + +static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock); + +static void fiq_watchdog_printf(struct fiq_debugger_output *output, + const char *fmt, ...) +{ + char buf[256]; + va_list ap; + int len; + + va_start(ap, fmt); + len = vscnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + ramoops_console_write_buf(buf, len); +} + +struct fiq_debugger_output fiq_watchdog_output = { + .printf = fiq_watchdog_printf, +}; + +void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp) +{ + char msg[24]; + int len; + + raw_spin_lock(&fiq_watchdog_lock); + + len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n", + THREAD_INFO(svc_sp)->cpu); + ramoops_console_write_buf(msg, len); + + fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp); + + raw_spin_unlock(&fiq_watchdog_lock); +} diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h new file mode 100644 index 000000000000..c6b507f8d976 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _FIQ_WATCHDOG_H_ +#define _FIQ_WATCHDOG_H_ + +void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp); + +#endif diff --git a/drivers/staging/android/fwdata.c b/drivers/staging/android/fwdata.c new file mode 100644 index 000000000000..1f06c615a64e --- /dev/null +++ b/drivers/staging/android/fwdata.c @@ -0,0 +1,332 @@ +/* + * Copyright (C) 2017 Intel, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +struct android_fwdata_state { + struct device *dev; + struct kobject *properties_kobj; + struct kobject *android_kobj; + struct kobject *vbmeta_kobj; + struct kobject *fstab_kobj; + struct kobject *system_kobj; + struct kobject *vendor_kobj; +}; + +static struct android_fwdata_state state; + +/* Called when /properties// is read. 
*/ +static ssize_t property_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + const char *prefix = NULL; + char key[128]; + const char *value = NULL; + int ret; + + /* It would be much more convenient if show() gave us the relative path + * to the file being read, e.g. properties/android/fstab/system/dev, + * which could be easily converted to a property key. + * TODO: Infer the relative path from kobj and remove all hard-coded + * property keys. + */ + if (kobj == state.android_kobj) { + prefix = "android"; + } else if (kobj == state.vbmeta_kobj) { + prefix = "android.vbmeta"; + } else if (kobj == state.fstab_kobj) { + prefix = "android.fstab"; + } else if (kobj == state.system_kobj) { + prefix = "android.fstab.system"; + } else if (kobj == state.vendor_kobj) { + prefix = "android.fstab.vendor"; + } else { + pr_err("%s: Unexpected folder\n", __func__); + return -EINVAL; + } + /* We don't put any file in properties/ directly, so prefix can't be + * empty. + */ + snprintf(key, sizeof(key), "%s.%s", prefix, attr->attr.name); + + ret = device_property_read_string(state.dev, key, &value); + if (ret) { + pr_err("%s: Failed to read property '%s', ret=%d\n", __func__, + key, ret); + return ret; + } + return scnprintf(buf, PAGE_SIZE, "%s\n", value); +} + +#define DT_SIMPLE_ATTR(_prefix, _name) \ + struct kobj_attribute _prefix##_##_name##_attr = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0444, \ + }, \ + .show = property_show, \ + } + +static DT_SIMPLE_ATTR(android, compatible); +static DT_SIMPLE_ATTR(vbmeta, compatible); +static DT_SIMPLE_ATTR(fstab, compatible); +static DT_SIMPLE_ATTR(system, compatible); +static DT_SIMPLE_ATTR(vendor, compatible); + +static DT_SIMPLE_ATTR(vbmeta, parts); + +static struct attribute *vbmeta_attrs[] = { + &vbmeta_compatible_attr.attr, + &vbmeta_parts_attr.attr, + NULL, +}; + +static struct attribute_group vbmeta_group = { + .attrs = vbmeta_attrs, +}; + +static DT_SIMPLE_ATTR(system, dev); +static DT_SIMPLE_ATTR(system, type); +static DT_SIMPLE_ATTR(system, mnt_flags); +static DT_SIMPLE_ATTR(system, fsmgr_flags); + +static DT_SIMPLE_ATTR(vendor, dev); +static DT_SIMPLE_ATTR(vendor, type); +static DT_SIMPLE_ATTR(vendor, mnt_flags); +static DT_SIMPLE_ATTR(vendor, fsmgr_flags); + +static struct attribute *system_attrs[] = { + &system_compatible_attr.attr, + &system_dev_attr.attr, + &system_type_attr.attr, + &system_mnt_flags_attr.attr, + &system_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group system_group = { + .attrs = system_attrs, +}; + +static struct attribute *vendor_attrs[] = { + &vendor_compatible_attr.attr, + &vendor_dev_attr.attr, + &vendor_type_attr.attr, + &vendor_mnt_flags_attr.attr, + &vendor_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group vendor_group = { + .attrs = vendor_attrs, +}; + +static struct kobject *create_folder(struct kobject *parent, const char *name) +{ + struct kobject *kobj; + + kobj = kobject_create_and_add(name, parent); + if (!kobj) { + pr_err("%s: Failed to create %s/\n", __func__, name); + return NULL; + } + return kobj; +} + +static struct kobject *create_folder_with_file(struct kobject *parent, + const char *name, + struct kobj_attribute *attr) +{ + struct kobject *kobj; + + kobj = create_folder(parent, name); + if (kobj) { + /* Note: Usually drivers should use device_create_file() rather + * than sysfs_create_file(), but the former does not support + * creating the file in a subfolder. 
+ */ + int ret; + + ret = sysfs_create_file(kobj, &attr->attr); + if (ret) { + pr_err("%s: Failed to create %s/%s: ret=%d\n", __func__, + name, attr->attr.name, ret); + kobject_put(kobj); + return NULL; + } + } + return kobj; +} + +static void remove_folder_with_file(struct kobject *kobj, + struct kobj_attribute *attr) +{ + sysfs_remove_file(kobj, &attr->attr); + kobject_put(kobj); +} + +static struct kobject *create_folder_with_files(struct kobject *parent, + const char *name, + struct attribute_group *group) +{ + struct kobject *kobj; + + kobj = create_folder(parent, name); + if (kobj) { + /* Note: Usually drivers should use device_add_groups() rather + * than sysfs_create_group(), but the former does not support + * creating the folder in a subfolder. + */ + int ret; + + ret = sysfs_create_group(kobj, group); + if (ret) { + pr_err("%s: Failed to create %s/*: ret=%d\n", __func__, + name, ret); + kobject_put(kobj); + return NULL; + } + } + return kobj; +} + +static void remove_folder_with_files(struct kobject *kobj, + struct attribute_group *group) +{ + sysfs_remove_group(kobj, group); + kobject_put(kobj); +} + +static void clean_up(void) +{ + if (state.vendor_kobj) { + /* Delete /properties/android/fstab/vendor/ */ + remove_folder_with_files(state.vendor_kobj, &vendor_group); + state.vendor_kobj = NULL; + } + if (state.system_kobj) { + /* Delete /properties/android/fstab/system/ */ + remove_folder_with_files(state.system_kobj, &system_group); + state.system_kobj = NULL; + } + if (state.fstab_kobj) { + /* Delete /properties/android/fstab/ */ + remove_folder_with_file(state.fstab_kobj, + &fstab_compatible_attr); + state.fstab_kobj = NULL; + } + if (state.vbmeta_kobj) { + /* Delete /properties/android/vbmeta/ */ + remove_folder_with_files(state.vbmeta_kobj, &vbmeta_group); + state.vbmeta_kobj = NULL; + } + if (state.android_kobj) { + /* Delete /properties/android/ */ + remove_folder_with_file(state.android_kobj, + &android_compatible_attr); + state.android_kobj = NULL; + } + if (state.properties_kobj) { + /* Delete /properties/ */ + kobject_put(state.properties_kobj); + state.properties_kobj = NULL; + } +} + +static int android_fwdata_probe(struct platform_device *pdev) +{ + int ret = -EIO; + + state.dev = &pdev->dev; + /* Create /properties/ */ + state.properties_kobj = create_folder(&state.dev->kobj, "properties"); + if (!state.properties_kobj) + goto out; + + /* TODO: Iterate over all device properties in firmware, and dynamically + * create sysfs nodes under /properties/ + */ + + /* Create /properties/android/compatible */ + state.android_kobj = create_folder_with_file(state.properties_kobj, + "android", + &android_compatible_attr); + if (!state.android_kobj) + goto out; + + if (device_property_present(state.dev, "android.vbmeta.compatible")) { + /* Firmware contains vbmeta config for AVB 2.0 */ + state.vbmeta_kobj = create_folder_with_files(state.android_kobj, + "vbmeta", + &vbmeta_group); + if (!state.vbmeta_kobj) + goto out; + } + + /* Create /properties/android/fstab/compatible */ + state.fstab_kobj = create_folder_with_file(state.android_kobj, "fstab", + &fstab_compatible_attr); + if (!state.fstab_kobj) + goto out; + + if (device_property_present(state.dev, "android.fstab.system.dev")) { + /* Firmware contains fstab config for early mount of /system */ + state.system_kobj = create_folder_with_files(state.fstab_kobj, + "system", + &system_group); + if (!state.system_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.vendor.dev")) { + /* Firmware contains 
fstab config for early mount of /vendor */ + state.vendor_kobj = create_folder_with_files(state.fstab_kobj, + "vendor", + &vendor_group); + if (!state.vendor_kobj) + goto out; + } + return 0; + +out: + clean_up(); + return ret; +} + +static int android_fwdata_remove(struct platform_device *pdev) +{ + clean_up(); + return 0; +} + +static const struct acpi_device_id android_fwdata_acpi_match[] = { + { "ANDR0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, android_fwdata_acpi_match); + +static struct platform_driver android_fwdata_driver = { + .probe = android_fwdata_probe, + .remove = android_fwdata_remove, + .driver = { + .name = "android_fwdata", + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(android_fwdata_acpi_match), + } +}; + +module_platform_driver(android_fwdata_driver); diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index a517b2d29f1b..8f6494158d3d 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -37,7 +37,7 @@ config ION_CHUNK_HEAP config ION_CMA_HEAP bool "Ion CMA heap support" - depends on ION && CMA + depends on ION && DMA_CMA help Choose this option to enable CMA heaps with Ion. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c index d9f8b1424da1..021a956db1a8 100644 --- a/drivers/staging/android/ion/ion-ioctl.c +++ b/drivers/staging/android/ion/ion-ioctl.c @@ -71,8 +71,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; ret = validate_ioctl_arg(cmd, &data); - if (WARN_ON_ONCE(ret)) + if (ret) { + pr_warn_once("%s: ioctl validate failed\n", __func__); return ret; + } if (!(dir & _IOC_WRITE)) memset(&data, 0, sizeof(data)); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 93e2c90fa77d..83dc3292e9ab 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -348,7 +348,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); @@ -370,7 +370,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index dd5545d9990a..fa3e4b7e0c9f 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "ion.h" @@ -39,12 +40,34 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, struct ion_cma_heap *cma_heap = to_cma_heap(heap); struct sg_table *table; struct page *pages; + unsigned long size = PAGE_ALIGN(len); + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long align = get_order(size); int ret; - pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL); + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL); if (!pages) return -ENOMEM; + if (PageHighMem(pages)) { + unsigned long nr_clear_pages = nr_pages; + struct 
page *page = pages; + + while (nr_clear_pages > 0) { + void *vaddr = kmap_atomic(page); + + memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + page++; + nr_clear_pages--; + } + } else { + memset(page_address(pages), 0, size); + } + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) goto err; @@ -53,7 +76,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, if (ret) goto free_mem; - sg_set_page(table->sgl, pages, len, 0); + sg_set_page(table->sgl, pages, size, 0); buffer->priv_virt = pages; buffer->sg_table = table; @@ -62,7 +85,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, free_mem: kfree(table); err: - cma_release(cma_heap->cma, pages, buffer->size); + cma_release(cma_heap->cma, pages, nr_pages); return -ENOMEM; } @@ -70,9 +93,10 @@ static void ion_cma_free(struct ion_buffer *buffer) { struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); struct page *pages = buffer->priv_virt; + unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; /* release memory */ - cma_release(cma_heap->cma, pages, buffer->size); + cma_release(cma_heap->cma, pages, nr_pages); /* release sg table */ sg_free_table(buffer->sg_table); kfree(buffer->sg_table); diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 4dc5d7a589c2..b6ece18e6a88 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -371,7 +371,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, unsigned long i; int ret; - page = alloc_pages(low_order_gfp_flags, order); + page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order); if (!page) return -ENOMEM; diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c new file mode 100644 index 000000000000..999653dc2356 --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.c @@ -0,0 +1,304 @@ +/* drivers/misc/lowmemorykiller.c + * + * The lowmemorykiller driver lets user-space specify a set of memory thresholds + * where processes with a range of oom_score_adj values will get killed. Specify + * the minimum oom_score_adj values in + * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in + * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma + * separated list of numbers in ascending order. + * + * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and + * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill + * processes with a oom_score_adj value of 8 or higher when the free memory + * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or + * higher when the free memory drops below 1024 pages. + * + * The driver considers memory used for caches to be free, but if a large + * percentage of the cached memory is locked this can be very inaccurate + * and processes may not get killed until the normal oom killer is triggered. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace/lowmemorykiller.h" + +static u32 lowmem_debug_level = 1; +static short lowmem_adj[6] = { + 0, + 1, + 6, + 12, +}; + +static int lowmem_adj_size = 4; +static int lowmem_minfree[6] = { + 3 * 512, /* 6MB */ + 2 * 1024, /* 8MB */ + 4 * 1024, /* 16MB */ + 16 * 1024, /* 64MB */ +}; + +static int lowmem_minfree_size = 4; + +static unsigned long lowmem_deathpending_timeout; + +#define lowmem_print(level, x...) \ + do { \ + if (lowmem_debug_level >= (level)) \ + pr_info(x); \ + } while (0) + +static unsigned long lowmem_count(struct shrinker *s, + struct shrink_control *sc) +{ + return global_node_page_state(NR_ACTIVE_ANON) + + global_node_page_state(NR_ACTIVE_FILE) + + global_node_page_state(NR_INACTIVE_ANON) + + global_node_page_state(NR_INACTIVE_FILE); +} + +static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) +{ + struct task_struct *tsk; + struct task_struct *selected = NULL; + unsigned long rem = 0; + int tasksize; + int i; + short min_score_adj = OOM_SCORE_ADJ_MAX + 1; + int minfree = 0; + int selected_tasksize = 0; + short selected_oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; + int other_file = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + global_node_page_state(NR_UNEVICTABLE) - + total_swapcache_pages(); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + for (i = 0; i < array_size; i++) { + minfree = lowmem_minfree[i]; + if (other_free < minfree && other_file < minfree) { + min_score_adj = lowmem_adj[i]; + break; + } + } + + lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n", + sc->nr_to_scan, sc->gfp_mask, other_free, + other_file, min_score_adj); + + if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) { + lowmem_print(5, "lowmem_scan %lu, %x, return 0\n", + sc->nr_to_scan, sc->gfp_mask); + return 0; + } + + selected_oom_score_adj = min_score_adj; + + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *p; + short oom_score_adj; + + if (tsk->flags & PF_KTHREAD) + continue; + + p = find_lock_task_mm(tsk); + if (!p) + continue; + + if (task_lmk_waiting(p) && + time_before_eq(jiffies, lowmem_deathpending_timeout)) { + task_unlock(p); + rcu_read_unlock(); + return 0; + } + oom_score_adj = p->signal->oom_score_adj; + if (oom_score_adj < min_score_adj) { + task_unlock(p); + continue; + } + tasksize = get_mm_rss(p->mm); + task_unlock(p); + if (tasksize <= 0) + continue; + if (selected) { + if (oom_score_adj < selected_oom_score_adj) + continue; + if (oom_score_adj == selected_oom_score_adj && + tasksize <= selected_tasksize) + continue; + } + selected = p; + selected_tasksize = tasksize; + selected_oom_score_adj = oom_score_adj; + lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n", + p->comm, p->pid, oom_score_adj, tasksize); + } + if (selected) { + long cache_size = other_file * (long)(PAGE_SIZE / 1024); + long cache_limit = minfree * (long)(PAGE_SIZE / 1024); + long free = other_free * (long)(PAGE_SIZE / 1024); + + task_lock(selected); + send_sig(SIGKILL, selected, 0); + if (selected->mm) + task_set_lmk_waiting(selected); + task_unlock(selected); + trace_lowmemory_kill(selected, cache_size, cache_limit, free); + 
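+ /* cache_size, cache_limit and free were converted to kB above
+ * (pages * PAGE_SIZE / 1024); the same values are reported in the
+ * kill message below.
+ */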
lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n" + " to free %ldkB on behalf of '%s' (%d) because\n" + " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" + " Free memory is %ldkB above reserved\n", + selected->comm, selected->pid, selected->tgid, + selected_oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + cache_size, cache_limit, + min_score_adj, + free); + lowmem_deathpending_timeout = jiffies + HZ; + rem += selected_tasksize; + } + + lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n", + sc->nr_to_scan, sc->gfp_mask, rem); + rcu_read_unlock(); + return rem; +} + +static struct shrinker lowmem_shrinker = { + .scan_objects = lowmem_scan, + .count_objects = lowmem_count, + .seeks = DEFAULT_SEEKS * 16 +}; + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + return 0; +} +device_initcall(lowmem_init); + +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +static short lowmem_oom_adj_to_oom_score_adj(short oom_adj) +{ + if (oom_adj == OOM_ADJUST_MAX) + return OOM_SCORE_ADJ_MAX; + else + return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; +} + +static void lowmem_autodetect_oom_adj_values(void) +{ + int i; + short oom_adj; + short oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + + if (array_size <= 0) + return; + + oom_adj = lowmem_adj[array_size - 1]; + if (oom_adj > OOM_ADJUST_MAX) + return; + + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + if (oom_score_adj <= OOM_ADJUST_MAX) + return; + + lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n"); + for (i = 0; i < array_size; i++) { + oom_adj = lowmem_adj[i]; + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + lowmem_adj[i] = oom_score_adj; + lowmem_print(1, "oom_adj %d => oom_score_adj %d\n", + oom_adj, oom_score_adj); + } +} + +static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp) +{ + int ret; + + ret = param_array_ops.set(val, kp); + + /* HACK: Autodetect oom_adj values in lowmem_adj array */ + lowmem_autodetect_oom_adj_values(); + + return ret; +} + +static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp) +{ + return param_array_ops.get(buffer, kp); +} + +static void lowmem_adj_array_free(void *arg) +{ + param_array_ops.free(arg); +} + +static struct kernel_param_ops lowmem_adj_array_ops = { + .set = lowmem_adj_array_set, + .get = lowmem_adj_array_get, + .free = lowmem_adj_array_free, +}; + +static const struct kparam_array __param_arr_adj = { + .max = ARRAY_SIZE(lowmem_adj), + .num = &lowmem_adj_size, + .ops = ¶m_ops_short, + .elemsize = sizeof(lowmem_adj[0]), + .elem = lowmem_adj, +}; +#endif + +/* + * not really modular, but the easiest way to keep compat with existing + * bootargs behaviour is to continue using module_param here. 
+ */ +module_param_named(cost, lowmem_shrinker.seeks, int, 0644); +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +module_param_cb(adj, &lowmem_adj_array_ops, + .arr = &__param_arr_adj, + 0644); +__MODULE_PARM_TYPE(adj, "array of short"); +#else +module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644); +#endif +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, + 0644); +module_param_named(debug_level, lowmem_debug_level, uint, 0644); + diff --git a/drivers/staging/android/sbl/Kconfig b/drivers/staging/android/sbl/Kconfig new file mode 100644 index 000000000000..f5f754f8d890 --- /dev/null +++ b/drivers/staging/android/sbl/Kconfig @@ -0,0 +1,8 @@ +config SBL_BOOTLOADER_CONTROL + tristate "SBL Bootloader Control module" + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL Action + is written in CMOS data, in order to be processed by ABL on + reboot. diff --git a/drivers/staging/android/sbl/Makefile b/drivers/staging/android/sbl/Makefile new file mode 100644 index 000000000000..6d1258d7bfc1 --- /dev/null +++ b/drivers/staging/android/sbl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SBL_BOOTLOADER_CONTROL) += sblbc.o diff --git a/drivers/staging/android/sbl/sblbc.c b/drivers/staging/android/sbl/sblbc.c new file mode 100644 index 000000000000..d77700b70e25 --- /dev/null +++ b/drivers/staging/android/sbl/sblbc.c @@ -0,0 +1,367 @@ +/* + * sblbc: control SBL bootloaders + * Copyright (c) 2013-2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "sblbc" + +/* RTC read and write */ +static inline unsigned char cmos_read_ext_bank(u8 addr) +{ + outb(addr, RTC_PORT(4)); + return inb(RTC_PORT(5)); +} +#define CMOS_READ_EXT(a) cmos_read_ext_bank(a) + +static inline void cmos_write_ext_bank(u8 val, u8 addr) +{ + outb(addr, RTC_PORT(4)); + outb(val, RTC_PORT(5)); +} +#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a) + +/* ABL Conventions */ +#define NVRAM_START_ADDRESS 0x10 + +#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f)) +#define USERCMD_END _USERCMD_(0, 0) +#define USERCMD_ACTION _USERCMD_(7, 1) +#define USERCMD_UPDATE_IFWI(len) _USERCMD_(2, len) + +#define CDATA_TAG_USER_CMD 0x4d +#define NVRAM_VALID_FLAG 0x12 + +#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */ + +static bool capsule_request; + +union _cdata_header { + uint32_t data; + struct { + unsigned ncond : 2; + unsigned length : 10; + unsigned flags : 4; + unsigned version: 4; + unsigned tag : 12; + }; +}; + +struct nvram_capsule_cmd { + char action; + char device; + char partition; + char file_name[1]; +} __packed; + +struct nvram_reboot_cmd { + char action; + char target; + char end; + char padding; +} __packed; + +struct name2id { + const char *name; + int id; +}; + +struct nvram_msg { + char magic; + char size; + union _cdata_header cdata_header; + char *cdata_payload; + size_t cdata_payload_size; + uint32_t crc; +} __packed; + +static const struct name2id NAME2ID[] = { + { "main", 0x00 }, + { "android", 0x00 }, + { "bootloader", 0x01 }, + { "fastboot", 0x01 }, + { "elk", 0x02 }, + { "recovery", 0x03 }, + { "crashmode", 0x04 }, + { "dnx", 0x05 }, + { "cli", 0x10 }, +}; + +static size_t offset; /* memorize offset between each call */ + +static size_t write_data_to_nvram(char *data, size_t size) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + for (i = 0; i < size; i++) + CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i); + + for (i = 0; i < size; i++) + { + pr_err("Kernel Addr=0x%X, data=0x%X\n", (unsigned int)(NVRAM_START_ADDRESS + offset + i), (unsigned int)(*(unsigned char *)(data + i))); + } + + offset += size; + spin_unlock_irqrestore(&rtc_lock, flags); + + return i; +} + +static void write_msg_to_nvram(struct nvram_msg *nvram_msg) +{ + /* Ensure to start from top : only one command expected */ + offset = 0; + write_data_to_nvram((void*)nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + write_data_to_nvram((void*)(nvram_msg->cdata_payload), + nvram_msg->cdata_payload_size); + write_data_to_nvram((void*)&(nvram_msg->crc), sizeof(nvram_msg->crc)); +} + +/* Compute CRC for one byte (shift register-based: one bit at a time). */ +static uint32_t crc32c_byte(uint32_t crc, unsigned byte) +{ + int i; + uint32_t c; + + for (i = 0 ; i < 8 ; i += 1) { + c = (crc ^ byte) & 1; + if (c) + crc = (crc >> 1) ^ CRC32C_POLYNOMIAL; + else + crc = (crc >> 1); + byte >>= 1; + } + + return crc; +} + +/* Compute CRC for a given buffer. 
*/ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static struct kobject *capsule_kobject; + +static ssize_t is_capsule_requested(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", capsule_request); +} + +enum capsule_device_type { + EMMC = 2, + SDCARD = 4 +}; + +static ssize_t capsule_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nvram_msg msg; + struct nvram_capsule_cmd *capsule_cmd; + char name[32], partition; + enum capsule_device_type device; + int ret, padding; + unsigned char size; + union _cdata_header cdh; + + device = (buf[0] == 'm' ? EMMC : SDCARD); + partition = buf[1] - '0'; + ret = sscanf(buf+3, "%s", name); + pr_info(MODULE_NAME " capsule parameters (%d): DEVICE=%d PARTITION=%d NAME=%s\n", + ret, device, partition, name); + + cdh.data = 0; + cdh.tag = CDATA_TAG_USER_CMD; + + /* padding of filename on next dword */ + padding = (4 - (3 + strlen(name))%4)%4; + size = 2 + sizeof(cdh) + 3 + strlen(name) + padding + 4; + cdh.length = 1 + (3 + strlen(name) + padding) / 4; + + msg.magic = NVRAM_VALID_FLAG; + msg.size = size; + msg.cdata_header.data = cdh.data; + + capsule_cmd = kmalloc(size, GFP_KERNEL); + if (!capsule_cmd) + return -ENOMEM; + + capsule_cmd->action = USERCMD_UPDATE_IFWI(strlen(name) + 2); + capsule_cmd->device = device; + capsule_cmd->partition = partition; + strncpy(capsule_cmd->file_name, name, strlen(name)); + msg.cdata_payload = (char *)capsule_cmd; + msg.cdata_payload_size = 3 + strlen(name) + padding; + msg.crc = crc32c_msg(&msg); + write_msg_to_nvram(&msg); + capsule_request = true; + + kfree(capsule_cmd); + + return count; +} + +static struct kobj_attribute capsule_name_attribute = + __ATTR(capsule_name, 0600, NULL, capsule_store); + +static struct kobj_attribute capsule_requested_attribute = + __ATTR(capsule_requested, 0400, is_capsule_requested, NULL); + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static int 
sblbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + return NOTIFY_DONE; +} + +static struct notifier_block sblbc_reboot_notifier = { + .notifier_call = sblbc_reboot_notifier_call, +}; + +static int __init sblbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&sblbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + capsule_kobject = kobject_create_and_add("capsule", kernel_kobj); + if (!capsule_kobject) + return -ENOMEM; + + ret = sysfs_create_file(capsule_kobject, + &capsule_name_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_name\n"); + goto err; + } + + ret = sysfs_create_file(capsule_kobject, + &capsule_requested_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_requested\n"); + goto err; + } + + return 0; + +err: + kobject_put(capsule_kobject); + return ret; +} + +module_init(sblbc_init); + +static void __exit sblbc_exit(void) +{ + unregister_reboot_notifier(&sblbc_reboot_notifier); + kobject_put(capsule_kobject); +} +module_exit(sblbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Slimboot boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/trace/lowmemorykiller.h b/drivers/staging/android/trace/lowmemorykiller.h new file mode 100644 index 000000000000..f43d3fae75ee --- /dev/null +++ b/drivers/staging/android/trace/lowmemorykiller.h @@ -0,0 +1,41 @@ +#undef TRACE_SYSTEM +#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace +#define TRACE_SYSTEM lowmemorykiller + +#if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LOWMEMORYKILLER_H + +#include + +TRACE_EVENT(lowmemory_kill, + TP_PROTO(struct task_struct *killed_task, long cache_size, \ + long cache_limit, long free), + + TP_ARGS(killed_task, cache_size, cache_limit, free), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(long, pagecache_size) + __field(long, pagecache_limit) + __field(long, free) + ), + + TP_fast_assign( + memcpy(__entry->comm, killed_task->comm, TASK_COMM_LEN); + __entry->pid = killed_task->pid; + __entry->pagecache_size = cache_size; + __entry->pagecache_limit = cache_limit; + __entry->free = free; + ), + + TP_printk("%s (%d), page cache %ldkB (limit %ldkB), free %ldKb", + __entry->comm, __entry->pid, __entry->pagecache_size, + __entry->pagecache_limit, __entry->free) +); + + +#endif /* if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include diff --git a/drivers/staging/android/vsbl/Kconfig b/drivers/staging/android/vsbl/Kconfig new file mode 100644 index 000000000000..bb53cf922685 --- /dev/null +++ b/drivers/staging/android/vsbl/Kconfig @@ -0,0 +1,8 @@ +config VSBL_BOOTLOADER_CONTROL + tristate "vSBL Bootloader Control module" + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL Action + is written in CMOS data, in order to be processed by ABL on + reboot. 
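A minimal user-space sketch (illustrative only, not part of this patch) of how the reboot hook described in the help text above is exercised: the kernel forwards the command string supplied to reboot() with LINUX_REBOOT_CMD_RESTART2 to reboot notifiers as the SYS_RESTART data, and sblbc/vsblbc map that string to a bootloader action through their NAME2ID tables (main, android, bootloader, fastboot, elk, recovery, crashmode, dnx, cli). The "bootloader" target used below is just one of those entries.

        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/reboot.h>

        int main(void)
        {
                /* Needs CAP_SYS_BOOT. The string is delivered to the
                 * sblbc/vsblbc reboot notifier, which writes the matching
                 * user command into CMOS NVRAM before the machine restarts.
                 */
                return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
                               LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_RESTART2,
                               "bootloader");
        }
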
diff --git a/drivers/staging/android/vsbl/Makefile b/drivers/staging/android/vsbl/Makefile new file mode 100644 index 000000000000..8ce038941fc6 --- /dev/null +++ b/drivers/staging/android/vsbl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_VSBL_BOOTLOADER_CONTROL) += vsblbc.o diff --git a/drivers/staging/android/vsbl/vsblbc.c b/drivers/staging/android/vsbl/vsblbc.c new file mode 100644 index 000000000000..527a174f0130 --- /dev/null +++ b/drivers/staging/android/vsbl/vsblbc.c @@ -0,0 +1,262 @@ +/* + * vsblbc: control vSBL bootloaders + * Copyright (c) 2013-2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "vsblbc" + +/* RTC read and write */ +static inline unsigned char cmos_read_ext_bank(u8 addr) +{ + outb(addr, RTC_PORT(4)); + return inb(RTC_PORT(5)); +} +#define CMOS_READ_EXT(a) cmos_read_ext_bank(a) + +static inline void cmos_write_ext_bank(u8 val, u8 addr) +{ + outb(addr, RTC_PORT(4)); + outb(val, RTC_PORT(5)); +} +#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a) + +/* vSBL Conventions */ +#define NVRAM_START_ADDRESS 0x10 + +#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f)) +#define USERCMD_END _USERCMD_(0, 0) +#define USERCMD_ACTION _USERCMD_(7, 1) + +#define CDATA_TAG_USER_CMD 0x4d +#define NVRAM_VALID_FLAG 0x12 + +#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */ + +union _cdata_header { + uint32_t data; + struct { + unsigned ncond : 2; + unsigned length : 10; + unsigned flags : 4; + unsigned version: 4; + unsigned tag : 12; + }; +}; + +struct nvram_reboot_cmd { + char action; + char target; + char end; + char padding; +} __packed; + +struct name2id { + const char *name; + int id; +}; + +struct nvram_msg { + char magic; + char size; + union _cdata_header cdata_header; + char *cdata_payload; + size_t cdata_payload_size; + uint32_t crc; +} __packed; + +static const struct name2id NAME2ID[] = { + { "main", 0x00 }, + { "android", 0x00 }, + { "bootloader", 0x01 }, + { "fastboot", 0x01 }, + { "elk", 0x02 }, + { "recovery", 0x03 }, + { "crashmode", 0x04 }, + { "dnx", 0x05 }, + { "cli", 0x10 }, +}; + +static size_t offset; /* memorize offset between each call */ + +static size_t write_data_to_nvram(char *data, size_t size) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + for (i = 0; i < size; i++) + CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i); + + offset += size; + spin_unlock_irqrestore(&rtc_lock, flags); + + return i; +} + +static void write_msg_to_nvram(struct nvram_msg *nvram_msg) +{ + /* Ensure to start from top : only one command expected */ + offset = 0; + write_data_to_nvram((void*)nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + write_data_to_nvram((void*)(nvram_msg->cdata_payload), + nvram_msg->cdata_payload_size); + write_data_to_nvram((void*)&(nvram_msg->crc), sizeof(nvram_msg->crc)); +} + +/* Compute CRC for one byte (shift register-based: one bit at a time). 
*/ +static uint32_t crc32c_byte(uint32_t crc, unsigned byte) +{ + int i; + uint32_t c; + + for (i = 0 ; i < 8 ; i += 1) { + c = (crc ^ byte) & 1; + if (c) + crc = (crc >> 1) ^ CRC32C_POLYNOMIAL; + else + crc = (crc >> 1); + byte >>= 1; + } + + return crc; +} + +/* Compute CRC for a given buffer. */ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static int vsblbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + return NOTIFY_DONE; +} + +static struct notifier_block vsblbc_reboot_notifier = { + .notifier_call = vsblbc_reboot_notifier_call, +}; + +static int __init vsblbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&vsblbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + return 0; +} + +module_init(vsblbc_init); + +static void __exit vsblbc_exit(void) +{ + unregister_reboot_notifier(&vsblbc_reboot_notifier); +} +module_exit(vsblbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Virtual Slimboot boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h index 851d3907167e..a9c417b07b04 100644 --- a/drivers/staging/ccree/cc_lli_defs.h +++ b/drivers/staging/ccree/cc_lli_defs.h @@ -59,7 +59,7 @@ static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr) lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK; - lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 16)); + lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32)); #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ 
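+ /* With a 64-bit dma_addr_t, WORD0 (set above) carries the low 32 bits,
+ * so the value packed into the LLI_HADDR_MASK field of WORD1 must be
+ * addr >> 32, not addr >> 16.
+ */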
} diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c index 8d31a93fd8b7..087a622f20b2 100644 --- a/drivers/staging/ccree/ssi_cipher.c +++ b/drivers/staging/ccree/ssi_cipher.c @@ -904,6 +904,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req) scatterwalk_map_and_copy(req_ctx->backup_info, req->src, (req->nbytes - ivsize), ivsize, 0); req_ctx->is_giv = false; + req_ctx->backup_info = NULL; return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT); } diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c index 9c6f1200c130..eeb995307951 100644 --- a/drivers/staging/ccree/ssi_driver.c +++ b/drivers/staging/ccree/ssi_driver.c @@ -141,7 +141,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) irr &= ~SSI_COMP_IRQ_MASK; complete_request(drvdata); } -#ifdef CC_SUPPORT_FIPS +#ifdef CONFIG_CRYPTO_FIPS /* TEE FIPS interrupt */ if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) { /* Mask interrupt - will be unmasked in Deferred service handler */ diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c index 13291aeaf350..e266a70a1b32 100644 --- a/drivers/staging/ccree/ssi_hash.c +++ b/drivers/staging/ccree/ssi_hash.c @@ -1781,7 +1781,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) struct device *dev = &ctx->drvdata->plat_dev->dev; struct ahash_req_ctx *state = ahash_request_ctx(req); u32 tmp; - int rc; + int rc = 0; memcpy(&tmp, in, sizeof(u32)); if (tmp != CC_EXPORT_MAGIC) { @@ -1790,9 +1790,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) } in += sizeof(u32); - rc = ssi_hash_init(state, ctx); - if (rc) - goto out; + /* call init() to allocate bufs if the user hasn't */ + if (!state->digest_buff) { + rc = ssi_hash_init(state, ctx); + if (rc) + goto out; + } dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 0b43db6371c6..c11c22bd6d13 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, struct comedi_cmd *cmd = &async->cmd; if (cmd->stop_src == TRIG_COUNT) { - unsigned int nscans = nsamples / cmd->scan_end_arg; - unsigned int scans_left = __comedi_nscans_left(s, nscans); + unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); unsigned int scan_pos = comedi_bytes_to_samples(s, async->scan_progress); unsigned long long samples_left = 0; diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 398347fedc47..2cac160993bb 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1284,6 +1284,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status) ack |= NISTC_INTA_ACK_AI_START; if (a_status & NISTC_AI_STATUS1_STOP) ack |= NISTC_INTA_ACK_AI_STOP; + if (a_status & NISTC_AI_STATUS1_OVER) + ack |= NISTC_INTA_ACK_AI_ERR; if (ack) ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG); } diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c index 26017fe9df93..8e84b2e7f5bd 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -131,6 +131,8 @@ static struct sk_buff 
*build_linear_skb(struct dpaa2_eth_priv *priv, u16 fd_offset = dpaa2_fd_get_offset(fd); u32 fd_length = dpaa2_fd_get_len(fd); + ch->buf_count--; + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); if (unlikely(!skb)) @@ -139,8 +141,6 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, skb_reserve(skb, fd_offset); skb_put(skb, fd_length); - ch->buf_count--; - return skb; } @@ -178,8 +178,15 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* We build the skb around the first data buffer */ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) - return NULL; + if (unlikely(!skb)) { + /* We still need to subtract the buffers used + * by this FD from our software counter + */ + while (!dpaa2_sg_is_final(&sgt[i]) && + i < DPAA2_ETH_MAX_SG_ENTRIES) + i++; + break; + } sg_offset = dpaa2_sg_get_offset(sge); skb_reserve(skb, sg_offset); diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 504c987447f2..eee1c1b277fa 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig @@ -8,7 +8,7 @@ config FSL_MC_BUS bool "QorIQ DPAA2 fsl-mc bus driver" - depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC))) + depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC))) select GENERIC_MSI_IRQ_DOMAIN help Driver to enable the bus infrastructure for the QorIQ DPAA2 diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c index f8096828f5b7..a609ec82daf3 100644 --- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c +++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c @@ -76,7 +76,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, if (d) return d; - if (unlikely(cpu >= num_possible_cpus())) + if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus()) return NULL; /* @@ -121,7 +121,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) return NULL; /* check if CPU is out of range (-1 means any cpu) */ - if (desc->cpu >= num_possible_cpus()) { + if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) { kfree(obj); return NULL; } diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h index c5646096c5d4..afc2d060d077 100644 --- a/drivers/staging/fsl-mc/include/dpaa2-io.h +++ b/drivers/staging/fsl-mc/include/dpaa2-io.h @@ -54,6 +54,8 @@ struct device; * for dequeue. */ +#define DPAA2_IO_ANY_CPU -1 + /** * struct dpaa2_io_desc - The DPIO descriptor * @receives_notifications: Use notificaton mode. Non-zero if the DPIO @@ -91,8 +93,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj); * @cb: The callback to be invoked when the notification arrives * @is_cdan: Zero for FQDAN, non-zero for CDAN * @id: FQID or channel ID, needed for rearm - * @desired_cpu: The cpu on which the notifications will show up. -1 means - * any CPU. + * @desired_cpu: The cpu on which the notifications will show up. Use + * DPAA2_IO_ANY_CPU if don't care * @dpio_id: The dpio index * @qman64: The 64-bit context value shows up in the FQDAN/CDAN. 
* @node: The list node diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig index 4e094602437c..d293bbc22c79 100644 --- a/drivers/staging/goldfish/Kconfig +++ b/drivers/staging/goldfish/Kconfig @@ -4,6 +4,14 @@ config GOLDFISH_AUDIO ---help--- Emulated audio channel for the Goldfish Android Virtual Device +config GOLDFISH_SYNC + tristate "Goldfish AVD Sync Driver" + depends on GOLDFISH + depends on SW_SYNC + depends on SYNC_FILE + ---help--- + Emulated sync fences for the Goldfish Android Virtual Device + config MTD_GOLDFISH_NAND tristate "Goldfish NAND device" depends on GOLDFISH diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile index dec34ad58162..3313fce4e940 100644 --- a/drivers/staging/goldfish/Makefile +++ b/drivers/staging/goldfish/Makefile @@ -4,3 +4,9 @@ obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o + +# and sync + +ccflags-y := -Idrivers/staging/android +goldfish_sync-objs := goldfish_sync_timeline_fence.o goldfish_sync_timeline.o +obj-$(CONFIG_GOLDFISH_SYNC) += goldfish_sync.o diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c index bd559956f199..0bb0ee2e691f 100644 --- a/drivers/staging/goldfish/goldfish_audio.c +++ b/drivers/staging/goldfish/goldfish_audio.c @@ -28,6 +28,7 @@ #include #include #include +#include MODULE_AUTHOR("Google, Inc."); MODULE_DESCRIPTION("Android QEMU Audio Driver"); @@ -116,6 +117,7 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct goldfish_audio *data = fp->private_data; + unsigned long irq_flags; int length; int result = 0; @@ -129,6 +131,10 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf, wait_event_interruptible(data->wait, data->buffer_status & AUDIO_INT_READ_BUFFER_FULL); + spin_lock_irqsave(&data->lock, irq_flags); + data->buffer_status &= ~AUDIO_INT_READ_BUFFER_FULL; + spin_unlock_irqrestore(&data->lock, irq_flags); + length = AUDIO_READ(data, AUDIO_READ_BUFFER_AVAILABLE); /* copy data to user space */ @@ -351,12 +357,19 @@ static const struct of_device_id goldfish_audio_of_match[] = { }; MODULE_DEVICE_TABLE(of, goldfish_audio_of_match); +static const struct acpi_device_id goldfish_audio_acpi_match[] = { + { "GFSH0005", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_audio_acpi_match); + static struct platform_driver goldfish_audio_driver = { .probe = goldfish_audio_probe, .remove = goldfish_audio_remove, .driver = { .name = "goldfish_audio", .of_match_table = goldfish_audio_of_match, + .acpi_match_table = ACPI_PTR(goldfish_audio_acpi_match), } }; diff --git a/drivers/staging/goldfish/goldfish_sync_timeline.c b/drivers/staging/goldfish/goldfish_sync_timeline.c new file mode 100644 index 000000000000..880d6e2e5b34 --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync_timeline.c @@ -0,0 +1,962 @@ +/* + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "goldfish_sync_timeline_fence.h" + +#define ERR(...) printk(KERN_ERR __VA_ARGS__); + +#define INFO(...) printk(KERN_INFO __VA_ARGS__); + +#define DPRINT(...) pr_debug(__VA_ARGS__); + +#define DTRACE() DPRINT("%s: enter", __func__) + +/* The Goldfish sync driver is designed to provide an interface + * between the underlying host's sync device and the kernel's + * fence sync framework. + * The purpose of the device/driver is to enable lightweight + * creation and signaling of timelines and fences + * in order to synchronize the guest with host-side graphics events. + * + * Each time the interrupt trips, the driver + * may perform a sync operation. + */ + +/* The operations are: */ + +/* Ready signal - used to mark when irq should lower */ +#define CMD_SYNC_READY 0 + +/* Create a new timeline. writes timeline handle */ +#define CMD_CREATE_SYNC_TIMELINE 1 + +/* Create a fence object. reads timeline handle and time argument. + * Writes fence fd to the SYNC_REG_HANDLE register. */ +#define CMD_CREATE_SYNC_FENCE 2 + +/* Increments timeline. reads timeline handle and time argument */ +#define CMD_SYNC_TIMELINE_INC 3 + +/* Destroys a timeline. reads timeline handle */ +#define CMD_DESTROY_SYNC_TIMELINE 4 + +/* Starts a wait on the host with + * the given glsync object and sync thread handle. */ +#define CMD_TRIGGER_HOST_WAIT 5 + +/* The register layout is: */ + +#define SYNC_REG_BATCH_COMMAND 0x00 /* host->guest batch commands */ +#define SYNC_REG_BATCH_GUESTCOMMAND 0x04 /* guest->host batch commands */ +#define SYNC_REG_BATCH_COMMAND_ADDR 0x08 /* communicate physical address of host->guest batch commands */ +#define SYNC_REG_BATCH_COMMAND_ADDR_HIGH 0x0c /* 64-bit part */ +#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR 0x10 /* communicate physical address of guest->host commands */ +#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */ +#define SYNC_REG_INIT 0x18 /* signals that the device has been probed */ + +/* There is an ioctl associated with the goldfish sync driver. + * Make it conflict with ioctls that are not likely to be used + * in the emulator. + * + * '@' 00-0F linux/radeonfb.h conflict! + * '@' 00-0F drivers/video/aty/aty128fb.c conflict! + */ +#define GOLDFISH_SYNC_IOC_MAGIC '@' + +#define GOLDFISH_SYNC_IOC_QUEUE_WORK _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info) + +/* The above definitions (command codes, register layout, ioctl definitions) + * need to be in sync with the following files: + * + * Host-side (emulator): + * external/qemu/android/emulation/goldfish_sync.h + * external/qemu-android/hw/misc/goldfish_sync.c + * + * Guest-side (system image): + * device/generic/goldfish-opengl/system/egl/goldfish_sync.h + * device/generic/goldfish/ueventd.ranchu.rc + * platform/build/target/board/generic/sepolicy/file_contexts + */ +struct goldfish_sync_hostcmd { + /* sorted for alignment */ + uint64_t handle; + uint64_t hostcmd_handle; + uint32_t cmd; + uint32_t time_arg; +}; + +struct goldfish_sync_guestcmd { + uint64_t host_command; /* uint64_t for alignment */ + uint64_t glsync_handle; + uint64_t thread_handle; + uint64_t guest_timeline_handle; +}; + +#define GOLDFISH_SYNC_MAX_CMDS 32 + +struct goldfish_sync_state { + char __iomem *reg_base; + int irq; + + /* Spinlock protects |to_do| / |to_do_end|.
*/ + spinlock_t lock; + /* |mutex_lock| protects all concurrent access + * to timelines for both kernel and user space. */ + struct mutex mutex_lock; + + /* Buffer holding commands issued from host. */ + struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS]; + uint32_t to_do_end; + + /* Addresses for the reading or writing + * of individual commands. The host can directly write + * to |batch_hostcmd| (and then this driver immediately + * copies contents to |to_do|). This driver either replies + * through |batch_hostcmd| or simply issues a + * guest->host command through |batch_guestcmd|. + */ + struct goldfish_sync_hostcmd *batch_hostcmd; + struct goldfish_sync_guestcmd *batch_guestcmd; + + /* Used to give this struct itself to a work queue + * function for executing actual sync commands. */ + struct work_struct work_item; +}; + +static struct goldfish_sync_state global_sync_state[1]; + +struct goldfish_sync_timeline_obj { + struct goldfish_sync_timeline *sync_tl; + uint32_t current_time; + /* We need to be careful about when we deallocate + * this |goldfish_sync_timeline_obj| struct. + * In order to ensure proper cleanup, we need to + * consider the triggered host-side wait that may + * still be in flight when the guest close()'s a + * goldfish_sync device's sync context fd (and + * destroys the |sync_tl| field above). + * The host-side wait may raise IRQ + * and tell the kernel to increment the timeline _after_ + * the |sync_tl| has already been set to null. + * + * From observations on OpenGL apps and CTS tests, this + * happens at some very low probability upon context + * destruction or process close, but it does happen + * and it needs to be handled properly. Otherwise, + * if we clean up the surrounding |goldfish_sync_timeline_obj| + * too early, any |handle| field of any host->guest command + * might not even point to a null |sync_tl| field, + * but to garbage memory or even a reclaimed |sync_tl|. + * If we do not count such "pending waits" and kfree the object + * immediately upon |goldfish_sync_timeline_destroy|, + * we might get mysterious RCU stalls after running a long + * time because the garbage memory that is being read + * happens to be interpretable as a |spinlock_t| struct + * that is currently in the locked state. + * + * To track when to free the |goldfish_sync_timeline_obj| + * itself, we maintain a kref. + * The kref essentially counts the timeline itself plus + * the number of waits in flight. kref_init/kref_put + * are issued on + * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy| + * and kref_get/kref_put are issued on + * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|. + * + * The timeline is destroyed after reference count + * reaches zero, which would happen after + * |goldfish_sync_timeline_destroy| and all pending + * |goldfish_sync_timeline_inc|'s are fulfilled. + * + * NOTE (1): We assume that |fence_create| and + * |timeline_inc| calls are 1:1, otherwise the kref scheme + * will not work. This is a valid assumption as long + * as the host-side virtual device implementation + * does not insert any timeline increments + * that we did not trigger from here. + * + * NOTE (2): The use of kref by itself requires no locks, + * but this does not mean everything works without locks. + * Related timeline operations do require a lock of some sort, + * or at least are not proven to work without it.
+ * In particular, we assume that all the operations + * done on the |kref| field above are done in contexts where + * |global_sync_state->mutex_lock| is held. Do not + * remove that lock until everything is proven to work + * without it!!! */ + struct kref kref; +}; + +/* We will call |delete_timeline_obj| when the last reference count + * of the kref is decremented. This deletes the sync + * timeline object along with the wrapper itself. */ +static void delete_timeline_obj(struct kref* kref) { + struct goldfish_sync_timeline_obj* obj = + container_of(kref, struct goldfish_sync_timeline_obj, kref); + + goldfish_sync_timeline_put_internal(obj->sync_tl); + obj->sync_tl = NULL; + kfree(obj); +} + +static uint64_t gensym_ctr; +static void gensym(char *dst) +{ + sprintf(dst, "goldfish_sync:gensym:%llu", gensym_ctr); + gensym_ctr++; +} + +/* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock| + * is held. */ +static struct goldfish_sync_timeline_obj* +goldfish_sync_timeline_create(void) +{ + + char timeline_name[256]; + struct goldfish_sync_timeline *res_sync_tl = NULL; + struct goldfish_sync_timeline_obj *res; + + DTRACE(); + + gensym(timeline_name); + + res_sync_tl = goldfish_sync_timeline_create_internal(timeline_name); + if (!res_sync_tl) { + ERR("Failed to create goldfish_sw_sync timeline."); + return NULL; + } + + res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL); + res->sync_tl = res_sync_tl; + res->current_time = 0; + kref_init(&res->kref); + + DPRINT("new timeline_obj=0x%p", res); + return res; +} + +/* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock| + * is held. */ +static int +goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj, + uint32_t val) +{ + + int fd; + char fence_name[256]; + struct sync_pt *syncpt = NULL; + struct sync_file *sync_file_obj = NULL; + struct goldfish_sync_timeline *tl; + + DTRACE(); + + if (!obj) return -1; + + tl = obj->sync_tl; + + syncpt = goldfish_sync_pt_create_internal( + tl, sizeof(struct sync_pt) + 4, val); + if (!syncpt) { + ERR("could not create sync point! " + "goldfish_sync_timeline=0x%p val=%d", + tl, val); + return -1; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ERR("could not get unused fd for sync fence. " + "errno=%d", fd); + goto err_cleanup_pt; + } + + gensym(fence_name); + + sync_file_obj = sync_file_create(&syncpt->base); + if (!sync_file_obj) { + ERR("could not create sync fence! " + "goldfish_sync_timeline=0x%p val=%d sync_pt=0x%p", + tl, val, syncpt); + goto err_cleanup_fd_pt; + } + + DPRINT("installing sync fence into fd %d sync_file_obj=0x%p", + fd, sync_file_obj); + fd_install(fd, sync_file_obj->file); + kref_get(&obj->kref); + + return fd; + +err_cleanup_fd_pt: + put_unused_fd(fd); +err_cleanup_pt: + dma_fence_put(&syncpt->base); + return -1; +} + +/* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock| + * is held. */ +static void +goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc) +{ + DTRACE(); + /* Just give up if someone else nuked the timeline. + * Whoever it was won't care that it doesn't get signaled. */ + if (!obj) return; + + DPRINT("timeline_obj=0x%p", obj); + goldfish_sync_timeline_signal_internal(obj->sync_tl, inc); + DPRINT("incremented timeline. increment max_time"); + obj->current_time += inc; + + /* Here, we will end up deleting the timeline object if it + * turns out that this call was a pending increment after + * |goldfish_sync_timeline_destroy| was called.
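As a side note, the reference-count pairing described in the comments above can be captured by a tiny stand-alone model. This is only an illustrative sketch and is not code from this driver: a plain counter stands in for the kref, and the helper names merely mirror the driver's operations.

#include <assert.h>
#include <stdio.h>

/* Toy model of the refcounting on goldfish_sync_timeline_obj:
 * create() takes the initial reference, each fence_create() adds one,
 * and each timeline_inc() as well as the final destroy() drops one.
 * The object is freed only when the count reaches zero, so a host
 * increment that arrives after destroy() still finds valid memory.
 */
static int refs;

static void timeline_create(void)  { refs = 1; }  /* kref_init */
static void fence_create(void)     { refs++; }    /* kref_get  */

static void drop(const char *why)
{
    if (--refs == 0)
        printf("freed after %s\n", why);          /* delete_timeline_obj */
}

static void timeline_inc(void)     { drop("timeline_inc"); }  /* kref_put */
static void timeline_destroy(void) { drop("destroy"); }       /* kref_put */

int main(void)
{
    timeline_create();   /* refs == 1 */
    fence_create();      /* refs == 2: one host-side wait in flight */
    timeline_destroy();  /* refs == 1: context closed, object survives */
    timeline_inc();      /* refs == 0: late increment, object now freed */
    assert(refs == 0);
    return 0;
}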
*/ + kref_put(&obj->kref, delete_timeline_obj); + DPRINT("done"); +} + +/* |goldfish_sync_timeline_destroy| assumes + * that |global_sync_state->mutex_lock| is held. */ +static void +goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj) +{ + DTRACE(); + /* See description of |goldfish_sync_timeline_obj| for why we + * should not immediately destroy |obj| */ + kref_put(&obj->kref, delete_timeline_obj); +} + +static inline void +goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t handle, + uint32_t time_arg, + uint64_t hostcmd_handle) +{ + struct goldfish_sync_hostcmd *to_add; + + DTRACE(); + + BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS); + + to_add = &sync_state->to_do[sync_state->to_do_end]; + + to_add->cmd = cmd; + to_add->handle = handle; + to_add->time_arg = time_arg; + to_add->hostcmd_handle = hostcmd_handle; + + sync_state->to_do_end += 1; +} + +static inline void +goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t handle, + uint32_t time_arg, + uint64_t hostcmd_handle) +{ + unsigned long irq_flags; + struct goldfish_sync_hostcmd *batch_hostcmd = + sync_state->batch_hostcmd; + + DTRACE(); + + spin_lock_irqsave(&sync_state->lock, irq_flags); + + batch_hostcmd->cmd = cmd; + batch_hostcmd->handle = handle; + batch_hostcmd->time_arg = time_arg; + batch_hostcmd->hostcmd_handle = hostcmd_handle; + writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND); + + spin_unlock_irqrestore(&sync_state->lock, irq_flags); +} + +static inline void +goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t glsync_handle, + uint64_t thread_handle, + uint64_t timeline_handle) +{ + unsigned long irq_flags; + struct goldfish_sync_guestcmd *batch_guestcmd = + sync_state->batch_guestcmd; + + DTRACE(); + + spin_lock_irqsave(&sync_state->lock, irq_flags); + + batch_guestcmd->host_command = (uint64_t)cmd; + batch_guestcmd->glsync_handle = (uint64_t)glsync_handle; + batch_guestcmd->thread_handle = (uint64_t)thread_handle; + batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle; + writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND); + + spin_unlock_irqrestore(&sync_state->lock, irq_flags); +} + +/* |goldfish_sync_interrupt| handles IRQ raises from the virtual device. + * In the context of OpenGL, this interrupt will fire whenever we need + * to signal a fence fd in the guest, with the command + * |CMD_SYNC_TIMELINE_INC|. + * However, because this function will be called in an interrupt context, + * it is necessary to do the actual work of signaling off of interrupt context. + * The shared work queue is used for this purpose. At the end when + * all pending commands are intercepted by the interrupt handler, + * we call |schedule_work|, which will later run the actual + * desired sync command in |goldfish_sync_work_item_fn|. 
+ */ +static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id) +{ + + struct goldfish_sync_state *sync_state = dev_id; + + uint32_t nextcmd; + uint32_t command_r; + uint64_t handle_rw; + uint32_t time_r; + uint64_t hostcmd_handle_rw; + + int count = 0; + + DTRACE(); + + sync_state = dev_id; + + spin_lock(&sync_state->lock); + + for (;;) { + + readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND); + nextcmd = sync_state->batch_hostcmd->cmd; + + if (nextcmd == 0) + break; + + command_r = nextcmd; + handle_rw = sync_state->batch_hostcmd->handle; + time_r = sync_state->batch_hostcmd->time_arg; + hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle; + + goldfish_sync_cmd_queue( + sync_state, + command_r, + handle_rw, + time_r, + hostcmd_handle_rw); + + count++; + } + + spin_unlock(&sync_state->lock); + + schedule_work(&sync_state->work_item); + + return (count == 0) ? IRQ_NONE : IRQ_HANDLED; +} + +/* |goldfish_sync_work_item_fn| does the actual work of servicing + * host->guest sync commands. This function is triggered whenever + * the IRQ for the goldfish sync device is raised. Once it starts + * running, it grabs the contents of the buffer containing the + * commands it needs to execute (there may be multiple, because + * our IRQ is active high and not edge triggered), and then + * runs all of them one after the other. + */ +static void goldfish_sync_work_item_fn(struct work_struct *input) +{ + + struct goldfish_sync_state *sync_state; + int sync_fence_fd; + + struct goldfish_sync_timeline_obj *timeline; + uint64_t timeline_ptr; + + uint64_t hostcmd_handle; + + uint32_t cmd; + uint64_t handle; + uint32_t time_arg; + + struct goldfish_sync_hostcmd *todo; + uint32_t todo_end; + + unsigned long irq_flags; + + struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS]; + uint32_t i = 0; + + sync_state = container_of(input, struct goldfish_sync_state, work_item); + + mutex_lock(&sync_state->mutex_lock); + + spin_lock_irqsave(&sync_state->lock, irq_flags); { + + todo_end = sync_state->to_do_end; + + DPRINT("num sync todos: %u", sync_state->to_do_end); + + for (i = 0; i < todo_end; i++) + to_run[i] = sync_state->to_do[i]; + + /* We expect that commands will come in at a slow enough rate + * so that incoming items will not be more than + * GOLDFISH_SYNC_MAX_CMDS. + * + * This is because the way the sync device is used, + * it's only for managing buffer data transfers per frame, + * with a sequential dependency between putting things in + * to_do and taking them out. Once a set of commands is + * queued up in to_do, the user of the device waits for + * them to be processed before queuing additional commands, + * which limits the rate at which commands come in + * to the rate at which we take them out here. + * + * We also don't expect more than MAX_CMDS to be issued + * at once; there is a correspondence between + * which buffers need swapping to the (display / buffer queue) + * to particular commands, and we don't expect there to be + * enough display or buffer queues in operation at once + * to overrun GOLDFISH_SYNC_MAX_CMDS. 
+ */ + sync_state->to_do_end = 0; + + } spin_unlock_irqrestore(&sync_state->lock, irq_flags); + + for (i = 0; i < todo_end; i++) { + DPRINT("todo index: %u", i); + + todo = &to_run[i]; + + cmd = todo->cmd; + + handle = (uint64_t)todo->handle; + time_arg = todo->time_arg; + hostcmd_handle = (uint64_t)todo->hostcmd_handle; + + DTRACE(); + + timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle; + + switch (cmd) { + case CMD_SYNC_READY: + break; + case CMD_CREATE_SYNC_TIMELINE: + DPRINT("exec CMD_CREATE_SYNC_TIMELINE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + timeline = goldfish_sync_timeline_create(); + timeline_ptr = (uintptr_t)timeline; + goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE, + timeline_ptr, + 0, + hostcmd_handle); + DPRINT("sync timeline created: %p", timeline); + break; + case CMD_CREATE_SYNC_FENCE: + DPRINT("exec CMD_CREATE_SYNC_FENCE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg); + goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE, + sync_fence_fd, + 0, + hostcmd_handle); + break; + case CMD_SYNC_TIMELINE_INC: + DPRINT("exec CMD_SYNC_TIMELINE_INC: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + goldfish_sync_timeline_inc(timeline, time_arg); + break; + case CMD_DESTROY_SYNC_TIMELINE: + DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + goldfish_sync_timeline_destroy(timeline); + break; + } + DPRINT("Done executing sync command"); + } + mutex_unlock(&sync_state->mutex_lock); +} + +/* Guest-side interface: file operations */ + +/* Goldfish sync context and ioctl info. + * + * When a sync context is created by open()-ing the goldfish sync device, we + * create a sync context (|goldfish_sync_context|). + * + * Currently, the only data required to track is the sync timeline itself + * along with the current time, which are all packed up in the + * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context| + * as the filp->private_data. + * + * Next, when a sync context user requests that work be queued and a fence + * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds + * information about which host handles to touch for this particular + * queue-work operation. We need to know about the host-side sync thread + * and the particular host-side GLsync object. We also possibly write out + * a file descriptor. 
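As an illustration of the flow described above, here is a minimal user-space sketch. It is not taken from this patch: the /dev/goldfish_sync node name is assumed from the misc device registration below, and the two handle values are placeholders that would normally come from the guest-side EGL library.

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>   /* _IOWR */

/* User-space mirror of the driver's ioctl interface (see above). */
struct goldfish_sync_ioctl_info {
    uint64_t host_glsync_handle_in;
    uint64_t host_syncthread_handle_in;
    int fence_fd_out;
};

#define GOLDFISH_SYNC_IOC_MAGIC '@'
#define GOLDFISH_SYNC_IOC_QUEUE_WORK \
    _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)

int main(void)
{
    /* Assumed device node for the "goldfish_sync" misc device. */
    int dev = open("/dev/goldfish_sync", O_RDWR);
    if (dev < 0)
        return 1;

    struct goldfish_sync_ioctl_info info = {
        .host_glsync_handle_in = 0x1234,      /* placeholder handle */
        .host_syncthread_handle_in = 0x5678,  /* placeholder handle */
        .fence_fd_out = -1,
    };

    /* Queue the host-side wait and receive a fence fd in return. */
    if (ioctl(dev, GOLDFISH_SYNC_IOC_QUEUE_WORK, &info) == 0) {
        struct pollfd pfd = { .fd = info.fence_fd_out, .events = POLLIN };
        poll(&pfd, 1, -1);   /* block until the host signals the fence */
        close(info.fence_fd_out);
    }

    close(dev);
    return 0;
}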
+ */ +struct goldfish_sync_context { + struct goldfish_sync_timeline_obj *timeline; +}; + +struct goldfish_sync_ioctl_info { + uint64_t host_glsync_handle_in; + uint64_t host_syncthread_handle_in; + int fence_fd_out; +}; + +static int goldfish_sync_open(struct inode *inode, struct file *file) +{ + + struct goldfish_sync_context *sync_context; + + DTRACE(); + + mutex_lock(&global_sync_state->mutex_lock); + + sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL); + + if (sync_context == NULL) { + ERR("Creation of goldfish sync context failed!"); + mutex_unlock(&global_sync_state->mutex_lock); + return -ENOMEM; + } + + sync_context->timeline = NULL; + + file->private_data = sync_context; + + DPRINT("successfully create a sync context @0x%p", sync_context); + + mutex_unlock(&global_sync_state->mutex_lock); + + return 0; +} + +static int goldfish_sync_release(struct inode *inode, struct file *file) +{ + + struct goldfish_sync_context *sync_context; + + DTRACE(); + + mutex_lock(&global_sync_state->mutex_lock); + + sync_context = file->private_data; + + if (sync_context->timeline) + goldfish_sync_timeline_destroy(sync_context->timeline); + + sync_context->timeline = NULL; + + kfree(sync_context); + + mutex_unlock(&global_sync_state->mutex_lock); + + return 0; +} + +/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync + * and is used in conjunction with eglCreateSyncKHR to queue up the + * actual work of waiting for the EGL sync command to complete, + * possibly returning a fence fd to the guest. + */ +static long goldfish_sync_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct goldfish_sync_context *sync_context_data; + struct goldfish_sync_timeline_obj *timeline; + int fd_out; + struct goldfish_sync_ioctl_info ioctl_data; + + DTRACE(); + + sync_context_data = file->private_data; + fd_out = -1; + + switch (cmd) { + case GOLDFISH_SYNC_IOC_QUEUE_WORK: + + DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK"); + + mutex_lock(&global_sync_state->mutex_lock); + + if (copy_from_user(&ioctl_data, + (void __user *)arg, + sizeof(ioctl_data))) { + ERR("Failed to copy memory for ioctl_data from user."); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + if (ioctl_data.host_syncthread_handle_in == 0) { + DPRINT("Error: zero host syncthread handle!!!"); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + if (!sync_context_data->timeline) { + DPRINT("no timeline yet, create one."); + sync_context_data->timeline = goldfish_sync_timeline_create(); + DPRINT("timeline: 0x%p", &sync_context_data->timeline); + } + + timeline = sync_context_data->timeline; + fd_out = goldfish_sync_fence_create(timeline, + timeline->current_time + 1); + DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)", + fd_out, + sync_context_data->timeline->current_time + 1, + sync_context_data->timeline); + + ioctl_data.fence_fd_out = fd_out; + + if (copy_to_user((void __user *)arg, + &ioctl_data, + sizeof(ioctl_data))) { + DPRINT("Error, could not copy to user!!!"); + + sys_close(fd_out); + /* We won't be doing an increment, kref_put immediately. */ + kref_put(&timeline->kref, delete_timeline_obj); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + /* We are now about to trigger a host-side wait; + * accumulate on |pending_waits|. 
*/ + goldfish_sync_send_guestcmd(global_sync_state, + CMD_TRIGGER_HOST_WAIT, + ioctl_data.host_glsync_handle_in, + ioctl_data.host_syncthread_handle_in, + (uint64_t)(uintptr_t)(sync_context_data->timeline)); + + mutex_unlock(&global_sync_state->mutex_lock); + return 0; + default: + return -ENOTTY; + } +} + +static const struct file_operations goldfish_sync_fops = { + .owner = THIS_MODULE, + .open = goldfish_sync_open, + .release = goldfish_sync_release, + .unlocked_ioctl = goldfish_sync_ioctl, + .compat_ioctl = goldfish_sync_ioctl, +}; + +static struct miscdevice goldfish_sync_device = { + .name = "goldfish_sync", + .fops = &goldfish_sync_fops, +}; + + +static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state, + void *batch_addr, + uint32_t addr_offset, + uint32_t addr_offset_high) +{ + uint64_t batch_addr_phys; + uint32_t batch_addr_phys_test_lo; + uint32_t batch_addr_phys_test_hi; + + if (!batch_addr) { + ERR("Could not use batch command address!"); + return false; + } + + batch_addr_phys = virt_to_phys(batch_addr); + writel((uint32_t)(batch_addr_phys), + sync_state->reg_base + addr_offset); + writel((uint32_t)(batch_addr_phys >> 32), + sync_state->reg_base + addr_offset_high); + + batch_addr_phys_test_lo = + readl(sync_state->reg_base + addr_offset); + batch_addr_phys_test_hi = + readl(sync_state->reg_base + addr_offset_high); + + if (virt_to_phys(batch_addr) != + (((uint64_t)batch_addr_phys_test_hi << 32) | + batch_addr_phys_test_lo)) { + ERR("Invalid batch command address!"); + return false; + } + + return true; +} + +int goldfish_sync_probe(struct platform_device *pdev) +{ + struct resource *ioresource; + struct goldfish_sync_state *sync_state = global_sync_state; + int status; + + DTRACE(); + + sync_state->to_do_end = 0; + + spin_lock_init(&sync_state->lock); + mutex_init(&sync_state->mutex_lock); + + platform_set_drvdata(pdev, sync_state); + + ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (ioresource == NULL) { + ERR("platform_get_resource failed"); + return -ENODEV; + } + + sync_state->reg_base = + devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE); + if (sync_state->reg_base == NULL) { + ERR("Could not ioremap"); + return -ENOMEM; + } + + sync_state->irq = platform_get_irq(pdev, 0); + if (sync_state->irq < 0) { + ERR("Could not platform_get_irq"); + return -ENODEV; + } + + status = devm_request_irq(&pdev->dev, + sync_state->irq, + goldfish_sync_interrupt, + IRQF_SHARED, + pdev->name, + sync_state); + if (status) { + ERR("request_irq failed"); + return -ENODEV; + } + + INIT_WORK(&sync_state->work_item, + goldfish_sync_work_item_fn); + + misc_register(&goldfish_sync_device); + + /* Obtain addresses for batch send/recv of commands. 
*/ + { + struct goldfish_sync_hostcmd *batch_addr_hostcmd; + struct goldfish_sync_guestcmd *batch_addr_guestcmd; + + batch_addr_hostcmd = + devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd), + GFP_KERNEL); + batch_addr_guestcmd = + devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd), + GFP_KERNEL); + + if (!setup_verify_batch_cmd_addr(sync_state, + batch_addr_hostcmd, + SYNC_REG_BATCH_COMMAND_ADDR, + SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) { + ERR("goldfish_sync: Could not setup batch command address"); + return -ENODEV; + } + + if (!setup_verify_batch_cmd_addr(sync_state, + batch_addr_guestcmd, + SYNC_REG_BATCH_GUESTCOMMAND_ADDR, + SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) { + ERR("goldfish_sync: Could not setup batch guest command address"); + return -ENODEV; + } + + sync_state->batch_hostcmd = batch_addr_hostcmd; + sync_state->batch_guestcmd = batch_addr_guestcmd; + } + + INFO("goldfish_sync: Initialized goldfish sync device"); + + writel(0, sync_state->reg_base + SYNC_REG_INIT); + + return 0; +} + +static int goldfish_sync_remove(struct platform_device *pdev) +{ + struct goldfish_sync_state *sync_state = global_sync_state; + + DTRACE(); + + misc_deregister(&goldfish_sync_device); + memset(sync_state, 0, sizeof(struct goldfish_sync_state)); + return 0; +} + +static const struct of_device_id goldfish_sync_of_match[] = { + { .compatible = "google,goldfish-sync", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_sync_of_match); + +static const struct acpi_device_id goldfish_sync_acpi_match[] = { + { "GFSH0006", 0 }, + { }, +}; + +MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match); + +static struct platform_driver goldfish_sync = { + .probe = goldfish_sync_probe, + .remove = goldfish_sync_remove, + .driver = { + .name = "goldfish_sync", + .of_match_table = goldfish_sync_of_match, + .acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match), + } +}; + +module_platform_driver(goldfish_sync); + +MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Android QEMU Sync Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); diff --git a/drivers/staging/goldfish/goldfish_sync_timeline_fence.c b/drivers/staging/goldfish/goldfish_sync_timeline_fence.c new file mode 100644 index 000000000000..5c0c029fadda --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync_timeline_fence.c @@ -0,0 +1,246 @@ +#include +#include +#include +#include +#include + +#include "goldfish_sync_timeline_fence.h" + +/* + * Timeline-based sync for Goldfish Sync + * Based on "Sync File validation framework" + * (drivers/dma-buf/sw_sync.c) + * + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/** + * struct goldfish_sync_timeline - sync object + * @kref: reference count on fence. + * @name: name of the goldfish_sync_timeline. 
Useful for debugging + * @child_list_head: list of children sync_pts for this goldfish_sync_timeline + * @child_list_lock: lock protecting @child_list_head and fence.status + * @active_list_head: list of active (unsignaled/errored) sync_pts + */ +struct goldfish_sync_timeline { + struct kref kref; + char name[32]; + + /* protected by child_list_lock */ + u64 context; + int value; + + struct list_head child_list_head; + spinlock_t child_list_lock; + + struct list_head active_list_head; +}; + +static inline struct goldfish_sync_timeline *goldfish_dma_fence_parent(struct dma_fence *fence) +{ + return container_of(fence->lock, struct goldfish_sync_timeline, + child_list_lock); +} + +static const struct dma_fence_ops goldfish_sync_timeline_fence_ops; + +static inline struct sync_pt *goldfish_sync_fence_to_sync_pt(struct dma_fence *fence) +{ + if (fence->ops != &goldfish_sync_timeline_fence_ops) + return NULL; + return container_of(fence, struct sync_pt, base); +} + +/** + * goldfish_sync_timeline_create_internal() - creates a sync object + * @name: sync_timeline name + * + * Creates a new sync_timeline. Returns the sync_timeline object or NULL in + * case of error. + */ +struct goldfish_sync_timeline +*goldfish_sync_timeline_create_internal(const char *name) +{ + struct goldfish_sync_timeline *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; + + kref_init(&obj->kref); + obj->context = dma_fence_context_alloc(1); + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->child_list_lock); + + return obj; +} + +static void goldfish_sync_timeline_free_internal(struct kref *kref) +{ + struct goldfish_sync_timeline *obj = + container_of(kref, struct goldfish_sync_timeline, kref); + + kfree(obj); +} + +static void goldfish_sync_timeline_get_internal( + struct goldfish_sync_timeline *obj) +{ + kref_get(&obj->kref); +} + +void goldfish_sync_timeline_put_internal(struct goldfish_sync_timeline *obj) +{ + kref_put(&obj->kref, goldfish_sync_timeline_free_internal); +} + +/** + * goldfish_sync_timeline_signal() - + * signal a status change on a goldfish_sync_timeline + * @obj: sync_timeline to signal + * @inc: num to increment on timeline->value + * + * A sync implementation should call this any time one of its fences + * has signaled or has an error condition. + */ +void goldfish_sync_timeline_signal_internal(struct goldfish_sync_timeline *obj, + unsigned int inc) +{ + unsigned long flags; + struct sync_pt *pt, *next; + + spin_lock_irqsave(&obj->child_list_lock, flags); + + obj->value += inc; + + list_for_each_entry_safe(pt, next, &obj->active_list_head, + active_list) { + if (dma_fence_is_signaled_locked(&pt->base)) + list_del_init(&pt->active_list); + } + + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +/** + * goldfish_sync_pt_create_internal() - creates a sync pt + * @parent: fence's parent sync_timeline + * @size: size to allocate for this pt + * @inc: value of the fence + * + * Creates a new sync_pt as a child of @parent. @size bytes will be + * allocated allowing for implementation specific data to be kept after + * the generic sync_timeline struct. Returns the sync_pt object or + * NULL in case of error.
+ */ +struct sync_pt *goldfish_sync_pt_create_internal( + struct goldfish_sync_timeline *obj, int size, + unsigned int value) +{ + unsigned long flags; + struct sync_pt *pt; + + if (size < sizeof(*pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (!pt) + return NULL; + + spin_lock_irqsave(&obj->child_list_lock, flags); + goldfish_sync_timeline_get_internal(obj); + dma_fence_init(&pt->base, &goldfish_sync_timeline_fence_ops, &obj->child_list_lock, + obj->context, value); + list_add_tail(&pt->child_list, &obj->child_list_head); + INIT_LIST_HEAD(&pt->active_list); + spin_unlock_irqrestore(&obj->child_list_lock, flags); + return pt; +} + +static const char *goldfish_sync_timeline_fence_get_driver_name( + struct dma_fence *fence) +{ + return "sw_sync"; +} + +static const char *goldfish_sync_timeline_fence_get_timeline_name( + struct dma_fence *fence) +{ + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + return parent->name; +} + +static void goldfish_sync_timeline_fence_release(struct dma_fence *fence) +{ + struct sync_pt *pt = goldfish_sync_fence_to_sync_pt(fence); + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + unsigned long flags; + + spin_lock_irqsave(fence->lock, flags); + list_del(&pt->child_list); + if (!list_empty(&pt->active_list)) + list_del(&pt->active_list); + spin_unlock_irqrestore(fence->lock, flags); + + goldfish_sync_timeline_put_internal(parent); + dma_fence_free(fence); +} + +static bool goldfish_sync_timeline_fence_signaled(struct dma_fence *fence) +{ + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + return (fence->seqno > parent->value) ? false : true; +} + +static bool goldfish_sync_timeline_fence_enable_signaling(struct dma_fence *fence) +{ + struct sync_pt *pt = goldfish_sync_fence_to_sync_pt(fence); + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + if (goldfish_sync_timeline_fence_signaled(fence)) + return false; + + list_add_tail(&pt->active_list, &parent->active_list_head); + return true; +} + +static void goldfish_sync_timeline_fence_value_str(struct dma_fence *fence, + char *str, int size) +{ + snprintf(str, size, "%d", fence->seqno); +} + +static void goldfish_sync_timeline_fence_timeline_value_str( + struct dma_fence *fence, + char *str, int size) +{ + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + snprintf(str, size, "%d", parent->value); +} + +static const struct dma_fence_ops goldfish_sync_timeline_fence_ops = { + .get_driver_name = goldfish_sync_timeline_fence_get_driver_name, + .get_timeline_name = goldfish_sync_timeline_fence_get_timeline_name, + .enable_signaling = goldfish_sync_timeline_fence_enable_signaling, + .signaled = goldfish_sync_timeline_fence_signaled, + .wait = dma_fence_default_wait, + .release = goldfish_sync_timeline_fence_release, + .fence_value_str = goldfish_sync_timeline_fence_value_str, + .timeline_value_str = goldfish_sync_timeline_fence_timeline_value_str, +}; diff --git a/drivers/staging/goldfish/goldfish_sync_timeline_fence.h b/drivers/staging/goldfish/goldfish_sync_timeline_fence.h new file mode 100644 index 000000000000..638c6fb68f1e --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync_timeline_fence.h @@ -0,0 +1,58 @@ +#include +#include + +/** + * struct sync_pt - sync_pt object + * @base: base dma_fence object + * @child_list: sync timeline child's list + * @active_list: sync timeline active child's list + */ +struct sync_pt { + struct dma_fence base; + struct 
list_head child_list; + struct list_head active_list; +}; + +/** + * goldfish_sync_timeline_create_internal() - creates a sync object + * @name: goldfish_sync_timeline name + * + * Creates a new goldfish_sync_timeline. + * Returns the goldfish_sync_timeline object or NULL in case of error. + */ +struct goldfish_sync_timeline +*goldfish_sync_timeline_create_internal(const char *name); + +/** + * goldfish_sync_pt_create_internal() - creates a sync pt + * @parent: fence's parent goldfish_sync_timeline + * @size: size to allocate for this pt + * @inc: value of the fence + * + * Creates a new sync_pt as a child of @parent. @size bytes will be + * allocated allowing for implementation specific data to be kept after + * the generic sync_timeline struct. Returns the sync_pt object or + * NULL in case of error. + */ +struct sync_pt +*goldfish_sync_pt_create_internal(struct goldfish_sync_timeline *obj, + int size, unsigned int value); + +/** + * goldfish_sync_timeline_signal_internal() - + * signal a status change on a sync_timeline + * @obj: goldfish_sync_timeline to signal + * @inc: num to increment on timeline->value + * + * A sync implementation should call this any time one of its fences + * has signaled or has an error condition. + */ +void goldfish_sync_timeline_signal_internal(struct goldfish_sync_timeline *obj, + unsigned int inc); + +/** + * goldfish_sync_timeline_put_internal() - dec refcount of a sync_timeline + * and clean up memory if it was the last ref. + * @obj: goldfish_sync_timeline to decref + */ +void goldfish_sync_timeline_put_internal(struct goldfish_sync_timeline *obj); diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 3f4148c92308..0f538b8c3a07 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -925,6 +925,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel) return; led_classdev_unregister(cdev); + kfree(cdev->name); + cdev->name = NULL; channel->led = NULL; } diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c index 08e255884206..93e86798ec1c 100644 --- a/drivers/staging/greybus/loopback.c +++ b/drivers/staging/greybus/loopback.c @@ -1042,8 +1042,10 @@ static int gb_loopback_fn(void *data) else if (type == GB_LOOPBACK_TYPE_SINK) error = gb_loopback_async_sink(gb, size); - if (error) + if (error) { gb->error++; + gb->iteration_count++; + } } else { /* We are effectively single threaded here */ if (type == GB_LOOPBACK_TYPE_PING) diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c index e97b19148497..1e7321a1404c 100644 --- a/drivers/staging/greybus/spilib.c +++ b/drivers/staging/greybus/spilib.c @@ -544,11 +544,14 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, return 0; -exit_spi_unregister: - spi_unregister_master(master); exit_spi_put: spi_master_put(master); + return ret; + +exit_spi_unregister: + spi_unregister_master(master); + return ret; } EXPORT_SYMBOL_GPL(gb_spilib_master_init); @@ -558,7 +561,6 @@ void gb_spilib_master_exit(struct gb_connection *connection) struct spi_master *master = gb_connection_get_data(connection); spi_unregister_master(master); - spi_master_put(master); } EXPORT_SYMBOL_GPL(gb_spilib_master_exit); diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk deleted file mode 100644 index fdadbf611757..000000000000 --- a/drivers/staging/greybus/tools/Android.mk +++ /dev/null @@ -1,10 +0,0 @@ -LOCAL_PATH:= $(call my-dir) -
-include $(CLEAR_VARS) - -LOCAL_SRC_FILES:= loopback_test.c -LOCAL_MODULE_TAGS := optional -LOCAL_MODULE := gb_loopback_test - -include $(BUILD_EXECUTABLE) - diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index 6150d2780e22..31a195d1bf05 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -141,6 +141,8 @@ #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ +#define AD7192_EXT_FREQ_MHZ_MIN 2457600 +#define AD7192_EXT_FREQ_MHZ_MAX 5120000 #define AD7192_INT_FREQ_MHZ 4915200 /* NOTE: @@ -217,6 +219,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st) ARRAY_SIZE(ad7192_calib_arr)); } +static inline bool ad7192_valid_external_frequency(u32 freq) +{ + return (freq >= AD7192_EXT_FREQ_MHZ_MIN && + freq <= AD7192_EXT_FREQ_MHZ_MAX); +} + static int ad7192_setup(struct ad7192_state *st, const struct ad7192_platform_data *pdata) { @@ -242,17 +250,20 @@ static int ad7192_setup(struct ad7192_state *st, id); switch (pdata->clock_source_sel) { - case AD7192_CLK_EXT_MCLK1_2: - case AD7192_CLK_EXT_MCLK2: - st->mclk = AD7192_INT_FREQ_MHZ; - break; case AD7192_CLK_INT: case AD7192_CLK_INT_CO: - if (pdata->ext_clk_hz) - st->mclk = pdata->ext_clk_hz; - else - st->mclk = AD7192_INT_FREQ_MHZ; + st->mclk = AD7192_INT_FREQ_MHZ; break; + case AD7192_CLK_EXT_MCLK1_2: + case AD7192_CLK_EXT_MCLK2: + if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) { + st->mclk = pdata->ext_clk_hz; + break; + } + dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n", + pdata->ext_clk_hz); + ret = -EINVAL; + goto out; default: ret = -EINVAL; goto out; diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 3d539eeb0e26..6d31001d1825 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -649,8 +649,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) /* Ring buffer functions - here trigger setup related */ indio_dev->setup_ops = &ad5933_ring_setup_ops; - indio_dev->modes |= INDIO_BUFFER_HARDWARE; - return 0; } @@ -763,7 +761,7 @@ static int ad5933_probe(struct i2c_client *client, indio_dev->dev.parent = &client->dev; indio_dev->info = &ad5933_info; indio_dev->name = id->name; - indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE); indio_dev->channels = ad5933_channels; indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 64763aacda57..284cdd44a2ee 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -825,14 +825,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm return conn; failed_2: - kiblnd_destroy_conn(conn, true); + kiblnd_destroy_conn(conn); + LIBCFS_FREE(conn, sizeof(*conn)); failed_1: LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) +void kiblnd_destroy_conn(struct kib_conn *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; struct kib_peer *peer = conn->ibc_peer; @@ -895,8 +896,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) rdma_destroy_id(cmid); atomic_dec(&net->ibn_nconns); } - - LIBCFS_FREE(conn, sizeof(*conn)); } int 
kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why) diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index a1e994a1cc84..98a5e2c21a83 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -1015,7 +1015,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why); struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid, int state, int version); -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn); +void kiblnd_destroy_conn(struct kib_conn *conn); void kiblnd_close_conn(struct kib_conn *conn, int error); void kiblnd_close_conn_locked(struct kib_conn *conn, int error); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 8fc191d99927..29e10021b906 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -3313,11 +3313,13 @@ kiblnd_connd(void *arg) spin_unlock_irqrestore(lock, flags); dropped_lock = 1; - kiblnd_destroy_conn(conn, !peer); + kiblnd_destroy_conn(conn); spin_lock_irqsave(lock, flags); - if (!peer) + if (!peer) { + kfree(conn); continue; + } conn->ibc_peer = peer; if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c index 2da051c0d251..a4bb93b440a5 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c @@ -528,19 +528,20 @@ EXPORT_SYMBOL(cfs_cpt_spread_node); int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) { - int cpu = smp_processor_id(); - int cpt = cptab->ctb_cpu2cpt[cpu]; + int cpu; + int cpt; - if (cpt < 0) { - if (!remap) - return cpt; + preempt_disable(); + cpu = smp_processor_id(); + cpt = cptab->ctb_cpu2cpt[cpu]; + if (cpt < 0 && remap) { /* don't return negative value for safety of upper layer, * instead we shadow the unknown cpu to a valid partition ID */ cpt = cpu % cptab->ctb_nparts; } - + preempt_enable(); return cpt; } EXPORT_SYMBOL(cfs_cpt_current); diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c index db0572733712..ab30a0f5129c 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c @@ -119,6 +119,7 @@ static struct shash_alg alg = { .cra_name = "adler32", .cra_driver_name = "adler32-zlib", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index cd7a5391a574..0a3f832095ea 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -847,7 +847,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req) if (req->rq_pool || !req->rq_reqbuf) return; - kfree(req->rq_reqbuf); + kvfree(req->rq_reqbuf); req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index 32a483769975..fa611455109a 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -754,7 +754,7 @@ static 
void start_bss_network(struct adapter *padapter, u8 *pbuf) } /* setting only at first time */ - if (!(pmlmepriv->cur_network.join_res)) { + if (pmlmepriv->cur_network.join_res != true) { /* WEP Key will be set before this function, do not * clear CAM. */ diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 9461bce883ea..be8542676adf 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -333,7 +333,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter) else RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (!pcmd) { res = _FAIL; goto exit; @@ -508,7 +508,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu if (enqueue) { /* need enqueue, prepare cmd_obj and enqueue */ - cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC); if (!cmdobj) { res = _FAIL; kfree(param); diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index f663e6c41f8a..f6d71587b803 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -106,10 +106,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) { - rtw_free_mlme_priv_ie_data(pmlmepriv); - - if (pmlmepriv) + if (pmlmepriv) { + rtw_free_mlme_priv_ie_data(pmlmepriv); vfree(pmlmepriv->free_bss_buf); + } } struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv) diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index 3fd5f4102b36..afb9dadc1cfe 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -259,10 +259,12 @@ static int recvframe_chkmic(struct adapter *adapter, } /* icv_len included the mic code */ - datalen = precvframe->pkt->len-prxattrib->hdrlen - 8; + datalen = precvframe->pkt->len-prxattrib->hdrlen - + prxattrib->iv_len-prxattrib->icv_len-8; pframe = precvframe->pkt->data; - payload = pframe+prxattrib->hdrlen; + payload = pframe+prxattrib->hdrlen+prxattrib->iv_len; + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len)); rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], (unsigned char)prxattrib->priority); /* care the length of the data */ @@ -407,15 +409,9 @@ static struct recv_frame *decryptor(struct adapter *padapter, default: break; } - if (res != _FAIL) { - memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen); - skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len); - skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len); - } } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 && - (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) { - psecuritypriv->hw_decrypted = true; - } + (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) + psecuritypriv->hw_decrypted = true; if (res == _FAIL) { rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue); @@ -456,7 +452,7 @@ static struct recv_frame *portctrl(struct adapter *adapter, if (auth_alg == 2) { /* get ether_type */ - ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE; + 
ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len; memcpy(&be_tmp, ptr, 2); ether_type = ntohs(be_tmp); @@ -1138,8 +1134,6 @@ static int validate_recv_data_frame(struct adapter *adapter, } if (pattrib->privacy) { - struct sk_buff *skb = precv_frame->pkt; - RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy)); RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra))); @@ -1148,13 +1142,6 @@ static int validate_recv_data_frame(struct adapter *adapter, RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt)); SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt); - - if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) { - memmove(skb->data + pattrib->iv_len, - skb->data, pattrib->hdrlen); - skb_pull(skb, pattrib->iv_len); - skb_trim(skb, skb->len - pattrib->icv_len); - } } else { pattrib->encrypt = 0; pattrib->iv_len = 0; @@ -1274,7 +1261,6 @@ static int validate_recv_frame(struct adapter *adapter, * Hence forward the frame to the monitor anyway to preserve the order * in which frames were received. */ - rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame); exit: @@ -1296,8 +1282,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) u8 *ptr = precvframe->pkt->data; struct rx_pkt_attrib *pattrib = &precvframe->attrib; - psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen); - psnap_type = ptr+pattrib->hdrlen + SNAP_SIZE; + if (pattrib->encrypt) + skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len); + + psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len); + psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE; /* convert hdr + possible LLC headers into Ethernet header */ if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) && (!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) && @@ -1310,9 +1299,12 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) bsnaphdr = false; } - rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0); + rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? 
SNAP_SIZE : 0); len = precvframe->pkt->len - rmv_len; + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, + ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len)); + memcpy(&be_tmp, ptr+rmv_len, 2); eth_type = ntohs(be_tmp); /* pattrib->ether_type */ pattrib->eth_type = eth_type; @@ -1337,6 +1329,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q) { struct list_head *plist, *phead; + u8 wlanhdr_offset; u8 curfragnum; struct recv_frame *pfhdr, *pnfhdr; struct recv_frame *prframe, *pnextrframe; @@ -1385,7 +1378,12 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, /* copy the 2nd~n fragment frame's payload to the first fragment */ /* get the 2nd~last fragment frame's payload */ - skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen); + wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len; + + skb_pull(pnextrframe->pkt, wlanhdr_offset); + + /* append to first fragment frame's tail (if privacy frame, pull the ICV) */ + skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len); /* memcpy */ memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data, @@ -1393,7 +1391,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, skb_put(prframe->pkt, pnfhdr->pkt->len); - pfhdr->attrib.icv_len = 0; + pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len; plist = plist->next; } @@ -1519,6 +1517,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) nr_subframes = 0; pattrib = &prframe->attrib; + skb_pull(prframe->pkt, prframe->attrib.hdrlen); + + if (prframe->attrib.iv_len > 0) + skb_pull(prframe->pkt, prframe->attrib.iv_len); + a_len = prframe->pkt->len; pdata = prframe->pkt->data; @@ -1887,6 +1890,24 @@ static int process_recv_indicatepkts(struct adapter *padapter, return retval; } +static int recv_func_prehandle(struct adapter *padapter, + struct recv_frame *rframe) +{ + int ret = _SUCCESS; + struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; + + /* check the frame crtl field and decache */ + ret = validate_recv_frame(padapter, rframe); + if (ret != _SUCCESS) { + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n")); + rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */ + goto exit; + } + +exit: + return ret; +} + static int recv_func_posthandle(struct adapter *padapter, struct recv_frame *prframe) { @@ -1939,7 +1960,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) struct rx_pkt_attrib *prxattrib = &rframe->attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; struct mlme_priv *mlmepriv = &padapter->mlmepriv; - struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; /* check if need to handle uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) { @@ -1951,12 +1971,9 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) } } - /* check the frame crtl field and decache */ - ret = validate_recv_frame(padapter, rframe); - if (ret != _SUCCESS) { - RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! 
drop pkt\n")); - rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */ - } else { + ret = recv_func_prehandle(padapter, rframe); + + if (ret == _SUCCESS) { /* check if need to enqueue into uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 && diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index c0664dc80bf2..446310775e90 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev, if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { len = pcur_bss->Ssid.SsidLength; - - wrqu->essid.length = len; - memcpy(extra, pcur_bss->Ssid.Ssid, len); - - wrqu->essid.flags = 1; } else { - ret = -1; - goto exit; + len = 0; + *extra = 0; } - -exit: - + wrqu->essid.length = len; + wrqu->essid.flags = 1; return ret; } diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index 37fd52d7364f..225c23fc69dc 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -66,34 +66,6 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data, netif_rx(skb); } -static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data, - int data_len) -{ - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - int hdr_len; - - skb = netdev_alloc_skb(dev, data_len); - if (!skb) - return; - memcpy(skb_put(skb, data_len), data, data_len); - - /* - * Frame data is not encrypted. Strip off protection so - * userspace doesn't think that it is. - */ - - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - if (ieee80211_has_protected(hdr->frame_control)) - hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED); - - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); -} - static void mon_recv_encrypted(struct net_device *dev, const u8 *data, int data_len) { @@ -110,6 +82,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data, void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame) { struct rx_pkt_attrib *attr; + int iv_len, icv_len; int data_len; u8 *data; @@ -122,8 +95,11 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame) data = frame->pkt->data; data_len = frame->pkt->len; + /* Broadcast and multicast frames don't have attr->{iv,icv}_len set */ + SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt); + if (attr->bdecrypted) - mon_recv_decrypted_recv(dev, data, data_len); + mon_recv_decrypted(dev, data, data_len, iv_len, icv_len); else mon_recv_encrypted(dev, data, data_len); } diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c index 31a4f3fcad19..c88b9788363a 100644 --- a/drivers/staging/rtlwifi/phydm/phydm_dig.c +++ b/drivers/staging/rtlwifi/phydm/phydm_dig.c @@ -490,6 +490,8 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type, break; } + /* pin max_level to be >= 0 */ + max_level = max_t(s8, 0, max_level); /* write IGI of lower level */ odm_write_dig(dm, dig_tab->pause_dig_value[max_level]); ODM_RT_TRACE(dm, ODM_COMP_DIG, diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c index 8e24da16752c..acabb2470d55 100644 --- 
a/drivers/staging/rtlwifi/rtl8822be/fw.c +++ b/drivers/staging/rtlwifi/rtl8822be/fw.c @@ -419,7 +419,7 @@ static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw, dma_addr = rtlpriv->cfg->ops->get_desc( hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR); - pci_unmap_single(rtlpci->pdev, dma_addr, skb->len, + pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len, PCI_DMA_TODEVICE); kfree_skb(pskb); @@ -464,6 +464,8 @@ bool rtl8822b_halmac_cb_write_data_rsvd_page(struct rtl_priv *rtlpriv, u8 *buf, int count; skb = dev_alloc_skb(size); + if (!skb) + return false; memcpy((u8 *)skb_put(skb, size), buf, size); if (!_rtl8822be_send_bcn_or_cmd_packet(rtlpriv->hw, skb, BEACON_QUEUE)) diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h index 09c223f815de..aee82fcaf669 100644 --- a/drivers/staging/sm750fb/ddk750_chip.h +++ b/drivers/staging/sm750fb/ddk750_chip.h @@ -18,7 +18,7 @@ static inline u32 peek32(u32 addr) return readl(addr + mmio750); } -static inline void poke32(u32 data, u32 addr) +static inline void poke32(u32 addr, u32 data) { writel(data, addr + mmio750); } diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c index fc6a3cf74eb3..7d9f25db1add 100644 --- a/drivers/staging/typec/fusb302/fusb302.c +++ b/drivers/staging/typec/fusb302/fusb302.c @@ -1552,6 +1552,21 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip, fusb302_log(chip, "PD message header: %x", msg->header); fusb302_log(chip, "PD message len: %d", len); + /* + * Check if we've read off a GoodCRC message. If so then indicate to + * TCPM that the previous transmission has completed. Otherwise we pass + * the received message over to TCPM for processing. + * + * We make this check here instead of basing the reporting decision on + * the IRQ event type, as it's possible for the chip to report the + * TX_SUCCESS and GCRCSENT events out of order on occasion, so we need + * to check the message type to ensure correct reporting to TCPM. 
+ */ + if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC)) + tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS); + else + tcpm_pd_receive(chip->tcpm_port, msg); + return ret; } @@ -1659,13 +1674,12 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id) if (interrupta & FUSB_REG_INTERRUPTA_TX_SUCCESS) { fusb302_log(chip, "IRQ: PD tx success"); - /* read out the received good CRC */ ret = fusb302_pd_read_message(chip, &pd_msg); if (ret < 0) { - fusb302_log(chip, "cannot read in GCRC, ret=%d", ret); + fusb302_log(chip, + "cannot read in PD message, ret=%d", ret); goto done; } - tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS); } if (interrupta & FUSB_REG_INTERRUPTA_HARDRESET) { @@ -1686,7 +1700,6 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id) "cannot read in PD message, ret=%d", ret); goto done; } - tcpm_pd_receive(chip->tcpm_port, &pd_msg); } done: mutex_unlock(&chip->lock); diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h index 4b9302703b36..eeac4f0cb2c6 100644 --- a/drivers/staging/vboxvideo/vbox_drv.h +++ b/drivers/staging/vboxvideo/vbox_drv.h @@ -137,8 +137,8 @@ struct vbox_connector { char name[32]; struct vbox_crtc *vbox_crtc; struct { - u16 width; - u16 height; + u32 width; + u32 height; bool disconnected; } mode_hint; }; @@ -150,8 +150,8 @@ struct vbox_crtc { unsigned int crtc_id; u32 fb_offset; bool cursor_enabled; - u16 x_hint; - u16 y_hint; + u32 x_hint; + u32 y_hint; }; struct vbox_encoder { diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c index 3ca8bec62ac4..74abdf02d9fd 100644 --- a/drivers/staging/vboxvideo/vbox_irq.c +++ b/drivers/staging/vboxvideo/vbox_irq.c @@ -150,8 +150,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox) disconnected = !(hints->enabled); crtc_id = vbox_conn->vbox_crtc->crtc_id; - vbox_conn->mode_hint.width = hints->cx & 0x8fff; - vbox_conn->mode_hint.height = hints->cy & 0x8fff; + vbox_conn->mode_hint.width = hints->cx; + vbox_conn->mode_hint.height = hints->cy; vbox_conn->vbox_crtc->x_hint = hints->dx; vbox_conn->vbox_crtc->y_hint = hints->dy; vbox_conn->mode_hint.disconnected = disconnected; diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index 257a77830410..6f08dc966719 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -553,12 +553,22 @@ static int vbox_get_modes(struct drm_connector *connector) ++num_modes; } vbox_set_edid(connector, preferred_width, preferred_height); - drm_object_property_set_value( - &connector->base, vbox->dev->mode_config.suggested_x_property, - vbox_connector->vbox_crtc->x_hint); - drm_object_property_set_value( - &connector->base, vbox->dev->mode_config.suggested_y_property, - vbox_connector->vbox_crtc->y_hint); + + if (vbox_connector->vbox_crtc->x_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_x_property, + vbox_connector->vbox_crtc->x_hint); + else + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_x_property, 0); + + if (vbox_connector->vbox_crtc->y_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_y_property, + vbox_connector->vbox_crtc->y_hint); + else + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_y_property, 0); return num_modes; } @@ -640,9 +650,9 @@ static int vbox_connector_init(struct drm_device *dev, 
drm_mode_create_suggested_offset_properties(dev); drm_object_attach_property(&connector->base, - dev->mode_config.suggested_x_property, -1); + dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, - dev->mode_config.suggested_y_property, -1); + dev->mode_config.suggested_y_property, 0); drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 9fcf2e223f71..1123b4f1e1d6 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1693,10 +1693,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state) MACbShutdown(priv); pci_disable_device(pcid); - pci_set_power_state(pcid, pci_choose_state(pcid, state)); spin_unlock_irqrestore(&priv->lock, flags); + pci_set_power_state(pcid, pci_choose_state(pcid, state)); + return 0; } diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 9addef1f1e12..f49dfa82f1b8 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid; buffer_offset = ETH_ETHERNET_HDR_OFFSET; - memcpy(&txb[offset + 4], bssid, 6); + memcpy(&txb[offset + 8], bssid, 6); } else { buffer_offset = HOST_HDR_OFFSET; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 5001261f5d69..52fa52c20be0 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -834,6 +834,7 @@ static int iscsit_add_reject_from_cmd( unsigned char *buf) { struct iscsi_conn *conn; + const bool do_put = cmd->se_cmd.se_tfo != NULL; if (!cmd->conn) { pr_err("cmd->conn is NULL for ITT: 0x%08x\n", @@ -864,7 +865,7 @@ static int iscsit_add_reject_from_cmd( * Perform the kref_put now if se_cmd has already been setup by * scsit_setup_scsi_cmd() */ - if (cmd->se_cmd.se_tfo != NULL) { + if (do_put) { pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); target_put_sess_cmd(&cmd->se_cmd); } @@ -1960,7 +1961,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tmr_req *tmr_req; struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; - bool sess_ref = false; u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; @@ -1993,22 +1993,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, cmd->data_direction = DMA_NONE; cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL); - if (!cmd->tmr_req) + if (!cmd->tmr_req) { return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + + transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, 0, DMA_NONE, + TCM_SIMPLE_TAG, cmd->sense_buffer + 2); + + target_get_sess_cmd(&cmd->se_cmd, true); /* * TASK_REASSIGN for ERL=2 / connection stays inside of * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, - conn->sess->se_sess, 0, DMA_NONE, - TCM_SIMPLE_TAG, cmd->sense_buffer + 2); - - target_get_sess_cmd(&cmd->se_cmd, true); - sess_ref = true; tcm_function = iscsit_convert_tmf(function); if (tcm_function == TMR_UNKNOWN) { pr_err("Unknown iSCSI TMR Function:" @@ -2099,12 +2100,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { out_of_order_cmdsn = 1; - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); return 0; - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { return -1; + } } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); @@ -2124,12 +2127,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * For connection recovery, this is also the default action for * TMR TASK_REASSIGN. */ - if (sess_ref) { - pr_debug("Handle TMR, using sess_ref=true check\n"); - target_put_sess_cmd(&cmd->se_cmd); - } - iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + target_put_sess_cmd(&cmd->se_cmd); return 0; } EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index f9bc8ec6fb6b..9518ffd8b8ba 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -421,7 +421,8 @@ static int chap_server_compute_md5( auth_ret = 0; out: kzfree(desc); - crypto_free_shash(tfm); + if (tfm) + crypto_free_shash(tfm); kfree(challenge); kfree(challenge_binhex); return auth_ret; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 0dd4c45f7575..0ebc4818e132 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI); if (ret < 0) - return NULL; + goto free_out; ret = iscsit_tpg_add_portal_group(tiqn, tpg); if (ret != 0) @@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( return &tpg->tpg_se_tpg; out: core_tpg_deregister(&tpg->tpg_se_tpg); +free_out: kfree(tpg); return NULL; } diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7a6751fecd32..87248a2512e5 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk) if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { write_unlock_bh(&sk->sk_callback_lock); pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); + if (iscsi_target_sk_data_ready == conn->orig_data_ready) + return; + conn->orig_data_ready(sk); return; } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1e36f83b5961..70c6b9bfc04e 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -694,6 +694,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) struct iscsi_session *sess; struct se_cmd *se_cmd = &cmd->se_cmd; + WARN_ON(!list_empty(&cmd->i_conn_node)); + if (cmd->conn) sess = cmd->conn->sess; else @@ -716,6 +718,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues) { struct iscsi_conn *conn = cmd->conn; + WARN_ON(!list_empty(&cmd->i_conn_node)); + if (cmd->data_direction == DMA_TO_DEVICE) { iscsit_stop_dataout_timer(cmd); iscsit_free_r2ts_from_list(cmd); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c629817a8854..9b2c0c773022 100644 --- 
a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) struct inode *inode = file->f_mapping->host; int ret; + if (!nolb) { + return 0; + } + if (cmd->se_dev->dev_attrib.pi_prot_type) { ret = fd_do_prot_unmap(cmd, lba, nolb); if (ret) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index dd2cd8048582..4ba5004a069e 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -58,8 +58,10 @@ void core_pr_dump_initiator_port( char *buf, u32 size) { - if (!pr_reg->isid_present_at_reg) + if (!pr_reg->isid_present_at_reg) { buf[0] = '\0'; + return; + } snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid); } @@ -4011,6 +4013,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Set the ADDITIONAL DESCRIPTOR LENGTH */ put_unaligned_be32(desc_len, &buf[off]); + off += 4; /* * Size of full desctipor header minus TransportID * containing $FABRIC_MOD specific) initiator device/port diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 7c69b4a9694d..0d99b242e82e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, " %d i: %d bio: %p, allocating another" " bio\n", bio->bi_vcnt, i, bio); - rc = blk_rq_append_bio(req, bio); + rc = blk_rq_append_bio(req, &bio); if (rc) { pr_err("pSCSI: failed to append bio\n"); goto fail; @@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } if (bio) { - rc = blk_rq_append_bio(req, bio); + rc = blk_rq_append_bio(req, &bio); if (rc) { pr_err("pSCSI: failed to append bio\n"); goto fail; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e22847bd79b9..9c7bc1ca341a 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, spin_unlock(&se_cmd->t_state_lock); return false; } + if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) { + if (se_cmd->scsi_status) { + pr_debug("Attempted to abort io tag: %llu early failure" + " status: 0x%02x\n", se_cmd->tag, + se_cmd->scsi_status); + spin_unlock(&se_cmd->t_state_lock); + return false; + } + } if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { pr_debug("Attempted to abort io tag: %llu already shutdown," " skipping\n", se_cmd->tag); @@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. 
*/ spin_lock_irqsave(&dev->se_tmr_lock, flags); - list_del_init(&tmr->tmr_list); + if (tmr) + list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { cmd = tmr_p->task_cmd; if (!cmd) { diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 836d552b0385..e6d51135d105 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1730,9 +1730,6 @@ void transport_generic_request_failure(struct se_cmd *cmd, { int ret = 0, post_ret = 0; - if (transport_check_aborted_status(cmd, 1)) - return; - pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", sense_reason); target_show_cmd("-----[ ", cmd); @@ -1741,6 +1738,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, * For SAM Task Attribute emulation for failed struct se_cmd */ transport_complete_task_attr(cmd); + /* * Handle special case for COMPARE_AND_WRITE failure, where the * callback is expected to drop the per device ->caw_sem. @@ -1749,6 +1747,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, cmd->transport_complete_callback) cmd->transport_complete_callback(cmd, false, &post_ret); + if (transport_check_aborted_status(cmd, 1)) + return; + switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: @@ -1973,6 +1974,7 @@ void target_execute_cmd(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_PROCESSING; + cmd->transport_state &= ~CMD_T_PRE_EXECUTE; cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); @@ -2010,6 +2012,8 @@ static void target_restart_delayed_cmds(struct se_device *dev) list_del(&cmd->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); + cmd->transport_state |= CMD_T_SENT; + __target_execute_cmd(cmd, true); if (cmd->sam_task_attr == TCM_ORDERED_TAG) @@ -2045,6 +2049,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", dev->dev_cur_ordered_id); } + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; + restart: target_restart_delayed_cmds(dev); } @@ -2570,7 +2576,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd); static void transport_write_pending_qf(struct se_cmd *cmd) { + unsigned long flags; int ret; + bool stop; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (stop) { + pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + complete_all(&cmd->t_transport_stop_comp); + return; + } ret = cmd->se_tfo->write_pending(cmd); if (ret) { @@ -2664,6 +2683,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) ret = -ESHUTDOWN; goto out; } + se_cmd->transport_state |= CMD_T_PRE_EXECUTE; list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 942d094269fb..c4a5fb6f038f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -796,6 +796,13 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) int ret; DEFINE_WAIT(__wait); + /* + * Don't leave commands partially setup because the unmap + * thread might need the blocks to make forward progress. 
+ */ + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); + tcmu_cmd_reset_dbi_cur(tcmu_cmd); + prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); pr_debug("sleeping for ring space\n"); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 7952357df9c8..edb6e4e9ef3a 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -590,7 +590,6 @@ static int __init optee_driver_init(void) return -ENODEV; np = of_find_matching_node(fw_np, optee_match); - of_node_put(fw_np); if (!np) return -ENODEV; diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index bd3572c41585..2d855a96cdd9 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -23,222 +23,450 @@ #include #include #include +#include #include "thermal_core.h" -#define TEMP0_TH (0x4) -#define TEMP0_RST_TH (0x8) -#define TEMP0_CFG (0xC) -#define TEMP0_EN (0x10) -#define TEMP0_INT_EN (0x14) -#define TEMP0_INT_CLR (0x18) -#define TEMP0_RST_MSK (0x1C) -#define TEMP0_VALUE (0x28) - -#define HISI_TEMP_BASE (-60) -#define HISI_TEMP_RESET (100000) - -#define HISI_MAX_SENSORS 4 +#define HI6220_TEMP0_LAG (0x0) +#define HI6220_TEMP0_TH (0x4) +#define HI6220_TEMP0_RST_TH (0x8) +#define HI6220_TEMP0_CFG (0xC) +#define HI6220_TEMP0_CFG_SS_MSK (0xF000) +#define HI6220_TEMP0_CFG_HDAK_MSK (0x30) +#define HI6220_TEMP0_EN (0x10) +#define HI6220_TEMP0_INT_EN (0x14) +#define HI6220_TEMP0_INT_CLR (0x18) +#define HI6220_TEMP0_RST_MSK (0x1C) +#define HI6220_TEMP0_VALUE (0x28) + +#define HI3660_OFFSET(chan) ((chan) * 0x40) +#define HI3660_TEMP(chan) (HI3660_OFFSET(chan) + 0x1C) +#define HI3660_TH(chan) (HI3660_OFFSET(chan) + 0x20) +#define HI3660_LAG(chan) (HI3660_OFFSET(chan) + 0x28) +#define HI3660_INT_EN(chan) (HI3660_OFFSET(chan) + 0x2C) +#define HI3660_INT_CLR(chan) (HI3660_OFFSET(chan) + 0x30) + +#define HI6220_TEMP_BASE (-60000) +#define HI6220_TEMP_RESET (100000) +#define HI6220_TEMP_STEP (785) +#define HI6220_TEMP_LAG (3500) + +#define HI3660_TEMP_BASE (-63780) +#define HI3660_TEMP_STEP (205) +#define HI3660_TEMP_LAG (4000) + +#define HI6220_DEFAULT_SENSOR 2 +#define HI3660_DEFAULT_SENSOR 1 struct hisi_thermal_sensor { - struct hisi_thermal_data *thermal; struct thermal_zone_device *tzd; - - long sensor_temp; uint32_t id; uint32_t thres_temp; }; struct hisi_thermal_data { - struct mutex thermal_lock; /* protects register data */ + int (*get_temp)(struct hisi_thermal_data *data); + int (*enable_sensor)(struct hisi_thermal_data *data); + int (*disable_sensor)(struct hisi_thermal_data *data); + int (*irq_handler)(struct hisi_thermal_data *data); struct platform_device *pdev; struct clk *clk; - struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS]; - - int irq, irq_bind_sensor; - bool irq_enabled; - + struct hisi_thermal_sensor sensor; void __iomem *regs; + int irq; }; -/* in millicelsius */ -static inline int _step_to_temp(int step) +/* + * The temperature computation on the tsensor is as follow: + * Unit: millidegree Celsius + * Step: 200/255 (0.7843) + * Temperature base: -60°C + * + * The register is programmed in temperature steps, every step is 785 + * millidegree and begins at -60 000 m°C + * + * The temperature from the steps: + * + * Temp = TempBase + (steps x 785) + * + * and the steps from the temperature: + * + * steps = (Temp - TempBase) / 785 + * + */ +static inline int hi6220_thermal_step_to_temp(int step) { - /* - * Every step equals (1 * 200) / 255 celsius, and finally - * need convert to millicelsius. 
- */ - return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255)); + return HI6220_TEMP_BASE + (step * HI6220_TEMP_STEP); } -static inline long _temp_to_step(long temp) +static inline int hi6220_thermal_temp_to_step(int temp) { - return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000; + return DIV_ROUND_UP(temp - HI6220_TEMP_BASE, HI6220_TEMP_STEP); } -static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, - struct hisi_thermal_sensor *sensor) +/* + * for Hi3660, + * Step: 189/922 (0.205) + * Temperature base: -63.780°C + * + * The register is programmed in temperature steps, every step is 205 + * millidegree and begins at -63 780 m°C + */ +static inline int hi3660_thermal_step_to_temp(int step) { - long val; + return HI3660_TEMP_BASE + step * HI3660_TEMP_STEP; +} - mutex_lock(&data->thermal_lock); +static inline int hi3660_thermal_temp_to_step(int temp) +{ + return DIV_ROUND_UP(temp - HI3660_TEMP_BASE, HI3660_TEMP_STEP); +} - /* disable interrupt */ - writel(0x0, data->regs + TEMP0_INT_EN); - writel(0x1, data->regs + TEMP0_INT_CLR); +/* + * The lag register contains 5 bits encoding the temperature in steps. + * + * Each time the temperature crosses the threshold boundary, an + * interrupt is raised. It could be when the temperature is going + * above the threshold or below. However, if the temperature is + * fluctuating around this value due to the load, we can receive + * several interrupts which may not desired. + * + * We can setup a temperature representing the delta between the + * threshold and the current temperature when the temperature is + * decreasing. + * + * For instance: the lag register is 5°C, the threshold is 65°C, when + * the temperature reaches 65°C an interrupt is raised and when the + * temperature decrease to 65°C - 5°C another interrupt is raised. + * + * A very short lag can lead to an interrupt storm, a long lag + * increase the latency to react to the temperature changes. In our + * case, that is not really a problem as we are polling the + * temperature. + * + * [0:4] : lag register + * + * The temperature is coded in steps, cf. HI6220_TEMP_STEP. + * + * Min : 0x00 : 0.0 °C + * Max : 0x1F : 24.3 °C + * + * The 'value' parameter is in milliCelsius. 
+ */ +static inline void hi6220_thermal_set_lag(void __iomem *addr, int value) +{ + writel(DIV_ROUND_UP(value, HI6220_TEMP_STEP) & 0x1F, + addr + HI6220_TEMP0_LAG); +} - /* disable module firstly */ - writel(0x0, data->regs + TEMP0_EN); +static inline void hi6220_thermal_alarm_clear(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_INT_CLR); +} - /* select sensor id */ - writel((sensor->id << 12), data->regs + TEMP0_CFG); +static inline void hi6220_thermal_alarm_enable(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_INT_EN); +} - /* enable module */ - writel(0x1, data->regs + TEMP0_EN); +static inline void hi6220_thermal_alarm_set(void __iomem *addr, int temp) +{ + writel(hi6220_thermal_temp_to_step(temp) | 0x0FFFFFF00, + addr + HI6220_TEMP0_TH); +} - usleep_range(3000, 5000); +static inline void hi6220_thermal_reset_set(void __iomem *addr, int temp) +{ + writel(hi6220_thermal_temp_to_step(temp), addr + HI6220_TEMP0_RST_TH); +} - val = readl(data->regs + TEMP0_VALUE); - val = _step_to_temp(val); +static inline void hi6220_thermal_reset_enable(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_RST_MSK); +} - mutex_unlock(&data->thermal_lock); +static inline void hi6220_thermal_enable(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_EN); +} - return val; +static inline int hi6220_thermal_get_temperature(void __iomem *addr) +{ + return hi6220_thermal_step_to_temp(readl(addr + HI6220_TEMP0_VALUE)); } -static void hisi_thermal_enable_bind_irq_sensor - (struct hisi_thermal_data *data) +/* + * [0:6] lag register + * + * The temperature is coded in steps, cf. HI3660_TEMP_STEP. + * + * Min : 0x00 : 0.0 °C + * Max : 0x7F : 26.0 °C + * + */ +static inline void hi3660_thermal_set_lag(void __iomem *addr, + int id, int value) { - struct hisi_thermal_sensor *sensor; + writel(DIV_ROUND_UP(value, HI3660_TEMP_STEP) & 0x7F, + addr + HI3660_LAG(id)); +} - mutex_lock(&data->thermal_lock); +static inline void hi3660_thermal_alarm_clear(void __iomem *addr, + int id, int value) +{ + writel(value, addr + HI3660_INT_CLR(id)); +} - sensor = &data->sensors[data->irq_bind_sensor]; +static inline void hi3660_thermal_alarm_enable(void __iomem *addr, + int id, int value) +{ + writel(value, addr + HI3660_INT_EN(id)); +} - /* setting the hdak time */ - writel(0x0, data->regs + TEMP0_CFG); +static inline void hi3660_thermal_alarm_set(void __iomem *addr, + int id, int value) +{ + writel(value, addr + HI3660_TH(id)); +} + +static inline int hi3660_thermal_get_temperature(void __iomem *addr, int id) +{ + return hi3660_thermal_step_to_temp(readl(addr + HI3660_TEMP(id))); +} + +/* + * Temperature configuration register - Sensor selection + * + * Bits [19:12] + * + * 0x0: local sensor (default) + * 0x1: remote sensor 1 (ACPU cluster 1) + * 0x2: remote sensor 2 (ACPU cluster 0) + * 0x3: remote sensor 3 (G3D) + */ +static inline void hi6220_thermal_sensor_select(void __iomem *addr, int sensor) +{ + writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_SS_MSK) | + (sensor << 12), addr + HI6220_TEMP0_CFG); +} + +/* + * Temperature configuration register - Hdak conversion polling interval + * + * Bits [5:4] + * + * 0x0 : 0.768 ms + * 0x1 : 6.144 ms + * 0x2 : 49.152 ms + * 0x3 : 393.216 ms + */ +static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value) +{ + writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_HDAK_MSK) | + (value << 4), addr + HI6220_TEMP0_CFG); +} + +static int hi6220_thermal_irq_handler(struct 
hisi_thermal_data *data) +{ + hi6220_thermal_alarm_clear(data->regs, 1); + return 0; +} + +static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data) +{ + hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1); + return 0; +} + +static int hi6220_thermal_get_temp(struct hisi_thermal_data *data) +{ + return hi6220_thermal_get_temperature(data->regs); +} + +static int hi3660_thermal_get_temp(struct hisi_thermal_data *data) +{ + return hi3660_thermal_get_temperature(data->regs, data->sensor.id); +} + +static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data) +{ + /* disable sensor module */ + hi6220_thermal_enable(data->regs, 0); + hi6220_thermal_alarm_enable(data->regs, 0); + hi6220_thermal_reset_enable(data->regs, 0); + + clk_disable_unprepare(data->clk); + + return 0; +} + +static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data) +{ + /* disable sensor module */ + hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0); + return 0; +} + +static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data) +{ + struct hisi_thermal_sensor *sensor = &data->sensor; + int ret; + + /* enable clock for tsensor */ + ret = clk_prepare_enable(data->clk); + if (ret) + return ret; /* disable module firstly */ - writel(0x0, data->regs + TEMP0_RST_MSK); - writel(0x0, data->regs + TEMP0_EN); + hi6220_thermal_reset_enable(data->regs, 0); + hi6220_thermal_enable(data->regs, 0); /* select sensor id */ - writel((sensor->id << 12), data->regs + TEMP0_CFG); + hi6220_thermal_sensor_select(data->regs, sensor->id); + + /* setting the hdak time */ + hi6220_thermal_hdak_set(data->regs, 0); + + /* setting lag value between current temp and the threshold */ + hi6220_thermal_set_lag(data->regs, HI6220_TEMP_LAG); /* enable for interrupt */ - writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00, - data->regs + TEMP0_TH); + hi6220_thermal_alarm_set(data->regs, sensor->thres_temp); - writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH); + hi6220_thermal_reset_set(data->regs, HI6220_TEMP_RESET); /* enable module */ - writel(0x1, data->regs + TEMP0_RST_MSK); - writel(0x1, data->regs + TEMP0_EN); - - writel(0x0, data->regs + TEMP0_INT_CLR); - writel(0x1, data->regs + TEMP0_INT_EN); + hi6220_thermal_reset_enable(data->regs, 1); + hi6220_thermal_enable(data->regs, 1); - usleep_range(3000, 5000); + hi6220_thermal_alarm_clear(data->regs, 0); + hi6220_thermal_alarm_enable(data->regs, 1); - mutex_unlock(&data->thermal_lock); + return 0; } -static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data) +static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data) { - mutex_lock(&data->thermal_lock); + unsigned int value; + struct hisi_thermal_sensor *sensor = &data->sensor; - /* disable sensor module */ - writel(0x0, data->regs + TEMP0_INT_EN); - writel(0x0, data->regs + TEMP0_RST_MSK); - writel(0x0, data->regs + TEMP0_EN); + /* disable interrupt */ + hi3660_thermal_alarm_enable(data->regs, sensor->id, 0); - mutex_unlock(&data->thermal_lock); -} + /* setting lag value between current temp and the threshold */ + hi3660_thermal_set_lag(data->regs, sensor->id, HI3660_TEMP_LAG); -static int hisi_thermal_get_temp(void *_sensor, int *temp) -{ - struct hisi_thermal_sensor *sensor = _sensor; - struct hisi_thermal_data *data = sensor->thermal; + /* set interrupt threshold */ + value = hi3660_thermal_temp_to_step(sensor->thres_temp); + hi3660_thermal_alarm_set(data->regs, sensor->id, value); - int sensor_id = -1, i; - long max_temp = 0; + /* enable 
interrupt */ + hi3660_thermal_alarm_clear(data->regs, sensor->id, 1); + hi3660_thermal_alarm_enable(data->regs, sensor->id, 1); - *temp = hisi_thermal_get_sensor_temp(data, sensor); + return 0; +} - sensor->sensor_temp = *temp; +static int hi6220_thermal_probe(struct hisi_thermal_data *data) +{ + struct platform_device *pdev = data->pdev; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; - for (i = 0; i < HISI_MAX_SENSORS; i++) { - if (!data->sensors[i].tzd) - continue; + data->get_temp = hi6220_thermal_get_temp; + data->enable_sensor = hi6220_thermal_enable_sensor; + data->disable_sensor = hi6220_thermal_disable_sensor; + data->irq_handler = hi6220_thermal_irq_handler; - if (data->sensors[i].sensor_temp >= max_temp) { - max_temp = data->sensors[i].sensor_temp; - sensor_id = i; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(data->regs)) { + dev_err(dev, "failed to get io address\n"); + return PTR_ERR(data->regs); } - /* If no sensor has been enabled, then skip to enable irq */ - if (sensor_id == -1) - return 0; - - mutex_lock(&data->thermal_lock); - data->irq_bind_sensor = sensor_id; - mutex_unlock(&data->thermal_lock); - - dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n", - sensor->id, data->irq_enabled, *temp, sensor->thres_temp); - /* - * Bind irq to sensor for two cases: - * Reenable alarm IRQ if temperature below threshold; - * if irq has been enabled, always set it; - */ - if (data->irq_enabled) { - hisi_thermal_enable_bind_irq_sensor(data); - return 0; + data->clk = devm_clk_get(dev, "thermal_clk"); + if (IS_ERR(data->clk)) { + ret = PTR_ERR(data->clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get thermal clk: %d\n", ret); + return ret; } - if (max_temp < sensor->thres_temp) { - data->irq_enabled = true; - hisi_thermal_enable_bind_irq_sensor(data); - enable_irq(data->irq); - } + data->irq = platform_get_irq(pdev, 0); + if (data->irq < 0) + return data->irq; + + data->sensor.id = HI6220_DEFAULT_SENSOR; return 0; } -static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = { - .get_temp = hisi_thermal_get_temp, -}; +static int hi3660_thermal_probe(struct hisi_thermal_data *data) +{ + struct platform_device *pdev = data->pdev; + struct device *dev = &pdev->dev; + struct resource *res; + + data->get_temp = hi3660_thermal_get_temp; + data->enable_sensor = hi3660_thermal_enable_sensor; + data->disable_sensor = hi3660_thermal_disable_sensor; + data->irq_handler = hi3660_thermal_irq_handler; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(data->regs)) { + dev_err(dev, "failed to get io address\n"); + return PTR_ERR(data->regs); + } -static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev) + data->irq = platform_get_irq(pdev, 0); + if (data->irq < 0) + return data->irq; + + data->sensor.id = HI3660_DEFAULT_SENSOR; + + return 0; +} + +static int hisi_thermal_get_temp(void *__data, int *temp) { - struct hisi_thermal_data *data = dev; + struct hisi_thermal_data *data = __data; + struct hisi_thermal_sensor *sensor = &data->sensor; - disable_irq_nosync(irq); - data->irq_enabled = false; + *temp = data->get_temp(data); - return IRQ_WAKE_THREAD; + dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n", + sensor->id, *temp, sensor->thres_temp); + + return 0; } +static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = { + .get_temp = hisi_thermal_get_temp, +}; + static irqreturn_t 
hisi_thermal_alarm_irq_thread(int irq, void *dev) { struct hisi_thermal_data *data = dev; - struct hisi_thermal_sensor *sensor; - int i; + struct hisi_thermal_sensor *sensor = &data->sensor; + int temp = 0; - mutex_lock(&data->thermal_lock); - sensor = &data->sensors[data->irq_bind_sensor]; + data->irq_handler(data); - dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n", - sensor->thres_temp / 1000); - mutex_unlock(&data->thermal_lock); + hisi_thermal_get_temp(data, &temp); - for (i = 0; i < HISI_MAX_SENSORS; i++) { - if (!data->sensors[i].tzd) - continue; + if (temp >= sensor->thres_temp) { + dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n", + temp, sensor->thres_temp); - thermal_zone_device_update(data->sensors[i].tzd, + thermal_zone_device_update(data->sensor.tzd, THERMAL_EVENT_UNSPECIFIED); + + } else { + dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n", + temp, sensor->thres_temp); } return IRQ_HANDLED; @@ -246,17 +474,14 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) static int hisi_thermal_register_sensor(struct platform_device *pdev, struct hisi_thermal_data *data, - struct hisi_thermal_sensor *sensor, - int index) + struct hisi_thermal_sensor *sensor) { int ret, i; const struct thermal_trip *trip; - sensor->id = index; - sensor->thermal = data; - sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, - sensor->id, sensor, &hisi_of_thermal_ops); + sensor->id, data, + &hisi_of_thermal_ops); if (IS_ERR(sensor->tzd)) { ret = PTR_ERR(sensor->tzd); sensor->tzd = NULL; @@ -278,7 +503,14 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev, } static const struct of_device_id of_hisi_thermal_match[] = { - { .compatible = "hisilicon,tsensor" }, + { + .compatible = "hisilicon,tsensor", + .data = hi6220_thermal_probe + }, + { + .compatible = "hisilicon,hi3660-tsensor", + .data = hi3660_thermal_probe + }, { /* end */ } }; MODULE_DEVICE_TABLE(of, of_hisi_thermal_match); @@ -295,88 +527,63 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor, static int hisi_thermal_probe(struct platform_device *pdev) { struct hisi_thermal_data *data; - struct resource *res; - int i; + int const (*platform_probe)(struct hisi_thermal_data *); + struct device *dev = &pdev->dev; int ret; - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - mutex_init(&data->thermal_lock); data->pdev = pdev; + platform_set_drvdata(pdev, data); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(data->regs)) { - dev_err(&pdev->dev, "failed to get io address\n"); - return PTR_ERR(data->regs); + platform_probe = of_device_get_match_data(dev); + if (!platform_probe) { + dev_err(dev, "failed to get probe func\n"); + return -EINVAL; } - data->irq = platform_get_irq(pdev, 0); - if (data->irq < 0) - return data->irq; - - ret = devm_request_threaded_irq(&pdev->dev, data->irq, - hisi_thermal_alarm_irq, - hisi_thermal_alarm_irq_thread, - 0, "hisi_thermal", data); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); + ret = platform_probe(data); + if (ret) return ret; - } - platform_set_drvdata(pdev, data); - - data->clk = devm_clk_get(&pdev->dev, "thermal_clk"); - if (IS_ERR(data->clk)) { - ret = PTR_ERR(data->clk); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "failed to get thermal clk: %d\n", ret); + ret = hisi_thermal_register_sensor(pdev, data, + 
&data->sensor); + if (ret) { + dev_err(dev, "failed to register thermal sensor: %d\n", ret); return ret; } - /* enable clock for thermal */ - ret = clk_prepare_enable(data->clk); + ret = data->enable_sensor(data); if (ret) { - dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); + dev_err(dev, "Failed to setup the sensor: %d\n", ret); return ret; } - hisi_thermal_enable_bind_irq_sensor(data); - irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED, - &data->irq_enabled); - - for (i = 0; i < HISI_MAX_SENSORS; ++i) { - ret = hisi_thermal_register_sensor(pdev, data, - &data->sensors[i], i); - if (ret) - dev_err(&pdev->dev, - "failed to register thermal sensor: %d\n", ret); - else - hisi_thermal_toggle_sensor(&data->sensors[i], true); + if (data->irq) { + ret = devm_request_threaded_irq(dev, data->irq, NULL, + hisi_thermal_alarm_irq_thread, + IRQF_ONESHOT, "hisi_thermal", data); + if (ret < 0) { + dev_err(dev, "failed to request alarm irq: %d\n", ret); + return ret; + } } + hisi_thermal_toggle_sensor(&data->sensor, true); + return 0; } static int hisi_thermal_remove(struct platform_device *pdev) { struct hisi_thermal_data *data = platform_get_drvdata(pdev); - int i; + struct hisi_thermal_sensor *sensor = &data->sensor; - for (i = 0; i < HISI_MAX_SENSORS; i++) { - struct hisi_thermal_sensor *sensor = &data->sensors[i]; + hisi_thermal_toggle_sensor(sensor, false); - if (!sensor->tzd) - continue; - - hisi_thermal_toggle_sensor(sensor, false); - } - - hisi_thermal_disable_sensor(data); - clk_disable_unprepare(data->clk); + data->disable_sensor(data); return 0; } @@ -386,10 +593,7 @@ static int hisi_thermal_suspend(struct device *dev) { struct hisi_thermal_data *data = dev_get_drvdata(dev); - hisi_thermal_disable_sensor(data); - data->irq_enabled = false; - - clk_disable_unprepare(data->clk); + data->disable_sensor(data); return 0; } @@ -397,16 +601,8 @@ static int hisi_thermal_suspend(struct device *dev) static int hisi_thermal_resume(struct device *dev) { struct hisi_thermal_data *data = dev_get_drvdata(dev); - int ret; - ret = clk_prepare_enable(data->clk); - if (ret) - return ret; - - data->irq_enabled = true; - hisi_thermal_enable_bind_irq_sensor(data); - - return 0; + return data->enable_sensor(data); } #endif diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 8ee38f55c7f3..43b90fd577e4 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -319,17 +319,21 @@ static int int3400_thermal_probe(struct platform_device *pdev) result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group); if (result) - goto free_zone; + goto free_rel_misc; result = acpi_install_notify_handler( priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify, (void *)priv); if (result) - goto free_zone; + goto free_sysfs; return 0; -free_zone: +free_sysfs: + sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); +free_rel_misc: + if (!priv->rel_misc_dev_res) + acpi_thermal_rel_misc_device_remove(priv->adev->handle); thermal_zone_device_unregister(priv->thermal); free_art_trt: kfree(priv->trts); diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index f02341f7134d..1d9f524cb5b3 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -30,6 +30,10 @@ /* Skylake thermal reporting device */ #define 
PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903 +/* CannonLake thermal reporting device */ +#define PCI_DEVICE_ID_PROC_CNL_THERMAL 0x5a03 +#define PCI_DEVICE_ID_PROC_CFL_THERMAL 0x3E83 + /* Braswell thermal reporting device */ #define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC @@ -461,6 +465,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTX_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTP_THERMAL)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CNL_THERMAL)}, { 0, }, }; diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index c60b1cfcc64e..ddd777cf5309 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -30,6 +30,7 @@ #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ #define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ #define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */ +#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */ /* Wildcat Point-LP PCH Thermal registers */ #define WPT_TEMP 0x0000 /* Temperature */ @@ -278,6 +279,7 @@ enum board_ids { board_hsw, board_wpt, board_skl, + board_cnl, }; static const struct board_info { @@ -296,6 +298,10 @@ static const struct board_info { .name = "pch_skylake", .ops = &pch_dev_ops_wpt, }, + [board_cnl] = { + .name = "pch_cannonlake", + .ops = &pch_dev_ops_wpt, + }, }; static int intel_pch_thermal_probe(struct pci_dev *pdev, @@ -398,6 +404,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = { .driver_data = board_skl, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H), .driver_data = board_skl, }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL), + .driver_data = board_cnl, }, { 0, }, }; MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index d718cd179ddb..e83d505716bb 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -99,6 +99,7 @@ struct powerclamp_worker_data { unsigned int target_ratio; unsigned int duration_jiffies; bool clamping; + bool setscheduler_done; }; static struct powerclamp_worker_data * __percpu worker_data; @@ -388,6 +389,10 @@ static void clamp_balancing_func(struct kthread_work *work) w_data = container_of(work, struct powerclamp_worker_data, balancing_work); + if (unlikely(w_data->setscheduler_done == false)) { + sched_setscheduler(current, SCHED_FIFO, &sparam); + w_data->setscheduler_done = true; + } /* * make sure user selected ratio does not take effect until * the next round. 
adjust target_ratio if user has changed @@ -503,7 +508,6 @@ static void start_power_clamp_worker(unsigned long cpu) w_data->cpu = cpu; w_data->clamping = true; set_bit(cpu, cpu_clamping_mask); - sched_setscheduler(worker->task, SCHED_FIFO, &sparam); kthread_init_work(&w_data->balancing_work, clamp_balancing_func); kthread_init_delayed_work(&w_data->idle_injection_work, clamp_idle_injection_func); diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index b4d3116cfdaf..3055f9a12a17 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + mutex_lock(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if ((instance->trip != params->trip_max_desired_temperature) || (!cdev_is_power_actor(instance->cdev))) @@ -534,6 +535,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) mutex_unlock(&instance->cdev->lock); thermal_cdev_update(instance->cdev); } + mutex_unlock(&tz->lock); } /** diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index be95826631b7..ee047ca43084 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -31,8 +31,7 @@ * If the temperature is higher than a trip point, * a. if the trend is THERMAL_TREND_RAISING, use higher cooling * state for this trip point - * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling - * state for this trip point + * b. if the trend is THERMAL_TREND_DROPPING, do nothing * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit * for this trip point * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit @@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (!throttle) next_target = THERMAL_NO_TARGET; } else { - next_target = cur_state - 1; - if (next_target > instance->upper) - next_target = instance->upper; + if (!throttle) { + next_target = cur_state - 1; + if (next_target > instance->upper) + next_target = instance->upper; + } } break; case THERMAL_TREND_DROP_FULL: diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index d674e06767a5..1424581fd9af 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -225,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb) tb_port_info(up_port, "PCIe tunnel activation failed, aborting\n"); tb_pci_free(tunnel); + continue; } list_add(&tunnel->list, &tcm->tunnel_list); diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig new file mode 100644 index 000000000000..a230dad0434d --- /dev/null +++ b/drivers/trusty/Kconfig @@ -0,0 +1,66 @@ +# +# Trusty +# + +menu "Trusty" + +config TRUSTY + tristate "Trusty" + depends on X86_64 + default n + +config TRUSTY_FIQ + tristate + depends on TRUSTY + +config TRUSTY_FIQ_ARM + tristate + depends on TRUSTY + depends on ARM + select FIQ_GLUE + select TRUSTY_FIQ + default y + +config TRUSTY_FIQ_ARM64 + tristate + depends on TRUSTY + depends on ARM64 + select FIQ_GLUE + select TRUSTY_FIQ + default y + +config TRUSTY_LOG + tristate "Trusty Log support" + depends on TRUSTY + default y + +config TRUSTY_VIRTIO + tristate "Trusty virtio support" + depends on TRUSTY + select VIRTIO + default y + +config TRUSTY_VIRTIO_IPC + tristate "Trusty Virtio IPC driver" + depends on TRUSTY_VIRTIO + default y + help + This module adds support for communications with Trusty Services 
+ + If you choose to build a module, it'll be called trusty-ipc. + Say N if unsure. + +config TRUSTY_BACKUP_TIMER + tristate "Trusty backup timer" + depends on TRUSTY + default y + help + This module adds support for Trusty backup timer. Trusty backup + timer might be required on platforms that might loose state of + secure timer in deep idle state. + + If you choose to build a module, it'll be called trusty-timer. + Say N if unsure. + + +endmenu diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile new file mode 100644 index 000000000000..69a78688f1b0 --- /dev/null +++ b/drivers/trusty/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for trusty components +# + +obj-$(CONFIG_TRUSTY) += trusty.o +obj-$(CONFIG_TRUSTY) += trusty-irq.o +obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o +obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o +obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o +obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o +obj-$(CONFIG_TRUSTY) += trusty-mem.o +obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o +obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o +obj-$(CONFIG_TRUSTY) += trusty-wall.o +obj-$(CONFIG_TRUSTY_BACKUP_TIMER) += trusty-timer.o diff --git a/drivers/trusty/trusty-fiq-arm.c b/drivers/trusty/trusty-fiq-arm.c new file mode 100644 index 000000000000..8c62a00bbc44 --- /dev/null +++ b/drivers/trusty/trusty-fiq-arm.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include "trusty-fiq.h" + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) + +static void __naked trusty_fiq_return(void) +{ + asm volatile( + ".arch_extension sec\n" + "mov r12, r0\n" + "ldr r0, =" STRINGIFY(SMC_FC_FIQ_EXIT) "\n" + "smc #0"); +} + +int trusty_fiq_arch_probe(struct platform_device *pdev) +{ + return fiq_glue_set_return_handler(trusty_fiq_return); +} + +void trusty_fiq_arch_remove(struct platform_device *pdev) +{ + fiq_glue_clear_return_handler(trusty_fiq_return); +} diff --git a/drivers/trusty/trusty-fiq-arm64-glue.S b/drivers/trusty/trusty-fiq-arm64-glue.S new file mode 100644 index 000000000000..6994b3a94fc3 --- /dev/null +++ b/drivers/trusty/trusty-fiq-arm64-glue.S @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +.macro push reg1,reg2,remregs:vararg + .ifnb \remregs + push \remregs + .endif + stp \reg1, \reg2, [sp, #-16]! 
+.endm + +.macro pop reg1,reg2,remregs:vararg + ldp \reg1, \reg2, [sp], #16 + .ifnb \remregs + pop \remregs + .endif +.endm + +ENTRY(trusty_fiq_glue_arm64) + sub sp, sp, #S_FRAME_SIZE - S_LR + push x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \ + x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \ + x26, x27, x28, x29 + ldr x0, =SMC_FC64_GET_FIQ_REGS + smc #0 + stp x0, x1, [sp, #S_PC] /* original pc, cpsr */ + tst x1, PSR_MODE_MASK + csel x2, x2, x3, eq /* sp el0, sp el1 */ + stp x30, x2, [sp, #S_LR] /* lr, original sp */ + mov x0, sp + mov x1, x3 + bl trusty_fiq_handler + pop x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \ + x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \ + x26, x27, x28, x29 + ldr x30, [sp], #S_FRAME_SIZE - S_LR /* load LR and restore SP */ + ldr x0, =SMC_FC_FIQ_EXIT + smc #0 + b . /* should not get here */ diff --git a/drivers/trusty/trusty-fiq-arm64.c b/drivers/trusty/trusty-fiq-arm64.c new file mode 100644 index 000000000000..8b9a40887587 --- /dev/null +++ b/drivers/trusty/trusty-fiq-arm64.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include + +#include "trusty-fiq.h" + +extern void trusty_fiq_glue_arm64(void); + +static struct device *trusty_dev; +static DEFINE_PER_CPU(void *, fiq_stack); +static struct fiq_glue_handler *fiq_handlers; +static DEFINE_MUTEX(fiq_glue_lock); + +void trusty_fiq_handler(struct pt_regs *regs, void *svc_sp) +{ + struct fiq_glue_handler *handler; + + for (handler = ACCESS_ONCE(fiq_handlers); handler; + handler = ACCESS_ONCE(handler->next)) { + /* Barrier paired with smp_wmb in fiq_glue_register_handler */ + smp_read_barrier_depends(); + handler->fiq(handler, regs, svc_sp); + } +} + +static void smp_nop_call(void *info) +{ + /* If this call is reached, the fiq handler is not currently running */ +} + +static void fiq_glue_clear_handler(void) +{ + int cpu; + int ret; + void *stack; + + for_each_possible_cpu(cpu) { + stack = per_cpu(fiq_stack, cpu); + if (!stack) + continue; + + ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER, + cpu, 0, 0); + if (ret) { + pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, 0, 0) failed 0x%x, skip free stack\n", + __func__, cpu, ret); + continue; + } + + per_cpu(fiq_stack, cpu) = NULL; + smp_call_function_single(cpu, smp_nop_call, NULL, true); + free_pages((unsigned long)stack, THREAD_SIZE_ORDER); + } +} + +static int fiq_glue_set_handler(void) +{ + int ret; + int cpu; + void *stack; + unsigned long irqflags; + + for_each_possible_cpu(cpu) { + stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); + if (WARN_ON(!stack)) { + ret = -ENOMEM; + goto err_alloc_fiq_stack; + } + per_cpu(fiq_stack, cpu) = stack; + stack += THREAD_START_SP; + + local_irq_save(irqflags); + ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER, + cpu, (uintptr_t)trusty_fiq_glue_arm64, + (uintptr_t)stack); + local_irq_restore(irqflags); + if (ret) { + pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, %p, %p) failed 0x%x\n", + __func__, cpu, 
trusty_fiq_glue_arm64, + stack, ret); + ret = -EINVAL; + goto err_set_fiq_handler; + } + } + return 0; + +err_alloc_fiq_stack: +err_set_fiq_handler: + fiq_glue_clear_handler(); + return ret; +} + +int fiq_glue_register_handler(struct fiq_glue_handler *handler) +{ + int ret; + + if (!handler || !handler->fiq) { + ret = -EINVAL; + goto err_bad_arg; + } + + mutex_lock(&fiq_glue_lock); + + if (!trusty_dev) { + ret = -ENODEV; + goto err_no_trusty; + } + + handler->next = fiq_handlers; + /* + * Write barrier paired with smp_read_barrier_depends in + * trusty_fiq_handler. Make sure next pointer is updated before + * fiq_handlers so trusty_fiq_handler does not see an uninitialized + * value and terminate early or crash. + */ + smp_wmb(); + fiq_handlers = handler; + + smp_call_function(smp_nop_call, NULL, true); + + if (!handler->next) { + ret = fiq_glue_set_handler(); + if (ret) + goto err_set_fiq_handler; + } + + mutex_unlock(&fiq_glue_lock); + return 0; + +err_set_fiq_handler: + fiq_handlers = handler->next; +err_no_trusty: + mutex_unlock(&fiq_glue_lock); +err_bad_arg: + pr_err("%s: failed, %d\n", __func__, ret); + return ret; +} + +int trusty_fiq_arch_probe(struct platform_device *pdev) +{ + mutex_lock(&fiq_glue_lock); + trusty_dev = pdev->dev.parent; + mutex_unlock(&fiq_glue_lock); + + return 0; +} + +void trusty_fiq_arch_remove(struct platform_device *pdev) +{ + mutex_lock(&fiq_glue_lock); + fiq_glue_clear_handler(); + trusty_dev = NULL; + mutex_unlock(&fiq_glue_lock); +} diff --git a/drivers/trusty/trusty-fiq.c b/drivers/trusty/trusty-fiq.c new file mode 100644 index 000000000000..1a031c67ea72 --- /dev/null +++ b/drivers/trusty/trusty-fiq.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "trusty-fiq.h" + +static int trusty_fiq_remove_child(struct device *dev, void *data) +{ + platform_device_unregister(to_platform_device(dev)); + return 0; +} + +static int trusty_fiq_probe(struct platform_device *pdev) +{ + int ret; + + ret = trusty_fiq_arch_probe(pdev); + if (ret) + goto err_set_fiq_return; + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add children: %d\n", ret); + goto err_add_children; + } + + return 0; + +err_add_children: + device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child); + trusty_fiq_arch_remove(pdev); +err_set_fiq_return: + return ret; +} + +static int trusty_fiq_remove(struct platform_device *pdev) +{ + device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child); + trusty_fiq_arch_remove(pdev); + return 0; +} + +static const struct of_device_id trusty_fiq_of_match[] = { + { .compatible = "android,trusty-fiq-v1", }, + {}, +}; + +static struct platform_driver trusty_fiq_driver = { + .probe = trusty_fiq_probe, + .remove = trusty_fiq_remove, + .driver = { + .name = "trusty-fiq", + .owner = THIS_MODULE, + .of_match_table = trusty_fiq_of_match, + }, +}; + +static int __init trusty_fiq_driver_init(void) +{ + return platform_driver_register(&trusty_fiq_driver); +} + +static void __exit trusty_fiq_driver_exit(void) +{ + platform_driver_unregister(&trusty_fiq_driver); +} + +subsys_initcall(trusty_fiq_driver_init); +module_exit(trusty_fiq_driver_exit); diff --git a/drivers/trusty/trusty-fiq.h b/drivers/trusty/trusty-fiq.h new file mode 100644 index 000000000000..d4ae9a9635f3 --- /dev/null +++ b/drivers/trusty/trusty-fiq.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +int trusty_fiq_arch_probe(struct platform_device *pdev); +void trusty_fiq_arch_remove(struct platform_device *pdev); diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c new file mode 100644 index 000000000000..f0b6b1bb444a --- /dev/null +++ b/drivers/trusty/trusty-ipc.c @@ -0,0 +1,1699 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +#include +#endif +#include +#include +#include + +#include +#include +#include + +#include +#include + +#define MAX_DEVICES 4 + +#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */ + +#define REPLY_TIMEOUT 5000 +#define TXBUF_TIMEOUT 15000 + +#define PULSE_ACTIVE 1 +#define PULSE_DEACTIVE 0 + +#define MAX_SRV_NAME_LEN 256 +#define MAX_DEV_NAME_LEN 32 + +#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE +#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE + +#define TIPC_CTRL_ADDR 53 +#define TIPC_ANY_ADDR 0xFFFFFFFF + +#define TIPC_MIN_LOCAL_ADDR 1024 + +#define TIPC_IOC_MAGIC 'r' +#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *) +#if defined(CONFIG_COMPAT) +#define TIPC_IOC_CONNECT_COMPAT _IOW(TIPC_IOC_MAGIC, 0x80, \ + compat_uptr_t) +#endif + +struct tipc_virtio_dev; + +struct tipc_dev_config { + u32 msg_buf_max_size; + u32 msg_buf_alignment; + char dev_name[MAX_DEV_NAME_LEN]; +} __packed; + +struct tipc_msg_hdr { + u32 src; + u32 dst; + u32 len; + u16 flags; + u16 reserved; + u8 data[0]; +} __packed; + +enum tipc_ctrl_msg_types { + TIPC_CTRL_MSGTYPE_GO_ONLINE = 1, + TIPC_CTRL_MSGTYPE_GO_OFFLINE, + TIPC_CTRL_MSGTYPE_CONN_REQ, + TIPC_CTRL_MSGTYPE_CONN_RSP, + TIPC_CTRL_MSGTYPE_DISC_REQ, +}; + +struct tipc_ctrl_msg { + u32 type; + u32 body_len; + u8 body[0]; +} __packed; + +struct tipc_conn_req_body { + char name[MAX_SRV_NAME_LEN]; +} __packed; + +struct tipc_conn_rsp_body { + u32 target; + u32 status; + u32 remote; + u32 max_msg_size; + u32 max_msg_cnt; +} __packed; + +struct tipc_disc_req_body { + u32 target; +} __packed; + +struct tipc_cdev_node { + struct cdev cdev; + struct device *dev; + unsigned int minor; +}; + +enum tipc_device_state { + VDS_OFFLINE = 0, + VDS_ONLINE, + VDS_DEAD, +}; + +struct tipc_virtio_dev { + struct kref refcount; + struct mutex lock; /* protects access to this device */ + struct virtio_device *vdev; + struct virtqueue *rxvq; + struct virtqueue *txvq; + uint msg_buf_cnt; + uint msg_buf_max_cnt; + size_t msg_buf_max_sz; + uint free_msg_buf_cnt; + struct list_head free_buf_list; + wait_queue_head_t sendq; + struct idr addr_idr; + enum tipc_device_state state; + struct tipc_cdev_node cdev_node; + char cdev_name[MAX_DEV_NAME_LEN]; +}; + +enum tipc_chan_state { + TIPC_DISCONNECTED = 0, + TIPC_CONNECTING, + TIPC_CONNECTED, + TIPC_STALE, +}; + +struct tipc_chan { + struct mutex lock; /* protects channel state */ + struct kref refcount; + enum tipc_chan_state state; + struct tipc_virtio_dev *vds; + const struct tipc_chan_ops *ops; + void *ops_arg; + u32 remote; + u32 local; + u32 max_msg_size; + u32 max_msg_cnt; + char srv_name[MAX_SRV_NAME_LEN]; +}; + +static struct class *tipc_class; +static unsigned int tipc_major; + +struct virtio_device *default_vdev; + +static DEFINE_IDR(tipc_devices); +static DEFINE_MUTEX(tipc_devices_lock); + +static int _match_any(int id, void *p, void *data) +{ + return id; +} + +static int _match_data(int id, void *p, void *data) +{ + return (p == data); +} + +static void *_alloc_shareable_mem(size_t sz, phys_addr_t *ppa, gfp_t gfp) +{ + void *buf_va; + buf_va = alloc_pages_exact(sz, gfp); + *ppa = virt_to_phys(buf_va); + return buf_va; +} + +static void _free_shareable_mem(size_t sz, void *va, phys_addr_t pa) +{ + free_pages_exact(va, sz); +} + +static struct tipc_msg_buf *_alloc_msg_buf(size_t sz) +{ + struct tipc_msg_buf *mb; + + /* allocate tracking structure */ + mb = 
kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL); + if (!mb) + return NULL; + + /* allocate buffer that can be shared with secure world */ + mb->buf_va = _alloc_shareable_mem(sz, &mb->buf_pa, GFP_KERNEL); + if (!mb->buf_va) + goto err_alloc; + + mb->buf_sz = sz; + + return mb; + +err_alloc: + kfree(mb); + return NULL; +} + +static void _free_msg_buf(struct tipc_msg_buf *mb) +{ + _free_shareable_mem(mb->buf_sz, mb->buf_va, mb->buf_pa); + kfree(mb); +} + +static void _free_msg_buf_list(struct list_head *list) +{ + struct tipc_msg_buf *mb = NULL; + + mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); + while (mb) { + list_del(&mb->node); + _free_msg_buf(mb); + mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); + } +} + +static inline void mb_reset(struct tipc_msg_buf *mb) +{ + mb->wpos = 0; + mb->rpos = 0; +} + +static void _free_chan(struct kref *kref) +{ + struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount); + kfree(ch); +} + +static void _free_vds(struct kref *kref) +{ + struct tipc_virtio_dev *vds = + container_of(kref, struct tipc_virtio_dev, refcount); + kfree(vds); +} + +static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds) +{ + return _alloc_msg_buf(vds->msg_buf_max_sz); +} + +static void vds_free_msg_buf(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *mb) +{ + _free_msg_buf(mb); +} + +static bool _put_txbuf_locked(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *mb) +{ + list_add_tail(&mb->node, &vds->free_buf_list); + return vds->free_msg_buf_cnt++ == 0; +} + +static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds) +{ + struct tipc_msg_buf *mb; + + if (vds->state != VDS_ONLINE) + return ERR_PTR(-ENODEV); + + if (vds->free_msg_buf_cnt) { + /* take it out of free list */ + mb = list_first_entry(&vds->free_buf_list, + struct tipc_msg_buf, node); + list_del(&mb->node); + vds->free_msg_buf_cnt--; + } else { + if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt) + return ERR_PTR(-EAGAIN); + + /* try to allocate it */ + mb = _alloc_msg_buf(vds->msg_buf_max_sz); + if (!mb) + return ERR_PTR(-ENOMEM); + + vds->msg_buf_cnt++; + } + return mb; +} + +static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds) +{ + struct tipc_msg_buf *mb; + + mutex_lock(&vds->lock); + mb = _get_txbuf_locked(vds); + mutex_unlock(&vds->lock); + + return mb; +} + +static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb) +{ + if (!vds) + return; + + mutex_lock(&vds->lock); + _put_txbuf_locked(vds, mb); + wake_up_interruptible(&vds->sendq); + mutex_unlock(&vds->lock); +} + +static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds, + long timeout) +{ + struct tipc_msg_buf *mb; + + if (!vds) + return ERR_PTR(-EINVAL); + + mb = _vds_get_txbuf(vds); + + if ((PTR_ERR(mb) == -EAGAIN) && timeout) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + timeout = msecs_to_jiffies(timeout); + add_wait_queue(&vds->sendq, &wait); + for (;;) { + timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, + timeout); + if (!timeout) { + mb = ERR_PTR(-ETIMEDOUT); + break; + } + + if (signal_pending(current)) { + mb = ERR_PTR(-ERESTARTSYS); + break; + } + + mb = _vds_get_txbuf(vds); + if (PTR_ERR(mb) != -EAGAIN) + break; + } + remove_wait_queue(&vds->sendq, &wait); + } + + if (IS_ERR(mb)) + return mb; + + BUG_ON(!mb); + + /* reset and reserve space for message header */ + mb_reset(mb); + mb_put_data(mb, sizeof(struct tipc_msg_hdr)); + + return mb; +} + +static int vds_queue_txbuf(struct tipc_virtio_dev *vds, + struct 
tipc_msg_buf *mb) +{ + int err; + struct scatterlist sg; + bool need_notify = false; + + if (!vds) + return -EINVAL; + + mutex_lock(&vds->lock); + if (vds->state == VDS_ONLINE) { + sg_init_one(&sg, mb->buf_va, mb->wpos); + err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL); + need_notify = virtqueue_kick_prepare(vds->txvq); + } else { + err = -ENODEV; + } + mutex_unlock(&vds->lock); + + if (need_notify) + virtqueue_notify(vds->txvq); + + return err; +} + +static int vds_add_channel(struct tipc_virtio_dev *vds, + struct tipc_chan *chan) +{ + int ret; + + mutex_lock(&vds->lock); + if (vds->state == VDS_ONLINE) { + ret = idr_alloc(&vds->addr_idr, chan, + TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1, + GFP_KERNEL); + if (ret > 0) { + chan->local = ret; + kref_get(&chan->refcount); + ret = 0; + } + } else { + ret = -EINVAL; + } + mutex_unlock(&vds->lock); + + return ret; +} + +static void vds_del_channel(struct tipc_virtio_dev *vds, + struct tipc_chan *chan) +{ + mutex_lock(&vds->lock); + if (chan->local) { + idr_remove(&vds->addr_idr, chan->local); + chan->local = 0; + chan->remote = 0; + kref_put(&chan->refcount, _free_chan); + } + mutex_unlock(&vds->lock); +} + +static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds, + u32 addr) +{ + int id; + struct tipc_chan *chan = NULL; + + mutex_lock(&vds->lock); + if (addr == TIPC_ANY_ADDR) { + id = idr_for_each(&vds->addr_idr, _match_any, NULL); + if (id > 0) + chan = idr_find(&vds->addr_idr, id); + } else { + chan = idr_find(&vds->addr_idr, addr); + } + if (chan) + kref_get(&chan->refcount); + mutex_unlock(&vds->lock); + + return chan; +} + +static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds, + const struct tipc_chan_ops *ops, + void *ops_arg) +{ + int ret; + struct tipc_chan *chan = NULL; + + if (!vds) + return ERR_PTR(-ENOENT); + + if (!ops) + return ERR_PTR(-EINVAL); + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return ERR_PTR(-ENOMEM); + + kref_get(&vds->refcount); + chan->vds = vds; + chan->ops = ops; + chan->ops_arg = ops_arg; + mutex_init(&chan->lock); + kref_init(&chan->refcount); + chan->state = TIPC_DISCONNECTED; + + ret = vds_add_channel(vds, chan); + if (ret) { + kfree(chan); + kref_put(&vds->refcount, _free_vds); + return ERR_PTR(ret); + } + + return chan; +} + +static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst) +{ + struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr)); + + hdr->src = src; + hdr->dst = dst; + hdr->len = mb_avail_data(mb); + hdr->flags = 0; + hdr->reserved = 0; +} + +/*****************************************************************************/ + +struct tipc_chan *tipc_create_channel(struct device *dev, + const struct tipc_chan_ops *ops, + void *ops_arg) +{ + struct virtio_device *vd; + struct tipc_chan *chan; + struct tipc_virtio_dev *vds; + + mutex_lock(&tipc_devices_lock); + if (dev) { + vd = container_of(dev, struct virtio_device, dev); + } else { + vd = default_vdev; + if (!vd) { + mutex_unlock(&tipc_devices_lock); + return ERR_PTR(-ENOENT); + } + } + vds = vd->priv; + kref_get(&vds->refcount); + mutex_unlock(&tipc_devices_lock); + + chan = vds_create_channel(vds, ops, ops_arg); + kref_put(&vds->refcount, _free_vds); + return chan; +} +EXPORT_SYMBOL(tipc_create_channel); + +struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan) +{ + return vds_alloc_msg_buf(chan->vds); +} +EXPORT_SYMBOL(tipc_chan_get_rxbuf); + +void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) +{ + vds_free_msg_buf(chan->vds, mb); +} 
+EXPORT_SYMBOL(tipc_chan_put_rxbuf); + +struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, + long timeout) +{ + return vds_get_txbuf(chan->vds, timeout); +} +EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout); + +void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) +{ + vds_put_txbuf(chan->vds, mb); +} +EXPORT_SYMBOL(tipc_chan_put_txbuf); + +int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb) +{ + int err; + + mutex_lock(&chan->lock); + switch (chan->state) { + case TIPC_CONNECTED: + fill_msg_hdr(mb, chan->local, chan->remote); + err = vds_queue_txbuf(chan->vds, mb); + if (err) { + /* this should never happen */ + pr_err("%s: failed to queue tx buffer (%d)\n", + __func__, err); + } + break; + case TIPC_DISCONNECTED: + case TIPC_CONNECTING: + err = -ENOTCONN; + break; + case TIPC_STALE: + err = -ESHUTDOWN; + break; + default: + err = -EBADFD; + pr_err("%s: unexpected channel state %d\n", + __func__, chan->state); + } + mutex_unlock(&chan->lock); + return err; +} +EXPORT_SYMBOL(tipc_chan_queue_msg); + + +int tipc_chan_connect(struct tipc_chan *chan, const char *name) +{ + int err; + struct tipc_ctrl_msg *msg; + struct tipc_conn_req_body *body; + struct tipc_msg_buf *txbuf; + + txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); + if (IS_ERR(txbuf)) + return PTR_ERR(txbuf); + + /* reserve space for connection request control message */ + msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); + body = (struct tipc_conn_req_body *)msg->body; + + /* fill message */ + msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ; + msg->body_len = sizeof(*body); + + strncpy(body->name, name, sizeof(body->name)); + body->name[sizeof(body->name)-1] = '\0'; + + mutex_lock(&chan->lock); + switch (chan->state) { + case TIPC_DISCONNECTED: + /* save service name we are connecting to */ + strcpy(chan->srv_name, body->name); + + fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); + err = vds_queue_txbuf(chan->vds, txbuf); + if (err) { + /* this should never happen */ + pr_err("%s: failed to queue tx buffer (%d)\n", + __func__, err); + } else { + chan->state = TIPC_CONNECTING; + txbuf = NULL; /* prevents discarding buffer */ + } + break; + case TIPC_CONNECTED: + case TIPC_CONNECTING: + /* check if we are trying to connect to the same service */ + if (strcmp(chan->srv_name, body->name) == 0) + err = 0; + else + if (chan->state == TIPC_CONNECTING) + err = -EALREADY; /* in progress */ + else + err = -EISCONN; /* already connected */ + break; + + case TIPC_STALE: + err = -ESHUTDOWN; + break; + default: + err = -EBADFD; + pr_err("%s: unexpected channel state %d\n", + __func__, chan->state); + break; + } + mutex_unlock(&chan->lock); + + if (txbuf) + tipc_chan_put_txbuf(chan, txbuf); /* discard it */ + + return err; +} +EXPORT_SYMBOL(tipc_chan_connect); + +int tipc_chan_shutdown(struct tipc_chan *chan) +{ + int err; + struct tipc_ctrl_msg *msg; + struct tipc_disc_req_body *body; + struct tipc_msg_buf *txbuf = NULL; + + /* get tx buffer */ + txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); + if (IS_ERR(txbuf)) + return PTR_ERR(txbuf); + + mutex_lock(&chan->lock); + if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) { + /* reserve space for disconnect request control message */ + msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); + body = (struct tipc_disc_req_body *)msg->body; + + msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ; + msg->body_len = sizeof(*body); + body->target = chan->remote; + + fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); + err = 
vds_queue_txbuf(chan->vds, txbuf); + if (err) { + /* this should never happen */ + pr_err("%s: failed to queue tx buffer (%d)\n", + __func__, err); + } + } else { + err = -ENOTCONN; + } + chan->state = TIPC_STALE; + mutex_unlock(&chan->lock); + + if (err) { + /* release buffer */ + tipc_chan_put_txbuf(chan, txbuf); + } + + return err; +} +EXPORT_SYMBOL(tipc_chan_shutdown); + +void tipc_chan_destroy(struct tipc_chan *chan) +{ + mutex_lock(&chan->lock); + if (chan->vds) { + vds_del_channel(chan->vds, chan); + kref_put(&chan->vds->refcount, _free_vds); + chan->vds = NULL; + } + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); +} +EXPORT_SYMBOL(tipc_chan_destroy); + +/***************************************************************************/ + +struct tipc_dn_chan { + int pulse; + int state; + struct mutex lock; /* protects rx_msg_queue list and channel state */ + struct tipc_chan *chan; + wait_queue_head_t readq; + struct completion reply_comp; + struct list_head rx_msg_queue; +}; + +static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout) +{ + int ret; + + ret = wait_for_completion_interruptible_timeout(&dn->reply_comp, + msecs_to_jiffies(timeout)); + if (ret < 0) + return ret; + + mutex_lock(&dn->lock); + if (!ret) { + /* no reply from remote */ + dn->state = TIPC_STALE; + ret = -ETIMEDOUT; + } else { + /* got reply */ + if (dn->pulse == PULSE_ACTIVE) { + dn->pulse = PULSE_DEACTIVE; + ret = 0; + } else if (dn->state == TIPC_DISCONNECTED) + if (!list_empty(&dn->rx_msg_queue)) + ret = 0; + else + ret = -ENOTCONN; + else + ret = -EIO; + } + mutex_unlock(&dn->lock); + + return ret; +} + +struct tipc_msg_buf *dn_handle_msg(void *data, struct tipc_msg_buf *rxbuf) +{ + struct tipc_dn_chan *dn = data; + struct tipc_msg_buf *newbuf = rxbuf; + + mutex_lock(&dn->lock); + if (dn->state == TIPC_CONNECTED) { + /* get new buffer */ + newbuf = tipc_chan_get_rxbuf(dn->chan); + if (newbuf) { + /* queue an old buffer and return a new one */ + list_add_tail(&rxbuf->node, &dn->rx_msg_queue); + wake_up_interruptible(&dn->readq); + } else { + /* + * return an old buffer effectively discarding + * incoming message + */ + pr_err("%s: discard incoming message\n", __func__); + newbuf = rxbuf; + } + } + mutex_unlock(&dn->lock); + + return newbuf; +} + +static void dn_connected(struct tipc_dn_chan *dn) +{ + mutex_lock(&dn->lock); + dn->state = TIPC_CONNECTED; + dn->pulse = PULSE_ACTIVE; + + /* complete all pending */ + complete(&dn->reply_comp); + + mutex_unlock(&dn->lock); +} + +static void dn_disconnected(struct tipc_dn_chan *dn) +{ + mutex_lock(&dn->lock); + dn->state = TIPC_DISCONNECTED; + + /* complete all pending */ + complete(&dn->reply_comp); + + /* wakeup all readers */ + wake_up_interruptible_all(&dn->readq); + + mutex_unlock(&dn->lock); +} + +static void dn_shutdown(struct tipc_dn_chan *dn) +{ + mutex_lock(&dn->lock); + + /* set state to STALE */ + dn->state = TIPC_STALE; + + /* complete all pending */ + complete(&dn->reply_comp); + + /* wakeup all readers */ + wake_up_interruptible_all(&dn->readq); + + mutex_unlock(&dn->lock); +} + +static void dn_handle_event(void *data, int event) +{ + struct tipc_dn_chan *dn = data; + + switch (event) { + case TIPC_CHANNEL_SHUTDOWN: + dn_shutdown(dn); + break; + + case TIPC_CHANNEL_DISCONNECTED: + dn_disconnected(dn); + break; + + case TIPC_CHANNEL_CONNECTED: + dn_connected(dn); + break; + + default: + pr_err("%s: unhandled event %d\n", __func__, event); + break; + } +} + +static struct tipc_chan_ops _dn_ops = { + .handle_msg = 
dn_handle_msg, + .handle_event = dn_handle_event, +}; + +#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev) +#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node) + +static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn) +{ + int ret; + struct tipc_virtio_dev *vds = NULL; + + mutex_lock(&tipc_devices_lock); + ret = idr_for_each(&tipc_devices, _match_data, cdn); + if (ret) { + vds = cdn_to_vds(cdn); + kref_get(&vds->refcount); + } + mutex_unlock(&tipc_devices_lock); + return vds; +} + +static int tipc_open(struct inode *inode, struct file *filp) +{ + int ret; + struct tipc_virtio_dev *vds; + struct tipc_dn_chan *dn; + struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev); + + vds = _dn_lookup_vds(cdn); + if (!vds) { + ret = -ENOENT; + goto err_vds_lookup; + } + + dn = kzalloc(sizeof(*dn), GFP_KERNEL); + if (!dn) { + ret = -ENOMEM; + goto err_alloc_chan; + } + + mutex_init(&dn->lock); + init_waitqueue_head(&dn->readq); + init_completion(&dn->reply_comp); + INIT_LIST_HEAD(&dn->rx_msg_queue); + + dn->state = TIPC_DISCONNECTED; + dn->pulse = PULSE_DEACTIVE; + + dn->chan = vds_create_channel(vds, &_dn_ops, dn); + if (IS_ERR(dn->chan)) { + ret = PTR_ERR(dn->chan); + goto err_create_chan; + } + + filp->private_data = dn; + kref_put(&vds->refcount, _free_vds); + return 0; + +err_create_chan: + kfree(dn); +err_alloc_chan: + kref_put(&vds->refcount, _free_vds); +err_vds_lookup: + return ret; +} + + +static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name) +{ + int err; + char name[MAX_SRV_NAME_LEN]; + + /* copy in service name from user space */ + err = strncpy_from_user(name, usr_name, sizeof(name)); + if (err < 0) { + pr_err("%s: copy_from_user (%p) failed (%d)\n", + __func__, usr_name, err); + return err; + } + name[sizeof(name)-1] = '\0'; + + /* send connect request */ + err = tipc_chan_connect(dn->chan, name); + if (err) + return err; + + /* and wait for reply */ + return dn_wait_for_reply(dn, REPLY_TIMEOUT); +} + +static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + struct tipc_dn_chan *dn = filp->private_data; + + if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC) + return -EINVAL; + + switch (cmd) { + case TIPC_IOC_CONNECT: + ret = dn_connect_ioctl(dn, (char __user *)arg); + break; + default: + pr_warn("%s: Unhandled ioctl cmd: 0x%x\n", + __func__, cmd); + ret = -EINVAL; + } + return ret; +} + +#if defined(CONFIG_COMPAT) +static long tipc_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int ret; + struct tipc_dn_chan *dn = filp->private_data; + void __user *user_req = compat_ptr(arg); + + if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC) + return -EINVAL; + + switch (cmd) { + case TIPC_IOC_CONNECT_COMPAT: + ret = dn_connect_ioctl(dn, user_req); + break; + default: + pr_warn("%s: Unhandled ioctl cmd: 0x%x\n", + __func__, cmd); + ret = -EINVAL; + } + return ret; +} +#endif + +static inline bool _got_rx(struct tipc_dn_chan *dn) +{ + if (dn->state != TIPC_CONNECTED) + return true; + + if (!list_empty(&dn->rx_msg_queue)) + return true; + + return false; +} + +static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + ssize_t ret; + size_t len; + struct tipc_msg_buf *mb; + struct file *filp = iocb->ki_filp; + struct tipc_dn_chan *dn = filp->private_data; + + mutex_lock(&dn->lock); + + while (list_empty(&dn->rx_msg_queue)) { + if (dn->state != TIPC_CONNECTED) { + if (dn->state == TIPC_CONNECTING) + ret = -ENOTCONN; + else if (dn->state == 
TIPC_DISCONNECTED) + ret = -ENOTCONN; + else if (dn->state == TIPC_STALE) + ret = -ESHUTDOWN; + else + ret = -EBADFD; + goto out; + } + + mutex_unlock(&dn->lock); + + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(dn->readq, _got_rx(dn))) + return -ERESTARTSYS; + + mutex_lock(&dn->lock); + } + + mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node); + + len = mb_avail_data(mb); + if (len > iov_iter_count(iter)) { + ret = -EMSGSIZE; + goto out; + } + + if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) { + ret = -EFAULT; + goto out; + } + + ret = len; + list_del(&mb->node); + tipc_chan_put_rxbuf(dn->chan, mb); + +out: + mutex_unlock(&dn->lock); + return ret; +} + +static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + ssize_t ret; + size_t len; + long timeout = TXBUF_TIMEOUT; + struct tipc_msg_buf *txbuf = NULL; + struct file *filp = iocb->ki_filp; + struct tipc_dn_chan *dn = filp->private_data; + + if (filp->f_flags & O_NONBLOCK) + timeout = 0; + + txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout); + if (IS_ERR(txbuf)) + return PTR_ERR(txbuf); + + /* message length */ + len = iov_iter_count(iter); + + /* check available space */ + if (len > mb_avail_space(txbuf)) { + ret = -EMSGSIZE; + goto err_out; + } + + /* copy in message data */ + if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len) { + ret = -EFAULT; + goto err_out; + } + + /* queue message */ + ret = tipc_chan_queue_msg(dn->chan, txbuf); + if (ret) + goto err_out; + + return len; + +err_out: + tipc_chan_put_txbuf(dn->chan, txbuf); + return ret; +} + +static unsigned int tipc_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + struct tipc_dn_chan *dn = filp->private_data; + + mutex_lock(&dn->lock); + + poll_wait(filp, &dn->readq, wait); + + /* Writes always succeed for now */ + mask |= POLLOUT | POLLWRNORM; + + if (!list_empty(&dn->rx_msg_queue)) + mask |= POLLIN | POLLRDNORM; + + if (dn->state != TIPC_CONNECTED) + mask |= POLLERR; + + mutex_unlock(&dn->lock); + return mask; +} + + +static int tipc_release(struct inode *inode, struct file *filp) +{ + struct tipc_dn_chan *dn = filp->private_data; + + dn_shutdown(dn); + + /* free all pending buffers */ + _free_msg_buf_list(&dn->rx_msg_queue); + + /* shutdown channel */ + tipc_chan_shutdown(dn->chan); + + /* and destroy it */ + tipc_chan_destroy(dn->chan); + + kfree(dn); + + return 0; +} + +static const struct file_operations tipc_fops = { + .open = tipc_open, + .release = tipc_release, + .unlocked_ioctl = tipc_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = tipc_compat_ioctl, +#endif + .read_iter = tipc_read_iter, + .write_iter = tipc_write_iter, + .poll = tipc_poll, + .owner = THIS_MODULE, +}; + +/*****************************************************************************/ + +static void chan_trigger_event(struct tipc_chan *chan, int event) +{ + if (!event) + return; + + chan->ops->handle_event(chan->ops_arg, event); +} + +static void _cleanup_vq(struct virtqueue *vq) +{ + struct tipc_msg_buf *mb; + + while ((mb = virtqueue_detach_unused_buf(vq)) != NULL) + _free_msg_buf(mb); +} + +static int _create_cdev_node(struct device *parent, + struct tipc_cdev_node *cdn, + const char *name) +{ + int ret; + dev_t devt; + + if (!name) { + dev_dbg(parent, "%s: cdev name has to be provided\n", + __func__); + return -EINVAL; + } + + /* allocate minor */ + ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES-1, GFP_KERNEL); + if (ret < 0) { + dev_dbg(parent, "%s: failed (%d) to 
get id\n", + __func__, ret); + return ret; + } + + cdn->minor = ret; + cdev_init(&cdn->cdev, &tipc_fops); + cdn->cdev.owner = THIS_MODULE; + + /* Add character device */ + devt = MKDEV(tipc_major, cdn->minor); + ret = cdev_add(&cdn->cdev, devt, 1); + if (ret) { + dev_dbg(parent, "%s: cdev_add failed (%d)\n", + __func__, ret); + goto err_add_cdev; + } + + /* Create a device node */ + cdn->dev = device_create(tipc_class, parent, + devt, NULL, "trusty-ipc-%s", name); + if (IS_ERR(cdn->dev)) { + ret = PTR_ERR(cdn->dev); + dev_dbg(parent, "%s: device_create failed: %d\n", + __func__, ret); + goto err_device_create; + } + + return 0; + +err_device_create: + cdn->dev = NULL; + cdev_del(&cdn->cdev); +err_add_cdev: + idr_remove(&tipc_devices, cdn->minor); + return ret; +} + +static void create_cdev_node(struct tipc_virtio_dev *vds, + struct tipc_cdev_node *cdn) +{ + int err; + + mutex_lock(&tipc_devices_lock); + + if (!default_vdev) { + kref_get(&vds->refcount); + default_vdev = vds->vdev; + } + + if (vds->cdev_name[0] && !cdn->dev) { + kref_get(&vds->refcount); + err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name); + if (err) { + dev_err(&vds->vdev->dev, + "failed (%d) to create cdev node\n", err); + kref_put(&vds->refcount, _free_vds); + } + } + mutex_unlock(&tipc_devices_lock); +} + +static void destroy_cdev_node(struct tipc_virtio_dev *vds, + struct tipc_cdev_node *cdn) +{ + mutex_lock(&tipc_devices_lock); + if (cdn->dev) { + device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor)); + cdev_del(&cdn->cdev); + idr_remove(&tipc_devices, cdn->minor); + cdn->dev = NULL; + kref_put(&vds->refcount, _free_vds); + } + + if (default_vdev == vds->vdev) { + default_vdev = NULL; + kref_put(&vds->refcount, _free_vds); + } + + mutex_unlock(&tipc_devices_lock); +} + +static void _go_online(struct tipc_virtio_dev *vds) +{ + mutex_lock(&vds->lock); + if (vds->state == VDS_OFFLINE) + vds->state = VDS_ONLINE; + mutex_unlock(&vds->lock); + + create_cdev_node(vds, &vds->cdev_node); + + dev_info(&vds->vdev->dev, "is online\n"); +} + +static void _go_offline(struct tipc_virtio_dev *vds) +{ + struct tipc_chan *chan; + + /* change state to OFFLINE */ + mutex_lock(&vds->lock); + if (vds->state != VDS_ONLINE) { + mutex_unlock(&vds->lock); + return; + } + vds->state = VDS_OFFLINE; + mutex_unlock(&vds->lock); + + /* wakeup all waiters */ + wake_up_interruptible_all(&vds->sendq); + + /* shutdown all channels */ + while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) { + mutex_lock(&chan->lock); + chan->state = TIPC_STALE; + chan->remote = 0; + chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN); + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); + } + + /* shutdown device node */ + destroy_cdev_node(vds, &vds->cdev_node); + + dev_info(&vds->vdev->dev, "is offline\n"); +} + +static void _handle_conn_rsp(struct tipc_virtio_dev *vds, + struct tipc_conn_rsp_body *rsp, size_t len) +{ + struct tipc_chan *chan; + + if (sizeof(*rsp) != len) { + dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n", + __func__, len); + return; + } + + dev_dbg(&vds->vdev->dev, + "%s: connection response: for addr 0x%x: " + "status %d remote addr 0x%x\n", + __func__, rsp->target, rsp->status, rsp->remote); + + /* Lookup channel */ + chan = vds_lookup_channel(vds, rsp->target); + if (chan) { + mutex_lock(&chan->lock); + if (chan->state == TIPC_CONNECTING) { + if (!rsp->status) { + chan->state = TIPC_CONNECTED; + chan->remote = rsp->remote; + chan->max_msg_cnt = rsp->max_msg_cnt; + chan->max_msg_size = 
rsp->max_msg_size; + chan_trigger_event(chan, + TIPC_CHANNEL_CONNECTED); + } else { + chan->state = TIPC_DISCONNECTED; + chan->remote = 0; + chan_trigger_event(chan, + TIPC_CHANNEL_DISCONNECTED); + } + } + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); + } +} + +static void _handle_disc_req(struct tipc_virtio_dev *vds, + struct tipc_disc_req_body *req, size_t len) +{ + struct tipc_chan *chan; + + if (sizeof(*req) != len) { + dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n", + __func__, len); + return; + } + + dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n", + __func__, req->target); + + chan = vds_lookup_channel(vds, req->target); + if (chan) { + mutex_lock(&chan->lock); + if (chan->state == TIPC_CONNECTED || + chan->state == TIPC_CONNECTING) { + chan->state = TIPC_DISCONNECTED; + chan->remote = 0; + chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED); + } + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); + } +} + +static void _handle_ctrl_msg(struct tipc_virtio_dev *vds, + void *data, int len, u32 src) +{ + struct tipc_ctrl_msg *msg = data; + + if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) { + dev_err(&vds->vdev->dev, + "%s: Invalid message length ( %d vs. %d)\n", + __func__, (int)(sizeof(*msg) + msg->body_len), len); + return; + } + + dev_dbg(&vds->vdev->dev, + "%s: Incoming ctrl message: src 0x%x type %d len %d\n", + __func__, src, msg->type, msg->body_len); + + switch (msg->type) { + case TIPC_CTRL_MSGTYPE_GO_ONLINE: + _go_online(vds); + break; + + case TIPC_CTRL_MSGTYPE_GO_OFFLINE: + _go_offline(vds); + break; + + case TIPC_CTRL_MSGTYPE_CONN_RSP: + _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body, + msg->body_len); + break; + + case TIPC_CTRL_MSGTYPE_DISC_REQ: + _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body, + msg->body_len); + break; + + default: + dev_warn(&vds->vdev->dev, + "%s: Unexpected message type: %d\n", + __func__, msg->type); + } +} + +static int _handle_rxbuf(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *rxbuf, size_t rxlen) +{ + int err; + struct scatterlist sg; + struct tipc_msg_hdr *msg; + struct device *dev = &vds->vdev->dev; + + /* message sanity check */ + if (rxlen > rxbuf->buf_sz) { + dev_warn(dev, "inbound msg is too big: %zd\n", rxlen); + goto drop_it; + } + + if (rxlen < sizeof(*msg)) { + dev_warn(dev, "inbound msg is too short: %zd\n", rxlen); + goto drop_it; + } + + /* reset buffer and put data */ + mb_reset(rxbuf); + mb_put_data(rxbuf, rxlen); + + /* get message header */ + msg = mb_get_data(rxbuf, sizeof(*msg)); + if (mb_avail_data(rxbuf) != msg->len) { + dev_warn(dev, "inbound msg length mismatch: (%d vs. 
%d)\n", + (uint) mb_avail_data(rxbuf), (uint)msg->len); + goto drop_it; + } + + dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d\n", + msg->src, msg->dst, msg->len, msg->flags, msg->reserved); + + /* message directed to control endpoint is a special case */ + if (msg->dst == TIPC_CTRL_ADDR) { + _handle_ctrl_msg(vds, msg->data, msg->len, msg->src); + } else { + struct tipc_chan *chan = NULL; + /* Lookup channel */ + chan = vds_lookup_channel(vds, msg->dst); + if (chan) { + /* handle it */ + rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf); + BUG_ON(!rxbuf); + kref_put(&chan->refcount, _free_chan); + } + } + +drop_it: + /* add the buffer back to the virtqueue */ + sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz); + err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); + if (err < 0) { + dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); + return err; + } + + return 0; +} + +static void _rxvq_cb(struct virtqueue *rxvq) +{ + unsigned int len; + struct tipc_msg_buf *mb; + unsigned int msg_cnt = 0; + struct tipc_virtio_dev *vds = rxvq->vdev->priv; + + while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) { + if (_handle_rxbuf(vds, mb, len)) + break; + msg_cnt++; + } + + /* tell the other size that we added rx buffers */ + if (msg_cnt) + virtqueue_kick(rxvq); +} + +static void _txvq_cb(struct virtqueue *txvq) +{ + unsigned int len; + struct tipc_msg_buf *mb; + bool need_wakeup = false; + struct tipc_virtio_dev *vds = txvq->vdev->priv; + + dev_dbg(&txvq->vdev->dev, "%s\n", __func__); + + /* detach all buffers */ + mutex_lock(&vds->lock); + while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) + need_wakeup |= _put_txbuf_locked(vds, mb); + mutex_unlock(&vds->lock); + + if (need_wakeup) { + /* wake up potential senders waiting for a tx buffer */ + wake_up_interruptible_all(&vds->sendq); + } +} + +static int tipc_virtio_probe(struct virtio_device *vdev) +{ + int err, i; + struct tipc_virtio_dev *vds; + struct tipc_dev_config config; + struct virtqueue *vqs[2]; + vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; + const char *vq_names[] = { "rx", "tx" }; + + err = trusty_detect_vmm(); + if (err < 0) { + dev_err(&vdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&vdev->dev, "%s:\n", __func__); + + vds = kzalloc(sizeof(*vds), GFP_KERNEL); + if (!vds) + return -ENOMEM; + + vds->vdev = vdev; + + mutex_init(&vds->lock); + kref_init(&vds->refcount); + init_waitqueue_head(&vds->sendq); + INIT_LIST_HEAD(&vds->free_buf_list); + idr_init(&vds->addr_idr); + + /* set default max message size and alignment */ + memset(&config, 0, sizeof(config)); + config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE; + config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN; + + /* get configuration if present */ + vdev->config->get(vdev, 0, &config, sizeof(config)); + + /* copy dev name */ + strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name)); + vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; + + /* find tx virtqueues (rx and tx and in this order) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL, NULL); +#else + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names); +#endif + if (err) + goto err_find_vqs; + + vds->rxvq = vqs[0]; + vds->txvq = vqs[1]; + + /* save max buffer size and count */ + vds->msg_buf_max_sz = config.msg_buf_max_size; + vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq); + + /* set up the receive buffers */ + for (i = 0; i < 
virtqueue_get_vring_size(vds->rxvq); i++) { + struct scatterlist sg; + struct tipc_msg_buf *rxbuf; + + rxbuf = _alloc_msg_buf(vds->msg_buf_max_sz); + if (!rxbuf) { + dev_err(&vdev->dev, "failed to allocate rx buffer\n"); + err = -ENOMEM; + goto err_free_rx_buffers; + } + + sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz); + err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); + WARN_ON(err); /* sanity check; this can't really happen */ + } + + vdev->priv = vds; + vds->state = VDS_OFFLINE; + + dev_dbg(&vdev->dev, "%s: done\n", __func__); + return 0; + +err_free_rx_buffers: + _cleanup_vq(vds->rxvq); +err_find_vqs: + kref_put(&vds->refcount, _free_vds); + return err; +} + +static void tipc_virtio_remove(struct virtio_device *vdev) +{ + struct tipc_virtio_dev *vds = vdev->priv; + + _go_offline(vds); + + mutex_lock(&vds->lock); + vds->state = VDS_DEAD; + vds->vdev = NULL; + mutex_unlock(&vds->lock); + + vdev->config->reset(vdev); + + idr_destroy(&vds->addr_idr); + + _cleanup_vq(vds->rxvq); + _cleanup_vq(vds->txvq); + _free_msg_buf_list(&vds->free_buf_list); + + vdev->config->del_vqs(vdev); + + kref_put(&vds->refcount, _free_vds); +} + +static struct virtio_device_id tipc_virtio_id_table[] = { + { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + 0, +}; + +static struct virtio_driver virtio_tipc_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = tipc_virtio_id_table, + .probe = tipc_virtio_probe, + .remove = tipc_virtio_remove, +}; + +static int __init tipc_init(void) +{ + int ret; + dev_t dev; + + ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME); + if (ret) { + pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret); + return ret; + } + + tipc_major = MAJOR(dev); + tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME); + if (IS_ERR(tipc_class)) { + ret = PTR_ERR(tipc_class); + pr_err("%s: class_create failed: %d\n", __func__, ret); + goto err_class_create; + } + + ret = register_virtio_driver(&virtio_tipc_driver); + if (ret) { + pr_err("failed to register virtio driver: %d\n", ret); + goto err_register_virtio_drv; + } + + return 0; + +err_register_virtio_drv: + class_destroy(tipc_class); + +err_class_create: + unregister_chrdev_region(dev, MAX_DEVICES); + return ret; +} + +static void __exit tipc_exit(void) +{ + unregister_virtio_driver(&virtio_tipc_driver); + class_destroy(tipc_class); + unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES); +} + +/* We need to init this early */ +subsys_initcall(tipc_init); +module_exit(tipc_exit); + +MODULE_DEVICE_TABLE(tipc, tipc_virtio_id_table); +MODULE_DESCRIPTION("Trusty IPC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c new file mode 100644 index 000000000000..af2af6ee37ba --- /dev/null +++ b/drivers/trusty/trusty-irq.c @@ -0,0 +1,670 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IRQ_VECTOR_OFFSET 0x30 +#define IRQ_FOR_LK_TIMER 1 + +struct trusty_irq { + struct trusty_irq_state *is; + struct hlist_node node; + unsigned int irq; + bool percpu; + bool enable; + struct trusty_irq __percpu *percpu_ptr; +}; + +struct trusty_irq_irqset { + struct hlist_head pending; + struct hlist_head inactive; +}; + +struct trusty_irq_state { + struct device *dev; + struct device *trusty_dev; + struct trusty_irq_irqset normal_irqs; + spinlock_t normal_irqs_lock; + struct trusty_irq_irqset __percpu *percpu_irqs; + struct notifier_block trusty_call_notifier; + /* CPU hotplug instances for online */ + struct hlist_node node; +}; + +static enum cpuhp_state trusty_irq_online; + +#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 +static inline void set_pending_intr_to_lk(uint8_t vector) +{ + __asm__ __volatile__( + "vmcall" + ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) + ); +} + +static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset, + bool percpu) +{ + struct hlist_node *n; + struct trusty_irq *trusty_irq; + + hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { + dev_dbg(is->dev, + "%s: enable pending irq %d, percpu %d, cpu %d\n", + __func__, trusty_irq->irq, percpu, smp_processor_id()); + if (percpu) + enable_percpu_irq(trusty_irq->irq, 0); + else + enable_irq(trusty_irq->irq); + hlist_del(&trusty_irq->node); + hlist_add_head(&trusty_irq->node, &irqset->inactive); + } +} + +static void trusty_irq_enable_irqset(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset) +{ + struct trusty_irq *trusty_irq; + + hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { + if (trusty_irq->enable) { + dev_warn(is->dev, + "%s: percpu irq %d already enabled, cpu %d\n", + __func__, trusty_irq->irq, smp_processor_id()); + continue; + } + dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n", + __func__, trusty_irq->irq, smp_processor_id()); + enable_percpu_irq(trusty_irq->irq, 0); + trusty_irq->enable = true; + } +} + +static void trusty_irq_disable_irqset(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset) +{ + struct hlist_node *n; + struct trusty_irq *trusty_irq; + + hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { + if (!trusty_irq->enable) { + dev_warn(is->dev, + "irq %d already disabled, percpu %d, cpu %d\n", + trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + continue; + } + dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n", + __func__, trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + trusty_irq->enable = false; + if (trusty_irq->percpu) + disable_percpu_irq(trusty_irq->irq); + else + disable_irq_nosync(trusty_irq->irq); + } + hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { + if (!trusty_irq->enable) { + dev_warn(is->dev, + "pending irq %d already disabled, percpu %d, cpu %d\n", + trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + } + dev_dbg(is->dev, + "%s: disable pending irq %d, percpu %d, cpu %d\n", + __func__, trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + trusty_irq->enable = false; + hlist_del(&trusty_irq->node); + hlist_add_head(&trusty_irq->node, &irqset->inactive); + } +} + +static int trusty_irq_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_irq_state *is; + + 
BUG_ON(!irqs_disabled()); + + if (action != TRUSTY_CALL_PREPARE) + return NOTIFY_DONE; + + is = container_of(nb, struct trusty_irq_state, trusty_call_notifier); + + spin_lock(&is->normal_irqs_lock); + trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false); + spin_unlock(&is->normal_irqs_lock); + trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true); + + return NOTIFY_OK; +} + +irqreturn_t trusty_irq_handler(int irq, void *data) +{ + struct trusty_irq *trusty_irq = data; + struct trusty_irq_state *is = trusty_irq->is; + struct trusty_irq_irqset *irqset; + + dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n", + __func__, irq, trusty_irq->irq, smp_processor_id(), + trusty_irq->enable); + + WARN_ON(irq != IRQ_FOR_LK_TIMER); + + set_pending_intr_to_lk(irq+IRQ_VECTOR_OFFSET); + + if (trusty_irq->percpu) { + disable_percpu_irq(irq); + irqset = this_cpu_ptr(is->percpu_irqs); + } else { + disable_irq_nosync(irq); + irqset = &is->normal_irqs; + } + + spin_lock(&is->normal_irqs_lock); + if (trusty_irq->enable) { + hlist_del(&trusty_irq->node); + hlist_add_head(&trusty_irq->node, &irqset->pending); + } + spin_unlock(&is->normal_irqs_lock); + + trusty_enqueue_nop(is->trusty_dev, NULL); + + dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq); + + return IRQ_HANDLED; +} + +static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node) +{ + unsigned long irq_flags; + struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + + if(is == NULL) + return 0; + + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); + trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs)); + local_irq_restore(irq_flags); + return 0; +} + +static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node) +{ + unsigned long irq_flags; + struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + + if(is == NULL) + return 0; + + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); + trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs)); + local_irq_restore(irq_flags); + return 0; +} + +static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq) +{ + int ret; + int index; + u32 irq_pos; + u32 templ_idx; + u32 range_base; + u32 range_end; + struct of_phandle_args oirq; + + /* check if "interrupt-ranges" property is present */ + if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) { + /* fallback to old behavior to be backward compatible with + * systems that do not need IRQ domains. + */ + return irq; + } + + /* find irq range */ + for (index = 0;; index += 3) { + ret = of_property_read_u32_index(is->dev->of_node, + "interrupt-ranges", + index, &range_base); + if (ret) + return ret; + + ret = of_property_read_u32_index(is->dev->of_node, + "interrupt-ranges", + index + 1, &range_end); + if (ret) + return ret; + + if (irq >= range_base && irq <= range_end) + break; + } + + /* read the rest of range entry: template index and irq_pos */ + ret = of_property_read_u32_index(is->dev->of_node, + "interrupt-ranges", + index + 2, &templ_idx); + if (ret) + return ret; + + /* read irq template */ + ret = of_parse_phandle_with_args(is->dev->of_node, + "interrupt-templates", + "#interrupt-cells", + templ_idx, &oirq); + if (ret) + return ret; + + WARN_ON(!oirq.np); + WARN_ON(!oirq.args_count); + + /* + * An IRQ template is a non empty array of u32 values describing group + * of interrupts having common properties. 
The u32 entry with index + * zero contains the position of irq_id in interrupt specifier array + * followed by data representing interrupt specifier array with irq id + * field omitted, so to convert irq template to interrupt specifier + * array we have to move down one slot the first irq_pos entries and + * replace the resulting gap with real irq id. + */ + irq_pos = oirq.args[0]; + + if (irq_pos >= oirq.args_count) { + dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos); + return -EINVAL; + } + + for (index = 1; index <= irq_pos; index++) + oirq.args[index - 1] = oirq.args[index]; + + oirq.args[irq_pos] = irq - range_base; + + ret = irq_create_of_mapping(&oirq); + + return (!ret) ? -EINVAL : ret; +} + +static inline void trusty_irq_unmask(struct irq_data *data) +{ + return; +} + +static inline void trusty_irq_mask(struct irq_data *data) +{ + return; +} + +static void trusty_irq_enable(struct irq_data *data) +{ + return; +} + +static void trusty_irq_disable(struct irq_data *data) +{ + return; +} + +void trusty_irq_eoi(struct irq_data *data) +{ + return; +} +static struct irq_chip trusty_irq_chip = { + .name = "TRUSY-IRQ", + .irq_mask = trusty_irq_mask, + .irq_unmask = trusty_irq_unmask, + .irq_enable = trusty_irq_enable, + .irq_disable = trusty_irq_disable, + .irq_eoi = trusty_irq_eoi, +}; + +static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq) +{ + int ret; + int irq; + unsigned long irq_flags; + struct trusty_irq *trusty_irq; + + dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); + + irq = tirq; + + trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL); + if (!trusty_irq) + return -ENOMEM; + + trusty_irq->is = is; + trusty_irq->irq = irq; + trusty_irq->enable = true; + + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + + ret = irq_alloc_desc_at(irq, 0); + if (ret >= 0) + irq_set_chip_and_handler_name(irq, &trusty_irq_chip, handle_edge_irq, "trusty-irq"); + else if (ret != -EEXIST) { + dev_err(is->dev, "can't allocate irq desc %d\n", ret); + goto err_request_irq; + } + + ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD, + "trusty-irq", trusty_irq); + + if (ret) { + dev_err(is->dev, "request_irq failed %d\n", ret); + goto err_request_irq; + } + return 0; + +err_request_irq: + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + hlist_del(&trusty_irq->node); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + kfree(trusty_irq); + return ret; +} + +static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq) +{ + int ret; + int irq; + unsigned int cpu; + struct trusty_irq __percpu *trusty_irq_handler_data; + + dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); + + irq = trusty_irq_create_irq_mapping(is, tirq); + if (irq <= 0) { + dev_err(is->dev, + "trusty_irq_create_irq_mapping failed (%d)\n", irq); + return irq; + } + + trusty_irq_handler_data = alloc_percpu(struct trusty_irq); + if (!trusty_irq_handler_data) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + struct trusty_irq *trusty_irq; + struct trusty_irq_irqset *irqset; + + if (cpu >= NR_CPUS) + return -EINVAL; + trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); + irqset = per_cpu_ptr(is->percpu_irqs, cpu); + + trusty_irq->is = is; + hlist_add_head(&trusty_irq->node, &irqset->inactive); + trusty_irq->irq = irq; + trusty_irq->percpu = true; + trusty_irq->percpu_ptr = trusty_irq_handler_data; + } + + ret = request_percpu_irq(irq, 
trusty_irq_handler, "trusty", + trusty_irq_handler_data); + if (ret) { + dev_err(is->dev, "request_percpu_irq failed %d\n", ret); + goto err_request_percpu_irq; + } + + return 0; + +err_request_percpu_irq: + for_each_possible_cpu(cpu) { + struct trusty_irq *trusty_irq; + + if (cpu >= NR_CPUS) + return -EINVAL; + trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); + hlist_del(&trusty_irq->node); + } + + free_percpu(trusty_irq_handler_data); + return ret; +} + +static int trusty_smc_get_next_irq(struct trusty_irq_state *is, + unsigned long min_irq, bool per_cpu) +{ + return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ, + min_irq, per_cpu, 0); +} + +static int trusty_irq_init_one(struct trusty_irq_state *is, + int irq, bool per_cpu) +{ + int ret; + + irq = trusty_smc_get_next_irq(is, irq, per_cpu); + if (irq < 0) + return irq; + dev_info(is->dev, "irq from lk = %d\n", irq); + + WARN_ON(irq-IRQ_VECTOR_OFFSET != IRQ_FOR_LK_TIMER); + + if (per_cpu) + ret = trusty_irq_init_per_cpu_irq(is, irq-IRQ_VECTOR_OFFSET); + else + ret = trusty_irq_init_normal_irq(is, irq-IRQ_VECTOR_OFFSET); + + if (ret) { + dev_warn(is->dev, + "failed to initialize irq %d, irq will be ignored\n", + irq); + } + + return irq + 1; +} + +static void trusty_irq_free_irqs(struct trusty_irq_state *is) +{ + struct trusty_irq *irq; + struct hlist_node *n; + + hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) { + dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq); + free_irq(irq->irq, irq); + hlist_del(&irq->node); + kfree(irq); + } +/* + hlist_for_each_entry_safe(irq, n, + &this_cpu_ptr(is->percpu_irqs)->inactive, + node) { + struct trusty_irq __percpu *trusty_irq_handler_data; + + dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq); + trusty_irq_handler_data = irq->percpu_ptr; + free_percpu_irq(irq->irq, trusty_irq_handler_data); + for_each_possible_cpu(cpu) { + struct trusty_irq *irq_tmp; + + irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu); + hlist_del(&irq_tmp->node); + } + free_percpu(trusty_irq_handler_data); + } */ +} + +static int trusty_irq_cpu_notif_add(struct trusty_irq_state *is) +{ + int ret; + + ret = cpuhp_state_add_instance(trusty_irq_online, &is->node); + + return ret; +} + +static void trusty_irq_cpu_notif_remove(struct trusty_irq_state *is) +{ + cpuhp_state_remove_instance(trusty_irq_online, &is->node); +} + +static int trusty_irq_probe(struct platform_device *pdev) +{ + int ret; + int irq; + unsigned long irq_flags; + struct trusty_irq_state *is; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "%s\n", __func__); + + is = kzalloc(sizeof(*is), GFP_KERNEL); + if (!is) { + ret = -ENOMEM; + goto err_alloc_is; + } + + is->dev = &pdev->dev; + is->trusty_dev = is->dev->parent; + spin_lock_init(&is->normal_irqs_lock); + is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset); + if (!is->percpu_irqs) { + ret = -ENOMEM; + goto err_alloc_pending_percpu_irqs; + } + + platform_set_drvdata(pdev, is); + + is->trusty_call_notifier.notifier_call = trusty_irq_call_notify; + ret = trusty_call_notifier_register(is->trusty_dev, + &is->trusty_call_notifier); + if (ret) { + dev_err(&pdev->dev, + "failed to register trusty call notifier\n"); + goto err_trusty_call_notifier_register; + } + + for (irq = 0; irq >= 0;) + irq = trusty_irq_init_one(is, irq, false); + + ret = trusty_irq_cpu_notif_add(is); + if (ret) { + dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); + goto 
err_register_hotcpu_notifier; + } + + return 0; + +err_register_hotcpu_notifier: + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + trusty_irq_disable_irqset(is, &is->normal_irqs); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + trusty_irq_free_irqs(is); + trusty_call_notifier_unregister(is->trusty_dev, + &is->trusty_call_notifier); +err_trusty_call_notifier_register: + free_percpu(is->percpu_irqs); +err_alloc_pending_percpu_irqs: + kfree(is); +err_alloc_is: + return ret; +} + +static int trusty_irq_remove(struct platform_device *pdev) +{ + unsigned long irq_flags; + struct trusty_irq_state *is = platform_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s\n", __func__); + + trusty_irq_cpu_notif_remove(is); + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + trusty_irq_disable_irqset(is, &is->normal_irqs); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + + trusty_irq_free_irqs(is); + + trusty_call_notifier_unregister(is->trusty_dev, + &is->trusty_call_notifier); + free_percpu(is->percpu_irqs); + kfree(is); + + return 0; +} + +static const struct of_device_id trusty_test_of_match[] = { + { .compatible = "android,trusty-irq-v1", }, + {}, +}; + +static struct platform_driver trusty_irq_driver = { + .probe = trusty_irq_probe, + .remove = trusty_irq_remove, + .driver = { + .name = "trusty-irq", + .owner = THIS_MODULE, + .of_match_table = trusty_test_of_match, + }, +}; + +static int __init trusty_irq_driver_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "x86/trustyirq:online", + trusty_irq_cpu_up, trusty_irq_cpu_down); + if (ret < 0) + goto out; + trusty_irq_online = ret; + + ret = platform_driver_register(&trusty_irq_driver); + if (ret) + goto err_dead; + + return 0; +err_dead: + cpuhp_remove_multi_state(trusty_irq_online); +out: + return ret; +} + +static void __exit trusty_irq_driver_exit(void) +{ + cpuhp_remove_multi_state(trusty_irq_online); + platform_driver_unregister(&trusty_irq_driver); +} + +module_init(trusty_irq_driver_init); +module_exit(trusty_irq_driver_exit); + +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c new file mode 100755 index 000000000000..d2446a1f34c9 --- /dev/null +++ b/drivers/trusty/trusty-log.c @@ -0,0 +1,420 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trusty-log.h" + +#define TRUSTY_LOG_SIZE (PAGE_SIZE * 2) +#define TRUSTY_LINE_BUFFER_SIZE 256 + +#ifdef CONFIG_64BIT +static uint64_t g_vmm_debug_buf; +#else +static uint32_t g_vmm_debug_buf; +#endif + +struct trusty_log_state { + struct device *dev; + struct device *trusty_dev; + + /* + * This lock is here to ensure only one consumer will read + * from the log ring buffer at a time. 
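+ * Only the consumer-side read position ('get' below) is protected by + * it; the secure-side producer advances 'put' and 'alloc' without + * taking any lock.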
+ */ + spinlock_t lock; + struct log_rb *log; + uint32_t get; + + struct page *log_pages; + + struct notifier_block call_notifier; + struct notifier_block panic_notifier; + char line_buffer[TRUSTY_LINE_BUFFER_SIZE]; +}; + +static int log_read_line(struct trusty_log_state *s, int put, int get) +{ + struct log_rb *log = s->log; + int i; + char c = '\0'; + size_t max_to_read = min((size_t)(put - get), + sizeof(s->line_buffer) - 1); + size_t mask = log->sz - 1; + + for (i = 0; i < max_to_read && c != '\n';) + s->line_buffer[i++] = c = log->data[get++ & mask]; + s->line_buffer[i] = '\0'; + + return i; +} + +static void trusty_dump_logs(struct trusty_log_state *s, bool dump_panic_log) +{ + struct log_rb *log = s->log; + uint32_t get, put, alloc; + int read_chars; + + BUG_ON(!is_power_of_2(log->sz)); + + /* + * For this ring buffer, at any given point, alloc >= put >= get. + * The producer side of the buffer is not locked, so the put and alloc + * pointers must be read in a defined order (put before alloc) so + * that the above condition is maintained. A read barrier is needed + * to make sure the hardware and compiler keep the reads ordered. + */ + get = s->get; + while ((put = log->put) != get) { + /* Make sure that the read of put occurs before the read of log data */ + rmb(); + + /* Read a line from the log */ + read_chars = log_read_line(s, put, get); + + /* Force the loads from log_read_line to complete. */ + rmb(); + alloc = log->alloc; + + /* + * Discard the line that was just read if the data could + * have been corrupted by the producer. + */ + if (alloc - get > log->sz) { + pr_err("trusty: log overflow."); + get = alloc - log->sz; + continue; + } + + if (dump_panic_log) + pr_info("trusty: %s", s->line_buffer); + + get += read_chars; + } + s->get = get; +} + +static int trusty_log_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_log_state *s; + unsigned long flags; + + if (action != TRUSTY_CALL_RETURNED) + return NOTIFY_DONE; + + s = container_of(nb, struct trusty_log_state, call_notifier); + spin_lock_irqsave(&s->lock, flags); +#ifdef CONFIG_DEBUG_INFO + trusty_dump_logs(s, true); +#else + trusty_dump_logs(s, false); +#endif + spin_unlock_irqrestore(&s->lock, flags); + return NOTIFY_OK; +} + +static int trusty_log_panic_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_log_state *s; + + /* + * Don't grab the spin lock to hold up the panic notifier, even + * though this is racy. 
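+ * A racing consumer can at worst duplicate or drop a few log lines in + * the panic output, which is preferable to blocking the panic path on + * a lock that may never be released.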
+ */ + s = container_of(nb, struct trusty_log_state, panic_notifier); + pr_info("trusty-log panic notifier - trusty version %s", + trusty_version_str_get(s->trusty_dev)); + trusty_dump_logs(s, true); + return NOTIFY_OK; +} + +static void trusty_vmm_dump_header(struct deadloop_dump *dump) +{ + struct dump_header *header; + + if (!dump) + return; + + header = &(dump->header); + pr_info("-----------VMM PANIC HEADER-----------\n"); + pr_info("VMM version = %s\n", header->vmm_version); + pr_info("Signature = %s\n", header->signature); + pr_info("Error_info = %s\n", header->error_info); + pr_info("Cpuid = %d\n", header->cpuid); + pr_info("-----------END OF VMM PANIC HEADER-----------\n"); +} + +static void trusty_vmm_dump_data(struct deadloop_dump *dump) +{ + struct dump_data *dump_data; + char *p, *pstr; + + if (!dump) + return; + + dump_data = &(dump->data); + + pr_info("-----------VMM PANIC DATA INFO-----------\n"); + pstr = (char *)dump_data->data; + for (p = pstr; p < ((char *)dump_data->data + dump_data->length); p++) { + if (*p == '\r') { + *p = 0x00; + } else if (*p == '\n') { + *p = 0x00; + pr_info("%s\n", pstr); + pstr = (char *)(p + 1); + } + } + /* dump the characters in the last line */ + if ((pstr - (char *)(dump_data->data)) < dump_data->length) { + *p = 0x00; + pr_info("%s\n", pstr); + } + pr_info("-----------END OF VMM PANIC DATA INFO-----------\n"); +} + +static int trusty_vmm_panic_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct deadloop_dump *dump_info = NULL; + + if (g_vmm_debug_buf) { + dump_info = (struct deadloop_dump *)g_vmm_debug_buf; + + if (dump_info->is_valid) { + pr_info("trusty-vmm panic start!\n"); + trusty_vmm_dump_header(dump_info); + trusty_vmm_dump_data(dump_info); + pr_info("trusty-vmm panic dump end!\n"); + } + } + + return NOTIFY_OK; +} + +static struct notifier_block trusty_vmm_panic_nb = { + .notifier_call = trusty_vmm_panic_notify, + .priority = 0, +}; + +#define TRUSTY_VMCALL_DUMP_INIT 0x74727507 +static int trusty_vmm_dump_init(void *gva) +{ + int ret = -1; + + __asm__ __volatile__( + "vmcall" + : "=a"(ret) + : "a"(TRUSTY_VMCALL_DUMP_INIT), "D"(gva) + ); + + return ret; +} + +static bool trusty_supports_logging(struct device *device) +{ + int result; + + result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION, + TRUSTY_LOG_API_VERSION, 0, 0); + if (result == SM_ERR_UNDEFINED_SMC) { + pr_info("trusty-log not supported on secure side.\n"); + return false; + } else if (result < 0) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n", + result); + return false; + } + + if (result == TRUSTY_LOG_API_VERSION) { + return true; + } else { + pr_info("trusty-log unsupported api version: %d, supported: %d\n", + result, TRUSTY_LOG_API_VERSION); + return false; + } +} + +static int trusty_log_probe(struct platform_device *pdev) +{ + struct trusty_log_state *s; + int result; + int vmm_id; + phys_addr_t pa; + struct deadloop_dump *dump; + + vmm_id = trusty_detect_vmm(); + if (vmm_id < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "%s\n", __func__); + if (!trusty_supports_logging(pdev->dev.parent)) { + return -ENXIO; + } + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + result = -ENOMEM; + goto error_alloc_state; + } + + spin_lock_init(&s->lock); + s->dev = &pdev->dev; + s->trusty_dev = s->dev->parent; + s->get = 0; + s->log_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(TRUSTY_LOG_SIZE)); + if (!s->log_pages) { + result = 
-ENOMEM; + goto error_alloc_log; + } + s->log = page_address(s->log_pages); + + pa = page_to_phys(s->log_pages); + result = trusty_std_call32(s->trusty_dev, + SMC_SC_SHARED_LOG_ADD, + (u32)(pa), (u32)HIULINT(pa), + TRUSTY_LOG_SIZE); + if (result < 0) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d %pa\n", + result, &pa); + goto error_std_call; + } + + s->call_notifier.notifier_call = trusty_log_call_notify; + result = trusty_call_notifier_register(s->trusty_dev, + &s->call_notifier); + if (result < 0) { + dev_err(&pdev->dev, + "failed to register trusty call notifier\n"); + goto error_call_notifier; + } + + s->panic_notifier.notifier_call = trusty_log_panic_notify; + result = atomic_notifier_chain_register(&panic_notifier_list, + &s->panic_notifier); + if (result < 0) { + dev_err(&pdev->dev, + "failed to register panic notifier\n"); + goto error_panic_notifier; + } + + if(vmm_id == VMM_ID_EVMM) { + /* allocate debug buffer for vmm panic dump */ + g_vmm_debug_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); + if (!g_vmm_debug_buf) { + result = -ENOMEM; + goto error_alloc_vmm; + } + + dump = (struct deadloop_dump *)g_vmm_debug_buf; + dump->version_of_this_struct = VMM_DUMP_VERSION; + dump->size_of_this_struct = sizeof(struct deadloop_dump); + dump->is_valid = false; + + /* shared the buffer to vmm by VMCALL */ + result = trusty_vmm_dump_init(dump); + if (result < 0) { + dev_err(&pdev->dev, + "failed to share the dump buffer to VMM\n"); + goto error_vmm_panic_notifier; + } + + /* register the panic notifier for vmm */ + result = atomic_notifier_chain_register(&panic_notifier_list, + &trusty_vmm_panic_nb); + if (result < 0) { + dev_err(&pdev->dev, + "failed to register vmm panic notifier\n"); + goto error_vmm_panic_notifier; + } + } + + platform_set_drvdata(pdev, s); + + return 0; + +error_vmm_panic_notifier: + free_page(g_vmm_debug_buf); +error_alloc_vmm: + atomic_notifier_chain_unregister(&panic_notifier_list, + &s->panic_notifier); +error_panic_notifier: + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); +error_call_notifier: + trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, + (u32)pa, (u32)HIULINT(pa), 0); +error_std_call: + __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); +error_alloc_log: + kfree(s); +error_alloc_state: + return result; +} + +static int trusty_log_remove(struct platform_device *pdev) +{ + int result; + struct trusty_log_state *s = platform_get_drvdata(pdev); + phys_addr_t pa = page_to_phys(s->log_pages); + + dev_dbg(&pdev->dev, "%s\n", __func__); + + atomic_notifier_chain_unregister(&panic_notifier_list, + &trusty_vmm_panic_nb); + atomic_notifier_chain_unregister(&panic_notifier_list, + &s->panic_notifier); + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + + result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, + (u32)pa, (u32)HIULINT(pa), 0); + if (result) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n", + result); + } + __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); + kfree(s); + free_page(g_vmm_debug_buf); + + return 0; +} + +static const struct of_device_id trusty_test_of_match[] = { + { .compatible = "android,trusty-log-v1", }, + {}, +}; + +static struct platform_driver trusty_log_driver = { + .probe = trusty_log_probe, + .remove = trusty_log_remove, + .driver = { + .name = "trusty-log", + .owner = THIS_MODULE, + .of_match_table = trusty_test_of_match, + }, +}; + +module_platform_driver(trusty_log_driver); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h new file mode 100644 index 000000000000..587bc7aaa145 --- /dev/null +++ b/drivers/trusty/trusty-log.h @@ -0,0 +1,44 @@ +#ifndef _TRUSTY_LOG_H_ +#define _TRUSTY_LOG_H_ + +/* + * Ring buffer that supports one secure producer thread and one + * linux side consumer thread. + */ +struct log_rb { + volatile uint32_t alloc; + volatile uint32_t put; + uint32_t sz; + volatile char data[0]; +} __packed; + +#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0) +#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1) +#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2) + +#define TRUSTY_LOG_API_VERSION 1 + +#define VMM_DUMP_VERSION 1 + +struct dump_data { + uint32_t length; + uint8_t data[0]; +} __packed; + +struct dump_header { + uint8_t vmm_version[64]; /* version of the vmm */ + uint8_t signature[16]; /* signature for the dump structure */ + uint8_t error_info[32]; /* filename:linenum */ + uint16_t cpuid; +} __packed; + +struct deadloop_dump { + uint16_t size_of_this_struct; + uint16_t version_of_this_struct; + uint32_t is_valid; + struct dump_header header; + struct dump_data data; +} __packed; + +#endif + diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c new file mode 100644 index 000000000000..470df8823d3a --- /dev/null +++ b/drivers/trusty/trusty-mem.c @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include + +/* Normal memory */ +#define NS_MAIR_NORMAL_CACHED_WB_RWA 0xFF /* inner and outer write back read/write allocate */ +#define NS_MAIR_NORMAL_CACHED_WT_RA 0xAA /* inner and outer write through read allocate */ +#define NS_MAIR_NORMAL_CACHED_WB_RA 0xEE /* inner and outer write back, read allocate */ +#define NS_MAIR_NORMAL_UNCACHED 0x44 /* uncached */ + +static int get_mem_attr(struct page *page, pgprot_t pgprot) +{ +#if defined(CONFIG_ARM64) + uint64_t mair; + uint attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2; + + asm ("mrs %0, mair_el1\n" : "=&r" (mair)); + return (mair >> (attr_index * 8)) & 0xff; + +#elif defined(CONFIG_ARM_LPAE) + uint32_t mair; + uint attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2); + + if (attr_index >= 4) { + attr_index -= 4; + asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair)); + } else { + asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair)); + } + return (mair >> (attr_index * 8)) & 0xff; + +#elif defined(CONFIG_ARM) + /* check memory type */ + switch (pgprot_val(pgprot) & L_PTE_MT_MASK) { + case L_PTE_MT_WRITEALLOC: + /* Normal: write back write allocate */ + return 0xFF; + + case L_PTE_MT_BUFFERABLE: + /* Normal: non-cacheable */ + return 0x44; + + case L_PTE_MT_WRITEBACK: + /* Normal: writeback, read allocate */ + return 0xEE; + + case L_PTE_MT_WRITETHROUGH: + /* Normal: write through */ + return 0xAA; + + case L_PTE_MT_UNCACHED: + /* strongly ordered */ + return 0x00; + + case L_PTE_MT_DEV_SHARED: + case L_PTE_MT_DEV_NONSHARED: + /* device */ + return 0x04; + + default: + return -EINVAL; + } +#elif defined(CONFIG_X86) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + /* The porting to CHT kernel (3.14.55) is in the #else clause. + ** For BXT kernel (4.1.0), the function get_page_memtype() is static. + ** + ** The original google code (for arm) gets the cache states and page + ** flags from the input parameter "pgprot", which is not preferred on x86. + ** On x86, both cache states and page flags should be taken from the input + ** parameter "page". But, since the current caller of trusty_call32_mem_buf() + ** always allocates memory in the kernel heap, it is also ok to hardcode + ** the value here. + ** + ** The memory allocated in the kernel heap should be CACHED. The reason to + ** return UNCACHED here is to pass the check in LK sm_decode_ns_memory_attr() + ** with SMP, which only allows UNCACHED.
+ */ + return NS_MAIR_NORMAL_UNCACHED; +#else + unsigned long type; + int ret_mem_attr = 0; + + type = get_page_memtype(page); + /* + * -1 from get_page_memtype() implies RAM page is in its + * default state and not reserved, and hence of type WB + */ + if (type == -1) { + type = _PAGE_CACHE_MODE_WB; + } + switch (type) { + case _PAGE_CACHE_MODE_UC_MINUS: + /* uncacheable */ + ret_mem_attr = NS_MAIR_NORMAL_UNCACHED; + break; + case _PAGE_CACHE_MODE_WB: + /* writeback */ + ret_mem_attr = NS_MAIR_NORMAL_CACHED_WB_RWA; + break; + case _PAGE_CACHE_MODE_WC: + /* write combined */ + ret_mem_attr = NS_MAIR_NORMAL_UNCACHED; + break; + + default: + printk(KERN_ERR "%s(): invalid type: 0x%x\n", __func__, type); + ret_mem_attr = -EINVAL; + } + return ret_mem_attr; +#endif +#else + return 0; +#endif +} + +int trusty_encode_page_info(struct ns_mem_page_info *inf, + struct page *page, pgprot_t pgprot) +{ + int mem_attr; + uint64_t pte; + + if (!inf || !page) + return -EINVAL; + + /* get physical address */ + pte = (uint64_t) page_to_phys(page); + + /* get memory attributes */ + mem_attr = get_mem_attr(page, pgprot); + if (mem_attr < 0) + return mem_attr; + + /* add other attributes */ +#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) + pte |= pgprot_val(pgprot); +#elif defined(CONFIG_ARM) + if (pgprot_val(pgprot) & L_PTE_USER) + pte |= (1 << 6); + if (pgprot_val(pgprot) & L_PTE_RDONLY) + pte |= (1 << 7); + if (pgprot_val(pgprot) & L_PTE_SHARED) + pte |= (3 << 8); /* inner sharable */ +#elif defined(CONFIG_X86) + if (pgprot_val(pgprot) & _PAGE_USER) + pte |= (1 << 6); + if (!(pgprot_val(pgprot) & _PAGE_RW)) + pte |= (1 << 7); +#endif + + inf->attr = (pte & 0x0000FFFFFFFFFFFFull) | ((uint64_t)mem_attr << 48); + return 0; +} + +int trusty_call32_mem_buf(struct device *dev, u32 smcnr, + struct page *page, u32 size, + pgprot_t pgprot) +{ + int ret; + struct ns_mem_page_info pg_inf; + + if (!dev || !page) + return -EINVAL; + + ret = trusty_encode_page_info(&pg_inf, page, pgprot); + if (ret) + return ret; + + if (SMC_IS_FASTCALL(smcnr)) { + return trusty_fast_call32(dev, smcnr, + (u32)pg_inf.attr, + (u32)(pg_inf.attr >> 32), size); + } else { + return trusty_std_call32(dev, smcnr, + (u32)pg_inf.attr, + (u32)(pg_inf.attr >> 32), size); + } +} +EXPORT_SYMBOL(trusty_call32_mem_buf); +MODULE_LICENSE("GPL"); diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c new file mode 100644 index 000000000000..18e315c25067 --- /dev/null +++ b/drivers/trusty/trusty-timer.c @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2017 Intel, Inc. + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +struct trusty_timer { + struct sec_timer_state *sts; + struct hrtimer tm; + struct work_struct work; +}; + +struct trusty_timer_dev_state { + struct device *dev; + struct device *smwall_dev; + struct device *trusty_dev; + struct notifier_block call_notifier; + struct trusty_timer timer; + struct workqueue_struct *workqueue; +}; + +/* Max entity defined as SMC_NUM_ENTITIES(64) */ +#define SMC_ENTITY_SMC_X86 63 /* Used for customized SMC calls */ + +#define SMC_SC_LK_TIMER SMC_STDCALL_NR(SMC_ENTITY_SMC_X86, 0) + +static void timer_work_func(struct work_struct *work) +{ + int ret; + struct trusty_timer_dev_state *s; + + s = container_of(work, struct trusty_timer_dev_state, timer.work); + + ret = trusty_std_call32(s->trusty_dev, SMC_SC_LK_TIMER, 0, 0, 0); + if (ret != 0) + dev_err(s->dev, "%s failed %d\n", __func__, ret); +} + +static enum hrtimer_restart trusty_timer_cb(struct hrtimer *tm) +{ + struct trusty_timer_dev_state *s; + + s = container_of(tm, struct trusty_timer_dev_state, timer.tm); + + queue_work(s->workqueue, &s->timer.work); + + return HRTIMER_NORESTART; +} + +static int trusty_timer_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_timer *tt; + struct sec_timer_state *sts; + struct trusty_timer_dev_state *s; + + if (action != TRUSTY_CALL_RETURNED) + return NOTIFY_DONE; + + s = container_of(nb, struct trusty_timer_dev_state, call_notifier); + + /* this notifier is executed in non-preemptible context */ + tt = &s->timer; + sts = tt->sts; + + if (sts->tv_ns > sts->cv_ns) { + hrtimer_cancel(&tt->tm); + } else if (sts->cv_ns > sts->tv_ns) { + /* need to set/reset timer */ + hrtimer_start(&tt->tm, ns_to_ktime(sts->cv_ns - sts->tv_ns), + HRTIMER_MODE_REL_PINNED); + } + + sts->cv_ns = 0ULL; + sts->tv_ns = 0ULL; + + return NOTIFY_OK; +} + +static int trusty_timer_probe(struct platform_device *pdev) +{ + int ret; + struct trusty_timer_dev_state *s; + struct trusty_timer *tt; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "%s\n", __func__); + + if (!trusty_wall_base(pdev->dev.parent)) { + dev_notice(&pdev->dev, "smwall: is not setup by parent\n"); + return -ENODEV; + } + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->dev = &pdev->dev; + s->smwall_dev = s->dev->parent; + s->trusty_dev = s->smwall_dev->parent; + platform_set_drvdata(pdev, s); + + tt = &s->timer; + + hrtimer_init(&tt->tm, CLOCK_BOOTTIME, HRTIMER_MODE_REL_PINNED); + tt->tm.function = trusty_timer_cb; + tt->sts = + trusty_wall_per_cpu_item_ptr(s->smwall_dev, 0, + SM_WALL_PER_CPU_SEC_TIMER_ID, + sizeof(*tt->sts)); + WARN_ON(!tt->sts); + + s->workqueue = alloc_workqueue("trusty-timer-wq", WQ_CPU_INTENSIVE, 0); + if (!s->workqueue) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed to allocate work queue\n"); + goto err_allocate_work_queue; + } + + /* register notifier */ + s->call_notifier.notifier_call = trusty_timer_call_notify; + ret = trusty_call_notifier_register(s->trusty_dev, &s->call_notifier); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register call notifier\n"); + kfree(s); + return ret; + } + + INIT_WORK(&s->timer.work, timer_work_func); + + dev_info(s->dev, "initialized\n"); + + return 0; + + destroy_workqueue(s->workqueue); +err_allocate_work_queue: + kfree(s); + return ret; + +} + +static int trusty_timer_remove(struct platform_device 
*pdev) +{ + struct trusty_timer_dev_state *s = platform_get_drvdata(pdev); + struct trusty_timer *tt; + + + dev_dbg(&pdev->dev, "%s\n", __func__); + + /* unregister notifier */ + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + + tt = &s->timer; + hrtimer_cancel(&tt->tm); + + flush_work(&tt->work); + destroy_workqueue(s->workqueue); + /* free state */ + kfree(s); + return 0; +} + +static const struct of_device_id trusty_test_of_match[] = { + { .compatible = "android,trusty-timer-v1", }, + {}, +}; + +static struct platform_driver trusty_timer_driver = { + .probe = trusty_timer_probe, + .remove = trusty_timer_remove, + .driver = { + .name = "trusty-timer", + .owner = THIS_MODULE, + .of_match_table = trusty_test_of_match, + }, +}; + +module_platform_driver(trusty_timer_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Trusty timer driver"); diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c new file mode 100644 index 000000000000..66b4ee7caf0d --- /dev/null +++ b/drivers/trusty/trusty-virtio.c @@ -0,0 +1,765 @@ +/* + * Trusty Virtio driver + * + * Copyright (C) 2015 Google, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define RSC_DESCR_VER 1 + +struct trusty_vdev; + +struct trusty_ctx { + struct device *dev; + void *shared_va; + size_t shared_sz; + struct work_struct check_vqs; + struct work_struct kick_vqs; + struct notifier_block call_notifier; + struct list_head vdev_list; + struct mutex mlock; /* protects vdev_list */ + struct workqueue_struct *kick_wq; + struct workqueue_struct *check_wq; +}; + +struct trusty_vring { + void *vaddr; + phys_addr_t paddr; + size_t size; + uint align; + uint elem_num; + u32 notifyid; + atomic_t needs_kick; + struct fw_rsc_vdev_vring *vr_descr; + struct virtqueue *vq; + struct trusty_vdev *tvdev; + struct trusty_nop kick_nop; +}; + +struct trusty_vdev { + struct list_head node; + struct virtio_device vdev; + struct trusty_ctx *tctx; + u32 notifyid; + uint config_len; + void *config; + struct fw_rsc_vdev *vdev_descr; + uint vring_num; + struct trusty_vring vrings[0]; +}; + +#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev) + +static void check_all_vqs(struct work_struct *work) +{ + uint i; + struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, + check_vqs); + struct trusty_vdev *tvdev; + + list_for_each_entry(tvdev, &tctx->vdev_list, node) { + for (i = 0; i < tvdev->vring_num; i++) + vring_interrupt(0, tvdev->vrings[i].vq); + } +} + +static int trusty_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_ctx *tctx; + + if (action != TRUSTY_CALL_RETURNED) + return NOTIFY_DONE; + + tctx = container_of(nb, struct trusty_ctx, call_notifier); + queue_work(tctx->check_wq, &tctx->check_vqs); + + return NOTIFY_OK; +} + +static void kick_vq(struct trusty_ctx *tctx, + struct trusty_vdev *tvdev, + struct trusty_vring *tvr) +{ + int ret; + + dev_dbg(tctx->dev, "%s: 
vdev_id=%d: vq_id=%d\n", + __func__, tvdev->notifyid, tvr->notifyid); + + ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ, + tvdev->notifyid, tvr->notifyid, 0); + if (ret) { + dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n", + tvdev->notifyid, tvr->notifyid, ret); + } +} + +static void kick_vqs(struct work_struct *work) +{ + uint i; + struct trusty_vdev *tvdev; + struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, + kick_vqs); + mutex_lock(&tctx->mlock); + list_for_each_entry(tvdev, &tctx->vdev_list, node) { + for (i = 0; i < tvdev->vring_num; i++) { + struct trusty_vring *tvr = &tvdev->vrings[i]; + if (atomic_xchg(&tvr->needs_kick, 0)) + kick_vq(tctx, tvdev, tvr); + } + } + mutex_unlock(&tctx->mlock); +} + +static bool trusty_virtio_notify(struct virtqueue *vq) +{ + struct trusty_vring *tvr = vq->priv; + struct trusty_vdev *tvdev = tvr->tvdev; + struct trusty_ctx *tctx = tvdev->tctx; + + u32 api_ver = trusty_get_api_version(tctx->dev->parent); + + if (api_ver < TRUSTY_API_VERSION_SMP_NOP) { + atomic_set(&tvr->needs_kick, 1); + queue_work(tctx->kick_wq, &tctx->kick_vqs); + } else { + trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop); + } + + return true; +} + +static int trusty_load_device_descr(struct trusty_ctx *tctx, + void *va, size_t sz) +{ + int ret; + + dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); + + ret = trusty_call32_mem_buf(tctx->dev->parent, + SMC_SC_VIRTIO_GET_DESCR, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret < 0) { + dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n", + __func__, ret); + return -ENODEV; + } + return ret; +} + +static void trusty_virtio_stop(struct trusty_ctx *tctx, void *va, size_t sz) +{ + int ret; + + dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); + + ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_STOP, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret) { + dev_err(tctx->dev, "%s: virtio done returned (%d)\n", + __func__, ret); + return; + } +} + +static int trusty_virtio_start(struct trusty_ctx *tctx, + void *va, size_t sz) +{ + int ret; + + dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); + + ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_START, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret) { + dev_err(tctx->dev, "%s: virtio start returned (%d)\n", + __func__, ret); + return -ENODEV; + } + return 0; +} + +static void trusty_virtio_reset(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + struct trusty_ctx *tctx = tvdev->tctx; + + dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid); + trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET, + tvdev->notifyid, 0, 0); + vdev->config->set_status(vdev, 0); +} + +static u64 trusty_virtio_get_features(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + return ((u64)tvdev->vdev_descr->dfeatures) & 0x00000000FFFFFFFFULL; +} + +static int trusty_virtio_finalize_features(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + + /* Make sure we don't have any features > 32 bits! 
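+ * The gfeatures field of the shared descriptor is only a u32, so + * feature bits above 31 could not be acknowledged to the secure side.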
*/ + BUG_ON((u32)vdev->features != vdev->features); + + tvdev->vdev_descr->gfeatures = (u32)(vdev->features); + return 0; +} + +static void trusty_virtio_get_config(struct virtio_device *vdev, + unsigned offset, void *buf, + unsigned len) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + + dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n", + __func__, len, offset); + + if (tvdev->config) { + if (offset + len <= tvdev->config_len) + memcpy(buf, tvdev->config + offset, len); + } +} + +static void trusty_virtio_set_config(struct virtio_device *vdev, + unsigned offset, const void *buf, + unsigned len) +{ + dev_dbg(&vdev->dev, "%s\n", __func__); +} + +static u8 trusty_virtio_get_status(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + return tvdev->vdev_descr->status; +} + +static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + tvdev->vdev_descr->status = status; +} + +static void _del_vqs(struct virtio_device *vdev) +{ + uint i; + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + struct trusty_vring *tvr = &tvdev->vrings[0]; + + for (i = 0; i < tvdev->vring_num; i++, tvr++) { + /* dequeue kick_nop */ + trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop); + + /* delete vq */ + if (tvr->vq) { + vring_del_virtqueue(tvr->vq); + tvr->vq = NULL; + } + /* delete vring */ + if (tvr->vaddr) { + free_pages_exact(tvr->vaddr, tvr->size); + tvr->vaddr = NULL; + } + } +} + +static void trusty_virtio_del_vqs(struct virtio_device *vdev) +{ + dev_dbg(&vdev->dev, "%s\n", __func__); + _del_vqs(vdev); +} + + +static struct virtqueue *_find_vq(struct virtio_device *vdev, + unsigned id, + void (*callback)(struct virtqueue *vq), + const char *name) +{ + struct trusty_vring *tvr; + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + phys_addr_t pa; + + if (!name) + return ERR_PTR(-EINVAL); + + if (id >= tvdev->vring_num) + return ERR_PTR(-EINVAL); + + tvr = &tvdev->vrings[id]; + + /* actual size of vring (in bytes) */ + tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align)); + + /* allocate memory for the vring. */ + tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO); + if (!tvr->vaddr) { + dev_err(&vdev->dev, "vring alloc failed\n"); + return ERR_PTR(-ENOMEM); + } + + pa = virt_to_phys(tvr->vaddr); + /* save vring address to shared structure */ + tvr->vr_descr->da = (u32)pa; + /* da field is only 32 bit wide. 
Use previously unused 'reserved' field + * to store top 32 bits of 64-bit address + */ + tvr->vr_descr->pa = (u32)HIULINT(pa); + + dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, + vdev, true, true, tvr->vaddr, + trusty_virtio_notify, callback, name); +#else + tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, + vdev, true, tvr->vaddr, + trusty_virtio_notify, callback, name); +#endif + if (!tvr->vq) { + dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", + name); + goto err_new_virtqueue; + } + + tvr->vq->priv = tvr; + + return tvr->vq; + +err_new_virtqueue: + free_pages_exact(tvr->vaddr, tvr->size); + tvr->vaddr = NULL; + return ERR_PTR(-ENOMEM); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool *ctx, + struct irq_affinity *desc) +#else +static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[]) +#endif +{ + uint i; + int ret; + + for (i = 0; i < nvqs; i++) { + vqs[i] = _find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) { + ret = PTR_ERR(vqs[i]); + _del_vqs(vdev); + return ret; + } + } + return 0; +} + +static const char *trusty_virtio_bus_name(struct virtio_device *vdev) +{ + return "trusty-virtio"; +} + +/* The ops structure which hooks everything together. */ +static const struct virtio_config_ops trusty_virtio_config_ops = { + .get_features = trusty_virtio_get_features, + .finalize_features = trusty_virtio_finalize_features, + .get = trusty_virtio_get_config, + .set = trusty_virtio_set_config, + .get_status = trusty_virtio_get_status, + .set_status = trusty_virtio_set_status, + .reset = trusty_virtio_reset, + .find_vqs = trusty_virtio_find_vqs, + .del_vqs = trusty_virtio_del_vqs, + .bus_name = trusty_virtio_bus_name, +}; + +void virtio_vdev_release(struct device *dev) +{ + dev_dbg(dev, "%s() is called\n", __func__); + return; +} + +static int trusty_virtio_add_device(struct trusty_ctx *tctx, + struct fw_rsc_vdev *vdev_descr, + struct fw_rsc_vdev_vring *vr_descr, + void *config) +{ + int i, ret; + struct trusty_vdev *tvdev; + + tvdev = kzalloc(sizeof(struct trusty_vdev) + + vdev_descr->num_of_vrings * sizeof(struct trusty_vring), + GFP_KERNEL); + if (!tvdev) { + dev_err(tctx->dev, "Failed to allocate VDEV\n"); + return -ENOMEM; + } + + /* setup vdev */ + tvdev->tctx = tctx; + tvdev->vdev.dev.parent = tctx->dev; + tvdev->vdev.dev.release = virtio_vdev_release; + tvdev->vdev.id.device = vdev_descr->id; + tvdev->vdev.config = &trusty_virtio_config_ops; + tvdev->vdev_descr = vdev_descr; + tvdev->notifyid = vdev_descr->notifyid; + + /* setup config */ + tvdev->config = config; + tvdev->config_len = vdev_descr->config_len; + + /* setup vrings and vdev resource */ + tvdev->vring_num = vdev_descr->num_of_vrings; + + for (i = 0; i < tvdev->vring_num; i++, vr_descr++) { + struct trusty_vring *tvr = &tvdev->vrings[i]; + tvr->tvdev = tvdev; + tvr->vr_descr = vr_descr; + tvr->align = vr_descr->align; + tvr->elem_num = vr_descr->num; + tvr->notifyid = vr_descr->notifyid; + trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ, + tvdev->notifyid, tvr->notifyid); + } + + /* register device 
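+ * with the virtio core; the caller holds tctx->mlock, so adding the + * device to the vdev tracking list below is safe.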
*/ + ret = register_virtio_device(&tvdev->vdev); + if (ret) { + dev_err(tctx->dev, + "Failed (%d) to register device dev type %u\n", + ret, vdev_descr->id); + goto err_register; + } + + /* add it to tracking list */ + list_add_tail(&tvdev->node, &tctx->vdev_list); + + return 0; + +err_register: + kfree(tvdev); + return ret; +} + +static int trusty_parse_device_descr(struct trusty_ctx *tctx, + void *descr_va, size_t descr_sz) +{ + u32 i; + struct resource_table *descr = descr_va; + + if (descr_sz < sizeof(*descr)) { + dev_err(tctx->dev, "descr table is too small (0x%x)\n", + (int)descr_sz); + return -ENODEV; + } + + if (descr->ver != RSC_DESCR_VER) { + dev_err(tctx->dev, "unexpected descr ver (0x%x)\n", + (int)descr->ver); + return -ENODEV; + } + + if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) { + dev_err(tctx->dev, "descr table is too small (0x%x)\n", + (int)descr->ver); + return -ENODEV; + } + + for (i = 0; i < descr->num; i++) { + struct fw_rsc_hdr *hdr; + struct fw_rsc_vdev *vd; + struct fw_rsc_vdev_vring *vr; + void *cfg; + size_t vd_sz; + + u32 offset = descr->offset[i]; + + if (offset >= descr_sz) { + dev_err(tctx->dev, "offset is out of bounds (%u)\n", + (uint)offset); + return -ENODEV; + } + + /* check space for rsc header */ + if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) { + dev_err(tctx->dev, "no space for rsc header (%u)\n", + (uint)offset); + return -ENODEV; + } + hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset); + offset += sizeof(struct fw_rsc_hdr); + + /* check type */ + if (hdr->type != RSC_VDEV) { + dev_err(tctx->dev, "unsupported rsc type (%u)\n", + (uint)hdr->type); + continue; + } + + /* got vdev: check space for vdev */ + if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) { + dev_err(tctx->dev, "no space for vdev descr (%u)\n", + (uint)offset); + return -ENODEV; + } + vd = (struct fw_rsc_vdev *)((u8 *)descr + offset); + + /* check space for vrings and config area */ + vd_sz = sizeof(struct fw_rsc_vdev) + + vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) + + vd->config_len; + + if ((descr_sz - offset) < vd_sz) { + dev_err(tctx->dev, "no space for vdev (%u)\n", + (uint)offset); + return -ENODEV; + } + vr = (struct fw_rsc_vdev_vring *)vd->vring; + cfg = (void *)(vr + vd->num_of_vrings); + + trusty_virtio_add_device(tctx, vd, vr, cfg); + } + + return 0; +} + +static void _remove_devices_locked(struct trusty_ctx *tctx) +{ + struct trusty_vdev *tvdev, *next; + + list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) { + list_del(&tvdev->node); + unregister_virtio_device(&tvdev->vdev); + kfree(tvdev); + } +} + +static void trusty_virtio_remove_devices(struct trusty_ctx *tctx) +{ + mutex_lock(&tctx->mlock); + _remove_devices_locked(tctx); + mutex_unlock(&tctx->mlock); +} + +static int trusty_virtio_add_devices(struct trusty_ctx *tctx) +{ + int ret; + void *descr_va; + size_t descr_sz; + size_t descr_buf_sz; + + /* allocate buffer to load device descriptor into */ + descr_buf_sz = PAGE_SIZE; + descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO); + if (!descr_va) { + dev_err(tctx->dev, "Failed to allocate shared area\n"); + return -ENOMEM; + } + + /* load device descriptors */ + ret = trusty_load_device_descr(tctx, descr_va, descr_buf_sz); + if (ret < 0) { + dev_err(tctx->dev, "failed (%d) to load device descr\n", ret); + goto err_load_descr; + } + + descr_sz = (size_t)ret; + + mutex_lock(&tctx->mlock); + + /* parse device descriptor and add virtio devices */ + ret = trusty_parse_device_descr(tctx, descr_va, descr_sz); + if 
(ret) { + dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret); + goto err_parse_descr; + } + + /* register call notifier */ + ret = trusty_call_notifier_register(tctx->dev->parent, + &tctx->call_notifier); + if (ret) { + dev_err(tctx->dev, "%s: failed (%d) to register notifier\n", + __func__, ret); + goto err_register_notifier; + } + + /* start virtio */ + ret = trusty_virtio_start(tctx, descr_va, descr_sz); + if (ret) { + dev_err(tctx->dev, "failed (%d) to start virtio\n", ret); + goto err_start_virtio; + } + + /* attach shared area */ + tctx->shared_va = descr_va; + tctx->shared_sz = descr_buf_sz; + + mutex_unlock(&tctx->mlock); + + return 0; + +err_start_virtio: + trusty_call_notifier_unregister(tctx->dev->parent, + &tctx->call_notifier); + cancel_work_sync(&tctx->check_vqs); +err_register_notifier: +err_parse_descr: + _remove_devices_locked(tctx); + mutex_unlock(&tctx->mlock); + cancel_work_sync(&tctx->kick_vqs); + trusty_virtio_stop(tctx, descr_va, descr_sz); +err_load_descr: + free_pages_exact(descr_va, descr_buf_sz); + return ret; +} + +static int trusty_virtio_probe(struct platform_device *pdev) +{ + int ret; + struct trusty_ctx *tctx; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_info(&pdev->dev, "initializing\n"); + + tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); + if (!tctx) { + dev_err(&pdev->dev, "Failed to allocate context\n"); + return -ENOMEM; + } + + tctx->dev = &pdev->dev; + tctx->call_notifier.notifier_call = trusty_call_notify; + mutex_init(&tctx->mlock); + INIT_LIST_HEAD(&tctx->vdev_list); + INIT_WORK(&tctx->check_vqs, check_all_vqs); + INIT_WORK(&tctx->kick_vqs, kick_vqs); + platform_set_drvdata(pdev, tctx); + + tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0); + if (!tctx->check_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-check-wq\n"); + goto err_create_check_wq; + } + + tctx->kick_wq = alloc_workqueue("trusty-kick-wq", + WQ_UNBOUND | WQ_CPU_INTENSIVE, 0); + if (!tctx->kick_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-kick-wq\n"); + goto err_create_kick_wq; + } + + ret = trusty_virtio_add_devices(tctx); + if (ret) { + dev_err(&pdev->dev, "Failed to add virtio devices\n"); + goto err_add_devices; + } + + dev_info(&pdev->dev, "initializing done\n"); + return 0; + +err_add_devices: + destroy_workqueue(tctx->kick_wq); +err_create_kick_wq: + destroy_workqueue(tctx->check_wq); +err_create_check_wq: + kfree(tctx); + return ret; +} + +static int trusty_virtio_remove(struct platform_device *pdev) +{ + struct trusty_ctx *tctx = platform_get_drvdata(pdev); + + dev_err(&pdev->dev, "removing\n"); + + /* unregister call notifier and wait until workqueue is done */ + trusty_call_notifier_unregister(tctx->dev->parent, + &tctx->call_notifier); + cancel_work_sync(&tctx->check_vqs); + + /* remove virtio devices */ + trusty_virtio_remove_devices(tctx); + cancel_work_sync(&tctx->kick_vqs); + + /* destroy workqueues */ + destroy_workqueue(tctx->kick_wq); + destroy_workqueue(tctx->check_wq); + + /* notify remote that shared area goes away */ + trusty_virtio_stop(tctx, tctx->shared_va, tctx->shared_sz); + + /* free shared area */ + free_pages_exact(tctx->shared_va, tctx->shared_sz); + + /* free context */ + kfree(tctx); + return 0; +} + +static const struct of_device_id trusty_of_match[] = { + { + .compatible = "android,trusty-virtio-v1", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, trusty_of_match); + +static struct 
platform_driver trusty_virtio_driver = { + .probe = trusty_virtio_probe, + .remove = trusty_virtio_remove, + .driver = { + .name = "trusty-virtio", + .owner = THIS_MODULE, + .of_match_table = trusty_of_match, + }, +}; + +module_platform_driver(trusty_virtio_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Trusty virtio driver"); diff --git a/drivers/trusty/trusty-wall.c b/drivers/trusty/trusty-wall.c new file mode 100644 index 000000000000..2345f56a6405 --- /dev/null +++ b/drivers/trusty/trusty-wall.c @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2017 Intel, Inc. + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include + + +void *trusty_wall_base(struct device *dev) +{ + struct trusty_wall_dev_state *s; + + s = platform_get_drvdata(to_platform_device(dev)); + + if (NULL == s) + return NULL; + + return s->va; +} +EXPORT_SYMBOL(trusty_wall_base); + +void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, + u32 item_id, size_t exp_sz) +{ + uint i; + struct sm_wall_toc *toc; + struct sm_wall_toc_item *item; + struct trusty_wall_dev_state *s; + + s = platform_get_drvdata(to_platform_device(dev)); + + if (!s->va) { + dev_dbg(s->dev, "No smwall buffer is set\n"); + return NULL; + } + + toc = (struct sm_wall_toc *)s->va; + if (toc->version != SM_WALL_TOC_VER) { + dev_err(s->dev, "Unexpected toc version: %d\n", toc->version); + return NULL; + } + + if (cpu >= toc->cpu_num) { + dev_err(s->dev, "Unsupported cpu (%d) requested\n", cpu); + return NULL; + } + + item = (struct sm_wall_toc_item *)((uintptr_t)toc + + toc->per_cpu_toc_offset); + for (i = 0; i < toc->per_cpu_num_items; i++, item++) { + if (item->id != item_id) + continue; + + if (item->size != exp_sz) { + dev_err(s->dev, + "Size mismatch (%zd vs. 
%zd) for item_id %d\n", + (size_t)item->size, exp_sz, item_id); + return NULL; + } + + return s->va + toc->per_cpu_base_offset + + cpu * toc->per_cpu_region_size + item->offset; + } + return NULL; +} +EXPORT_SYMBOL(trusty_wall_per_cpu_item_ptr); + +static int trusty_wall_setup(struct trusty_wall_dev_state *s) +{ + int ret; + void *va; + size_t sz; + + /* check if wall feature is supported by Trusted OS */ + ret = trusty_fast_call32(s->trusty_dev, SMC_FC_GET_WALL_SIZE, 0, 0, 0); + if (ret == SM_ERR_UNDEFINED_SMC || ret == SM_ERR_NOT_SUPPORTED) { + /* wall is not supported */ + dev_notice(s->dev, "smwall: is not supported by Trusted OS\n"); + return 0; + } else if (ret < 0) { + dev_err(s->dev, "smwall: failed (%d) to query buffer size\n", + ret); + return ret; + } else if (ret == 0) { + dev_notice(s->dev, "smwall: zero-sized buffer requested\n"); + return 0; + } + sz = (size_t)ret; + + /* allocate memory for shared buffer */ + va = alloc_pages_exact(sz, GFP_KERNEL | __GFP_ZERO); + if (!va) { + dev_err(s->dev, "smwall: failed to allocate buffer\n"); + return -ENOMEM; + } + + /* call into Trusted OS to setup wall */ + ret = trusty_call32_mem_buf(s->trusty_dev, SMC_SC_SETUP_WALL, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret < 0) { + dev_err(s->dev, "smwall: TEE returned (%d)\n", ret); + free_pages_exact(va, sz); + return -ENODEV; + } + + dev_info(s->dev, "smwall: initialized %zu bytes\n", sz); + + s->va = va; + s->sz = sz; + + return 0; +} + +static void trusty_wall_destroy(struct trusty_wall_dev_state *s) +{ + int ret; + + ret = trusty_std_call32(s->trusty_dev, SMC_SC_DESTROY_WALL, 0, 0, 0); + if (ret) { + /** + * It should never happen, but if it happens, it is + * unsafe to free buffer so we have to leak memory + */ + dev_err(s->dev, "Failed (%d) to destroy the wall buffer\n", + ret); + } else { + free_pages_exact(s->va, s->sz); + } +} + +static int trusty_wall_probe(struct platform_device *pdev) +{ + int ret; + struct trusty_wall_dev_state *s; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "%s\n", __func__); + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->dev = &pdev->dev; + s->trusty_dev = s->dev->parent; + platform_set_drvdata(pdev, s); + + ret = trusty_wall_setup(s); + if (ret < 0) { + dev_warn(s->dev, "Failed (%d) to setup the wall\n", ret); + kfree(s); + return ret; + } + + return 0; +} + +static int trusty_wall_remove(struct platform_device *pdev) +{ + struct trusty_wall_dev_state *s = platform_get_drvdata(pdev); + + trusty_wall_destroy(s); + + return 0; +} + +static const struct of_device_id trusty_wall_of_match[] = { + { .compatible = "android, trusty-wall-v1", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, trusty_wall_of_match); + +static struct platform_driver trusty_wall_driver = { + .probe = trusty_wall_probe, + .remove = trusty_wall_remove, + .driver = { + .name = "trusty-wall", + .owner = THIS_MODULE, + .of_match_table = trusty_wall_of_match, + }, +}; + +module_platform_driver(trusty_wall_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Trusty smwall driver"); diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c new file mode 100755 index 000000000000..4d33f269851d --- /dev/null +++ b/drivers/trusty/trusty.c @@ -0,0 +1,777 @@ +/* + * Copyright (C) 2013 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define EVMM_SMC_HC_ID 0x74727500 +#define ACRN_SMC_HC_ID 0x80000071 + +struct trusty_state; + +struct trusty_work { + struct trusty_state *ts; + struct work_struct work; +}; + +struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; + struct completion cpu_idle_completion; + char *version_str; + u32 api_version; + struct device *dev; + struct workqueue_struct *nop_wq; + struct trusty_work __percpu *nop_works; + struct list_head nop_queue; + spinlock_t nop_lock; /* protects nop_queue */ +}; + +struct trusty_smc_interface { + struct device *dev; + ulong args[5]; +}; + +static ulong (*smc)(ulong, ulong, ulong, ulong); + +static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) +{ + register unsigned long smc_id asm("rax") = EVMM_SMC_HC_ID; + __asm__ __volatile__( + "vmcall; \n" + : "=D"(r0) + : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) + ); + + return r0; +} + +static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) +{ + register unsigned long smc_id asm("r8") = ACRN_SMC_HC_ID; + __asm__ __volatile__( + "vmcall; \n" + : "=D"(r0) + : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) + : "rax" + ); + + return r0; +} + +static void trusty_fast_call32_remote(void *args) +{ + struct trusty_smc_interface *p_args = args; + struct device *dev = p_args->dev; + ulong smcnr = p_args->args[0]; + ulong a0 = p_args->args[1]; + ulong a1 = p_args->args[2]; + ulong a2 = p_args->args[3]; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + BUG_ON(!s); + BUG_ON(!SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); + + p_args->args[4] = smc(smcnr, a0, a1, a2); +} + +s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) +{ + int cpu = 0; + int ret = 0; + struct trusty_smc_interface s; + s.dev = dev; + s.args[0] = smcnr; + s.args[1] = a0; + s.args[2] = a1; + s.args[3] = a2; + s.args[4] = 0; + + ret = smp_call_function_single(cpu, trusty_fast_call32_remote, (void *)&s, 1); + + if (ret) { + pr_err("%s: smp_call_function_single failed: %d\n", __func__, ret); + } + + return s.args[4]; +} +EXPORT_SYMBOL(trusty_fast_call32); + +#ifdef CONFIG_64BIT +s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + BUG_ON(!s); + BUG_ON(!SMC_IS_FASTCALL(smcnr)); + BUG_ON(!SMC_IS_SMC64(smcnr)); + + return smc(smcnr, a0, a1, a2); +} +#endif + +static ulong trusty_std_call_inner(struct device *dev, ulong smcnr, + ulong a0, ulong a1, ulong a2) +{ + ulong ret; + int retry = 5; + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n", + __func__, smcnr, a0, a1, a2); + while (true) { + ret = smc(smcnr, a0, a1, a2); + while ((s32)ret == SM_ERR_FIQ_INTERRUPTED) + ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0); + if ((int)ret != SM_ERR_BUSY || !retry) + break; + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n", + __func__, smcnr, 
a0, a1, a2); + retry--; + } + + return ret; +} + +static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, + ulong a0, ulong a1, ulong a2) +{ + ulong ret; + int sleep_time = 1; + unsigned long flags; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + while (true) { + local_irq_save(flags); + atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, + NULL); + ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); + atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, + NULL); + local_irq_restore(flags); + + if ((int)ret != SM_ERR_BUSY) + break; + + if (sleep_time == 256) + dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n", + __func__, smcnr, a0, a1, a2); + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n", + __func__, smcnr, a0, a1, a2, sleep_time); + + msleep(sleep_time); + if (sleep_time < 1000) + sleep_time <<= 1; + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n", + __func__, smcnr, a0, a1, a2); + } + + if (sleep_time > 256) + dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n", + __func__, smcnr, a0, a1, a2); + + return ret; +} + +static void trusty_std_call_cpu_idle(struct trusty_state *s) +{ + int ret; + + ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10); + if (!ret) { + pr_warn("%s: timed out waiting for cpu idle to clear, retry anyway\n", + __func__); + } +} + + +struct trusty_std_call32_args { + struct device *dev; + u32 smcnr; + u32 a0; + u32 a1; + u32 a2; +}; + +static long trusty_std_call32_work(void *args) +{ + int ret; + struct device *dev; + u32 smcnr, a0, a1, a2; + struct trusty_state *s; + struct trusty_std_call32_args *work_args; + + BUG_ON(!args); + + work_args = (struct trusty_std_call32_args *)args; + dev = work_args->dev; + s = platform_get_drvdata(to_platform_device(dev)); + + smcnr = work_args->smcnr; + a0 = work_args->a0; + a1 = work_args->a1; + a2 = work_args->a2; + + BUG_ON(SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); + + if (smcnr != SMC_SC_NOP) { + mutex_lock(&s->smc_lock); + reinit_completion(&s->cpu_idle_completion); + } + + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n", + __func__, smcnr, a0, a1, a2); + + ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2); + while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) { + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n", + __func__, smcnr, a0, a1, a2); + if (ret == SM_ERR_CPU_IDLE) + trusty_std_call_cpu_idle(s); + ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0); + } + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n", + __func__, smcnr, a0, a1, a2, ret); + + WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"); + + if (smcnr == SMC_SC_NOP) + complete(&s->cpu_idle_completion); + else + mutex_unlock(&s->smc_lock); + + return ret; +} + +s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) +{ + const int cpu = 0; + struct trusty_std_call32_args args = { + .dev = dev, + .smcnr = smcnr, + .a0 = a0, + .a1 = a1, + .a2 = a2, + }; + + /* bind cpu 0 for now since trusty OS is running on physical cpu #0*/ + return work_on_cpu(cpu, trusty_std_call32_work, (void *) &args); +} + +EXPORT_SYMBOL(trusty_std_call32); + +int trusty_call_notifier_register(struct device *dev, struct notifier_block *n) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return atomic_notifier_chain_register(&s->notifier, n); +} +EXPORT_SYMBOL(trusty_call_notifier_register); + +int trusty_call_notifier_unregister(struct device *dev, + struct 
notifier_block *n) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return atomic_notifier_chain_unregister(&s->notifier, n); +} +EXPORT_SYMBOL(trusty_call_notifier_unregister); + +static int trusty_remove_child(struct device *dev, void *data) +{ + dev_dbg(dev, "%s() is called()\n", __func__); + platform_device_unregister(to_platform_device(dev)); + return 0; +} + +ssize_t trusty_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str); +} + +DEVICE_ATTR(trusty_version, S_IRUSR, trusty_version_show, NULL); + +const char *trusty_version_str_get(struct device *dev) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return s->version_str; +} +EXPORT_SYMBOL(trusty_version_str_get); + +static void trusty_init_version(struct trusty_state *s, struct device *dev) +{ + int ret; + int i; + int version_str_len; + + ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0); + if (ret <= 0) + goto err_get_size; + + version_str_len = ret; + + s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL); + if (!s->version_str) + goto err_get_size; + for (i = 0; i < version_str_len; i++) { + ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0); + if (ret < 0) + goto err_get_char; + s->version_str[i] = ret; + } + s->version_str[i] = '\0'; + + dev_info(dev, "trusty version: %s\n", s->version_str); + + ret = device_create_file(dev, &dev_attr_trusty_version); + if (ret) + goto err_create_file; + return; + +err_create_file: +err_get_char: + kfree(s->version_str); + s->version_str = NULL; +err_get_size: + dev_err(dev, "failed to get version: %d\n", ret); +} + +u32 trusty_get_api_version(struct device *dev) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return s->api_version; +} +EXPORT_SYMBOL(trusty_get_api_version); + +static int trusty_init_api_version(struct trusty_state *s, struct device *dev) +{ + u32 api_version; + api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION, + TRUSTY_API_VERSION_CURRENT, 0, 0); + if (api_version == SM_ERR_UNDEFINED_SMC) + api_version = 0; + + if (api_version > TRUSTY_API_VERSION_CURRENT) { + dev_err(dev, "unsupported api version %u > %u\n", + api_version, TRUSTY_API_VERSION_CURRENT); + return -EINVAL; + } + + dev_info(dev, "selected api version: %u (requested %u)\n", + api_version, TRUSTY_API_VERSION_CURRENT); + s->api_version = api_version; + + return 0; +} + +static bool dequeue_nop(struct trusty_state *s, u32 *args) +{ + unsigned long flags; + struct trusty_nop *nop = NULL; + + spin_lock_irqsave(&s->nop_lock, flags); + if (!list_empty(&s->nop_queue)) { + nop = list_first_entry(&s->nop_queue, + struct trusty_nop, node); + list_del_init(&nop->node); + args[0] = nop->args[0]; + args[1] = nop->args[1]; + args[2] = nop->args[2]; + } else { + args[0] = 0; + args[1] = 0; + args[2] = 0; + } + spin_unlock_irqrestore(&s->nop_lock, flags); + return nop; +} + +static void locked_nop_work_func(struct work_struct *work) +{ + int ret; + struct trusty_work *tw = container_of(work, struct trusty_work, work); + struct trusty_state *s = tw->ts; + + dev_dbg(s->dev, "%s\n", __func__); + + ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0); + if (ret != 0) + dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d", + __func__, ret); + dev_dbg(s->dev, "%s: done\n", __func__); +} + +static void nop_work_func(struct work_struct 
*work) +{ + int ret; + bool next; + u32 args[3]; + struct trusty_work *tw = container_of(work, struct trusty_work, work); + struct trusty_state *s = tw->ts; + + dev_dbg(s->dev, "%s:\n", __func__); + + dequeue_nop(s, args); + do { + dev_dbg(s->dev, "%s: %x %x %x\n", + __func__, args[0], args[1], args[2]); + + ret = trusty_std_call32(s->dev, SMC_SC_NOP, + args[0], args[1], args[2]); + + next = dequeue_nop(s, args); + + if (ret == SM_ERR_NOP_INTERRUPTED) + next = true; + else if (ret != SM_ERR_NOP_DONE) + dev_err(s->dev, "%s: SMC_SC_NOP failed %d", + __func__, ret); + } while (next); + + dev_dbg(s->dev, "%s: done\n", __func__); +} + +static void trusty_init_smc(int vmm_id) +{ + if (vmm_id == VMM_ID_EVMM) { + smc = smc_evmm; + } else if (vmm_id == VMM_ID_ACRN) { + smc = smc_acrn; + } else { + pr_err("%s: No smc supports VMM[%d](sig:%s)!", + __func__, vmm_id, vmm_signature[vmm_id]); + BUG(); + } +} + +void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop) +{ + unsigned long flags; + struct trusty_work *tw; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + preempt_disable(); + tw = this_cpu_ptr(s->nop_works); + if (nop) { + WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP); + + spin_lock_irqsave(&s->nop_lock, flags); + if (list_empty(&nop->node)) + list_add_tail(&nop->node, &s->nop_queue); + spin_unlock_irqrestore(&s->nop_lock, flags); + } + queue_work(s->nop_wq, &tw->work); + preempt_enable(); +} +EXPORT_SYMBOL(trusty_enqueue_nop); + +void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop) +{ + unsigned long flags; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + if (WARN_ON(!nop)) + return; + + spin_lock_irqsave(&s->nop_lock, flags); + if (!list_empty(&nop->node)) + list_del_init(&nop->node); + spin_unlock_irqrestore(&s->nop_lock, flags); +} +EXPORT_SYMBOL(trusty_dequeue_nop); + +static int trusty_probe(struct platform_device *pdev) +{ + int ret; + unsigned int cpu; + work_func_t work_func; + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + dev_dbg(&pdev->dev, "Detected VMM: sig=%s\n", vmm_signature[ret]); + + trusty_init_smc(ret); + + if (!node) { + dev_err(&pdev->dev, "of_node required\n"); + return -EINVAL; + } + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + ret = -ENOMEM; + goto err_allocate_state; + } + + s->dev = &pdev->dev; + spin_lock_init(&s->nop_lock); + INIT_LIST_HEAD(&s->nop_queue); + + mutex_init(&s->smc_lock); + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + init_completion(&s->cpu_idle_completion); + platform_set_drvdata(pdev, s); + + trusty_init_version(s, &pdev->dev); + + ret = trusty_init_api_version(s, &pdev->dev); + if (ret < 0) + goto err_api_version; + + s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0); + if (!s->nop_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-nop-wq\n"); + goto err_create_nop_wq; + } + + s->nop_works = alloc_percpu(struct trusty_work); + if (!s->nop_works) { + ret = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate works\n"); + goto err_alloc_works; + } + + if (s->api_version < TRUSTY_API_VERSION_SMP) + work_func = locked_nop_work_func; + else + work_func = nop_work_func; + + for_each_possible_cpu(cpu) { + struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); + + tw->ts = s; + INIT_WORK(&tw->work, work_func); + } + + return 0; + +err_alloc_works: + 
for_each_possible_cpu(cpu) { + struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); + + flush_work(&tw->work); + } + free_percpu(s->nop_works); + destroy_workqueue(s->nop_wq); +err_create_nop_wq: +err_api_version: + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); + } + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + mutex_destroy(&s->smc_lock); + kfree(s); +err_allocate_state: + return ret; +} + +static int trusty_remove(struct platform_device *pdev) +{ + unsigned int cpu; + struct trusty_state *s = platform_get_drvdata(pdev); + + dev_dbg(&(pdev->dev), "%s() is called\n", __func__); + + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + + for_each_possible_cpu(cpu) { + struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); + + flush_work(&tw->work); + } + free_percpu(s->nop_works); + destroy_workqueue(s->nop_wq); + + mutex_destroy(&s->smc_lock); + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); + } + kfree(s); + return 0; +} + +static const struct of_device_id trusty_of_match[] = { + { .compatible = "android,trusty-smc-v1", }, + {}, +}; + +static struct platform_driver trusty_driver = { + .probe = trusty_probe, + .remove = trusty_remove, + .driver = { + .name = "trusty", + .owner = THIS_MODULE, + .of_match_table = trusty_of_match, + }, +}; + +void trusty_dev_release(struct device *dev) +{ + dev_dbg(dev, "%s() is called()\n", __func__); + return; +} + +static struct device_node trusty_timer_node = { + .name = "trusty-timer", + .sibling = NULL, +}; + +static struct device_node trusty_wall_node = { + .name = "trusty-wall", + .sibling = NULL, +}; + +static struct device_node trusty_irq_node = { + .name = "trusty-irq", + .sibling = NULL, +}; + +static struct device_node trusty_virtio_node = { + .name = "trusty-virtio", + .sibling = &trusty_irq_node, +}; + +static struct device_node trusty_log_node = { + .name = "trusty-log", + .sibling = &trusty_virtio_node, +}; + + +static struct device_node trusty_node = { + .name = "trusty", + .child = &trusty_log_node, +}; + +static struct platform_device trusty_platform_dev = { + .name = "trusty", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .of_node = &trusty_node, + }, +}; +static struct platform_device trusty_platform_dev_log = { + .name = "trusty-log", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_log_node, + }, +}; + +static struct platform_device trusty_platform_dev_virtio = { + .name = "trusty-virtio", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_virtio_node, + }, +}; + +static struct platform_device trusty_platform_dev_irq = { + .name = "trusty-irq", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_irq_node, + }, +}; + +static struct platform_device trusty_platform_dev_wall = { + .name = "trusty-wall", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_wall_node, + }, +}; + +static struct platform_device trusty_platform_dev_timer = { + .name = "trusty-timer", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev_wall.dev, + .of_node = &trusty_timer_node, + 
}, +}; + +static struct platform_device *trusty_devices[] __initdata = { + &trusty_platform_dev, + &trusty_platform_dev_log, + &trusty_platform_dev_virtio, + &trusty_platform_dev_irq, + &trusty_platform_dev_wall, + &trusty_platform_dev_timer +}; +static int __init trusty_driver_init(void) +{ + int ret = 0; + + ret = platform_add_devices(trusty_devices, ARRAY_SIZE(trusty_devices)); + if (ret) { + printk(KERN_ERR "%s(): platform_add_devices() failed, ret %d\n", __func__, ret); + return ret; + } + return platform_driver_register(&trusty_driver); +} + +static void __exit trusty_driver_exit(void) +{ + platform_driver_unregister(&trusty_driver); + platform_device_unregister(&trusty_platform_dev); +} + +subsys_initcall(trusty_driver_init); +module_exit(trusty_driver_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index cc2b4d9433ed..b811442c5ce6 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -394,10 +394,14 @@ config GOLDFISH_TTY depends on GOLDFISH select SERIAL_CORE select SERIAL_CORE_CONSOLE - select SERIAL_EARLYCON help Console and system TTY driver for the Goldfish virtual platform. +config GOLDFISH_TTY_EARLY_CONSOLE + bool + default y if GOLDFISH_TTY=y + select SERIAL_EARLYCON + config DA_TTY bool "DA TTY" depends on METAG_DA diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index 381e981dee06..85a500ddbcaa 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -442,6 +442,7 @@ static int goldfish_tty_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE static void gf_early_console_putchar(struct uart_port *port, int ch) { __raw_writel(ch, port->membase); @@ -465,6 +466,7 @@ static int __init gf_earlycon_setup(struct earlycon_device *device, } OF_EARLYCON_DECLARE(early_gf_tty, "google,goldfish-tty", gf_earlycon_setup); +#endif static const struct of_device_id goldfish_tty_of_match[] = { { .compatible = "google,goldfish-tty", }, diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 0a3c9665e015..7253e8d2c6d9 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1463,6 +1463,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) + * + * Some control dlci can stay in ADM mode with other dlci working just + * fine. In that case we can just keep the control dlci open after the + * DLCI_OPENING retries time out. */ static void gsm_dlci_t1(unsigned long data) @@ -1476,8 +1480,15 @@ static void gsm_dlci_t1(unsigned long data) if (dlci->retries) { gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); - } else + } else if (!dlci->addr && gsm->control == (DM | PF)) { + if (debug & 8) + pr_info("DLCI %d opening in ADM mode.\n", + dlci->addr); + gsm_dlci_open(dlci); + } else { gsm_dlci_close(dlci); + } + break; case DLCI_CLOSING: dlci->retries--; @@ -1495,8 +1506,8 @@ static void gsm_dlci_t1(unsigned long data) * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages - * to the modem which should then reply with a UA, at which point we - * will move into open state. Opening is done asynchronously with retry + * to the modem which should then reply with a UA or ADM, at which point + * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. 
*/ diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index bdf0e6e89991..faf50df81622 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1764,7 +1764,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; - if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { + if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->read_tail; if (!L_ICANON(tty) || !read_cnt(ldata)) { @@ -2427,7 +2427,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file, return put_user(tty_chars_in_buffer(tty), (int __user *) arg); case TIOCINQ: down_write(&tty->termios_rwsem); - if (L_ICANON(tty)) + if (L_ICANON(tty) && !L_EXTPROC(tty)) retval = inq_canon(ldata); else retval = read_cnt(ldata); diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index c68fb3a8ea1c..97db76afced2 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -65,21 +65,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env) */ int serdev_device_add(struct serdev_device *serdev) { + struct serdev_controller *ctrl = serdev->ctrl; struct device *parent = serdev->dev.parent; int err; dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr); + /* Only a single slave device is currently supported. */ + if (ctrl->serdev) { + dev_err(&serdev->dev, "controller busy\n"); + return -EBUSY; + } + ctrl->serdev = serdev; + err = device_add(&serdev->dev); if (err < 0) { dev_err(&serdev->dev, "Can't add %s, status %d\n", dev_name(&serdev->dev), err); - goto err_device_add; + goto err_clear_serdev; } dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev)); -err_device_add: + return 0; + +err_clear_serdev: + ctrl->serdev = NULL; return err; } EXPORT_SYMBOL_GPL(serdev_device_add); @@ -90,7 +101,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add); */ void serdev_device_remove(struct serdev_device *serdev) { + struct serdev_controller *ctrl = serdev->ctrl; + device_unregister(&serdev->dev); + ctrl->serdev = NULL; } EXPORT_SYMBOL_GPL(serdev_device_remove); @@ -295,7 +309,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl) return NULL; serdev->ctrl = ctrl; - ctrl->serdev = serdev; device_initialize(&serdev->dev); serdev->dev.parent = &ctrl->dev; serdev->dev.bus = &serdev_bus_type; diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 302018d67efa..69fc6d9ab490 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -35,23 +35,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp, { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); + int ret; if (!test_bit(SERPORT_ACTIVE, &serport->flags)) return 0; - return serdev_controller_receive_buf(ctrl, cp, count); + ret = serdev_controller_receive_buf(ctrl, cp, count); + + dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count, + "receive_buf returns %d (count = %zu)\n", + ret, count); + if (ret < 0) + return 0; + else if (ret > count) + return count; + + return ret; } static void ttyport_write_wakeup(struct tty_port *port) { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty; + + tty = tty_port_tty_get(port); + if (!tty) + return; - if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, 
&port->tty->flags) && + if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && test_bit(SERPORT_ACTIVE, &serport->flags)) serdev_controller_write_wakeup(ctrl); - wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT); + wake_up_interruptible_poll(&tty->write_wait, POLLOUT); + + tty_kref_put(tty); } static const struct tty_port_client_operations client_ops = { @@ -102,10 +120,10 @@ static int ttyport_open(struct serdev_controller *ctrl) return PTR_ERR(tty); serport->tty = tty; - if (tty->ops->open) - tty->ops->open(serport->tty, NULL); - else - tty_port_open(serport->port, tty, NULL); + if (!tty->ops->open) + goto err_unlock; + + tty->ops->open(serport->tty, NULL); /* Bring the UART into a known 8 bits no parity hw fc state */ ktermios = tty->termios; @@ -122,6 +140,12 @@ static int ttyport_open(struct serdev_controller *ctrl) tty_unlock(serport->tty); return 0; + +err_unlock: + tty_unlock(tty); + tty_release_struct(tty, serport->tty_idx); + + return -ENODEV; } static void ttyport_close(struct serdev_controller *ctrl) @@ -131,8 +155,10 @@ static void ttyport_close(struct serdev_controller *ctrl) clear_bit(SERPORT_ACTIVE, &serport->flags); + tty_lock(tty); if (tty->ops->close) tty->ops->close(tty, NULL); + tty_unlock(tty); tty_release_struct(tty, serport->tty_idx); } diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 7e638997bfc2..5ebf984919bf 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -24,9 +24,11 @@ #include #include #include +#include #include #include #include +#include #include @@ -513,7 +515,8 @@ static int dw8250_probe(struct platform_device *pdev) /* If no clock rate is defined, fail. */ if (!p->uartclk) { dev_err(dev, "clock rate not defined\n"); - return -EINVAL; + err = -EINVAL; + goto err_clk; } data->pclk = devm_clk_get(dev, "apb_pclk"); @@ -606,6 +609,20 @@ static int dw8250_suspend(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); + /* + * FIXME: For Platforms with LPSS PCI UARTs, the parent device should + * be prevented from going into D3 for the no_console_suspend flag to + * work as expected. 
+ */ + if (platform_get_resource_byname(to_platform_device(dev), + IORESOURCE_MEM, "lpss_dev")) { + struct uart_8250_port *up = serial8250_get_port(data->line); + struct pci_dev *pdev = to_pci_dev(dev->parent); + + if (pdev && !console_suspend_enabled && uart_console(&up->port)) + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + } + serial8250_suspend_port(data->line); return 0; diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index af72ec32e404..f135c1846477 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -125,12 +125,14 @@ static void __init init_port(struct earlycon_device *device) serial8250_early_out(port, UART_FCR, 0); /* no fifo */ serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */ - divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); - c = serial8250_early_in(port, UART_LCR); - serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); - serial8250_early_out(port, UART_DLL, divisor & 0xff); - serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); - serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + if (port->uartclk && device->baud) { + divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); + c = serial8250_early_in(port, UART_LCR); + serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); + serial8250_early_out(port, UART_DLL, divisor & 0xff); + serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); + serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + } } int __init early_serial8250_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index e500f7dd2470..ba4af5434b91 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -118,6 +118,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key) if (!request_muxed_region(base_port, 2, "8250_fintek")) return -EBUSY; + /* Force to deactive all SuperIO in this base_port */ + outb(EXIT_KEY, base_port + ADDR_PORT); + outb(key, base_port + ADDR_PORT); outb(key, base_port + ADDR_PORT); return 0; @@ -208,7 +211,7 @@ static int fintek_8250_rs485_config(struct uart_port *port, if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) - rs485->flags &= SER_RS485_ENABLED; + rs485->flags &= ~SER_RS485_ENABLED; else config |= RS485_URA; diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 1222c005fb98..3613a6aabfb3 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -141,8 +141,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev, } info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); - if (IS_ERR(info->rst)) + if (IS_ERR(info->rst)) { + ret = PTR_ERR(info->rst); goto err_dispose; + } + ret = reset_control_deassert(info->rst); if (ret) goto err_dispose; @@ -318,6 +321,7 @@ static const struct of_device_id of_platform_serial_table[] = { { .compatible = "mrvl,mmp-uart", .data = (void *)PORT_XSCALE, }, { .compatible = "ti,da830-uart", .data = (void *)PORT_DA830, }, + { .compatible = "nuvoton,npcm750-uart", .data = (void *)PORT_NPCM, }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, of_platform_serial_table); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 0c101a7470b0..0d814a87acb2 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3389,11 +3389,9 @@ static int 
serial_pci_is_class_communication(struct pci_dev *dev) /* * If it is not a communications device or the programming * interface is greater than 6, give up. - * - * (Should we try to make guesses for multiport serial devices - * later?) */ if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && + ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) && ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || (dev->class & 0xff) > 6) return -ENODEV; @@ -3430,6 +3428,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) { int num_iomem, num_port, first_port = -1, i; + /* + * Should we try to make guesses for multiport serial devices later? + */ + if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL) + return -ENODEV; + num_iomem = num_port = 0; for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { if (pci_resource_flags(dev, i) & IORESOURCE_IO) { @@ -4700,6 +4704,17 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, + /* + * BrainBoxes UC-260 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D21, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0E34, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, /* * Perle PCI-RAS cards */ @@ -5137,6 +5152,9 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, + /* Amazon PCI serial device */ + { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 }, + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index f0cc04f62b67..fde34c84e707 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -51,6 +51,10 @@ #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ #define UART_EXAR_DVID 0x8d /* Device identification */ +/* Nuvoton NPCM timeout register */ +#define UART_NPCM_TOR 7 +#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */ + /* * Debugging. */ @@ -297,6 +301,15 @@ static const struct serial8250_config uart_config[] = { UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, .flags = UART_CAP_FIFO, }, + [PORT_NPCM] = { + .name = "Nuvoton 16550", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ @@ -2168,6 +2181,15 @@ int serial8250_do_startup(struct uart_port *port) UART_DA830_PWREMU_MGMT_FREE); } + if (port->type == PORT_NPCM) { + /* + * Nuvoton calls the scratch register 'UART_TOR' (timeout + * register). Enable it, and set TIOC (timeout interrupt + * comparator) to be 0x20 for correct operation. 
+ */ + serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20); + } + #ifdef CONFIG_SERIAL_8250_RSA /* * If this is an RSA port, see if we can kick it up to the @@ -2490,6 +2512,15 @@ static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up, return quot_16 >> 4; } +/* Nuvoton NPCM UARTs have a custom divisor calculation */ +static unsigned int npcm_get_divisor(struct uart_8250_port *up, + unsigned int baud) +{ + struct uart_port *port = &up->port; + + return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2; +} + static unsigned int serial8250_get_divisor(struct uart_8250_port *up, unsigned int baud, unsigned int *frac) @@ -2510,6 +2541,8 @@ static unsigned int serial8250_get_divisor(struct uart_8250_port *up, quot = 0x8002; else if (up->port.type == PORT_XR17V35X) quot = xr17v35x_get_divisor(up, baud, frac); + else if (up->port.type == PORT_NPCM) + quot = npcm_get_divisor(up, baud); else quot = uart_get_divisor(port, baud); @@ -2586,8 +2619,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, serial_dl_write(up, quot); /* XR17V35x UARTs have an extra fractional divisor register (DLD) */ - if (up->port.type == PORT_XR17V35X) + if (up->port.type == PORT_XR17V35X) { + /* Preserve bits not related to baudrate; DLD[7:4]. */ + quot_frac |= serial_port_in(port, 0x2) & 0xf0; serial_port_out(port, 0x2, quot_frac); + } } static unsigned int serial8250_get_baud_rate(struct uart_port *port, diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index 8a10b10e27aa..c206f173f912 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -259,12 +259,13 @@ static int uniphier_uart_probe(struct platform_device *pdev) up.dl_read = uniphier_serial_dl_read; up.dl_write = uniphier_serial_dl_write; - priv->line = serial8250_register_8250_port(&up); - if (priv->line < 0) { + ret = serial8250_register_8250_port(&up); + if (ret < 0) { dev_err(dev, "failed to register 8250 port\n"); clk_disable_unprepare(priv->clk); return ret; } + priv->line = ret; platform_set_drvdata(pdev, priv); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 7551cab438ff..a0b24bc09783 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1763,6 +1763,7 @@ static void atmel_get_ip_name(struct uart_port *port) switch (version) { case 0x302: case 0x10213: + case 0x10302: dev_dbg(port->dev, "This version is usart\n"); atmel_port->has_frac_baudrate = true; atmel_port->has_hw_timer = true; diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 98928f082d87..17dba0af5ee9 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -253,11 +253,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match, } port->mapbase = addr; port->uartclk = BASE_BAUD * 16; - port->membase = earlycon_map(port->mapbase, SZ_4K); val = of_get_flat_dt_prop(node, "reg-offset", NULL); if (val) port->mapbase += be32_to_cpu(*val); + port->membase = earlycon_map(port->mapbase, SZ_4K); + val = of_get_flat_dt_prop(node, "reg-shift", NULL); if (val) port->regshift = be32_to_cpu(*val); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index dfeff3951f93..521500c575c8 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -334,7 +334,8 @@ static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2) { *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); - mctrl_gpio_set(sport->gpios, 
sport->port.mctrl | TIOCM_RTS); + sport->port.mctrl |= TIOCM_RTS; + mctrl_gpio_set(sport->gpios, sport->port.mctrl); } static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) @@ -342,7 +343,8 @@ static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) *ucr2 &= ~UCR2_CTSC; *ucr2 |= UCR2_CTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); + sport->port.mctrl &= ~TIOCM_RTS; + mctrl_gpio_set(sport->gpios, sport->port.mctrl); } static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2) @@ -2273,12 +2275,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on) val &= ~UCR3_AWAKEN; writel(val, sport->port.membase + UCR3); - val = readl(sport->port.membase + UCR1); - if (on) - val |= UCR1_RTSDEN; - else - val &= ~UCR1_RTSDEN; - writel(val, sport->port.membase + UCR1); + if (sport->have_rtscts) { + val = readl(sport->port.membase + UCR1); + if (on) + val |= UCR1_RTSDEN; + else + val &= ~UCR1_RTSDEN; + writel(val, sport->port.membase + UCR1); + } } static int imx_serial_port_suspend_noirq(struct device *dev) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7754053deeda..26a22b100df1 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl) if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) up->efr |= UART_EFR_RTS; else - up->efr &= UART_EFR_RTS; + up->efr &= ~UART_EFR_RTS; serial_out(up, UART_EFR, up->efr); serial_out(up, UART_LCR, lcr); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 3a14cccbd7ff..c8cb0b398cb1 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -987,6 +987,8 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, } } else { retval = uart_startup(tty, state, 1); + if (retval == 0) + tty_port_set_initialized(port, true); if (retval > 0) retval = 0; } @@ -1155,6 +1157,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state) uport->ops->config_port(uport, flags); ret = uart_startup(tty, state, 1); + if (ret == 0) + tty_port_set_initialized(port, true); if (ret > 0) ret = 0; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 784dd42002ea..22f60239026c 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -886,6 +886,8 @@ static void sci_receive_chars(struct uart_port *port) /* Tell the rest of the system the news. New characters! */ tty_flip_buffer_push(tport); } else { + /* TTY buffers full; read from RX reg to prevent lockup */ + serial_port_in(port, SCxRDR); serial_port_in(port, SCxSR); /* dummy read */ sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); } @@ -1491,6 +1493,14 @@ static void sci_request_dma(struct uart_port *port) return; s->cookie_tx = -EINVAL; + + /* + * Don't request a dma channel if no channel was specified + * in the device tree. 
+ */ + if (!of_find_property(port->dev->of_node, "dmas", NULL)) + return; + chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV); dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); if (chan) { diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index d008f5a75197..377b3592384e 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -246,8 +246,10 @@ static void sysrq_handle_showallcpus(int key) * architecture has no support for it: */ if (!trigger_all_cpu_backtrace()) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + if (in_irq()) + regs = get_irq_regs(); if (regs) { pr_info("CPU%d:\n", smp_processor_id()); show_regs(regs); @@ -266,7 +268,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = { static void sysrq_handle_showregs(int key) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + + if (in_irq()) + regs = get_irq_regs(); if (regs) show_regs(regs); perf_event_print_debug(); diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index f8eba1c5412f..677fa99b7747 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -446,7 +446,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); * Callers other than flush_to_ldisc() need to exclude the kworker * from concurrent use of the line discipline, see paste_selection(). * - * Returns the number of bytes not processed + * Returns the number of bytes processed */ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 94cccb6efa32..7e77bd2118ad 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1322,6 +1322,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) "%s: %s driver does not set tty->port. This will crash the kernel later. 
Fix the driver!\n", __func__, tty->driver->name); + retval = tty_ldisc_lock(tty, 5 * HZ); + if (retval) + goto err_release_lock; tty->port->itty = tty; /* @@ -1332,6 +1335,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) retval = tty_ldisc_setup(tty, tty->link); if (retval) goto err_release_tty; + tty_ldisc_unlock(tty); /* Return the tty locked so that it cannot vanish under the caller */ return tty; @@ -1344,9 +1348,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) /* call the tty release_tty routine to clean out this slot */ err_release_tty: - tty_unlock(tty); + tty_ldisc_unlock(tty); tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n", retval, idx); +err_release_lock: + tty_unlock(tty); release_tty(tty, idx); return ERR_PTR(retval); } @@ -1475,6 +1481,8 @@ static void release_tty(struct tty_struct *tty, int idx) if (tty->link) tty->link->port->itty = NULL; tty_buffer_cancel_work(tty->port); + if (tty->link) + tty_buffer_cancel_work(tty->link->port); tty_kref_put(tty->link); tty_kref_put(tty); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 84a8ac2a779f..7c895684c3ef 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty) ldsem_up_write(&tty->ldisc_sem); } -static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) +int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) { int ret; @@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) return 0; } -static void tty_ldisc_unlock(struct tty_struct *tty) +void tty_ldisc_unlock(struct tty_struct *tty) { clear_bit(TTY_LDISC_HALTED, &tty->flags); __tty_ldisc_unlock(tty); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 2ebaba16f785..de67abbda921 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1352,6 +1352,11 @@ static void csi_m(struct vc_data *vc) case 3: vc->vc_italic = 1; break; + case 21: + /* + * No console drivers support double underline, so + * convert it to a single underline. 
+ */ case 4: vc->vc_underline = 1; break; @@ -1387,7 +1392,6 @@ static void csi_m(struct vc_data *vc) vc->vc_disp_ctrl = 1; vc->vc_toggle_meta = 1; break; - case 21: case 22: vc->vc_intensity = 1; break; @@ -1725,7 +1729,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) default_attr(vc); update_attr(vc); - vc->vc_tab_stop[0] = 0x01010100; + vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = vc->vc_tab_stop[2] = vc->vc_tab_stop[3] = @@ -1769,7 +1773,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) vc->vc_pos -= (vc->vc_x << 1); while (vc->vc_x < vc->vc_cols - 1) { vc->vc_x++; - if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) + if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) break; } vc->vc_pos += (vc->vc_x << 1); @@ -1829,7 +1833,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); return; case 'Z': respond_ID(tty); @@ -2022,7 +2026,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 'g': if (!vc->vc_par[0]) - vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); else if (vc->vc_par[0] == 3) { vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 48d5327d38d4..fe5cdda80b2c 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -124,6 +124,13 @@ hv_uio_probe(struct hv_device *dev, if (ret) goto fail; + /* Communicating with host has to be via shared memory not hypercall */ + if (!dev->channel->offermsg.monitor_allocated) { + dev_err(&dev->device, "vmbus channel requires hypercall\n"); + ret = -ENOTSUPP; + goto fail_close; + } + dev->channel->inbound.ring_buffer->interrupt_mask = 1; set_channel_read_mode(dev->channel, HV_CALL_DIRECT); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 939a63bca82f..72eb3e41e3b6 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO config USB_EHCI_BIG_ENDIAN_DESC bool +config USB_UHCI_BIG_ENDIAN_MMIO + bool + default y if SPARC_LEON + +config USB_UHCI_BIG_ENDIAN_DESC + bool + default y if SPARC_LEON + menuconfig USB_SUPPORT bool "USB support" depends on HAS_IOMEM diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index bb626120296f..53f3bf459dd1 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -251,7 +251,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev) if (ret) goto err_mux; - ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi"); + ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi"); if (ulpi_node) { phy_node = of_get_next_available_child(ulpi_node, NULL); ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy"); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 18c923a4c16e..22952d70b981 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -187,6 +187,7 @@ static int acm_wb_alloc(struct acm *acm) wb = &acm->wb[wbn]; if (!wb->use) { wb->use = 1; + wb->len = 0; return wbn; } wbn = (wbn + 1) % ACM_NW; @@ -438,7 +439,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) res = usb_submit_urb(acm->read_urbs[index], mem_flags); if (res) { - if (res != 
-EPERM) { + if (res != -EPERM && res != -ENODEV) { dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); @@ -818,16 +819,18 @@ static int acm_tty_write(struct tty_struct *tty, static void acm_tty_flush_chars(struct tty_struct *tty) { struct acm *acm = tty->driver_data; - struct acm_wb *cur = acm->putbuffer; + struct acm_wb *cur; int err; unsigned long flags; + spin_lock_irqsave(&acm->write_lock, flags); + + cur = acm->putbuffer; if (!cur) /* nothing to do */ - return; + goto out; acm->putbuffer = NULL; err = usb_autopm_get_interface_async(acm->control); - spin_lock_irqsave(&acm->write_lock, flags); if (err < 0) { cur->use = 0; acm->putbuffer = cur; @@ -1765,6 +1768,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ .driver_info = SINGLE_RX_URB, /* firmware bug */ }, + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ + .driver_info = SINGLE_RX_URB, + }, { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 4aa5195db8ea..e02acfb1ca95 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -183,9 +183,9 @@ static int ulpi_of_register(struct ulpi *ulpi) /* Find a ulpi bus underneath the parent or the grandparent */ parent = ulpi->dev.parent; if (parent->of_node) - np = of_find_node_by_name(parent->of_node, "ulpi"); + np = of_get_child_by_name(parent->of_node, "ulpi"); else if (parent->parent && parent->parent->of_node) - np = of_find_node_by_name(parent->parent->of_node, "ulpi"); + np = of_get_child_by_name(parent->parent->of_node, "ulpi"); if (!np) return 0; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 883549ee946c..9e3355b97396 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, unsigned iad_num = 0; memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); + nintf = nintf_orig = config->desc.bNumInterfaces; + config->desc.bNumInterfaces = 0; // Adjusted later + if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE || config->desc.bLength > size) { @@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, buffer += config->desc.bLength; size -= config->desc.bLength; - nintf = nintf_orig = config->desc.bNumInterfaces; if (nintf > USB_MAXINTERFACES) { dev_warn(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", @@ -905,14 +907,25 @@ void usb_release_bos_descriptor(struct usb_device *dev) } } +static const __u8 bos_desc_len[256] = { + [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE, + [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE, + [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE, + [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1), + [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE, + [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE, +}; + /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; + struct usb_ssp_cap_descriptor *ssp_cap; unsigned char *buffer; - int length, total_len, num, i; + int length, total_len, num, i, ssac; + __u8 cap_type; int ret; bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); @@ -965,7 +978,13 @@ int usb_get_bos_descriptor(struct usb_device *dev) 
dev->bos->desc->bNumDeviceCaps = i; break; } + cap_type = cap->bDevCapabilityType; length = cap->bLength; + if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) { + dev->bos->desc->bNumDeviceCaps = i; + break; + } + total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { @@ -973,7 +992,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) continue; } - switch (cap->bDevCapabilityType) { + switch (cap_type) { case USB_CAP_TYPE_WIRELESS_USB: /* Wireless USB cap descriptor is handled by wusb */ break; @@ -986,8 +1005,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) (struct usb_ss_cap_descriptor *)buffer; break; case USB_SSP_CAP_TYPE: - dev->bos->ssp_cap = - (struct usb_ssp_cap_descriptor *)buffer; + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; + ssac = (le32_to_cpu(ssp_cap->bmAttributes) & + USB_SSP_SUBLINK_SPEED_ATTRIBS); + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) + dev->bos->ssp_cap = ssp_cap; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d..ab245352f102 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1455,14 +1455,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb int number_of_packets = 0; unsigned int stream_id = 0; void *buf; - - if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | - USBDEVFS_URB_SHORT_NOT_OK | + unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | - USBDEVFS_URB_NO_INTERRUPT)) - return -EINVAL; + USBDEVFS_URB_NO_INTERRUPT; + /* USBDEVFS_URB_ISO_ASAP is a special case */ + if (uurb->type == USBDEVFS_URB_TYPE_ISO) + mask |= USBDEVFS_URB_ISO_ASAP; + + if (uurb->flags & ~mask) + return -EINVAL; + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) @@ -1833,6 +1837,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) return 0; } +static void compute_isochronous_actual_length(struct urb *urb) +{ + unsigned int i; + + if (urb->number_of_packets > 0) { + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } +} + static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; @@ -1840,6 +1856,7 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; @@ -2008,6 +2025,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index e9ce6bb0b22d..43a74bb708a6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -3352,6 +3352,10 @@ static int wait_for_connected(struct usb_device *udev, while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; + if (!port_is_power_on(hub, *portstatus)) { + status = -ENODEV; + break; + } msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); @@ -4935,6 +4939,15 @@ static void hub_port_connect(struct usb_hub *hub, int 
port1, u16 portstatus, usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; + + /* When halfway through our retry count, power-cycle the port */ + if (i == (SET_CONFIG_TRIES / 2) - 1) { + dev_info(&port_dev->dev, "attempt power cycle\n"); + usb_hub_set_port_power(hdev, hub, port1, false); + msleep(2 * hub_power_on_good_delay(hub)); + usb_hub_set_port_power(hdev, hub, port1, true); + msleep(hub_power_on_good_delay(hub)); + } } if (hub->hdev->parent || !hcd->driver->port_handed_over || diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c index 1af877942110..2de0444c95a8 100644 --- a/drivers/usb/core/ledtrig-usbport.c +++ b/drivers/usb/core/ledtrig-usbport.c @@ -140,11 +140,17 @@ static bool usbport_trig_port_observed(struct usbport_trig_data *usbport_data, if (!led_np) return false; - /* Get node of port being added */ + /* + * Get node of port being added + * + * FIXME: This is really the device node of the connected device + */ port_np = usb_of_get_child_node(usb_dev->dev.of_node, port1); if (!port_np) return false; + of_node_put(port_np); + /* Amount of trigger sources for this LED */ count = of_count_phandle_with_args(led_np, "trigger-sources", "#trigger-source-cells"); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 371a07d874a3..dd29e6ec1c43 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -150,6 +150,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); + /* Linger a bit, prior to the next control message. */ + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) + msleep(200); + kfree(dr); return ret; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a6aaf2f193a4..64b4d820d572 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* HP v222w 16GB Mini USB Drive */ + { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -57,10 +60,11 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -151,6 +155,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, + + /* ELSA MicroLink 56K */ + { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, @@ -218,8 +228,15 @@ static const struct usb_device_id 
usb_quirk_list[] = { { USB_DEVICE(0x1a0a, 0x0200), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Corsair K70 RGB */ + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair Strafe RGB */ - { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + + /* Corsair K70 LUX */ + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 47903d510955..8b800e34407b 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ +static const int pipetypes[4] = { + PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT +}; + +/** + * usb_urb_ep_type_check - sanity check of endpoint in the given urb + * @urb: urb to be checked + * + * This performs a light-weight sanity check for the endpoint in the + * given urb. It returns 0 if the urb contains a valid endpoint, otherwise + * a negative error code. + */ +int usb_urb_ep_type_check(const struct urb *urb) +{ + const struct usb_host_endpoint *ep; + + ep = usb_pipe_endpoint(urb->dev, urb->pipe); + if (!ep) + return -EINVAL; + if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); + /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request @@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { - static int pipetypes[4] = { - PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT - }; int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; @@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) */ /* Check that the pipe's type matches the endpoint's type */ - if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) + if (usb_urb_ep_type_check(urb)) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index c2631145f404..9bd60ec83ac6 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -3277,7 +3277,6 @@ static void dwc2_conn_id_status_change(struct work_struct *work) dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); spin_lock_irqsave(&hsotg->lock, flags); - dwc2_hsotg_disconnect(hsotg); dwc2_hsotg_core_init_disconnected(hsotg, false); spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_hsotg_core_connect(hsotg); @@ -3296,8 +3295,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work) if (count > 250) dev_err(hsotg->dev, "Connection id status change timed out\n"); - hsotg->op_state = OTG_STATE_A_HOST; + spin_lock_irqsave(&hsotg->lock, flags); + dwc2_hsotg_disconnect(hsotg); + spin_unlock_irqrestore(&hsotg->lock, flags); + + hsotg->op_state = OTG_STATE_A_HOST; /* Initialize the Core for Host mode */ dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 03474d3575ab..8e3efdfe8c90 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -186,7 +186,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode) dwc->desired_dr_role = 
mode; spin_unlock_irqrestore(&dwc->lock, flags); - queue_work(system_power_efficient_wq, &dwc->drd_work); + queue_work(system_freezable_wq, &dwc->drd_work); } u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) @@ -1011,6 +1011,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) hird_threshold = 12; dwc->maximum_speed = usb_get_maximum_speed(dev); + if (dwc->maximum_speed > USB_SPEED_HIGH) + dwc->maximum_speed = USB_SPEED_HIGH; + dwc->dr_mode = usb_get_dr_mode(dev); dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); @@ -1125,7 +1128,7 @@ static void dwc3_check_params(struct dwc3 *dwc) /* fall through */ case USB_SPEED_UNKNOWN: /* default to superspeed */ - dwc->maximum_speed = USB_SPEED_SUPER; + dwc->maximum_speed = USB_SPEED_HIGH; /* * default to superspeed plus if we are capable. diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index ea910acb4bb0..fc28819253f7 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -166,13 +166,15 @@ #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) -#define DWC3_TXFIFOQ 1 -#define DWC3_RXFIFOQ 3 -#define DWC3_TXREQQ 5 -#define DWC3_RXREQQ 7 -#define DWC3_RXINFOQ 9 -#define DWC3_DESCFETCHQ 13 -#define DWC3_EVENTQ 15 +#define DWC3_TXFIFOQ 0 +#define DWC3_RXFIFOQ 1 +#define DWC3_TXREQQ 2 +#define DWC3_RXREQQ 3 +#define DWC3_RXINFOQ 4 +#define DWC3_PSTATQ 5 +#define DWC3_DESCFETCHQ 6 +#define DWC3_EVENTQ 7 +#define DWC3_AUXEVENTQ 8 /* Global RX Threshold Configuration Register */ #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index a26d1fde0f5e..fbfc09ebd2ec 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -57,8 +57,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count) clk = of_clk_get(np, i); if (IS_ERR(clk)) { - while (--i >= 0) + while (--i >= 0) { + clk_disable_unprepare(simple->clks[i]); clk_put(simple->clks[i]); + } return PTR_ERR(clk); } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 54343fbd85ee..c7c9229f6fdb 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -25,6 +25,7 @@ #include #include #include +#include #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce @@ -56,6 +57,7 @@ */ struct dwc3_pci { struct platform_device *dwc3; + struct platform_device *usb2_phy; struct pci_dev *pci; guid_t guid; @@ -124,6 +126,35 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) dwc->has_dsm_for_pm = true; } + if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M || + pdev->device == PCI_DEVICE_ID_INTEL_APL ) { + + if (IS_ERR_OR_NULL(usb_get_phy(USB_PHY_TYPE_USB2))) { + /* Unable to get phy, very likely intel_usb_dr_phy + * platform device not yet allocated. Possible + * race with another drivers. + */ + dwc->usb2_phy = platform_device_alloc( + "intel_usb_dr_phy", 0); + if (dwc->usb2_phy) { + dwc->usb2_phy->dev.parent = &pdev->dev; + ret = platform_device_add(dwc->usb2_phy); + if (ret) { + platform_device_put(dwc->usb2_phy); + return ret; + } + } else if (IS_ERR_OR_NULL(usb_get_phy(USB_PHY_TYPE_USB2))) { + /* In case of a race and another driver allocate + * intel_usb_dr_phy platform device earlier, check + * whether phy is already available. If not return error. 
+ */ + return -ENOMEM; + } + } + } + + if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) { struct gpio_desc *gpio; @@ -245,6 +276,8 @@ static void dwc3_pci_remove(struct pci_dev *pci) device_init_wakeup(&pci->dev, false); pm_runtime_get(&pci->dev); + if(dwc->usb2_phy) + platform_device_unregister(dwc->usb2_phy); platform_device_unregister(dwc->dwc3); } diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 75e6cb044eb2..89fe53c846ef 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -884,7 +884,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, trb++; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; trace_dwc3_complete_trb(ep0, trb); - ep0->trb_enqueue = 0; + + if (r->direction) + dwc->eps[1]->trb_enqueue = 0; + else + dwc->eps[0]->trb_enqueue = 0; + dwc->ep0_bounced = false; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f064f1549333..1802ab58d061 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -267,7 +268,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; - u32 timeout = 500; + u32 timeout = 1000; u32 reg; int cmd_status = 0; @@ -1853,12 +1854,66 @@ static void dwc3_gadget_setup_nump(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_DCFG, reg); } +static inline bool platform_is_bxtp(void) +{ +#ifdef CONFIG_X86_64 + if ((boot_cpu_data.x86_model == 0x5c) + && (boot_cpu_data.x86_stepping >= 0x8) + && (boot_cpu_data.x86_stepping <= 0xf)) + return true; +#endif + return false; +} + static int __dwc3_gadget_start(struct dwc3 *dwc) { struct dwc3_ep *dep; int ret = 0; u32 reg; + reg = dwc3_readl(dwc->regs, DWC3_DCFG); + reg &= ~(DWC3_DCFG_SPEED_MASK); + + /** + * WORKAROUND: DWC3 revision < 2.20a have an issue + * which would cause metastability state on Run/Stop + * bit if we try to force the IP to USB2-only mode. + * + * Because of that, we cannot configure the IP to any + * speed other than the SuperSpeed + * + * Refers to: + * + * STAR#9000525659: Clock Domain Crossing on DCTL in + * USB 2.0 Mode + */ + if (dwc->revision < DWC3_REVISION_220A) { + reg |= DWC3_DCFG_SUPERSPEED; + } else { + switch (dwc->maximum_speed) { + case USB_SPEED_LOW: + reg |= DWC3_DCFG_LOWSPEED; + break; + case USB_SPEED_FULL: + reg |= DWC3_DCFG_FULLSPEED; + break; + case USB_SPEED_HIGH: + reg |= DWC3_DCFG_HIGHSPEED; + break; + case USB_SPEED_SUPER_PLUS: + reg |= DWC3_DCFG_SUPERSPEED_PLUS; + break; + default: + dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", + dwc->maximum_speed); + /* fall through */ + case USB_SPEED_SUPER: + reg |= DWC3_DCFG_SUPERSPEED; + break; + } + } + dwc3_writel(dwc->regs, DWC3_DCFG, reg); + /* * Use IMOD if enabled via dwc->imod_interval. Otherwise, if * the core supports IMOD, disable it. 
@@ -1923,6 +1978,15 @@ static int dwc3_gadget_start(struct usb_gadget *g, int ret = 0; int irq; + if (dwc->usb2_phy) { + ret = otg_set_peripheral(dwc->usb2_phy->otg, &dwc->gadget); + if (ret == -ENOTSUPP) + dev_info(dwc->dev, "no OTG driver registered\n"); + else if (ret) + return ret; + } + + irq = dwc->irq_gadget; ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, IRQF_SHARED, "dwc3", dwc->ev_buf); @@ -1971,6 +2035,10 @@ static int dwc3_gadget_stop(struct usb_gadget *g) unsigned long flags; int epnum; + + if (dwc->usb2_phy) + otg_set_peripheral(dwc->usb2_phy->otg, NULL); + spin_lock_irqsave(&dwc->lock, flags); if (pm_runtime_suspended(dwc->dev)) @@ -2039,10 +2107,24 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g, reg |= DWC3_DCFG_HIGHSPEED; break; case USB_SPEED_SUPER: - reg |= DWC3_DCFG_SUPERSPEED; + /* + * WORKAROUND: BXTP platform USB3.0 port SS fail, + * We switch SS to HS to enable USB3.0. + */ + if (platform_is_bxtp()) + reg |= DWC3_DCFG_HIGHSPEED; + else + reg |= DWC3_DCFG_SUPERSPEED; break; case USB_SPEED_SUPER_PLUS: - reg |= DWC3_DCFG_SUPERSPEED_PLUS; + /* + * WORKAROUND: BXTP platform USB3.0 port SS fail, + * We switch SS to HS to enable USB3.0. + */ + if (platform_is_bxtp()) + reg |= DWC3_DCFG_HIGHSPEED; + else + reg |= DWC3_DCFG_SUPERSPEED_PLUS; break; default: dev_err(dwc->dev, "invalid speed (%d)\n", speed); @@ -2774,6 +2856,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) break; } + dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; + /* Enable USB2 LPM Capability */ if ((dwc->revision > DWC3_REVISION_194A) && diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h index 2df0f6e613fe..a516cab0bf4a 100644 --- a/drivers/usb/early/xhci-dbc.h +++ b/drivers/usb/early/xhci-dbc.h @@ -90,8 +90,8 @@ struct xdbc_context { #define XDBC_INFO_CONTEXT_SIZE 48 #define XDBC_MAX_STRING_LENGTH 64 -#define XDBC_STRING_MANUFACTURER "Linux" -#define XDBC_STRING_PRODUCT "Remote GDB" +#define XDBC_STRING_MANUFACTURER "Linux Foundation" +#define XDBC_STRING_PRODUCT "Linux USB GDB Target" #define XDBC_STRING_SERIAL "0001" struct xdbc_strings { @@ -103,7 +103,7 @@ struct xdbc_strings { #define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */ #define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */ -#define XDBC_PRODUCT_ID 0x0004 /* __le16 idProduct; device 0004 */ +#define XDBC_PRODUCT_ID 0x0011 /* __le16 idProduct; device 0011 */ #define XDBC_DEVICE_REV 0x0010 /* 0.10 */ /* diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 31cce7805eb2..cc0dceea40f5 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -215,6 +215,22 @@ config USB_F_PRINTER config USB_F_TCM tristate +config USB_F_DVCTRACE + tristate + select DVC_TRACE_BUS + +config USB_F_MTP + tristate + +config USB_F_PTP + tristate + +config USB_F_AUDIO_SRC + tristate + +config USB_F_ACC + tristate + # this first set of drivers all depend on bulk-capable hardware. config USB_CONFIGFS @@ -368,6 +384,44 @@ config USB_CONFIGFS_F_FS implemented in kernel space (for instance Ethernet, serial or mass storage) and other are implemented in user space. 
+config USB_CONFIGFS_F_MTP + boolean "MTP gadget" + depends on USB_CONFIGFS + select USB_F_MTP + help + USB gadget MTP support + +config USB_CONFIGFS_F_PTP + boolean "PTP gadget" + depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP + select USB_F_PTP + help + USB gadget PTP support + +config USB_CONFIGFS_F_ACC + boolean "Accessory gadget" + depends on USB_CONFIGFS + select USB_F_ACC + help + USB gadget Accessory support + +config USB_CONFIGFS_F_AUDIO_SRC + boolean "Audio Source gadget" + depends on USB_CONFIGFS && USB_CONFIGFS_F_ACC + depends on SND + select SND_PCM + select USB_F_AUDIO_SRC + help + USB gadget Audio Source support + +config USB_CONFIGFS_UEVENT + boolean "Uevent notification of Gadget state" + depends on USB_CONFIGFS + help + Enable uevent notifications to userspace when the gadget + state changes. The gadget can be in any of the following + three states: "CONNECTED/DISCONNECTED/CONFIGURED" + config USB_CONFIGFS_F_UAC1 bool "Audio Class 1.0" depends on USB_CONFIGFS @@ -508,6 +562,13 @@ choice controller, and the relevant drivers for each function declared by the device. +config USB_CONFIGFS_F_DVCTRACE + bool "DvC Trace gadget" + depends on USB_CONFIGFS + select USB_F_DVCTRACE + help + USB gadget DvC Trace support + source "drivers/usb/gadget/legacy/Kconfig" endchoice diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5d061b3d8224..60d198db0893 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep) { - struct usb_composite_dev *cdev = get_gadget_data(g); struct usb_endpoint_descriptor *chosen_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; @@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g, _ep->maxburst = comp_desc->bMaxBurst + 1; break; default: - if (comp_desc->bMaxBurst != 0) + if (comp_desc->bMaxBurst != 0) { + struct usb_composite_dev *cdev; + + cdev = get_gadget_data(g); ERROR(cdev, "ep0 bMaxBurst must be 0\n"); + } _ep->maxburst = 1; break; } @@ -2000,6 +2003,12 @@ void composite_disconnect(struct usb_gadget *gadget) struct usb_composite_dev *cdev = get_gadget_data(gadget); unsigned long flags; + if (cdev == NULL) { + WARN(1, "%s: Calling disconnect on a Gadget that is \ + not connected\n", __func__); + return; + } + /* REVISIT: should we have config and device level * disconnect callbacks? 
*/ diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index aeb9f3c40521..bf2d0ce80c99 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -9,6 +9,31 @@ #include "u_f.h" #include "u_os_desc.h" +#ifdef CONFIG_USB_CONFIGFS_UEVENT +#include +#include +#include + +#ifdef CONFIG_USB_CONFIGFS_F_ACC +extern int acc_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl); +void acc_disconnect(void); +#endif +static struct class *android_class; +static struct device *android_device; +static int index; + +struct device *create_function_device(char *name) +{ + if (android_device && !IS_ERR(android_device)) + return device_create(android_class, android_device, + MKDEV(0, index++), NULL, name); + else + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(create_function_device); +#endif + int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev) { @@ -60,6 +85,12 @@ struct gadget_info { bool use_os_desc; char b_vendor_code; char qw_sign[OS_STRING_QW_SIGN_LEN]; +#ifdef CONFIG_USB_CONFIGFS_UEVENT + bool connected; + bool sw_connected; + struct work_struct work; + struct device *dev; +#endif }; static inline struct gadget_info *to_gadget_info(struct config_item *item) @@ -265,7 +296,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, mutex_lock(&gi->lock); - if (!strlen(name)) { + if (!strlen(name) || strcmp(name, "none") == 0) { ret = unregister_gadget(gi); if (ret) goto err; @@ -1371,6 +1402,60 @@ static int configfs_composite_bind(struct usb_gadget *gadget, return ret; } +#ifdef CONFIG_USB_CONFIGFS_UEVENT +static void android_work(struct work_struct *data) +{ + struct gadget_info *gi = container_of(data, struct gadget_info, work); + struct usb_composite_dev *cdev = &gi->cdev; + char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL }; + char *connected[2] = { "USB_STATE=CONNECTED", NULL }; + char *configured[2] = { "USB_STATE=CONFIGURED", NULL }; + /* 0-connected 1-configured 2-disconnected*/ + bool status[3] = { false, false, false }; + unsigned long flags; + bool uevent_sent = false; + + spin_lock_irqsave(&cdev->lock, flags); + if (cdev->config) + status[1] = true; + + if (gi->connected != gi->sw_connected) { + if (gi->connected) + status[0] = true; + else + status[2] = true; + gi->sw_connected = gi->connected; + } + spin_unlock_irqrestore(&cdev->lock, flags); + + if (status[0]) { + kobject_uevent_env(&android_device->kobj, + KOBJ_CHANGE, connected); + pr_info("%s: sent uevent %s\n", __func__, connected[0]); + uevent_sent = true; + } + + if (status[1]) { + kobject_uevent_env(&android_device->kobj, + KOBJ_CHANGE, configured); + pr_info("%s: sent uevent %s\n", __func__, configured[0]); + uevent_sent = true; + } + + if (status[2]) { + kobject_uevent_env(&android_device->kobj, + KOBJ_CHANGE, disconnected); + pr_info("%s: sent uevent %s\n", __func__, disconnected[0]); + uevent_sent = true; + } + + if (!uevent_sent) { + pr_info("%s: did not send uevent (%d %d %p)\n", __func__, + gi->connected, gi->sw_connected, cdev->config); + } +} +#endif + static void configfs_composite_unbind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; @@ -1390,14 +1475,91 @@ static void configfs_composite_unbind(struct usb_gadget *gadget) set_gadget_data(gadget, NULL); } +#ifdef CONFIG_USB_CONFIGFS_UEVENT +static int android_setup(struct usb_gadget *gadget, + const struct usb_ctrlrequest *c) +{ + struct usb_composite_dev *cdev = get_gadget_data(gadget); + unsigned long flags; + struct 
gadget_info *gi = container_of(cdev, struct gadget_info, cdev); + int value = -EOPNOTSUPP; + struct usb_function_instance *fi; + + spin_lock_irqsave(&cdev->lock, flags); + if (!gi->connected) { + gi->connected = 1; + schedule_work(&gi->work); + } + spin_unlock_irqrestore(&cdev->lock, flags); + list_for_each_entry(fi, &gi->available_func, cfs_list) { + if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) { + value = fi->f->setup(fi->f, c); + if (value >= 0) + break; + } + } + +#ifdef CONFIG_USB_CONFIGFS_F_ACC + if (value < 0) + value = acc_ctrlrequest(cdev, c); +#endif + + if (value < 0) + value = composite_setup(gadget, c); + + spin_lock_irqsave(&cdev->lock, flags); + if (c->bRequest == USB_REQ_SET_CONFIGURATION && + cdev->config) { + schedule_work(&gi->work); + } + spin_unlock_irqrestore(&cdev->lock, flags); + + return value; +} + +static void android_disconnect(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev = get_gadget_data(gadget); + struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev); + + /* FIXME: There's a race between usb_gadget_udc_stop() which is likely + * to set the gadget driver to NULL in the udc driver and this drivers + * gadget disconnect fn which likely checks for the gadget driver to + * be a null ptr. It happens that unbind (doing set_gadget_data(NULL)) + * is called before the gadget driver is set to NULL and the udc driver + * calls disconnect fn which results in cdev being a null ptr. + */ + if (cdev == NULL) { + WARN(1, "%s: gadget driver already disconnected\n", __func__); + return; + } + + /* accessory HID support can be active while the + accessory function is not actually enabled, + so we need to inform it when we are disconnected. + */ + +#ifdef CONFIG_USB_CONFIGFS_F_ACC + acc_disconnect(); +#endif + gi->connected = 0; + schedule_work(&gi->work); + composite_disconnect(gadget); +} +#endif + static const struct usb_gadget_driver configfs_driver_template = { .bind = configfs_composite_bind, .unbind = configfs_composite_unbind, - +#ifdef CONFIG_USB_CONFIGFS_UEVENT + .setup = android_setup, + .reset = android_disconnect, + .disconnect = android_disconnect, +#else .setup = composite_setup, .reset = composite_disconnect, .disconnect = composite_disconnect, - +#endif .suspend = composite_suspend, .resume = composite_resume, @@ -1409,6 +1571,89 @@ static const struct usb_gadget_driver configfs_driver_template = { .match_existing_only = 1, }; +#ifdef CONFIG_USB_CONFIGFS_UEVENT +static ssize_t state_show(struct device *pdev, struct device_attribute *attr, + char *buf) +{ + struct gadget_info *dev = dev_get_drvdata(pdev); + struct usb_composite_dev *cdev; + char *state = "DISCONNECTED"; + unsigned long flags; + + if (!dev) + goto out; + + cdev = &dev->cdev; + + if (!cdev) + goto out; + + spin_lock_irqsave(&cdev->lock, flags); + if (cdev->config) + state = "CONFIGURED"; + else if (dev->connected) + state = "CONNECTED"; + spin_unlock_irqrestore(&cdev->lock, flags); +out: + return sprintf(buf, "%s\n", state); +} + +static DEVICE_ATTR(state, S_IRUGO, state_show, NULL); + +static struct device_attribute *android_usb_attributes[] = { + &dev_attr_state, + NULL +}; + +static int android_device_create(struct gadget_info *gi) +{ + struct device_attribute **attrs; + struct device_attribute *attr; + + INIT_WORK(&gi->work, android_work); + android_device = device_create(android_class, NULL, + MKDEV(0, 0), NULL, "android0"); + if (IS_ERR(android_device)) + return PTR_ERR(android_device); + + dev_set_drvdata(android_device, gi); + + attrs = 
android_usb_attributes; + while ((attr = *attrs++)) { + int err; + + err = device_create_file(android_device, attr); + if (err) { + device_destroy(android_device->class, + android_device->devt); + return err; + } + } + + return 0; +} + +static void android_device_destroy(void) +{ + struct device_attribute **attrs; + struct device_attribute *attr; + + attrs = android_usb_attributes; + while ((attr = *attrs++)) + device_remove_file(android_device, attr); + device_destroy(android_device->class, android_device->devt); +} +#else +static inline int android_device_create(struct gadget_info *gi) +{ + return 0; +} + +static inline void android_device_destroy(void) +{ +} +#endif + static struct config_group *gadgets_make( struct config_group *group, const char *name) @@ -1460,7 +1705,11 @@ static struct config_group *gadgets_make( if (!gi->composite.gadget_driver.function) goto err; + if (android_device_create(gi) < 0) + goto err; + return &gi->group; + err: kfree(gi); return ERR_PTR(-ENOMEM); @@ -1469,6 +1718,7 @@ static struct config_group *gadgets_make( static void gadgets_drop(struct config_group *group, struct config_item *item) { config_item_put(item); + android_device_destroy(); } static struct configfs_group_operations gadgets_ops = { @@ -1508,6 +1758,13 @@ static int __init gadget_cfs_init(void) config_group_init(&gadget_subsys.su_group); ret = configfs_register_subsystem(&gadget_subsys); + +#ifdef CONFIG_USB_CONFIGFS_UEVENT + android_class = class_create(THIS_MODULE, "android_usb"); + if (IS_ERR(android_class)) + return PTR_ERR(android_class); +#endif + return ret; } module_init(gadget_cfs_init); @@ -1515,5 +1772,10 @@ module_init(gadget_cfs_init); static void __exit gadget_cfs_exit(void) { configfs_unregister_subsystem(&gadget_subsys); +#ifdef CONFIG_USB_CONFIGFS_UEVENT + if (!IS_ERR(android_class)) + class_destroy(android_class); +#endif + } module_exit(gadget_cfs_exit); diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 5d3a6cf02218..c91db3d592f5 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -50,3 +50,12 @@ usb_f_printer-y := f_printer.o obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o usb_f_tcm-y := f_tcm.o obj-$(CONFIG_USB_F_TCM) += usb_f_tcm.o +obj-$(CONFIG_USB_F_DVCTRACE) += f_dvctrace.o +usb_f_mtp-y := f_mtp.o +obj-$(CONFIG_USB_F_MTP) += usb_f_mtp.o +usb_f_ptp-y := f_ptp.o +obj-$(CONFIG_USB_F_PTP) += usb_f_ptp.o +usb_f_audio_source-y := f_audio_source.o +obj-$(CONFIG_USB_F_AUDIO_SRC) += usb_f_audio_source.o +usb_f_accessory-y := f_accessory.o +obj-$(CONFIG_USB_F_ACC) += usb_f_accessory.o diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c new file mode 100644 index 000000000000..7aa2656a2328 --- /dev/null +++ b/drivers/usb/gadget/function/f_accessory.c @@ -0,0 +1,1352 @@ +/* + * Gadget Function Driver for Android USB accessories + * + * Copyright (C) 2011 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#define MAX_INST_NAME_LEN 40 +#define BULK_BUFFER_SIZE 16384 +#define ACC_STRING_SIZE 256 + +#define PROTOCOL_VERSION 2 + +/* String IDs */ +#define INTERFACE_STRING_INDEX 0 + +/* number of tx and rx requests to allocate */ +#define TX_REQ_MAX 4 +#define RX_REQ_MAX 2 + +struct acc_hid_dev { + struct list_head list; + struct hid_device *hid; + struct acc_dev *dev; + /* accessory defined ID */ + int id; + /* HID report descriptor */ + u8 *report_desc; + /* length of HID report descriptor */ + int report_desc_len; + /* number of bytes of report_desc we have received so far */ + int report_desc_offset; +}; + +struct acc_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + + /* online indicates state of function_set_alt & function_unbind + * set to 1 when we connect + */ + int online:1; + + /* disconnected indicates state of open & release + * Set to 1 when we disconnect. + * Not cleared until our file is closed. + */ + int disconnected:1; + + /* strings sent by the host */ + char manufacturer[ACC_STRING_SIZE]; + char model[ACC_STRING_SIZE]; + char description[ACC_STRING_SIZE]; + char version[ACC_STRING_SIZE]; + char uri[ACC_STRING_SIZE]; + char serial[ACC_STRING_SIZE]; + + /* for acc_complete_set_string */ + int string_index; + + /* set to 1 if we have a pending start request */ + int start_requested; + + int audio_mode; + + /* synchronize access to our device file */ + atomic_t open_excl; + + struct list_head tx_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + struct usb_request *rx_req[RX_REQ_MAX]; + int rx_done; + + /* delayed work for handling ACCESSORY_START */ + struct delayed_work start_work; + + /* worker for registering and unregistering hid devices */ + struct work_struct hid_work; + + /* list of active HID devices */ + struct list_head hid_list; + + /* list of new HID devices to register */ + struct list_head new_hid_list; + + /* list of dead HID devices to unregister */ + struct list_head dead_hid_list; +}; + +static struct usb_interface_descriptor acc_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, + .bInterfaceProtocol = 0, +}; + +static struct usb_endpoint_descriptor acc_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acc_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acc_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor acc_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + 
.bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_acc_descs[] = { + (struct usb_descriptor_header *) &acc_interface_desc, + (struct usb_descriptor_header *) &acc_fullspeed_in_desc, + (struct usb_descriptor_header *) &acc_fullspeed_out_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_acc_descs[] = { + (struct usb_descriptor_header *) &acc_interface_desc, + (struct usb_descriptor_header *) &acc_highspeed_in_desc, + (struct usb_descriptor_header *) &acc_highspeed_out_desc, + NULL, +}; + +static struct usb_string acc_string_defs[] = { + [INTERFACE_STRING_INDEX].s = "Android Accessory Interface", + { }, /* end of list */ +}; + +static struct usb_gadget_strings acc_string_table = { + .language = 0x0409, /* en-US */ + .strings = acc_string_defs, +}; + +static struct usb_gadget_strings *acc_strings[] = { + &acc_string_table, + NULL, +}; + +/* temporary variable used between acc_open() and acc_gadget_bind() */ +static struct acc_dev *_acc_dev; + +struct acc_instance { + struct usb_function_instance func_inst; + const char *name; +}; + +static inline struct acc_dev *func_to_dev(struct usb_function *f) +{ + return container_of(f, struct acc_dev, function); +} + +static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void acc_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +/* add a request to the tail of a list */ +static void req_put(struct acc_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void acc_set_disconnected(struct acc_dev *dev) +{ + dev->disconnected = 1; +} + +static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = _acc_dev; + + if (req->status == -ESHUTDOWN) { + pr_debug("acc_complete_in set disconnected"); + acc_set_disconnected(dev); + } + + req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = _acc_dev; + + dev->rx_done = 1; + if (req->status == -ESHUTDOWN) { + pr_debug("acc_complete_out set disconnected"); + acc_set_disconnected(dev); + } + + wake_up(&dev->read_wq); +} + +static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = ep->driver_data; + char *string_dest = NULL; + int length = req->actual; + + if (req->status != 0) { + pr_err("acc_complete_set_string, err %d\n", req->status); + return; + } + + switch (dev->string_index) { + case ACCESSORY_STRING_MANUFACTURER: + string_dest = dev->manufacturer; + break; + case 
ACCESSORY_STRING_MODEL: + string_dest = dev->model; + break; + case ACCESSORY_STRING_DESCRIPTION: + string_dest = dev->description; + break; + case ACCESSORY_STRING_VERSION: + string_dest = dev->version; + break; + case ACCESSORY_STRING_URI: + string_dest = dev->uri; + break; + case ACCESSORY_STRING_SERIAL: + string_dest = dev->serial; + break; + } + if (string_dest) { + unsigned long flags; + + if (length >= ACC_STRING_SIZE) + length = ACC_STRING_SIZE - 1; + + spin_lock_irqsave(&dev->lock, flags); + memcpy(string_dest, req->buf, length); + /* ensure zero termination */ + string_dest[length] = 0; + spin_unlock_irqrestore(&dev->lock, flags); + } else { + pr_err("unknown accessory string index %d\n", + dev->string_index); + } +} + +static void acc_complete_set_hid_report_desc(struct usb_ep *ep, + struct usb_request *req) +{ + struct acc_hid_dev *hid = req->context; + struct acc_dev *dev = hid->dev; + int length = req->actual; + + if (req->status != 0) { + pr_err("acc_complete_set_hid_report_desc, err %d\n", + req->status); + return; + } + + memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length); + hid->report_desc_offset += length; + if (hid->report_desc_offset == hid->report_desc_len) { + /* After we have received the entire report descriptor + * we schedule work to initialize the HID device + */ + schedule_work(&dev->hid_work); + } +} + +static void acc_complete_send_hid_event(struct usb_ep *ep, + struct usb_request *req) +{ + struct acc_hid_dev *hid = req->context; + int length = req->actual; + + if (req->status != 0) { + pr_err("acc_complete_send_hid_event, err %d\n", req->status); + return; + } + + hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1); +} + +static int acc_hid_parse(struct hid_device *hid) +{ + struct acc_hid_dev *hdev = hid->driver_data; + + hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len); + return 0; +} + +static int acc_hid_start(struct hid_device *hid) +{ + return 0; +} + +static void acc_hid_stop(struct hid_device *hid) +{ +} + +static int acc_hid_open(struct hid_device *hid) +{ + return 0; +} + +static void acc_hid_close(struct hid_device *hid) +{ +} + +static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, int reqtype) +{ + return 0; +} + +static struct hid_ll_driver acc_hid_ll_driver = { + .parse = acc_hid_parse, + .start = acc_hid_start, + .stop = acc_hid_stop, + .open = acc_hid_open, + .close = acc_hid_close, + .raw_request = acc_hid_raw_request, +}; + +static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev, + int id, int desc_len) +{ + struct acc_hid_dev *hdev; + + hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC); + if (!hdev) + return NULL; + hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC); + if (!hdev->report_desc) { + kfree(hdev); + return NULL; + } + hdev->dev = dev; + hdev->id = id; + hdev->report_desc_len = desc_len; + + return hdev; +} + +static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id) +{ + struct acc_hid_dev *hid; + + list_for_each_entry(hid, list, list) { + if (hid->id == id) + return hid; + } + return NULL; +} + +static int acc_register_hid(struct acc_dev *dev, int id, int desc_length) +{ + struct acc_hid_dev *hid; + unsigned long flags; + + /* report descriptor length must be > 0 */ + if (desc_length <= 0) + return -EINVAL; + + spin_lock_irqsave(&dev->lock, flags); + /* replace HID if one already exists with this ID */ + hid = acc_hid_get(&dev->hid_list, id); + if (!hid) + hid = acc_hid_get(&dev->new_hid_list, 
id); + if (hid) + list_move(&hid->list, &dev->dead_hid_list); + + hid = acc_hid_new(dev, id, desc_length); + if (!hid) { + spin_unlock_irqrestore(&dev->lock, flags); + return -ENOMEM; + } + + list_add(&hid->list, &dev->new_hid_list); + spin_unlock_irqrestore(&dev->lock, flags); + + /* schedule work to register the HID device */ + schedule_work(&dev->hid_work); + return 0; +} + +static int acc_unregister_hid(struct acc_dev *dev, int id) +{ + struct acc_hid_dev *hid; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + hid = acc_hid_get(&dev->hid_list, id); + if (!hid) + hid = acc_hid_get(&dev->new_hid_list, id); + if (!hid) { + spin_unlock_irqrestore(&dev->lock, flags); + return -EINVAL; + } + + list_move(&hid->list, &dev->dead_hid_list); + spin_unlock_irqrestore(&dev->lock, flags); + + schedule_work(&dev->hid_work); + return 0; +} + +static int create_bulk_endpoints(struct acc_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + /* now allocate requests for our endpoints */ + for (i = 0; i < TX_REQ_MAX; i++) { + req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = acc_complete_in; + req_put(dev, &dev->tx_idle, req); + } + for (i = 0; i < RX_REQ_MAX; i++) { + req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = acc_complete_out; + dev->rx_req[i] = req; + } + + return 0; + +fail: + pr_err("acc_bind() could not allocate requests\n"); + while ((req = req_get(dev, &dev->tx_idle))) + acc_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + acc_request_free(dev->rx_req[i], dev->ep_out); + return -1; +} + +static ssize_t acc_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct acc_dev *dev = fp->private_data; + struct usb_request *req; + ssize_t r = count; + unsigned xfer; + int ret = 0; + + pr_debug("acc_read(%zu)\n", count); + + if (dev->disconnected) { + pr_debug("acc_read disconnected"); + return -ENODEV; + } + + if (count > BULK_BUFFER_SIZE) + count = BULK_BUFFER_SIZE; + + /* we will block until we're online */ + pr_debug("acc_read: waiting for online\n"); + ret = wait_event_interruptible(dev->read_wq, dev->online); + if (ret < 0) { + r = ret; + goto done; + } + + if (dev->rx_done) { + // last req cancelled. try to get it. 
+ req = dev->rx_req[0]; + goto copy_data; + } + +requeue_req: + /* queue a request */ + req = dev->rx_req[0]; + req->length = count; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + goto done; + } else { + pr_debug("rx %p queue\n", req); + } + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + if (ret < 0) { + r = ret; + ret = usb_ep_dequeue(dev->ep_out, req); + if (ret != 0) { + // cancel failed. There may be data already received; + // it will be retrieved in the next read. + pr_debug("acc_read: cancelling failed %d", ret); + } + goto done; + } + +copy_data: + dev->rx_done = 0; + if (dev->online) { + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) + goto requeue_req; + + pr_debug("rx %p %u\n", req, req->actual); + xfer = (req->actual < count) ? req->actual : count; + r = xfer; + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + } else + r = -EIO; + +done: + pr_debug("acc_read returning %zd\n", r); + return r; +} + +static ssize_t acc_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct acc_dev *dev = fp->private_data; + struct usb_request *req = 0; + ssize_t r = count; + unsigned xfer; + int ret; + + pr_debug("acc_write(%zu)\n", count); + + if (!dev->online || dev->disconnected) { + pr_debug("acc_write disconnected or not online"); + return -ENODEV; + } + + while (count > 0) { + if (!dev->online) { + pr_debug("acc_write dev->error\n"); + r = -EIO; + break; + } + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + ((req = req_get(dev, &dev->tx_idle)) || !dev->online)); + if (!req) { + r = ret; + break; + } + + if (count > BULK_BUFFER_SIZE) { + xfer = BULK_BUFFER_SIZE; + /* There will be more TX requests, so do not send a ZLP yet. */ + req->zero = 0; + } else { + xfer = count; + /* If the data length is a multiple of the + * maxpacket size then send a zero length packet (ZLP).
+ */ + req->zero = ((xfer % dev->ep_in->maxpacket) == 0); + } + if (copy_from_user(req->buf, buf, xfer)) { + r = -EFAULT; + break; + } + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + pr_debug("acc_write: xfer error %d\n", ret); + r = -EIO; + break; + } + + buf += xfer; + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + req_put(dev, &dev->tx_idle, req); + + pr_debug("acc_write returning %zd\n", r); + return r; +} + +static long acc_ioctl(struct file *fp, unsigned code, unsigned long value) +{ + struct acc_dev *dev = fp->private_data; + char *src = NULL; + int ret; + + switch (code) { + case ACCESSORY_GET_STRING_MANUFACTURER: + src = dev->manufacturer; + break; + case ACCESSORY_GET_STRING_MODEL: + src = dev->model; + break; + case ACCESSORY_GET_STRING_DESCRIPTION: + src = dev->description; + break; + case ACCESSORY_GET_STRING_VERSION: + src = dev->version; + break; + case ACCESSORY_GET_STRING_URI: + src = dev->uri; + break; + case ACCESSORY_GET_STRING_SERIAL: + src = dev->serial; + break; + case ACCESSORY_IS_START_REQUESTED: + return dev->start_requested; + case ACCESSORY_GET_AUDIO_MODE: + return dev->audio_mode; + } + if (!src) + return -EINVAL; + + ret = strlen(src) + 1; + if (copy_to_user((void __user *)value, src, ret)) + ret = -EFAULT; + return ret; +} + +static int acc_open(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "acc_open\n"); + if (atomic_xchg(&_acc_dev->open_excl, 1)) + return -EBUSY; + + _acc_dev->disconnected = 0; + fp->private_data = _acc_dev; + return 0; +} + +static int acc_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "acc_release\n"); + + WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0)); + /* indicate that we are disconnected + * still could be online so don't touch online flag + */ + _acc_dev->disconnected = 1; + return 0; +} + +/* file operations for /dev/usb_accessory */ +static const struct file_operations acc_fops = { + .owner = THIS_MODULE, + .read = acc_read, + .write = acc_write, + .unlocked_ioctl = acc_ioctl, + .open = acc_open, + .release = acc_release, +}; + +static int acc_hid_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int ret; + + ret = hid_parse(hdev); + if (ret) + return ret; + return hid_hw_start(hdev, HID_CONNECT_DEFAULT); +} + +static struct miscdevice acc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "usb_accessory", + .fops = &acc_fops, +}; + +static const struct hid_device_id acc_hid_table[] = { + { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) }, + { } +}; + +static struct hid_driver acc_hid_driver = { + .name = "USB accessory", + .id_table = acc_hid_table, + .probe = acc_hid_probe, +}; + +static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req) +{ + /* + * Default no-op function when nothing needs to be done for the + * setup request + */ +} + +int acc_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + struct acc_dev *dev = _acc_dev; + int value = -EOPNOTSUPP; + struct acc_hid_dev *hid; + int offset; + u8 b_requestType = ctrl->bRequestType; + u8 b_request = ctrl->bRequest; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + unsigned long flags; + +/* + printk(KERN_INFO "acc_ctrlrequest " + "%02x.%02x v%04x i%04x l%u\n", + b_requestType, b_request, + w_value, w_index, w_length); +*/ + + if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) { + if (b_request 
== ACCESSORY_START) { + dev->start_requested = 1; + schedule_delayed_work( + &dev->start_work, msecs_to_jiffies(10)); + value = 0; + cdev->req->complete = acc_complete_setup_noop; + } else if (b_request == ACCESSORY_SEND_STRING) { + dev->string_index = w_index; + cdev->gadget->ep0->driver_data = dev; + cdev->req->complete = acc_complete_set_string; + value = w_length; + } else if (b_request == ACCESSORY_SET_AUDIO_MODE && + w_index == 0 && w_length == 0) { + dev->audio_mode = w_value; + cdev->req->complete = acc_complete_setup_noop; + value = 0; + } else if (b_request == ACCESSORY_REGISTER_HID) { + cdev->req->complete = acc_complete_setup_noop; + value = acc_register_hid(dev, w_value, w_index); + } else if (b_request == ACCESSORY_UNREGISTER_HID) { + cdev->req->complete = acc_complete_setup_noop; + value = acc_unregister_hid(dev, w_value); + } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) { + spin_lock_irqsave(&dev->lock, flags); + hid = acc_hid_get(&dev->new_hid_list, w_value); + spin_unlock_irqrestore(&dev->lock, flags); + if (!hid) { + value = -EINVAL; + goto err; + } + offset = w_index; + if (offset != hid->report_desc_offset + || offset + w_length > hid->report_desc_len) { + value = -EINVAL; + goto err; + } + cdev->req->context = hid; + cdev->req->complete = acc_complete_set_hid_report_desc; + value = w_length; + } else if (b_request == ACCESSORY_SEND_HID_EVENT) { + spin_lock_irqsave(&dev->lock, flags); + hid = acc_hid_get(&dev->hid_list, w_value); + spin_unlock_irqrestore(&dev->lock, flags); + if (!hid) { + value = -EINVAL; + goto err; + } + cdev->req->context = hid; + cdev->req->complete = acc_complete_send_hid_event; + value = w_length; + } + } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) { + if (b_request == ACCESSORY_GET_PROTOCOL) { + *((u16 *)cdev->req->buf) = PROTOCOL_VERSION; + value = sizeof(u16); + cdev->req->complete = acc_complete_setup_noop; + /* clear any string left over from a previous session */ + memset(dev->manufacturer, 0, sizeof(dev->manufacturer)); + memset(dev->model, 0, sizeof(dev->model)); + memset(dev->description, 0, sizeof(dev->description)); + memset(dev->version, 0, sizeof(dev->version)); + memset(dev->uri, 0, sizeof(dev->uri)); + memset(dev->serial, 0, sizeof(dev->serial)); + dev->start_requested = 0; + dev->audio_mode = 0; + } + } + + if (value >= 0) { + cdev->req->zero = 0; + cdev->req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "%s setup response queue error\n", + __func__); + } + +err: + if (value == -EOPNOTSUPP) + VDBG(cdev, + "unknown class-specific control req " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + return value; +} +EXPORT_SYMBOL_GPL(acc_ctrlrequest); + +static int +__acc_function_bind(struct usb_configuration *c, + struct usb_function *f, bool configfs) +{ + struct usb_composite_dev *cdev = c->cdev; + struct acc_dev *dev = func_to_dev(f); + int id; + int ret; + + DBG(cdev, "acc_function_bind dev: %p\n", dev); + + if (configfs) { + if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) { + ret = usb_string_id(c->cdev); + if (ret < 0) + return ret; + acc_string_defs[INTERFACE_STRING_INDEX].id = ret; + acc_interface_desc.iInterface = ret; + } + dev->cdev = c->cdev; + } + ret = hid_register_driver(&acc_hid_driver); + if (ret) + return ret; + + dev->start_requested = 0; + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + acc_interface_desc.bInterfaceNumber = 
id; + + /* allocate endpoints */ + ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc, + &acc_fullspeed_out_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + acc_highspeed_in_desc.bEndpointAddress = + acc_fullspeed_in_desc.bEndpointAddress; + acc_highspeed_out_desc.bEndpointAddress = + acc_fullspeed_out_desc.bEndpointAddress; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static int +acc_function_bind_configfs(struct usb_configuration *c, + struct usb_function *f) { + return __acc_function_bind(c, f, true); +} + +static void +kill_all_hid_devices(struct acc_dev *dev) +{ + struct acc_hid_dev *hid; + struct list_head *entry, *temp; + unsigned long flags; + + /* do nothing if usb accessory device doesn't exist */ + if (!dev) + return; + + spin_lock_irqsave(&dev->lock, flags); + list_for_each_safe(entry, temp, &dev->hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + list_del(&hid->list); + list_add(&hid->list, &dev->dead_hid_list); + } + list_for_each_safe(entry, temp, &dev->new_hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + list_del(&hid->list); + list_add(&hid->list, &dev->dead_hid_list); + } + spin_unlock_irqrestore(&dev->lock, flags); + + schedule_work(&dev->hid_work); +} + +static void +acc_hid_unbind(struct acc_dev *dev) +{ + hid_unregister_driver(&acc_hid_driver); + kill_all_hid_devices(dev); +} + +static void +acc_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_request *req; + int i; + + dev->online = 0; /* clear online flag */ + wake_up(&dev->read_wq); /* unblock reads on closure */ + wake_up(&dev->write_wq); /* likewise for writes */ + + while ((req = req_get(dev, &dev->tx_idle))) + acc_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + acc_request_free(dev->rx_req[i], dev->ep_out); + + acc_hid_unbind(dev); +} + +static void acc_start_work(struct work_struct *data) +{ + char *envp[2] = { "ACCESSORY=START", NULL }; + + kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp); +} + +static int acc_hid_init(struct acc_hid_dev *hdev) +{ + struct hid_device *hid; + int ret; + + hid = hid_allocate_device(); + if (IS_ERR(hid)) + return PTR_ERR(hid); + + hid->ll_driver = &acc_hid_ll_driver; + hid->dev.parent = acc_device.this_device; + + hid->bus = BUS_USB; + hid->vendor = HID_ANY_ID; + hid->product = HID_ANY_ID; + hid->driver_data = hdev; + ret = hid_add_device(hid); + if (ret) { + pr_err("can't add hid device: %d\n", ret); + hid_destroy_device(hid); + return ret; + } + + hdev->hid = hid; + return 0; +} + +static void acc_hid_delete(struct acc_hid_dev *hid) +{ + kfree(hid->report_desc); + kfree(hid); +} + +static void acc_hid_work(struct work_struct *data) +{ + struct acc_dev *dev = _acc_dev; + struct list_head *entry, *temp; + struct acc_hid_dev *hid; + struct list_head new_list, dead_list; + unsigned long flags; + + INIT_LIST_HEAD(&new_list); + + spin_lock_irqsave(&dev->lock, flags); + + /* copy hids that are ready for initialization to new_list */ + list_for_each_safe(entry, temp, &dev->new_hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + if (hid->report_desc_offset == hid->report_desc_len) + list_move(&hid->list, &new_list); + } + + if (list_empty(&dev->dead_hid_list)) { + INIT_LIST_HEAD(&dead_list); + } else { + /* move all of 
dev->dead_hid_list to dead_list */ + dead_list.prev = dev->dead_hid_list.prev; + dead_list.next = dev->dead_hid_list.next; + dead_list.next->prev = &dead_list; + dead_list.prev->next = &dead_list; + INIT_LIST_HEAD(&dev->dead_hid_list); + } + + spin_unlock_irqrestore(&dev->lock, flags); + + /* register new HID devices */ + list_for_each_safe(entry, temp, &new_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + if (acc_hid_init(hid)) { + pr_err("can't add HID device %p\n", hid); + acc_hid_delete(hid); + } else { + spin_lock_irqsave(&dev->lock, flags); + list_move(&hid->list, &dev->hid_list); + spin_unlock_irqrestore(&dev->lock, flags); + } + } + + /* remove dead HID devices */ + list_for_each_safe(entry, temp, &dead_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + list_del(&hid->list); + if (hid->hid) + hid_destroy_device(hid->hid); + acc_hid_delete(hid); + } +} + +static int acc_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt); + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_in); + if (ret) + return ret; + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_out); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + + dev->online = 1; + dev->disconnected = 0; /* if online then not disconnected */ + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void acc_function_disable(struct usb_function *f) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "acc_function_disable\n"); + acc_set_disconnected(dev); /* this now only sets disconnected */ + dev->online = 0; /* so now need to clear online flag here too */ + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int acc_setup(void) +{ + struct acc_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + spin_lock_init(&dev->lock); + init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + atomic_set(&dev->open_excl, 0); + INIT_LIST_HEAD(&dev->tx_idle); + INIT_LIST_HEAD(&dev->hid_list); + INIT_LIST_HEAD(&dev->new_hid_list); + INIT_LIST_HEAD(&dev->dead_hid_list); + INIT_DELAYED_WORK(&dev->start_work, acc_start_work); + INIT_WORK(&dev->hid_work, acc_hid_work); + + /* _acc_dev must be set before calling usb_gadget_register_driver */ + _acc_dev = dev; + + ret = misc_register(&acc_device); + if (ret) + goto err; + + return 0; + +err: + kfree(dev); + pr_err("USB accessory gadget driver failed to initialize\n"); + return ret; +} + +void acc_disconnect(void) +{ + /* unregister all HID devices if USB is disconnected */ + kill_all_hid_devices(_acc_dev); +} +EXPORT_SYMBOL_GPL(acc_disconnect); + +static void acc_cleanup(void) +{ + misc_deregister(&acc_device); + kfree(_acc_dev); + _acc_dev = NULL; +} +static struct acc_instance *to_acc_instance(struct config_item *item) +{ + return container_of(to_config_group(item), struct acc_instance, + func_inst.group); +} + +static void acc_attr_release(struct config_item *item) +{ + struct acc_instance *fi_acc = to_acc_instance(item); 
+ + usb_put_function_instance(&fi_acc->func_inst); +} + +static struct configfs_item_operations acc_item_ops = { + .release = acc_attr_release, +}; + +static struct config_item_type acc_func_type = { + .ct_item_ops = &acc_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct acc_instance *to_fi_acc(struct usb_function_instance *fi) +{ + return container_of(fi, struct acc_instance, func_inst); +} + +static int acc_set_inst_name(struct usb_function_instance *fi, const char *name) +{ + struct acc_instance *fi_acc; + char *ptr; + int name_len; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + fi_acc = to_fi_acc(fi); + fi_acc->name = ptr; + return 0; +} + +static void acc_free_inst(struct usb_function_instance *fi) +{ + struct acc_instance *fi_acc; + + fi_acc = to_fi_acc(fi); + kfree(fi_acc->name); + acc_cleanup(); +} + +static struct usb_function_instance *acc_alloc_inst(void) +{ + struct acc_instance *fi_acc; + struct acc_dev *dev; + int err; + + fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL); + if (!fi_acc) + return ERR_PTR(-ENOMEM); + fi_acc->func_inst.set_inst_name = acc_set_inst_name; + fi_acc->func_inst.free_func_inst = acc_free_inst; + + err = acc_setup(); + if (err) { + kfree(fi_acc); + pr_err("Error setting ACCESSORY\n"); + return ERR_PTR(err); + } + + config_group_init_type_name(&fi_acc->func_inst.group, + "", &acc_func_type); + dev = _acc_dev; + return &fi_acc->func_inst; +} + +static void acc_free(struct usb_function *f) +{ +/* NO-OP: no function-specific resource allocation in acc_alloc */ +} + +int acc_ctrlrequest_configfs(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) { + if (f->config != NULL && f->config->cdev != NULL) + return acc_ctrlrequest(f->config->cdev, ctrl); + else + return -1; +} + +static struct usb_function *acc_alloc(struct usb_function_instance *fi) +{ + struct acc_dev *dev = _acc_dev; + + pr_info("acc_alloc\n"); + + dev->function.name = "accessory"; + dev->function.strings = acc_strings; + dev->function.fs_descriptors = fs_acc_descs; + dev->function.hs_descriptors = hs_acc_descs; + dev->function.bind = acc_function_bind_configfs; + dev->function.unbind = acc_function_unbind; + dev->function.set_alt = acc_function_set_alt; + dev->function.disable = acc_function_disable; + dev->function.free_func = acc_free; + dev->function.setup = acc_ctrlrequest_configfs; + + return &dev->function; +} +DECLARE_USB_FUNCTION_INIT(accessory, acc_alloc_inst, acc_alloc); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c new file mode 100644 index 000000000000..8124af33b738 --- /dev/null +++ b/drivers/usb/gadget/function/f_audio_source.c @@ -0,0 +1,1071 @@ +/* + * Gadget Function Driver for USB audio source device + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#define SAMPLE_RATE 44100 +#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000) + +#define IN_EP_MAX_PACKET_SIZE 256 + +/* Number of requests to allocate */ +#define IN_EP_REQ_COUNT 4 + +#define AUDIO_AC_INTERFACE 0 +#define AUDIO_AS_INTERFACE 1 +#define AUDIO_NUM_INTERFACES 2 +#define MAX_INST_NAME_LEN 40 + +/* B.3.1 Standard AC Interface Descriptor */ +static struct usb_interface_descriptor ac_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +}; + +DECLARE_UAC_AC_HEADER_DESCRIPTOR(2); + +#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES) +/* 1 input terminal, 1 output terminal and 1 feature unit */ +#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \ + + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \ + + UAC_DT_FEATURE_UNIT_SIZE(0)) +/* B.3.2 Class-Specific AC Interface Descriptor */ +static struct uac1_ac_header_descriptor_2 ac_header_desc = { + .bLength = UAC_DT_AC_HEADER_LENGTH, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_HEADER, + .bcdADC = __constant_cpu_to_le16(0x0100), + .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH), + .bInCollection = AUDIO_NUM_INTERFACES, + .baInterfaceNr = { + [0] = AUDIO_AC_INTERFACE, + [1] = AUDIO_AS_INTERFACE, + } +}; + +#define INPUT_TERMINAL_ID 1 +static struct uac_input_terminal_descriptor input_terminal_desc = { + .bLength = UAC_DT_INPUT_TERMINAL_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_INPUT_TERMINAL, + .bTerminalID = INPUT_TERMINAL_ID, + .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE, + .bAssocTerminal = 0, + .wChannelConfig = 0x3, +}; + +DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0); + +#define FEATURE_UNIT_ID 2 +static struct uac_feature_unit_descriptor_0 feature_unit_desc = { + .bLength = UAC_DT_FEATURE_UNIT_SIZE(0), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FEATURE_UNIT, + .bUnitID = FEATURE_UNIT_ID, + .bSourceID = INPUT_TERMINAL_ID, + .bControlSize = 2, +}; + +#define OUTPUT_TERMINAL_ID 3 +static struct uac1_output_terminal_descriptor output_terminal_desc = { + .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, + .bTerminalID = OUTPUT_TERMINAL_ID, + .wTerminalType = UAC_TERMINAL_STREAMING, + .bAssocTerminal = FEATURE_UNIT_ID, + .bSourceID = FEATURE_UNIT_ID, +}; + +/* B.4.1 Standard AS Interface Descriptor */ +static struct usb_interface_descriptor as_interface_alt_0_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, +}; + +static struct usb_interface_descriptor as_interface_alt_1_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bAlternateSetting = 1, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, +}; + +/* B.4.2 Class-Specific AS Interface Descriptor */ +static struct uac1_as_header_descriptor as_header_desc = { + .bLength = UAC_DT_AS_HEADER_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_AS_GENERAL, + .bTerminalLink = INPUT_TERMINAL_ID, + 
.bDelay = 1, + .wFormatTag = UAC_FORMAT_TYPE_I_PCM, +}; + +DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1); + +static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = { + .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FORMAT_TYPE, + .bFormatType = UAC_FORMAT_TYPE_I, + .bSubframeSize = 2, + .bBitResolution = 16, + .bSamFreqType = 1, +}; + +/* Standard ISO IN Endpoint Descriptor for highspeed */ +static struct usb_endpoint_descriptor hs_as_in_ep_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_SYNC + | USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE), + .bInterval = 4, /* poll 1 per millisecond */ +}; + +/* Standard ISO IN Endpoint Descriptor for fullspeed */ +static struct usb_endpoint_descriptor fs_as_in_ep_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_SYNC + | USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE), + .bInterval = 1, /* poll 1 per millisecond */ +}; + +/* Class-specific AS ISO IN Endpoint Descriptor */ +static struct uac_iso_endpoint_descriptor as_iso_in_desc = { + .bLength = UAC_ISO_ENDPOINT_DESC_SIZE, + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubtype = UAC_EP_GENERAL, + .bmAttributes = 1, + .bLockDelayUnits = 1, + .wLockDelay = __constant_cpu_to_le16(1), +}; + +static struct usb_descriptor_header *hs_audio_desc[] = { + (struct usb_descriptor_header *)&ac_interface_desc, + (struct usb_descriptor_header *)&ac_header_desc, + + (struct usb_descriptor_header *)&input_terminal_desc, + (struct usb_descriptor_header *)&output_terminal_desc, + (struct usb_descriptor_header *)&feature_unit_desc, + + (struct usb_descriptor_header *)&as_interface_alt_0_desc, + (struct usb_descriptor_header *)&as_interface_alt_1_desc, + (struct usb_descriptor_header *)&as_header_desc, + + (struct usb_descriptor_header *)&as_type_i_desc, + + (struct usb_descriptor_header *)&hs_as_in_ep_desc, + (struct usb_descriptor_header *)&as_iso_in_desc, + NULL, +}; + +static struct usb_descriptor_header *fs_audio_desc[] = { + (struct usb_descriptor_header *)&ac_interface_desc, + (struct usb_descriptor_header *)&ac_header_desc, + + (struct usb_descriptor_header *)&input_terminal_desc, + (struct usb_descriptor_header *)&output_terminal_desc, + (struct usb_descriptor_header *)&feature_unit_desc, + + (struct usb_descriptor_header *)&as_interface_alt_0_desc, + (struct usb_descriptor_header *)&as_interface_alt_1_desc, + (struct usb_descriptor_header *)&as_header_desc, + + (struct usb_descriptor_header *)&as_type_i_desc, + + (struct usb_descriptor_header *)&fs_as_in_ep_desc, + (struct usb_descriptor_header *)&as_iso_in_desc, + NULL, +}; + +static struct snd_pcm_hardware audio_hw_info = { + .info = SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_BATCH | + SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 2, + .channels_max = 2, + .rate_min = SAMPLE_RATE, + .rate_max = SAMPLE_RATE, + + .buffer_bytes_max = 1024 * 1024, + .period_bytes_min = 64, + .period_bytes_max = 512 * 1024, + .periods_min = 2, + .periods_max = 1024, +}; + +/*-------------------------------------------------------------------------*/ + +struct audio_source_config { + int
card; + int device; +}; + +struct audio_dev { + struct usb_function func; + struct snd_card *card; + struct snd_pcm *pcm; + struct snd_pcm_substream *substream; + + struct list_head idle_reqs; + struct usb_ep *in_ep; + + spinlock_t lock; + + /* beginning, end and current position in our buffer */ + void *buffer_start; + void *buffer_end; + void *buffer_pos; + + /* byte size of a "period" */ + unsigned int period; + /* bytes sent since last call to snd_pcm_period_elapsed */ + unsigned int period_offset; + /* time we started playing */ + ktime_t start_time; + /* number of frames sent since start_time */ + s64 frames_sent; + struct audio_source_config *config; + /* for creating and issuing QoS requests */ + struct pm_qos_request pm_qos; +}; + +static inline struct audio_dev *func_to_audio(struct usb_function *f) +{ + return container_of(f, struct audio_dev, func); +} + +/*-------------------------------------------------------------------------*/ + +struct audio_source_instance { + struct usb_function_instance func_inst; + const char *name; + struct audio_source_config *config; + struct device *audio_device; +}; + +static void audio_source_attr_release(struct config_item *item); + +static struct configfs_item_operations audio_source_item_ops = { + .release = audio_source_attr_release, +}; + +static struct config_item_type audio_source_func_type = { + .ct_item_ops = &audio_source_item_ops, + .ct_owner = THIS_MODULE, +}; + +static ssize_t audio_source_pcm_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL); + +static struct device_attribute *audio_source_function_attributes[] = { + &dev_attr_pcm, + NULL +}; + +/*--------------------------------------------------------------------------*/ + +static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + + if (!req) + return NULL; + + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + req->length = buffer_size; + return req; +} + +static void audio_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static void audio_req_put(struct audio_dev *audio, struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&audio->lock, flags); + list_add_tail(&req->list, &audio->idle_reqs); + spin_unlock_irqrestore(&audio->lock, flags); +} + +static struct usb_request *audio_req_get(struct audio_dev *audio) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&audio->lock, flags); + if (list_empty(&audio->idle_reqs)) { + req = 0; + } else { + req = list_first_entry(&audio->idle_reqs, struct usb_request, + list); + list_del(&req->list); + } + spin_unlock_irqrestore(&audio->lock, flags); + return req; +} + +/* send the appropriate number of packets to match our bitrate */ +static void audio_send(struct audio_dev *audio) +{ + struct snd_pcm_runtime *runtime; + struct usb_request *req; + int length, length1, length2, ret; + s64 msecs; + s64 frames; + ktime_t now; + + /* audio->substream will be null if we have been closed */ + if (!audio->substream) + return; + /* audio->buffer_pos will be null if we have been stopped */ + if (!audio->buffer_pos) + return; + + runtime = audio->substream->runtime; + + /* compute number of frames to send */ + now = ktime_get(); + msecs = div_s64((ktime_to_ns(now) - 
ktime_to_ns(audio->start_time)), + 1000000); + frames = div_s64((msecs * SAMPLE_RATE), 1000); + + /* Readjust our frames_sent if we fall too far behind. + * If we get too far behind it is better to drop some frames than + * to keep sending data too fast in an attempt to catch up. + */ + if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC) + audio->frames_sent = frames - FRAMES_PER_MSEC; + + frames -= audio->frames_sent; + + /* We need to send something to keep the pipeline going */ + if (frames <= 0) + frames = FRAMES_PER_MSEC; + + while (frames > 0) { + req = audio_req_get(audio); + if (!req) + break; + + length = frames_to_bytes(runtime, frames); + if (length > IN_EP_MAX_PACKET_SIZE) + length = IN_EP_MAX_PACKET_SIZE; + + if (audio->buffer_pos + length > audio->buffer_end) + length1 = audio->buffer_end - audio->buffer_pos; + else + length1 = length; + memcpy(req->buf, audio->buffer_pos, length1); + if (length1 < length) { + /* Wrap around and copy remaining length + * at beginning of buffer. + */ + length2 = length - length1; + memcpy(req->buf + length1, audio->buffer_start, + length2); + audio->buffer_pos = audio->buffer_start + length2; + } else { + audio->buffer_pos += length1; + if (audio->buffer_pos >= audio->buffer_end) + audio->buffer_pos = audio->buffer_start; + } + + req->length = length; + ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC); + if (ret < 0) { + pr_err("usb_ep_queue failed ret: %d\n", ret); + audio_req_put(audio, req); + break; + } + + frames -= bytes_to_frames(runtime, length); + audio->frames_sent += bytes_to_frames(runtime, length); + } +} + +static void audio_control_complete(struct usb_ep *ep, struct usb_request *req) +{ + /* nothing to do here */ +} + +static void audio_data_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct audio_dev *audio = req->context; + + pr_debug("audio_data_complete req->status %d req->actual %d\n", + req->status, req->actual); + + audio_req_put(audio, req); + + if (!audio->buffer_start || req->status) + return; + + audio->period_offset += req->actual; + if (audio->period_offset >= audio->period) { + snd_pcm_period_elapsed(audio->substream); + audio->period_offset = 0; + } + audio_send(audio); +} + +static int audio_set_endpoint_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + int value = -EOPNOTSUPP; + u16 ep = le16_to_cpu(ctrl->wIndex); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + + pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", + ctrl->bRequest, w_value, len, ep); + + switch (ctrl->bRequest) { + case UAC_SET_CUR: + case UAC_SET_MIN: + case UAC_SET_MAX: + case UAC_SET_RES: + value = len; + break; + default: + break; + } + + return value; +} + +static int audio_get_endpoint_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + int value = -EOPNOTSUPP; + u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + u8 *buf = cdev->req->buf; + + pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", + ctrl->bRequest, w_value, len, ep); + + if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) { + switch (ctrl->bRequest) { + case UAC_GET_CUR: + case UAC_GET_MIN: + case UAC_GET_MAX: + case UAC_GET_RES: + /* return our sample rate */ + buf[0] = (u8)SAMPLE_RATE; + buf[1] = (u8)(SAMPLE_RATE >> 8); + buf[2] = (u8)(SAMPLE_RATE >> 16); + value = 3; + break; + default: + break; + } + } + + return value; +} 
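For reference, a minimal user-space sketch (not part of the patch) of the pacing arithmetic audio_send() above applies on each completion: the frames owed are derived from wall-clock time, a stream that has fallen far behind drops frames rather than bursting, and the remainder is cut into requests no larger than one ISO packet. SAMPLE_RATE and IN_EP_MAX_PACKET_SIZE mirror the driver's defines; BYTES_PER_FRAME and the sample inputs are illustrative assumptions.

/*
 * Standalone sketch of the audio_source pacing math; compiles with any
 * C99 compiler, all values besides the two defines are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define SAMPLE_RATE		44100
#define FRAMES_PER_MSEC		(SAMPLE_RATE / 1000)
#define IN_EP_MAX_PACKET_SIZE	256
#define BYTES_PER_FRAME		4	/* 2 channels x 16-bit samples (assumed) */

int main(void)
{
	int64_t msecs = 7;				/* pretend 7 ms have elapsed */
	int64_t frames_sent = 200;			/* frames already queued */
	int64_t frames = msecs * SAMPLE_RATE / 1000;	/* frames owed so far */

	/* if we fell too far behind, drop frames instead of bursting */
	if (frames - frames_sent > 10 * FRAMES_PER_MSEC)
		frames_sent = frames - FRAMES_PER_MSEC;

	frames -= frames_sent;
	if (frames <= 0)
		frames = FRAMES_PER_MSEC;	/* always keep the pipeline ticking */

	while (frames > 0) {
		int64_t bytes = frames * BYTES_PER_FRAME;

		if (bytes > IN_EP_MAX_PACKET_SIZE)
			bytes = IN_EP_MAX_PACKET_SIZE;
		printf("queue %lld-byte request\n", (long long)bytes);
		frames -= bytes / BYTES_PER_FRAME;
	}
	return 0;
}

Resetting frames_sent forward when the stream lags, as the driver does, keeps playback near real time; trying to catch up by queueing the backlog would only grow latency on the host side.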
+ +static int +audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything; interface + * activation uses set_alt(). + */ + switch (ctrl->bRequestType) { + case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: + value = audio_set_endpoint_req(f, ctrl); + break; + + case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: + value = audio_get_endpoint_req(f, ctrl); + break; + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + pr_debug("audio req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + req->complete = audio_control_complete; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + pr_err("audio response on err %d\n", value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + +static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct audio_dev *audio = func_to_audio(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt); + + ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep); + if (ret) + return ret; + + usb_ep_enable(audio->in_ep); + return 0; +} + +static void audio_disable(struct usb_function *f) +{ + struct audio_dev *audio = func_to_audio(f); + + pr_debug("audio_disable\n"); + usb_ep_disable(audio->in_ep); +} + +static void audio_free_func(struct usb_function *f) +{ + /* no-op */ +} + +/*-------------------------------------------------------------------------*/ + +static void audio_build_desc(struct audio_dev *audio) +{ + u8 *sam_freq; + int rate; + + /* Set channel numbers */ + input_terminal_desc.bNrChannels = 2; + as_type_i_desc.bNrChannels = 2; + + /* Set sample rates */ + rate = SAMPLE_RATE; + sam_freq = as_type_i_desc.tSamFreq[0]; + memcpy(sam_freq, &rate, 3); +} + + +static int snd_card_setup(struct usb_configuration *c, + struct audio_source_config *config); +static struct audio_source_instance *to_fi_audio_source( + const struct usb_function_instance *fi); + + +/* audio function driver setup/binding */ +static int +audio_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct audio_dev *audio = func_to_audio(f); + int status; + struct usb_ep *ep; + struct usb_request *req; + int i; + int err; + + if (IS_ENABLED(CONFIG_USB_CONFIGFS)) { + struct audio_source_instance *fi_audio = + to_fi_audio_source(f->fi); + struct audio_source_config *config = + fi_audio->config; + + err = snd_card_setup(c, config); + if (err) + return err; + } + + audio_build_desc(audio); + + /* allocate instance-specific interface IDs, and patch descriptors */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ac_interface_desc.bInterfaceNumber = status; + + /* AUDIO_AC_INTERFACE */ + ac_header_desc.baInterfaceNr[0] = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + as_interface_alt_0_desc.bInterfaceNumber = status; + as_interface_alt_1_desc.bInterfaceNumber = status; + + /* AUDIO_AS_INTERFACE */ + ac_header_desc.baInterfaceNr[1] = status; + + status = -ENODEV; + + /* allocate our endpoint */ + ep = 
usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc); + if (!ep) + goto fail; + audio->in_ep = ep; + ep->driver_data = audio; /* claim */ + + if (gadget_is_dualspeed(c->cdev->gadget)) + hs_as_in_ep_desc.bEndpointAddress = + fs_as_in_ep_desc.bEndpointAddress; + + f->fs_descriptors = fs_audio_desc; + f->hs_descriptors = hs_audio_desc; + + for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) { + req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE); + if (req) { + req->context = audio; + req->complete = audio_data_complete; + audio_req_put(audio, req); + } else + status = -ENOMEM; + } + +fail: + return status; +} + +static void +audio_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct audio_dev *audio = func_to_audio(f); + struct usb_request *req; + + while ((req = audio_req_get(audio))) + audio_request_free(req, audio->in_ep); + + snd_card_free_when_closed(audio->card); + audio->card = NULL; + audio->pcm = NULL; + audio->substream = NULL; + audio->in_ep = NULL; + + if (IS_ENABLED(CONFIG_USB_CONFIGFS)) { + struct audio_source_instance *fi_audio = + to_fi_audio_source(f->fi); + struct audio_source_config *config = + fi_audio->config; + + config->card = -1; + config->device = -1; + } +} + +static void audio_pcm_playback_start(struct audio_dev *audio) +{ + audio->start_time = ktime_get(); + audio->frames_sent = 0; + audio_send(audio); +} + +static void audio_pcm_playback_stop(struct audio_dev *audio) +{ + unsigned long flags; + + spin_lock_irqsave(&audio->lock, flags); + audio->buffer_start = 0; + audio->buffer_end = 0; + audio->buffer_pos = 0; + spin_unlock_irqrestore(&audio->lock, flags); +} + +static int audio_pcm_open(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct audio_dev *audio = substream->private_data; + + runtime->private_data = audio; + runtime->hw = audio_hw_info; + snd_pcm_limit_hw_rates(runtime); + runtime->hw.channels_max = 2; + + audio->substream = substream; + + /* Add the QoS request and set the latency to 0 */ + pm_qos_add_request(&audio->pm_qos, PM_QOS_CPU_DMA_LATENCY, 0); + + return 0; +} + +static int audio_pcm_close(struct snd_pcm_substream *substream) +{ + struct audio_dev *audio = substream->private_data; + unsigned long flags; + + spin_lock_irqsave(&audio->lock, flags); + + /* Remove the QoS request */ + pm_qos_remove_request(&audio->pm_qos); + + audio->substream = NULL; + spin_unlock_irqrestore(&audio->lock, flags); + + return 0; +} + +static int audio_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + unsigned int channels = params_channels(params); + unsigned int rate = params_rate(params); + + if (rate != SAMPLE_RATE) + return -EINVAL; + if (channels != 2) + return -EINVAL; + + return snd_pcm_lib_alloc_vmalloc_buffer(substream, + params_buffer_bytes(params)); +} + +static int audio_pcm_hw_free(struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_vmalloc_buffer(substream); +} + +static int audio_pcm_prepare(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct audio_dev *audio = runtime->private_data; + + audio->period = snd_pcm_lib_period_bytes(substream); + audio->period_offset = 0; + audio->buffer_start = runtime->dma_area; + audio->buffer_end = audio->buffer_start + + snd_pcm_lib_buffer_bytes(substream); + audio->buffer_pos = audio->buffer_start; + + return 0; +} + +static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime 
*runtime = substream->runtime; + struct audio_dev *audio = runtime->private_data; + ssize_t bytes = audio->buffer_pos - audio->buffer_start; + + /* return offset of next frame to fill in our buffer */ + return bytes_to_frames(runtime, bytes); +} + +static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream, + int cmd) +{ + struct audio_dev *audio = substream->runtime->private_data; + int ret = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + audio_pcm_playback_start(audio); + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + audio_pcm_playback_stop(audio); + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static struct audio_dev _audio_dev = { + .func = { + .name = "audio_source", + .bind = audio_bind, + .unbind = audio_unbind, + .set_alt = audio_set_alt, + .setup = audio_setup, + .disable = audio_disable, + .free_func = audio_free_func, + }, + .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock), + .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs), +}; + +static struct snd_pcm_ops audio_playback_ops = { + .open = audio_pcm_open, + .close = audio_pcm_close, + .ioctl = snd_pcm_lib_ioctl, + .hw_params = audio_pcm_hw_params, + .hw_free = audio_pcm_hw_free, + .prepare = audio_pcm_prepare, + .trigger = audio_pcm_playback_trigger, + .pointer = audio_pcm_pointer, +}; + +int audio_source_bind_config(struct usb_configuration *c, + struct audio_source_config *config) +{ + struct audio_dev *audio; + int err; + + config->card = -1; + config->device = -1; + + audio = &_audio_dev; + + err = snd_card_setup(c, config); + if (err) + return err; + + err = usb_add_function(c, &audio->func); + if (err) + goto add_fail; + + return 0; + +add_fail: + snd_card_free(audio->card); + return err; +} + +static int snd_card_setup(struct usb_configuration *c, + struct audio_source_config *config) +{ + struct audio_dev *audio; + struct snd_card *card; + struct snd_pcm *pcm; + int err; + + audio = &_audio_dev; + + err = snd_card_new(&c->cdev->gadget->dev, + SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, + THIS_MODULE, 0, &card); + if (err) + return err; + + err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm); + if (err) + goto pcm_fail; + + pcm->private_data = audio; + pcm->info_flags = 0; + audio->pcm = pcm; + + strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name)); + + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops); + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, + NULL, 0, 64 * 1024); + + strlcpy(card->driver, "audio_source", sizeof(card->driver)); + strlcpy(card->shortname, card->driver, sizeof(card->shortname)); + strlcpy(card->longname, "USB accessory audio source", + sizeof(card->longname)); + + err = snd_card_register(card); + if (err) + goto register_fail; + + config->card = pcm->card->number; + config->device = pcm->device; + audio->card = card; + return 0; + +register_fail: +pcm_fail: + snd_card_free(audio->card); + return err; +} + +static struct audio_source_instance *to_audio_source_instance( + struct config_item *item) +{ + return container_of(to_config_group(item), struct audio_source_instance, + func_inst.group); +} + +static struct audio_source_instance *to_fi_audio_source( + const struct usb_function_instance *fi) +{ + return container_of(fi, struct audio_source_instance, func_inst); +} + +static void audio_source_attr_release(struct config_item *item) +{ + struct audio_source_instance *fi_audio = to_audio_source_instance(item); + + 
usb_put_function_instance(&fi_audio->func_inst); +} + +static int audio_source_set_inst_name(struct usb_function_instance *fi, + const char *name) +{ + struct audio_source_instance *fi_audio; + char *ptr; + int name_len; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + fi_audio = to_fi_audio_source(fi); + fi_audio->name = ptr; + + return 0; +} + +static void audio_source_free_inst(struct usb_function_instance *fi) +{ + struct audio_source_instance *fi_audio; + + fi_audio = to_fi_audio_source(fi); + device_destroy(fi_audio->audio_device->class, + fi_audio->audio_device->devt); + kfree(fi_audio->name); + kfree(fi_audio->config); +} + +static ssize_t audio_source_pcm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct audio_source_instance *fi_audio = dev_get_drvdata(dev); + struct audio_source_config *config = fi_audio->config; + + /* print PCM card and device numbers */ + return sprintf(buf, "%d %d\n", config->card, config->device); +} + +struct device *create_function_device(char *name); + +static struct usb_function_instance *audio_source_alloc_inst(void) +{ + struct audio_source_instance *fi_audio; + struct device_attribute **attrs; + struct device_attribute *attr; + struct device *dev; + void *err_ptr; + int err = 0; + + fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL); + if (!fi_audio) + return ERR_PTR(-ENOMEM); + + fi_audio->func_inst.set_inst_name = audio_source_set_inst_name; + fi_audio->func_inst.free_func_inst = audio_source_free_inst; + + fi_audio->config = kzalloc(sizeof(struct audio_source_config), + GFP_KERNEL); + if (!fi_audio->config) { + err_ptr = ERR_PTR(-ENOMEM); + goto fail_audio; + } + + config_group_init_type_name(&fi_audio->func_inst.group, "", + &audio_source_func_type); + dev = create_function_device("f_audio_source"); + + if (IS_ERR(dev)) { + err_ptr = dev; + goto fail_audio_config; + } + + fi_audio->config->card = -1; + fi_audio->config->device = -1; + fi_audio->audio_device = dev; + + attrs = audio_source_function_attributes; + if (attrs) { + while ((attr = *attrs++) && !err) + err = device_create_file(dev, attr); + if (err) { + err_ptr = ERR_PTR(-EINVAL); + goto fail_device; + } + } + + dev_set_drvdata(dev, fi_audio); + _audio_dev.config = fi_audio->config; + + return &fi_audio->func_inst; + +fail_device: + device_destroy(dev->class, dev->devt); +fail_audio_config: + kfree(fi_audio->config); +fail_audio: + kfree(fi_audio); + return err_ptr; + +} + +static struct usb_function *audio_source_alloc(struct usb_function_instance *fi) +{ + return &_audio_dev.func; +} + +DECLARE_USB_FUNCTION_INIT(audio_source, audio_source_alloc_inst, + audio_source_alloc); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_dvctrace.c b/drivers/usb/gadget/function/f_dvctrace.c new file mode 100644 index 000000000000..2589632a0e90 --- /dev/null +++ b/drivers/usb/gadget/function/f_dvctrace.c @@ -0,0 +1,825 @@ +/* + * Gadget Driver for DvC.Trace Function + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#ifdef VERBOSE_DEBUG +#define DVCT_IN() pr_debug("in\n") +#else +#define DVCT_IN() do {} while (0) +#endif + +#include +#include +#include +#include +#include "u_dvctrace.h" + +enum { + DVCT_IAD_DESC_POS, + DVCT_CITF_DESC_POS, + DVCT_DITF_DESC_POS, + DVCT_EP_DESC_POS, + DVCT_LS_NULL_DESC_POS, /*Low speed descriptors end with this one*/ + DVCT_EP_COMP_DESC_POS = DVCT_LS_NULL_DESC_POS, + DVCT_LS_DESC_COUNT, /*Count of low speed descriptors*/ + DVCT_NULL_DESC_POS = DVCT_LS_DESC_COUNT, + DVCT_HS_DESC_COUNT,/*Count of super speed descriptors*/ +}; + +/*The full list of descriptors will look like: + * IAD_DESCRIPTOR -----|=> USB function specific + * CONTROL_ITF_DESCRIPTOR -----| + * SOURCE_SPECIFIC_DESCRIPTOR_0 ----| + * .... |=> s_cnt descriptors provided by the + * SOURCE_SPECIFIC_DESCRIPTOR_s_cnt----| source device. + * DATA_ITF_DESCRIPTOR -----| + * ENDPOINT_DESCRIPTOR |=> USB function specific + * .... -----| + * This makes a good part of the descriptors to shift, + * the following should help*/ +#define DVCT_IAD_DESC_DYN_POS(s_cnt) (DVCT_IAD_DESC_POS) +#define DVCT_CITF_DESC_DYN_POS(s_cnt) (DVCT_CITF_DESC_POS) +#define DVCT_SOURCE_DESC_FIRST(s_cnt) (DVCT_DITF_DESC_POS) +#define DVCT_DITF_DESC_DYN_POS(s_cnt) ((s_cnt)+DVCT_DITF_DESC_POS) +#define DVCT_EP_DESC_DYN_POS(s_cnt) ((s_cnt)+DVCT_EP_DESC_POS) +#define DVCT_EP_COMP_DESC_DYN_POS(s_cnt) ((s_cnt)+DVCT_EP_COMP_DESC_POS) +#define DVCT_LS_DESC_DYN_COUNT(s_cnt) ((s_cnt)+DVCT_LS_DESC_COUNT) +#define DVCT_HS_DESC_DYN_COUNT(s_cnt) ((s_cnt)+DVCT_HS_DESC_COUNT) + +enum { + DVCT_STR_IAD_IDX, + DVCT_STR_C_ITF_IDX, + DVCT_STR_D_ITF_IDX, + DVCT_STR_NULL_IDX, /*always last */ + DVCT_STR_COUNT, +}; + +static int dvct_alloc_desc(struct dvct_function *d_fun) +{ + int i; + unsigned int s_desc_count = 0; + struct usb_descriptor_header **s_desc; + struct dvct_function_desc *desc = &d_fun->desc; + + DVCT_IN(); + + if (d_fun->source_dev->desc) { + for (s_desc = d_fun->source_dev->desc->dvc_spec; + s_desc && (*s_desc); s_desc++) + s_desc_count++; + } + + /*alloc the descriptors array */ + desc->fs = + kzalloc(DVCT_LS_DESC_DYN_COUNT(s_desc_count) * + sizeof(struct usb_descriptor_header *), GFP_KERNEL); + if (!desc->fs) + goto err_fs; + + desc->hs = + kzalloc(DVCT_LS_DESC_DYN_COUNT(s_desc_count) * + sizeof(struct usb_descriptor_header *), GFP_KERNEL); + if (!desc->hs) + goto err_hs; + + desc->ss = + kzalloc(DVCT_HS_DESC_DYN_COUNT(s_desc_count) * + sizeof(struct usb_descriptor_header *), GFP_KERNEL); + if (!desc->ss) + goto err_ss; + + /*IAD */ + desc->iad = kzalloc(sizeof(*desc->iad), GFP_KERNEL); + if (!desc->iad) + goto err_iad; + + desc->iad->bLength = sizeof(*desc->iad); + desc->iad->bDescriptorType = USB_DT_INTERFACE_ASSOCIATION; + desc->iad->bInterfaceCount = 2; + desc->iad->bFunctionClass = USB_CLASS_DEBUG; + desc->iad->bFunctionSubClass = USB_SUBCLASS_DVC_TRACE; + desc->iad->bFunctionProtocol = d_fun->source_dev->protocol; + /*bFirstInterface - updated on bind */ + + desc->fs[DVCT_IAD_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->iad; + desc->hs[DVCT_IAD_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->iad; + desc->ss[DVCT_IAD_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->iad; + + /*Control interface */ + desc->c_itf = kzalloc(sizeof(*desc->c_itf), GFP_KERNEL); + if (!desc->c_itf) + goto err_c_itf; + + desc->c_itf->bLength = USB_DT_INTERFACE_SIZE; + 
desc->c_itf->bDescriptorType = USB_DT_INTERFACE; + desc->c_itf->bInterfaceClass = USB_CLASS_DEBUG; + desc->c_itf->bInterfaceSubClass = USB_SUBCLASS_DEBUG_CONTROL; + desc->c_itf->bInterfaceProtocol = d_fun->source_dev->protocol; + + desc->fs[DVCT_CITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->c_itf; + desc->hs[DVCT_CITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->c_itf; + desc->ss[DVCT_CITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->c_itf; + + if (d_fun->source_dev->desc) { + /*Copy whatever the source device has provided */ + s_desc = d_fun->source_dev->desc->dvc_spec; + for (i = 0; i < s_desc_count; i++) { + desc->fs[DVCT_SOURCE_DESC_FIRST(s_desc_count) + i] + = s_desc[i]; + desc->hs[DVCT_SOURCE_DESC_FIRST(s_desc_count) + i] + = s_desc[i]; + desc->ss[DVCT_SOURCE_DESC_FIRST(s_desc_count) + i] + = s_desc[i]; + } + } + /*Data interface */ + desc->d_itf = kzalloc(sizeof(*desc->d_itf), GFP_KERNEL); + if (!desc->d_itf) + goto err_d_itf; + + desc->d_itf->bLength = USB_DT_INTERFACE_SIZE; + desc->d_itf->bDescriptorType = USB_DT_INTERFACE; + desc->d_itf->bNumEndpoints = 1; + desc->d_itf->bInterfaceClass = USB_CLASS_DEBUG; + desc->d_itf->bInterfaceSubClass = USB_SUBCLASS_DVC_TRACE; + desc->d_itf->bInterfaceProtocol = d_fun->source_dev->protocol; + + desc->fs[DVCT_DITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->d_itf; + desc->hs[DVCT_DITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->d_itf; + desc->ss[DVCT_DITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->d_itf; + + /*Full Speed ep */ + desc->fs_ep = kzalloc(sizeof(*desc->fs_ep), GFP_KERNEL); + if (!desc->fs_ep) + goto err_fs_ep; + + desc->fs_ep->bLength = USB_DT_ENDPOINT_SIZE; + desc->fs_ep->bDescriptorType = USB_DT_ENDPOINT; + desc->fs_ep->bEndpointAddress = USB_DIR_IN; + desc->fs_ep->bmAttributes = USB_ENDPOINT_XFER_BULK; + desc->fs_ep->wMaxPacketSize = cpu_to_le16(64); + + desc->fs[DVCT_EP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->fs_ep; + + /*High Speed ep */ + desc->hs_ep = kzalloc(sizeof(*desc->hs_ep), GFP_KERNEL); + if (!desc->hs_ep) + goto err_hs_ep; + + desc->hs_ep->bLength = USB_DT_ENDPOINT_SIZE; + desc->hs_ep->bDescriptorType = USB_DT_ENDPOINT; + desc->hs_ep->bEndpointAddress = USB_DIR_IN; + desc->hs_ep->bmAttributes = USB_ENDPOINT_XFER_BULK; + desc->hs_ep->wMaxPacketSize = cpu_to_le16(512); + + desc->hs[DVCT_EP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->hs_ep; + + /*Super Speed ep */ + desc->ss_ep = kzalloc(sizeof(*desc->ss_ep), GFP_KERNEL); + if (!desc->ss_ep) + goto err_ss_ep; + + desc->ss_ep->bLength = USB_DT_ENDPOINT_SIZE; + desc->ss_ep->bDescriptorType = USB_DT_ENDPOINT; + desc->ss_ep->bEndpointAddress = USB_DIR_IN; + desc->ss_ep->bmAttributes = USB_ENDPOINT_XFER_BULK; + desc->ss_ep->wMaxPacketSize = cpu_to_le16(1024); + + desc->ss[DVCT_EP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->ss_ep; + + /*Super Speed ep comp */ + desc->ss_ep_comp = kzalloc(sizeof(*desc->ss_ep_comp), GFP_KERNEL); + if (!desc->ss_ep_comp) + goto err_ss_ep_comp; + + desc->ss_ep_comp->bLength = USB_DT_SS_EP_COMP_SIZE; + desc->ss_ep_comp->bDescriptorType = USB_DT_SS_ENDPOINT_COMP; + + desc->ss[DVCT_EP_COMP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->ss_ep_comp; + + /* strings */ + /*the table */ + desc->str.language = 0x0409; /*en-us */ + desc->str.strings = + kzalloc(DVCT_STR_COUNT * sizeof(struct 
usb_string), GFP_KERNEL); + if (!desc->str.strings) + goto err_str; + + /*lookup table */ + desc->lk_tbl = + kzalloc(DVCT_STR_COUNT * sizeof(struct dvct_string_lookup), + GFP_KERNEL); + if (!desc->lk_tbl) + goto err_str_lk; + + /*actual strings */ + /*IAD*/ + desc->str.strings[DVCT_STR_IAD_IDX].s = + kasprintf(GFP_KERNEL, "DvC Trace (%s)", + dev_name(&d_fun->source_dev->device)); + if (!desc->str.strings[DVCT_STR_IAD_IDX].s) + goto err_str_iad; + + desc->lk_tbl[DVCT_STR_IAD_IDX].str = + &desc->str.strings[DVCT_STR_IAD_IDX]; + desc->lk_tbl[DVCT_STR_IAD_IDX].id = &desc->iad->iFunction; + + /*control */ + desc->str.strings[DVCT_STR_C_ITF_IDX].s = + kasprintf(GFP_KERNEL, "DvC Trace Control (%s)", + dev_name(&d_fun->source_dev->device)); + if (!desc->str.strings[DVCT_STR_C_ITF_IDX].s) + goto err_str_ctrl; + + desc->lk_tbl[DVCT_STR_C_ITF_IDX].str = + &desc->str.strings[DVCT_STR_C_ITF_IDX]; + desc->lk_tbl[DVCT_STR_C_ITF_IDX].id = &desc->c_itf->iInterface; + + /*data */ + desc->str.strings[DVCT_STR_D_ITF_IDX].s = + kasprintf(GFP_KERNEL, "DvC Trace Data (%s)", + dev_name(&d_fun->source_dev->device)); + if (!desc->str.strings[DVCT_STR_D_ITF_IDX].s) + goto err_str_data; + + desc->lk_tbl[DVCT_STR_D_ITF_IDX].str = + &desc->str.strings[DVCT_STR_D_ITF_IDX]; + desc->lk_tbl[DVCT_STR_D_ITF_IDX].id = &desc->d_itf->iInterface; + + return 0; +/*cleanup*/ +err_str_data: + kfree(desc->str.strings[DVCT_STR_C_ITF_IDX].s); +err_str_ctrl: + kfree(desc->str.strings[DVCT_STR_IAD_IDX].s); +err_str_iad: + kfree(desc->lk_tbl); +err_str_lk: + kfree(desc->str.strings); +err_str: + kfree(desc->ss_ep_comp); +err_ss_ep_comp: + kfree(desc->ss_ep); +err_ss_ep: + kfree(desc->hs_ep); +err_hs_ep: + kfree(desc->fs_ep); +err_fs_ep: + kfree(desc->d_itf); +err_d_itf: + kfree(desc->c_itf); +err_c_itf: + kfree(desc->iad); +err_iad: + kfree(desc->ss); +err_ss: + kfree(desc->hs); +err_hs: + kfree(desc->fs); +err_fs: + pr_err("Failed OFM"); + return -ENOMEM; +} + +static void dvct_free_desc(struct dvct_function *d_fun) +{ + struct dvct_function_desc *desc = &d_fun->desc; + + DVCT_IN(); + + kfree(desc->str.strings[DVCT_STR_D_ITF_IDX].s); + kfree(desc->str.strings[DVCT_STR_C_ITF_IDX].s); + kfree(desc->str.strings[DVCT_STR_IAD_IDX].s); + kfree(desc->lk_tbl); + kfree(desc->str.strings); + kfree(desc->ss_ep_comp); + kfree(desc->ss_ep); + kfree(desc->hs_ep); + kfree(desc->fs_ep); + kfree(desc->d_itf); + kfree(desc->c_itf); + kfree(desc->iad); + kfree(desc->ss); + kfree(desc->hs); + kfree(desc->fs); +} + +ssize_t dvct_start_transfer(struct dvct_function *d_fun, u8 config) +{ + DVCT_IN(); + if (!dvct_get_status(&d_fun->status, DVCT_MASK_ONLINE)) + return -EIO; + + d_fun->trace_config = config; + return d_fun->source_drv->start_transfer(d_fun->source_dev, config); +} +EXPORT_SYMBOL(dvct_start_transfer); + +int dvct_stop_transfer(struct dvct_function *d_fun) +{ + + DVCT_IN(); + if (!dvct_get_status(&d_fun->status, DVCT_MASK_ONLINE)) + return -EIO; + + if (dvct_get_status(&d_fun->status, DVCT_MASK_TRANS)) { + d_fun->trace_config = 0; + return d_fun->source_drv->stop_transfer(d_fun->source_dev); + } + + return 0; +} +EXPORT_SYMBOL(dvct_stop_transfer); + +static int dvct_strings_setup(struct usb_composite_dev *cdev, + struct usb_string *strings, + struct dvct_string_lookup *lk_tbl) +{ + int status; + struct dvct_string_lookup *str_lk; + + DVCT_IN(); + if (!strings || !lk_tbl) + return -EINVAL; + + status = usb_string_ids_tab(cdev, strings); + if (status < 0) + return status; + + for (str_lk = lk_tbl; str_lk->str; str_lk++) { + *str_lk->id = 
str_lk->str->id; + pr_info("Setting id %d for str \"%s\"\n", str_lk->str->id, + str_lk->str->s); + } + return 0; +} + +static int dvct_setup(struct usb_function *func, + const struct usb_ctrlrequest *ctrl) +{ + int status = -EOPNOTSUPP; + u16 w_index; + u16 w_value; + u16 w_length; + u8 b_index_value; + struct dvct_function *d_fun = to_dvct_function(func); + + DVCT_IN(); + + w_index = le16_to_cpu(ctrl->wIndex); + w_value = le16_to_cpu(ctrl->wValue); + w_length = le16_to_cpu(ctrl->wLength); + b_index_value = (u8) (w_index >> 8); + + if (ctrl->bRequestType != + (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) + goto done; + + switch (ctrl->bRequest) { + case DC_REQUEST_SET_RESET: + + pr_info("SET_RESET v%04x i%04x l%u\n", + w_value, w_index, w_length); + + dvct_stop_transfer(d_fun); + status = 0; + break; + + case DC_REQUEST_SET_TRACE: + /* There are some inconsistencies in the spec regarding some of the + * control requests, like SET/GET _TRACE, where even if the message + * is defined as interface specific the wIndex field is used for + * something else, making these request unusable in a "standard" + * composite device. + * To get around this we expect the interface to be specified in + * wIndex 7:0 and any other values in wIndex 15:8. + * A "special" composite implementation is free to treat these setup + * requests "on spec" and call directly dvct_start_transfer and/or + * dvct_stop_transfer (exported in u_dvctrace.h). + */ + pr_info("SET_TRACE v%04x i%04x l%u\n", + w_value, w_index, w_length); + + if (!b_index_value) { + dvct_stop_transfer(d_fun); + status = 0; + } else { + status = dvct_start_transfer(d_fun, b_index_value); + } + break; + } + +done: + if (status >= 0) { + d_fun->cdev->req->zero = 0; + d_fun->cdev->req->length = 0; + status = + usb_ep_queue(d_fun->cdev->gadget->ep0, d_fun->cdev->req, + GFP_ATOMIC); + if (status) + pr_err("Setup response queue error\n"); + } else { + pr_debug("Unexpected request %02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, w_value, w_index, + w_length); + } + + return status; +} + +static int dvct_function_bind(struct usb_configuration *cconfig, + struct usb_function *func) +{ + int id, ret; + struct usb_ep *ep; + struct dvct_function *d_fun = to_dvct_function(func); + + DVCT_IN(); + d_fun->cdev = cconfig->cdev; + + spin_lock(&d_fun->source_dev->lock); + d_fun->source_dev->function_taken = 1; + spin_unlock(&d_fun->source_dev->lock); + + /*allocate id's */ + /*strings. 
not crucial just print on failure */ + if (d_fun->source_dev->desc && d_fun->source_dev->desc->str.strings) { + ret = dvct_strings_setup(d_fun->cdev, + d_fun->source_dev->desc->str.strings, + d_fun->source_dev->desc->lk_tbl); + if (ret) + pr_warn("Cannot allocate source device string id's\n"); + } + ret = dvct_strings_setup(d_fun->cdev, d_fun->desc.str.strings, + d_fun->desc.lk_tbl); + if (ret) + pr_warn("Cannot allocate function string id's\n"); + + /* allocate interface ID(s) */ + id = usb_interface_id(cconfig, func); + if (id < 0) + return id; + + d_fun->desc.c_itf->bInterfaceNumber = id; + d_fun->desc.iad->bFirstInterface = id; + + pr_debug("Setting id %d for dvc-control interface\n", id); + + id = usb_interface_id(cconfig, func); + if (id < 0) + return id; + + d_fun->desc.d_itf->bInterfaceNumber = id; + + pr_debug("Setting id %d for dvc-trace-data interface\n", id); + + /* allocate endpoints */ + d_fun->desc.ss_ep->wMaxPacketSize = 0; /*get the real max */ + ep = usb_ep_autoconfig_ss(d_fun->cdev->gadget, + d_fun->desc.ss_ep, d_fun->desc.ss_ep_comp); + + if (!ep) { + pr_err("usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + + /*copy over the endpoint parameters */ + d_fun->desc.hs_ep->bEndpointAddress = + d_fun->desc.ss_ep->bEndpointAddress; + d_fun->desc.fs_ep->bEndpointAddress = + d_fun->desc.ss_ep->bEndpointAddress; + + if (le16_to_cpu(d_fun->desc.hs_ep->wMaxPacketSize) > + le16_to_cpu(d_fun->desc.ss_ep->wMaxPacketSize)) + d_fun->desc.hs_ep->wMaxPacketSize = + d_fun->desc.ss_ep->wMaxPacketSize; + + if (le16_to_cpu(d_fun->desc.fs_ep->wMaxPacketSize) > + le16_to_cpu(d_fun->desc.ss_ep->wMaxPacketSize)) + d_fun->desc.fs_ep->wMaxPacketSize = + d_fun->desc.ss_ep->wMaxPacketSize; + + pr_info("usb_ep_autoconfig %s, addr 0x%hhx, size ss=%hu hs=%hu fs=%hu\n", + ep->name, + d_fun->desc.ss_ep->bEndpointAddress, + d_fun->desc.ss_ep->wMaxPacketSize, + d_fun->desc.hs_ep->wMaxPacketSize, + d_fun->desc.fs_ep->wMaxPacketSize); + + ep->driver_data = d_fun; /* claim the endpoint */ + d_fun->ep_in = ep; + + ret = d_fun->source_drv->binded(d_fun->source_dev, ep, + &d_fun->function); + + return ret; +} + +static void dvct_function_unbind(struct usb_configuration *c, + struct usb_function *func) +{ + struct dvct_function *d_fun = to_dvct_function(func); + + DVCT_IN(); + dvct_clr_status(&d_fun->status, DVCT_MASK_ONLINE); + d_fun->online_data = 0; + d_fun->online_ctrl = 0; + + d_fun->source_drv->unbinded(d_fun->source_dev); + + spin_lock(&d_fun->source_dev->lock); + d_fun->source_dev->function_taken = 0; + spin_unlock(&d_fun->source_dev->lock); +} + +static int dvct_function_set_alt(struct usb_function *func, + unsigned intf, unsigned alt) +{ + struct dvct_function *d_fun = to_dvct_function(func); + struct usb_composite_dev *cdev = func->config->cdev; + int ret; + + DVCT_IN(); + + if (intf == d_fun->desc.c_itf->bInterfaceNumber) { + d_fun->online_ctrl = 1; + pr_debug("dvc-control interface %u set alt %u\n", intf, alt); + } + + if (intf == d_fun->desc.d_itf->bInterfaceNumber) { + ret = config_ep_by_speed(cdev->gadget, func, d_fun->ep_in); + if (ret) { + pr_debug("intf: %d alt: %d ep_by_speed in err %d\n", + intf, alt, ret); + return ret; + } + + ret = usb_ep_enable(d_fun->ep_in); + if (ret) { + pr_debug("intf: %d alt: %d ep_enable in err %d\n", + intf, alt, ret); + return ret; + } + d_fun->online_data = 1; + } + + pr_info("dvc-trace interface %u set alt %u\n", intf, alt); + + if (unlikely(dvct_get_status(&d_fun->status, DVCT_MASK_TRANS))) + dvct_stop_transfer(d_fun); + + if (d_fun->online_data 
&& d_fun->online_ctrl) { + dvct_set_status(&d_fun->status, DVCT_MASK_ONLINE); + if (d_fun->source_drv->connected) + d_fun->source_drv->connected(d_fun->source_dev, + cdev->gadget->speed); + } + return 0; +} + +static void dvct_function_disable(struct usb_function *func) +{ + struct dvct_function *d_fun = to_dvct_function(func); + struct usb_composite_dev *cdev; + + DVCT_IN(); + + cdev = d_fun->cdev; + + if (dvct_get_status(&d_fun->status, DVCT_MASK_TRANS)) + dvct_stop_transfer(d_fun); + + usb_ep_disable(d_fun->ep_in); + + d_fun->online_ctrl = 0; + d_fun->online_data = 0; + + if (d_fun->source_drv->disconnected) + d_fun->source_drv->disconnected(d_fun->source_dev); + + pr_debug("%s disabled\n", d_fun->function.name); +} + +static void dvct_attr_release(struct config_item *item) +{ + struct dvct_function_inst *d_inst; + + DVCT_IN(); + d_inst = container_of(to_config_group(item), struct dvct_function_inst, + instance.group); + usb_put_function_instance(&d_inst->instance); +} + +static struct configfs_item_operations dvctrace_item_ops = { + .release = dvct_attr_release, +}; + +static ssize_t f_dvctrace_device_show(struct config_item *item, char *page) +{ + struct dvct_function_inst *d_inst; + + DVCT_IN(); + d_inst = container_of(to_config_group(item), struct dvct_function_inst, + instance.group); + + return sprintf(page, "%s\n", dev_name(&d_inst->source_dev->device)); +} + +CONFIGFS_ATTR_RO(f_dvctrace_, device); + +static struct configfs_attribute *dvct_attrs[] = { + &f_dvctrace_attr_device, + NULL, +}; + +static struct config_item_type dvct_func_type = { + .ct_item_ops = &dvctrace_item_ops, + .ct_attrs = dvct_attrs, + .ct_owner = THIS_MODULE, +}; + +static void dvct_free_func_inst(struct usb_function_instance *inst) +{ + struct dvct_function_inst *d_inst; + + DVCT_IN(); + d_inst = to_dvct_function_inst(inst); + + spin_lock(&d_inst->source_dev->lock); + d_inst->source_dev->instance_taken = 0; + spin_unlock(&d_inst->source_dev->lock); + + kfree(d_inst); +} + +static int dvct_set_inst_name(struct usb_function_instance *inst, + const char *name) +{ + struct dvct_function_inst *d_inst; + struct dvct_source_device *new_src; + struct dvct_source_device *old_src; + + DVCT_IN(); + d_inst = to_dvct_function_inst(inst); + old_src = d_inst->source_dev; + + new_src = dvct_source_find_by_name(name); + + if (IS_ERR_OR_NULL(new_src)) + return -ENODEV; + + if (new_src != old_src) { + if (new_src->instance_taken) + return -EBUSY; + + spin_lock(&new_src->lock); + spin_lock(&old_src->lock); + + d_inst->source_dev = new_src; + new_src->instance_taken = 1; + old_src->instance_taken = 0; + + spin_unlock(&old_src->lock); + spin_unlock(&new_src->lock); + } + return 0; +} + +static struct usb_function_instance *dvct_alloc_inst(void) +{ + struct dvct_function_inst *d_inst; + struct dvct_source_device *src_dev = NULL; + + DVCT_IN(); + /*get the first free source, this will change via set name + * if available */ + src_dev = dvct_source_find_free(); + + if (IS_ERR_OR_NULL(src_dev)) + return ERR_PTR(-ENODEV); + + d_inst = kzalloc(sizeof(*d_inst), GFP_KERNEL); + + if (!d_inst) + return ERR_PTR(-ENOMEM); + + d_inst->instance.free_func_inst = dvct_free_func_inst; + d_inst->instance.set_inst_name = dvct_set_inst_name; + + spin_lock(&src_dev->lock); + d_inst->source_dev = src_dev; + src_dev->instance_taken = 1; + spin_unlock(&src_dev->lock); + + config_group_init_type_name(&d_inst->instance.group, + "", &dvct_func_type); + return &d_inst->instance; +} + +static void dvct_free_func(struct usb_function *func) +{ + struct 
dvct_function *d_fun = to_dvct_function(func); + + DVCT_IN(); + d_fun->source_drv->deactivate(d_fun->source_dev); + + dvct_free_desc(d_fun); + + kfree(d_fun); +} + +static struct usb_function *dvct_alloc_func(struct usb_function_instance *inst) +{ + int ret; + struct dvct_function *d_fun; + struct dvct_function_inst *d_inst = to_dvct_function_inst(inst); + + DVCT_IN(); + d_fun = kzalloc(sizeof(struct dvct_function), GFP_KERNEL); + if (!d_fun) + return ERR_PTR(-ENOMEM); + + d_fun->source_dev = d_inst->source_dev; + d_fun->source_drv = dvct_source_get_drv(d_fun->source_dev); + d_fun->trace_config = 0; + + ret = d_fun->source_drv->activate(d_fun->source_dev, &d_fun->status); + if (ret) { + pr_err("Cannot activate source device %d\n", ret); + goto err; + } + + ret = dvct_alloc_desc(d_fun); + if (ret) + goto err_des; + + /*String table*/ + /*1 - source dev (if present) , 1 - function, 1 - NULL */ + if (d_fun->source_dev->desc && d_fun->source_dev->desc->str.strings) + d_fun->function.strings = + kzalloc(3 * sizeof(struct usb_gadget_strings), GFP_KERNEL); + else + d_fun->function.strings = + kzalloc(2 * sizeof(struct usb_gadget_strings), GFP_KERNEL); + + if (!d_fun->function.strings) { + ret = -ENOMEM; + goto err_string_table; + } + + d_fun->function.strings[0] = &d_fun->desc.str; + if (d_fun->source_dev->desc && d_fun->source_dev->desc->str.strings) + d_fun->function.strings[1] = &d_fun->source_dev->desc->str; + + d_fun->function.name = "dvctrace"; + d_fun->function.fs_descriptors = d_fun->desc.fs; + d_fun->function.hs_descriptors = d_fun->desc.hs; + d_fun->function.ss_descriptors = d_fun->desc.ss; + d_fun->function.bind = dvct_function_bind; + d_fun->function.unbind = dvct_function_unbind; + d_fun->function.set_alt = dvct_function_set_alt; + d_fun->function.disable = dvct_function_disable; + d_fun->function.free_func = dvct_free_func; + d_fun->function.setup = dvct_setup; + + return &d_fun->function; + +err_string_table: + dvct_free_desc(d_fun); +err_des: + d_fun->source_drv->deactivate(d_fun->source_dev); +err: + kfree(d_fun); + return ERR_PTR(ret); +} + +DECLARE_USB_FUNCTION_INIT(dvctrace, dvct_alloc_inst, dvct_alloc_func); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DvC-Trace function driver"); +MODULE_AUTHOR("Traian Schiau "); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8b342587f8ad..bfbb56e28cdf 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1010,13 +1010,17 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) if (interrupted) ret = -EINTR; + else if (epfile->ep != ep) { + /* In the meantime, endpoint got disabled or changed. 
*/ + ret = -ESHUTDOWN; + } else if (io_data->read && ep->status > 0) ret = __ffs_epfile_read_data(epfile, data, ep->status, &io_data->data); else ret = ep->status; goto error_mutex; - } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) { + } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { ret = -ENOMEM; } else { req->buf = data; @@ -1539,7 +1543,6 @@ ffs_fs_kill_sb(struct super_block *sb) if (sb->s_fs_info) { ffs_release_dev(sb->s_fs_info); ffs_data_closed(sb->s_fs_info); - ffs_data_put(sb->s_fs_info); } } @@ -1856,44 +1859,20 @@ static int ffs_func_eps_enable(struct ffs_function *func) spin_lock_irqsave(&func->ffs->eps_lock, flags); while(count--) { - struct usb_endpoint_descriptor *ds; - struct usb_ss_ep_comp_descriptor *comp_desc = NULL; - int needs_comp_desc = false; - int desc_idx; - - if (ffs->gadget->speed == USB_SPEED_SUPER) { - desc_idx = 2; - needs_comp_desc = true; - } else if (ffs->gadget->speed == USB_SPEED_HIGH) - desc_idx = 1; - else - desc_idx = 0; - - /* fall-back to lower speed if desc missing for current speed */ - do { - ds = ep->descs[desc_idx]; - } while (!ds && --desc_idx >= 0); - - if (!ds) { - ret = -EINVAL; - break; - } - ep->ep->driver_data = ep; - ep->ep->desc = ds; - if (needs_comp_desc) { - comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + - USB_DT_ENDPOINT_SIZE); - ep->ep->maxburst = comp_desc->bMaxBurst + 1; - ep->ep->comp_desc = comp_desc; + ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); + if (ret) { + pr_err("%s: config_ep_by_speed(%s) returned %d\n", + __func__, ep->ep->name, ret); + break; } ret = usb_ep_enable(ep->ep); if (likely(!ret)) { epfile->ep = ep; - epfile->in = usb_endpoint_dir_in(ds); - epfile->isoc = usb_endpoint_xfer_isoc(ds); + epfile->in = usb_endpoint_dir_in(ep->ep->desc); + epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc); } else { break; } @@ -2286,9 +2265,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, int i; if (len < sizeof(*d) || - d->bFirstInterfaceNumber >= ffs->interfaces_count || - !d->Reserved1) + d->bFirstInterfaceNumber >= ffs->interfaces_count) return -EINVAL; + if (d->Reserved1 != 1) { + /* + * According to the spec, Reserved1 must be set to 1 + * but older kernels incorrectly rejected non-zero + * values. We fix it here to avoid returning EINVAL + * in response to values we used to accept. 
+ */ + pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n"); + d->Reserved1 = 1; + } for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) if (d->Reserved2[i]) return -EINVAL; @@ -2971,10 +2959,8 @@ static int _ffs_func_bind(struct usb_configuration *c, struct ffs_data *ffs = func->ffs; const int full = !!func->ffs->fs_descs_count; - const int high = gadget_is_dualspeed(func->gadget) && - func->ffs->hs_descs_count; - const int super = gadget_is_superspeed(func->gadget) && - func->ffs->ss_descs_count; + const int high = !!func->ffs->hs_descs_count; + const int super = !!func->ffs->ss_descs_count; int fs_len, hs_len, ss_len, ret, i; struct ffs_ep *eps_ptr; @@ -3677,6 +3663,7 @@ static void ffs_closed(struct ffs_data *ffs) goto done; ffs_obj->desc_ready = false; + ffs_obj->ffs_data = NULL; if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && ffs_obj->ffs_closed_callback) @@ -3694,7 +3681,8 @@ static void ffs_closed(struct ffs_data *ffs) ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; ffs_dev_unlock(); - unregister_gadget_item(ci); + if (test_bit(FFS_FL_BOUND, &ffs->flags)) + unregister_gadget_item(ci); return; done: ffs_dev_unlock(); diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 5d3d7941d2c2..7d2e5e6e6ba2 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -1207,6 +1207,65 @@ static void f_midi_free_inst(struct usb_function_instance *f) kfree(opts); } +#ifdef CONFIG_USB_CONFIGFS_UEVENT +extern struct device *create_function_device(char *name); +static ssize_t alsa_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_function_instance *fi_midi = dev_get_drvdata(dev); + struct f_midi *midi; + + if (!fi_midi->f) + dev_warn(dev, "f_midi: function not set\n"); + + if (fi_midi && fi_midi->f) { + midi = func_to_midi(fi_midi->f); + if (midi->rmidi && midi->rmidi->card) + return sprintf(buf, "%d %d\n", + midi->rmidi->card->number, midi->rmidi->device); + } + + /* print PCM card and device numbers */ + return sprintf(buf, "%d %d\n", -1, -1); +} + +static DEVICE_ATTR(alsa, S_IRUGO, alsa_show, NULL); + +static struct device_attribute *alsa_function_attributes[] = { + &dev_attr_alsa, + NULL +}; + +static int create_alsa_device(struct usb_function_instance *fi) +{ + struct device *dev; + struct device_attribute **attrs; + struct device_attribute *attr; + int err = 0; + + dev = create_function_device("f_midi"); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + attrs = alsa_function_attributes; + if (attrs) { + while ((attr = *attrs++) && !err) + err = device_create_file(dev, attr); + if (err) { + device_destroy(dev->class, dev->devt); + return -EINVAL; + } + } + dev_set_drvdata(dev, fi); + return 0; +} +#else +static int create_alsa_device(struct usb_function_instance *fi) +{ + return 0; +} +#endif + static struct usb_function_instance *f_midi_alloc_inst(void) { struct f_midi_opts *opts; @@ -1224,6 +1283,11 @@ static struct usb_function_instance *f_midi_alloc_inst(void) opts->in_ports = 1; opts->out_ports = 1; + if (create_alsa_device(&opts->func_inst)) { + kfree(opts); + return ERR_PTR(-ENODEV); + } + config_group_init_type_name(&opts->func_inst.group, "", &midi_func_type); @@ -1242,6 +1306,7 @@ static void f_midi_free(struct usb_function *f) kfree(midi->id); kfifo_free(&midi->in_req_fifo); kfree(midi); + opts->func_inst.f = NULL; --opts->refcnt; } mutex_unlock(&opts->lock); @@ -1328,6 +1393,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance 
*fi) midi->func.disable = f_midi_disable; midi->func.free_func = f_midi_free; + fi->f = &midi->func; return &midi->func; setup_fail: diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c new file mode 100644 index 000000000000..54f7ebbf858e --- /dev/null +++ b/drivers/usb/gadget/function/f_mtp.c @@ -0,0 +1,1554 @@ +/* + * Gadget Function Driver for MTP + * + * Copyright (C) 2010 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "configfs.h" + +#define MTP_BULK_BUFFER_SIZE 16384 +#define INTR_BUFFER_SIZE 28 +#define MAX_INST_NAME_LEN 40 +#define MTP_MAX_FILE_SIZE 0xFFFFFFFFL + +/* String IDs */ +#define INTERFACE_STRING_INDEX 0 + +/* values for mtp_dev.state */ +#define STATE_OFFLINE 0 /* initial state, disconnected */ +#define STATE_READY 1 /* ready for userspace calls */ +#define STATE_BUSY 2 /* processing userspace calls */ +#define STATE_CANCELED 3 /* transaction canceled by host */ +#define STATE_ERROR 4 /* error from completion routine */ + +/* number of tx and rx requests to allocate */ +#define TX_REQ_MAX 4 +#define RX_REQ_MAX 2 +#define INTR_REQ_MAX 5 + +/* ID for Microsoft MTP OS String */ +#define MTP_OS_STRING_ID 0xEE + +/* MTP class reqeusts */ +#define MTP_REQ_CANCEL 0x64 +#define MTP_REQ_GET_EXT_EVENT_DATA 0x65 +#define MTP_REQ_RESET 0x66 +#define MTP_REQ_GET_DEVICE_STATUS 0x67 + +/* constants for device status */ +#define MTP_RESPONSE_OK 0x2001 +#define MTP_RESPONSE_DEVICE_BUSY 0x2019 +#define DRIVER_NAME "mtp" + +static const char mtp_shortname[] = DRIVER_NAME "_usb"; + +struct mtp_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + struct usb_ep *ep_intr; + + int state; + + /* synchronize access to our device file */ + atomic_t open_excl; + /* to enforce only one ioctl at a time */ + atomic_t ioctl_excl; + + struct list_head tx_idle; + struct list_head intr_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + wait_queue_head_t intr_wq; + struct usb_request *rx_req[RX_REQ_MAX]; + int rx_done; + + /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and + * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue + */ + struct workqueue_struct *wq; + struct work_struct send_file_work; + struct work_struct receive_file_work; + struct file *xfer_file; + loff_t xfer_file_offset; + int64_t xfer_file_length; + unsigned xfer_send_header; + uint16_t xfer_command; + uint32_t xfer_transaction_id; + int xfer_result; +}; + +static struct usb_interface_descriptor mtp_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, + .bInterfaceProtocol = 0, +}; + +static struct usb_interface_descriptor 
ptp_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_STILL_IMAGE, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 1, +}; + +static struct usb_endpoint_descriptor mtp_ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = { + .bLength = sizeof(mtp_ss_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* .bMaxBurst = DYNAMIC, */ +}; + +static struct usb_endpoint_descriptor mtp_ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = { + .bLength = sizeof(mtp_ss_out_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* .bMaxBurst = DYNAMIC, */ +}; + +static struct usb_endpoint_descriptor mtp_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor mtp_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor mtp_intr_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE), + .bInterval = 6, +}; + +static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = { + .bLength = sizeof(mtp_intr_ss_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE), +}; + +static struct usb_descriptor_header *fs_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_highspeed_in_desc, + (struct usb_descriptor_header *) &mtp_highspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *ss_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_ss_in_desc, + (struct usb_descriptor_header *) &mtp_ss_in_comp_desc, + (struct usb_descriptor_header *) &mtp_ss_out_desc, + (struct usb_descriptor_header *) 
&mtp_ss_out_comp_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc, + NULL, +}; + +static struct usb_descriptor_header *fs_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_highspeed_in_desc, + (struct usb_descriptor_header *) &mtp_highspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *ss_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_ss_in_desc, + (struct usb_descriptor_header *) &mtp_ss_in_comp_desc, + (struct usb_descriptor_header *) &mtp_ss_out_desc, + (struct usb_descriptor_header *) &mtp_ss_out_comp_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc, + NULL, +}; + +static struct usb_string mtp_string_defs[] = { + /* Naming interface "MTP" so libmtp will recognize us */ + [INTERFACE_STRING_INDEX].s = "MTP", + { }, /* end of list */ +}; + +static struct usb_gadget_strings mtp_string_table = { + .language = 0x0409, /* en-US */ + .strings = mtp_string_defs, +}; + +static struct usb_gadget_strings *mtp_strings[] = { + &mtp_string_table, + NULL, +}; + +/* Microsoft MTP OS String */ +static u8 mtp_os_string[] = { + 18, /* sizeof(mtp_os_string) */ + USB_DT_STRING, + /* Signature field: "MSFT100" */ + 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, + /* vendor code */ + 1, + /* padding */ + 0 +}; + +/* Microsoft Extended Configuration Descriptor Header Section */ +struct mtp_ext_config_desc_header { + __le32 dwLength; + __u16 bcdVersion; + __le16 wIndex; + __u8 bCount; + __u8 reserved[7]; +}; + +/* Microsoft Extended Configuration Descriptor Function Section */ +struct mtp_ext_config_desc_function { + __u8 bFirstInterfaceNumber; + __u8 bInterfaceCount; + __u8 compatibleID[8]; + __u8 subCompatibleID[8]; + __u8 reserved[6]; +}; + +/* MTP Extended Configuration Descriptor */ +struct { + struct mtp_ext_config_desc_header header; + struct mtp_ext_config_desc_function function; +} mtp_ext_config_desc = { + .header = { + .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)), + .bcdVersion = __constant_cpu_to_le16(0x0100), + .wIndex = __constant_cpu_to_le16(4), + .bCount = 1, + }, + .function = { + .bFirstInterfaceNumber = 0, + .bInterfaceCount = 1, + .compatibleID = { 'M', 'T', 'P' }, + }, +}; + +struct mtp_device_status { + __le16 wLength; + __le16 wCode; +}; + +struct mtp_data_header { + /* length of packet, including this header */ + __le32 length; + /* container type (2 for data packet) */ + __le16 type; + /* MTP command code */ + __le16 command; + /* MTP transaction ID */ + __le32 transaction_id; +}; + +struct mtp_instance { + struct usb_function_instance func_inst; + const char *name; + struct mtp_dev *dev; + char mtp_ext_compat_id[16]; + struct usb_os_desc mtp_os_desc; +}; + +/* temporary variable used between mtp_open() and mtp_gadget_bind() */ +static struct mtp_dev *_mtp_dev; + +static inline struct mtp_dev *func_to_mtp(struct usb_function *f) +{ + return container_of(f, struct mtp_dev, function); +} + +static struct usb_request *mtp_request_new(struct usb_ep 
*ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void mtp_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static inline int mtp_lock(atomic_t *excl) +{ + if (atomic_inc_return(excl) == 1) { + return 0; + } else { + atomic_dec(excl); + return -1; + } +} + +static inline void mtp_unlock(atomic_t *excl) +{ + atomic_dec(excl); +} + +/* add a request to the tail of a list */ +static void mtp_req_put(struct mtp_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request +*mtp_req_get(struct mtp_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + if (req->status != 0) + dev->state = STATE_ERROR; + + mtp_req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + dev->rx_done = 1; + if (req->status != 0) + dev->state = STATE_ERROR; + + wake_up(&dev->read_wq); +} + +static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + if (req->status != 0) + dev->state = STATE_ERROR; + + mtp_req_put(dev, &dev->intr_idle, req); + + wake_up(&dev->intr_wq); +} + +static int mtp_create_bulk_endpoints(struct mtp_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc, + struct usb_endpoint_descriptor *intr_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + ep = usb_ep_autoconfig(cdev->gadget, intr_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_intr = ep; + + /* now allocate requests for our endpoints */ + for (i = 0; i < TX_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_in; + mtp_req_put(dev, &dev->tx_idle, req); + } + for (i = 0; i < 
RX_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_out; + dev->rx_req[i] = req; + } + for (i = 0; i < INTR_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_intr; + mtp_req_put(dev, &dev->intr_idle, req); + } + + return 0; + +fail: + pr_err("mtp_bind() could not allocate requests\n"); + return -1; +} + +static ssize_t mtp_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct mtp_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + ssize_t r = count; + unsigned xfer; + int ret = 0; + size_t len = 0; + + DBG(cdev, "mtp_read(%zu)\n", count); + + /* we will block until we're online */ + DBG(cdev, "mtp_read: waiting for online state\n"); + ret = wait_event_interruptible(dev->read_wq, + dev->state != STATE_OFFLINE); + if (ret < 0) { + r = ret; + goto done; + } + spin_lock_irq(&dev->lock); + if (dev->ep_out->desc) { + len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count); + if (len > MTP_BULK_BUFFER_SIZE) { + spin_unlock_irq(&dev->lock); + return -EINVAL; + } + } + + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + +requeue_req: + /* queue a request */ + req = dev->rx_req[0]; + req->length = len; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + goto done; + } else { + DBG(cdev, "rx %p queue\n", req); + } + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + if (ret < 0) { + r = ret; + usb_ep_dequeue(dev->ep_out, req); + goto done; + } + if (dev->state == STATE_BUSY) { + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) + goto requeue_req; + + DBG(cdev, "rx %p %d\n", req, req->actual); + xfer = (req->actual < count) ? req->actual : count; + r = xfer; + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + } else + r = -EIO; + +done: + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + r = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + + DBG(cdev, "mtp_read returning %zd\n", r); + return r; +} + +static ssize_t mtp_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mtp_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + ssize_t r = count; + unsigned xfer; + int sendZLP = 0; + int ret; + + DBG(cdev, "mtp_write(%zu)\n", count); + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + return -ENODEV; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + /* we need to send a zero length packet to signal the end of transfer + * if the transfer size is aligned to a packet boundary. 
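A minimal standalone sketch (not part of the patch) of the zero-length-packet rule the comment above describes and that mtp_write() and send_file_work() below both apply; it only assumes that bulk maxpacket values (64/512/1024) are powers of two, so the mask test is equivalent to a modulo:

static inline bool mtp_needs_zlp(size_t count, unsigned int maxpacket)
{
        /* True when the transfer ends exactly on a packet boundary:
         * count = 1024 with maxpacket = 512 needs a trailing ZLP,
         * count = 1000 does not. */
        return (count & (maxpacket - 1)) == 0;
}

When this test holds for dev->ep_in->maxpacket, the write loops below run one extra iteration with a zero-byte request queued, so the host can detect the end of the transfer.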
+ */ + if ((count & (dev->ep_in->maxpacket - 1)) == 0) + sendZLP = 1; + + while (count > 0 || sendZLP) { + /* so we exit after sending ZLP */ + if (count == 0) + sendZLP = 0; + + if (dev->state != STATE_BUSY) { + DBG(cdev, "mtp_write dev->error\n"); + r = -EIO; + break; + } + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + ((req = mtp_req_get(dev, &dev->tx_idle)) + || dev->state != STATE_BUSY)); + if (!req) { + r = ret; + break; + } + + if (count > MTP_BULK_BUFFER_SIZE) + xfer = MTP_BULK_BUFFER_SIZE; + else + xfer = count; + if (xfer && copy_from_user(req->buf, buf, xfer)) { + r = -EFAULT; + break; + } + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "mtp_write: xfer error %d\n", ret); + r = -EIO; + break; + } + + buf += xfer; + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + mtp_req_put(dev, &dev->tx_idle, req); + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + r = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + + DBG(cdev, "mtp_write returning %zd\n", r); + return r; +} + +/* read from a local file and write to USB */ +static void send_file_work(struct work_struct *data) +{ + struct mtp_dev *dev = container_of(data, struct mtp_dev, + send_file_work); + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + struct mtp_data_header *header; + struct file *filp; + loff_t offset; + int64_t count; + int xfer, ret, hdr_size; + int r = 0; + int sendZLP = 0; + + /* read our parameters */ + smp_rmb(); + filp = dev->xfer_file; + offset = dev->xfer_file_offset; + count = dev->xfer_file_length; + + DBG(cdev, "send_file_work(%lld %lld)\n", offset, count); + + if (dev->xfer_send_header) { + hdr_size = sizeof(struct mtp_data_header); + count += hdr_size; + } else { + hdr_size = 0; + } + + /* we need to send a zero length packet to signal the end of transfer + * if the transfer size is aligned to a packet boundary. + */ + if ((count & (dev->ep_in->maxpacket - 1)) == 0) + sendZLP = 1; + + while (count > 0 || sendZLP) { + /* so we exit after sending ZLP */ + if (count == 0) + sendZLP = 0; + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + (req = mtp_req_get(dev, &dev->tx_idle)) + || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + break; + } + if (!req) { + r = ret; + break; + } + + if (count > MTP_BULK_BUFFER_SIZE) + xfer = MTP_BULK_BUFFER_SIZE; + else + xfer = count; + + if (hdr_size) { + /* prepend MTP data header */ + header = (struct mtp_data_header *)req->buf; + /* + * set file size with header according to + * MTP Specification v1.0 + */ + header->length = (count > MTP_MAX_FILE_SIZE) ? 
+ MTP_MAX_FILE_SIZE : __cpu_to_le32(count); + header->type = __cpu_to_le16(2); /* data packet */ + header->command = __cpu_to_le16(dev->xfer_command); + header->transaction_id = + __cpu_to_le32(dev->xfer_transaction_id); + } + + ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, + &offset); + if (ret < 0) { + r = ret; + break; + } + xfer = ret + hdr_size; + hdr_size = 0; + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "send_file_work: xfer error %d\n", ret); + dev->state = STATE_ERROR; + r = -EIO; + break; + } + + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + mtp_req_put(dev, &dev->tx_idle, req); + + DBG(cdev, "send_file_work returning %d\n", r); + /* write the result */ + dev->xfer_result = r; + smp_wmb(); +} + +/* read from USB and write to a local file */ +static void receive_file_work(struct work_struct *data) +{ + struct mtp_dev *dev = container_of(data, struct mtp_dev, + receive_file_work); + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *read_req = NULL, *write_req = NULL; + struct file *filp; + loff_t offset; + int64_t count, len; + int ret, cur_buf = 0; + int r = 0; + + /* read our parameters */ + smp_rmb(); + filp = dev->xfer_file; + offset = dev->xfer_file_offset; + count = dev->xfer_file_length; + + DBG(cdev, "receive_file_work(%lld)\n", count); + + while (count > 0 || write_req) { + if (count > 0) { + /* queue a request */ + read_req = dev->rx_req[cur_buf]; + cur_buf = (cur_buf + 1) % RX_REQ_MAX; + + len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count); + if (len > MTP_BULK_BUFFER_SIZE) + len = MTP_BULK_BUFFER_SIZE; + read_req->length = len; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + dev->state = STATE_ERROR; + break; + } + } + + if (write_req) { + DBG(cdev, "rx %p %d\n", write_req, write_req->actual); + ret = vfs_write(filp, write_req->buf, write_req->actual, + &offset); + DBG(cdev, "vfs_write %d\n", ret); + if (ret != write_req->actual) { + r = -EIO; + dev->state = STATE_ERROR; + break; + } + write_req = NULL; + } + + if (read_req) { + /* wait for our last read to complete */ + ret = wait_event_interruptible(dev->read_wq, + dev->rx_done || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + if (!dev->rx_done) + usb_ep_dequeue(dev->ep_out, read_req); + break; + } + if (read_req->status) { + r = read_req->status; + break; + } + /* if xfer_file_length is 0xFFFFFFFF, then we read until + * we get a zero length packet + */ + if (count != 0xFFFFFFFF) + count -= read_req->actual; + if (read_req->actual < read_req->length) { + /* + * short packet is used to signal EOF for + * sizes > 4 gig + */ + DBG(cdev, "got short packet\n"); + count = 0; + } + + write_req = read_req; + read_req = NULL; + } + } + + DBG(cdev, "receive_file_work returning %d\n", r); + /* write the result */ + dev->xfer_result = r; + smp_wmb(); +} + +static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) +{ + struct usb_request *req = NULL; + int ret; + int length = event->length; + + DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length); + + if (length < 0 || length > INTR_BUFFER_SIZE) + return -EINVAL; + if (dev->state == STATE_OFFLINE) + return -ENODEV; + + ret = wait_event_interruptible_timeout(dev->intr_wq, + (req = mtp_req_get(dev, &dev->intr_idle)), + msecs_to_jiffies(1000)); + if (!req) + return -ETIME; + + if (copy_from_user(req->buf, (void __user 
*)event->data, length)) { + mtp_req_put(dev, &dev->intr_idle, req); + return -EFAULT; + } + req->length = length; + ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL); + if (ret) + mtp_req_put(dev, &dev->intr_idle, req); + + return ret; +} + +static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) +{ + struct mtp_dev *dev = fp->private_data; + struct file *filp = NULL; + int ret = -EINVAL; + + if (mtp_lock(&dev->ioctl_excl)) + return -EBUSY; + + switch (code) { + case MTP_SEND_FILE: + case MTP_RECEIVE_FILE: + case MTP_SEND_FILE_WITH_HEADER: + { + struct mtp_file_range mfr; + struct work_struct *work; + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + ret = -ECANCELED; + goto out; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + ret = -ENODEV; + goto out; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { + ret = -EFAULT; + goto fail; + } + /* hold a reference to the file while we are working with it */ + filp = fget(mfr.fd); + if (!filp) { + ret = -EBADF; + goto fail; + } + + /* write the parameters */ + dev->xfer_file = filp; + dev->xfer_file_offset = mfr.offset; + dev->xfer_file_length = mfr.length; + smp_wmb(); + + if (code == MTP_SEND_FILE_WITH_HEADER) { + work = &dev->send_file_work; + dev->xfer_send_header = 1; + dev->xfer_command = mfr.command; + dev->xfer_transaction_id = mfr.transaction_id; + } else if (code == MTP_SEND_FILE) { + work = &dev->send_file_work; + dev->xfer_send_header = 0; + } else { + work = &dev->receive_file_work; + } + + /* We do the file transfer on a work queue so it will run + * in kernel context, which is necessary for vfs_read and + * vfs_write to use our buffers in the kernel address space. + */ + queue_work(dev->wq, work); + /* wait for operation to complete */ + flush_workqueue(dev->wq); + fput(filp); + + /* read the result */ + smp_rmb(); + ret = dev->xfer_result; + break; + } + case MTP_SEND_EVENT: + { + struct mtp_event event; + /* return here so we don't change dev->state below, + * which would interfere with bulk transfer state. 
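A sketch, not part of the patch, of how a userspace MTP stack is expected to drive the MTP_SEND_FILE path implemented in mtp_ioctl() above. It assumes the uapi header that declares struct mtp_file_range and the MTP_* ioctl numbers (the field names mirror the mfr.fd/.offset/.length accesses above); the device node name follows from mtp_shortname ("mtp_usb"):

#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>    /* assumed location of struct mtp_file_range */

/* Stream 'len' bytes of 'data_fd', starting at 'off', to the host over
 * the bulk-in endpoint. Blocks until send_file_work() has run and
 * stored its result, which ioctl() then returns. */
static int mtp_send_object(int mtp_fd, int data_fd, int64_t off, int64_t len)
{
        struct mtp_file_range mfr = {
                .fd     = data_fd,
                .offset = off,
                .length = len,
        };

        return ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
}

Typical use is to open /dev/mtp_usb once per session and issue MTP_SEND_FILE / MTP_RECEIVE_FILE / MTP_SEND_EVENT one at a time; concurrent ioctls fail with -EBUSY via ioctl_excl.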
+ */ + if (copy_from_user(&event, (void __user *)value, sizeof(event))) + ret = -EFAULT; + else + ret = mtp_send_event(dev, &event); + goto out; + } + } + +fail: + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + ret = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); +out: + mtp_unlock(&dev->ioctl_excl); + DBG(dev->cdev, "ioctl returning %d\n", ret); + return ret; +} + +static int mtp_open(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "mtp_open\n"); + if (mtp_lock(&_mtp_dev->open_excl)) + return -EBUSY; + + /* clear any error condition */ + if (_mtp_dev->state != STATE_OFFLINE) + _mtp_dev->state = STATE_READY; + + fp->private_data = _mtp_dev; + return 0; +} + +static int mtp_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "mtp_release\n"); + + mtp_unlock(&_mtp_dev->open_excl); + return 0; +} + +/* file operations for /dev/mtp_usb */ +static const struct file_operations mtp_fops = { + .owner = THIS_MODULE, + .read = mtp_read, + .write = mtp_write, + .unlocked_ioctl = mtp_ioctl, + .open = mtp_open, + .release = mtp_release, +}; + +static struct miscdevice mtp_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = mtp_shortname, + .fops = &mtp_fops, +}; + +static int mtp_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + struct mtp_dev *dev = _mtp_dev; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + unsigned long flags; + + VDBG(cdev, "mtp_ctrlrequest " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + + /* Handle MTP OS string */ + if (ctrl->bRequestType == + (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) + && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR + && (w_value >> 8) == USB_DT_STRING + && (w_value & 0xFF) == MTP_OS_STRING_ID) { + value = (w_length < sizeof(mtp_os_string) + ? w_length : sizeof(mtp_os_string)); + memcpy(cdev->req->buf, mtp_os_string, value); + } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { + /* Handle MTP OS descriptor */ + DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n", + ctrl->bRequest, w_index, w_value, w_length); + + if (ctrl->bRequest == 1 + && (ctrl->bRequestType & USB_DIR_IN) + && (w_index == 4 || w_index == 5)) { + value = (w_length < sizeof(mtp_ext_config_desc) ? + w_length : sizeof(mtp_ext_config_desc)); + memcpy(cdev->req->buf, &mtp_ext_config_desc, value); + } + } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { + DBG(cdev, "class request: %d index: %d value: %d length: %d\n", + ctrl->bRequest, w_index, w_value, w_length); + + if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0 + && w_value == 0) { + DBG(cdev, "MTP_REQ_CANCEL\n"); + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state == STATE_BUSY) { + dev->state = STATE_CANCELED; + wake_up(&dev->read_wq); + wake_up(&dev->write_wq); + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* We need to queue a request to read the remaining + * bytes, but we don't actually need to look at + * the contents. 
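For reference, a sketch (not part of the patch) of the 4-byte reply that the MTP_REQ_GET_DEVICE_STATUS branch just below hands back to the host, built from the mtp_device_status and MTP_RESPONSE_* definitions earlier in this file:

struct mtp_device_status reply = {
        .wLength = __constant_cpu_to_le16(sizeof(reply)),      /* always 4 */
        /* MTP_RESPONSE_DEVICE_BUSY (0x2019) while a cancel is still
         * pending in userspace, MTP_RESPONSE_OK (0x2001) otherwise. */
        .wCode   = __cpu_to_le16(MTP_RESPONSE_OK),
};

The handler answers DEVICE_BUSY for as long as dev->state is STATE_CANCELED, i.e. until one of mtp_read()/mtp_write()/mtp_ioctl() has reported -ECANCELED to userspace and reset the state to STATE_READY.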
+ */ + value = w_length; + } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS + && w_index == 0 && w_value == 0) { + struct mtp_device_status *status = cdev->req->buf; + + status->wLength = + __constant_cpu_to_le16(sizeof(*status)); + + DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n"); + spin_lock_irqsave(&dev->lock, flags); + /* device status is "busy" until we report + * the cancelation to userspace + */ + if (dev->state == STATE_CANCELED) + status->wCode = + __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY); + else + status->wCode = + __cpu_to_le16(MTP_RESPONSE_OK); + spin_unlock_irqrestore(&dev->lock, flags); + value = sizeof(*status); + } + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + int rc; + + cdev->req->zero = value < w_length; + cdev->req->length = value; + rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (rc < 0) + ERROR(cdev, "%s: response queue error\n", __func__); + } + return value; +} + +static int +mtp_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct mtp_dev *dev = func_to_mtp(f); + int id; + int ret; + struct mtp_instance *fi_mtp; + + dev->cdev = cdev; + DBG(cdev, "mtp_function_bind dev: %p\n", dev); + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + mtp_interface_desc.bInterfaceNumber = id; + + if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) { + ret = usb_string_id(c->cdev); + if (ret < 0) + return ret; + mtp_string_defs[INTERFACE_STRING_INDEX].id = ret; + mtp_interface_desc.iInterface = ret; + } + + fi_mtp = container_of(f->fi, struct mtp_instance, func_inst); + + if (cdev->use_os_string) { + f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), + GFP_KERNEL); + if (!f->os_desc_table) + return -ENOMEM; + f->os_desc_n = 1; + f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc; + } + + /* allocate endpoints */ + ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc, + &mtp_fullspeed_out_desc, &mtp_intr_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + mtp_highspeed_in_desc.bEndpointAddress = + mtp_fullspeed_in_desc.bEndpointAddress; + mtp_highspeed_out_desc.bEndpointAddress = + mtp_fullspeed_out_desc.bEndpointAddress; + } + /* support super speed hardware */ + if (gadget_is_superspeed(c->cdev->gadget)) { + unsigned max_burst; + + /* Calculate bMaxBurst, we know packet size is 1024 */ + max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15); + mtp_ss_in_desc.bEndpointAddress = + mtp_fullspeed_in_desc.bEndpointAddress; + mtp_ss_in_comp_desc.bMaxBurst = max_burst; + mtp_ss_out_desc.bEndpointAddress = + mtp_fullspeed_out_desc.bEndpointAddress; + mtp_ss_out_comp_desc.bMaxBurst = max_burst; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + (gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full"), + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static void +mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct mtp_dev *dev = func_to_mtp(f); + struct usb_request *req; + int i; + + mtp_string_defs[INTERFACE_STRING_INDEX].id = 0; + while ((req = mtp_req_get(dev, &dev->tx_idle))) + mtp_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + mtp_request_free(dev->rx_req[i], dev->ep_out); + while ((req = mtp_req_get(dev, &dev->intr_idle))) + mtp_request_free(req, dev->ep_intr); + dev->state = STATE_OFFLINE; + kfree(f->os_desc_table); + f->os_desc_n = 0; +} + +static int mtp_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct mtp_dev *dev = func_to_mtp(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt); + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_in); + if (ret) + return ret; + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_out); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_intr); + if (ret) { + usb_ep_disable(dev->ep_out); + usb_ep_disable(dev->ep_in); + return ret; + } + dev->state = STATE_READY; + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void mtp_function_disable(struct usb_function *f) +{ + struct mtp_dev *dev = func_to_mtp(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "mtp_function_disable\n"); + dev->state = STATE_OFFLINE; + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + usb_ep_disable(dev->ep_intr); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int __mtp_setup(struct mtp_instance *fi_mtp) +{ + struct mtp_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + + if (fi_mtp != NULL) + fi_mtp->dev = dev; + + if (!dev) + return -ENOMEM; + + spin_lock_init(&dev->lock); + init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + init_waitqueue_head(&dev->intr_wq); + atomic_set(&dev->open_excl, 0); + atomic_set(&dev->ioctl_excl, 0); + INIT_LIST_HEAD(&dev->tx_idle); + INIT_LIST_HEAD(&dev->intr_idle); + + dev->wq = create_singlethread_workqueue("f_mtp"); + if (!dev->wq) { + ret = -ENOMEM; + goto err1; + } + INIT_WORK(&dev->send_file_work, send_file_work); + INIT_WORK(&dev->receive_file_work, receive_file_work); + + _mtp_dev = dev; + + ret = misc_register(&mtp_device); + if (ret) + goto err2; + + return 0; + +err2: + destroy_workqueue(dev->wq); +err1: + _mtp_dev = NULL; + kfree(dev); + printk(KERN_ERR "mtp gadget driver failed to initialize\n"); + return ret; +} + +static int mtp_setup_configfs(struct mtp_instance *fi_mtp) +{ + return __mtp_setup(fi_mtp); +} + + +static void mtp_cleanup(void) +{ + struct mtp_dev *dev = _mtp_dev; + + if (!dev) + return; + + misc_deregister(&mtp_device); + destroy_workqueue(dev->wq); + _mtp_dev = NULL; + kfree(dev); +} + +static struct mtp_instance *to_mtp_instance(struct config_item *item) +{ + return container_of(to_config_group(item), struct mtp_instance, + func_inst.group); +} + +static void mtp_attr_release(struct config_item *item) +{ + struct mtp_instance 
*fi_mtp = to_mtp_instance(item); + + usb_put_function_instance(&fi_mtp->func_inst); +} + +static struct configfs_item_operations mtp_item_ops = { + .release = mtp_attr_release, +}; + +static struct config_item_type mtp_func_type = { + .ct_item_ops = &mtp_item_ops, + .ct_owner = THIS_MODULE, +}; + + +static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi) +{ + return container_of(fi, struct mtp_instance, func_inst); +} + +static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name) +{ + struct mtp_instance *fi_mtp; + char *ptr; + int name_len; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + fi_mtp = to_fi_mtp(fi); + fi_mtp->name = ptr; + + return 0; +} + +static void mtp_free_inst(struct usb_function_instance *fi) +{ + struct mtp_instance *fi_mtp; + + fi_mtp = to_fi_mtp(fi); + kfree(fi_mtp->name); + mtp_cleanup(); + kfree(fi_mtp); +} + +struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config) +{ + struct mtp_instance *fi_mtp; + int ret = 0; + struct usb_os_desc *descs[1]; + char *names[1]; + + fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL); + if (!fi_mtp) + return ERR_PTR(-ENOMEM); + fi_mtp->func_inst.set_inst_name = mtp_set_inst_name; + fi_mtp->func_inst.free_func_inst = mtp_free_inst; + + fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id; + INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop); + descs[0] = &fi_mtp->mtp_os_desc; + names[0] = "MTP"; + + if (mtp_config) { + ret = mtp_setup_configfs(fi_mtp); + if (ret) { + kfree(fi_mtp); + pr_err("Error setting MTP\n"); + return ERR_PTR(ret); + } + } else + fi_mtp->dev = _mtp_dev; + + config_group_init_type_name(&fi_mtp->func_inst.group, + "", &mtp_func_type); + usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1, + descs, names, THIS_MODULE); + + return &fi_mtp->func_inst; +} +EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp); + +static struct usb_function_instance *mtp_alloc_inst(void) +{ + return alloc_inst_mtp_ptp(true); +} + +static int mtp_ctrlreq_configfs(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + return mtp_ctrlrequest(f->config->cdev, ctrl); +} + +static void mtp_free(struct usb_function *f) +{ + /*NO-OP: no function specific resource allocation in mtp_alloc*/ +} + +struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi, + bool mtp_config) +{ + struct mtp_instance *fi_mtp = to_fi_mtp(fi); + struct mtp_dev *dev; + + /* + * PTP piggybacks on MTP function so make sure we have + * created MTP function before we associate this PTP + * function with a gadget configuration. 
+ */ + if (fi_mtp->dev == NULL) { + pr_err("Error: Create MTP function before linking" + " PTP function with a gadget configuration\n"); + pr_err("\t1: Delete existing PTP function if any\n"); + pr_err("\t2: Create MTP function\n"); + pr_err("\t3: Create and symlink PTP function" + " with a gadget configuration\n"); + return ERR_PTR(-EINVAL); /* Invalid Configuration */ + } + + dev = fi_mtp->dev; + dev->function.name = DRIVER_NAME; + dev->function.strings = mtp_strings; + if (mtp_config) { + dev->function.fs_descriptors = fs_mtp_descs; + dev->function.hs_descriptors = hs_mtp_descs; + dev->function.ss_descriptors = ss_mtp_descs; + } else { + dev->function.fs_descriptors = fs_ptp_descs; + dev->function.hs_descriptors = hs_ptp_descs; + dev->function.ss_descriptors = ss_ptp_descs; + } + dev->function.bind = mtp_function_bind; + dev->function.unbind = mtp_function_unbind; + dev->function.set_alt = mtp_function_set_alt; + dev->function.disable = mtp_function_disable; + dev->function.setup = mtp_ctrlreq_configfs; + dev->function.free_func = mtp_free; + + return &dev->function; +} +EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp); + +static struct usb_function *mtp_alloc(struct usb_function_instance *fi) +{ + return function_alloc_mtp_ptp(fi, true); +} + +DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_mtp.h b/drivers/usb/gadget/function/f_mtp.h new file mode 100644 index 000000000000..7adb1ff08eff --- /dev/null +++ b/drivers/usb/gadget/function/f_mtp.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Badhri Jagan Sridharan + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +extern struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config); +extern struct usb_function *function_alloc_mtp_ptp( + struct usb_function_instance *fi, bool mtp_config); diff --git a/drivers/usb/gadget/function/f_ptp.c b/drivers/usb/gadget/function/f_ptp.c new file mode 100644 index 000000000000..da3e4d53e085 --- /dev/null +++ b/drivers/usb/gadget/function/f_ptp.c @@ -0,0 +1,38 @@ +/* + * Gadget Function Driver for PTP + * + * Copyright (C) 2014 Google, Inc. + * Author: Badhri Jagan Sridharan + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include +#include + +#include "f_mtp.h" + +static struct usb_function_instance *ptp_alloc_inst(void) +{ + return alloc_inst_mtp_ptp(false); +} + +static struct usb_function *ptp_alloc(struct usb_function_instance *fi) +{ + return function_alloc_mtp_ptp(fi, false); +} + +DECLARE_USB_FUNCTION_INIT(ptp, ptp_alloc_inst, ptp_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Badhri Jagan Sridharan"); diff --git a/drivers/usb/gadget/function/u_dvctrace.h b/drivers/usb/gadget/function/u_dvctrace.h new file mode 100644 index 000000000000..56a5d0cd577a --- /dev/null +++ b/drivers/usb/gadget/function/u_dvctrace.h @@ -0,0 +1,73 @@ + +/* + * Gadget Driver for DvC.Trace Function + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __U_DVCTRACE_H +#define __U_DVCTRACE_H + +#include +#include + +struct dvct_function_desc { + struct usb_descriptor_header **fs; + struct usb_descriptor_header **hs; + struct usb_descriptor_header **ss; + + /*special descriptors, update on bind */ + struct usb_interface_assoc_descriptor *iad; + struct usb_interface_descriptor *d_itf; + struct usb_interface_descriptor *c_itf; + struct usb_endpoint_descriptor *fs_ep; + struct usb_endpoint_descriptor *hs_ep; + struct usb_endpoint_descriptor *ss_ep; + struct usb_ss_ep_comp_descriptor *ss_ep_comp; + + /* strings */ + struct usb_gadget_strings str; + struct dvct_string_lookup *lk_tbl; +}; + +struct dvct_function { + struct usb_function function; + struct usb_composite_dev *cdev; + struct usb_ep *ep_in; + + u32 online_data:1; /*set to one when the data itf is set */ + u32 online_ctrl:1; /*set to one when the control itf is set */ + atomic_t status; + + struct dvct_source_device *source_dev; + struct dvct_source_driver *source_drv; + + u8 trace_config; + struct dvct_function_desc desc; +}; + +struct dvct_function_inst { + struct usb_function_instance instance; + struct dvct_source_device *source_dev; +}; + +#define to_dvct_function_inst(inst) \ + container_of(inst, struct dvct_function_inst, instance) + +#define to_dvct_function(func) \ + container_of(func, struct dvct_function, function) + +ssize_t dvct_start_transfer(struct dvct_function *dev, u8 config); +int dvct_stop_transfer(struct dvct_function *dev); + +#endif /*__U_DVCTRACE_H*/ diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 02968842b359..708e36f530d8 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c @@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (ret) { dev_err(&pci->dev, "couldn't add resources to bdc device\n"); + platform_device_put(bdc); return ret; } diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index d41d07aae0ce..794bb4958383 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -923,7 +923,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget, return 0; /* "high bandwidth" works only at high speed */ - if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & 
(3<<11)) + if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1) return 0; switch (type) { @@ -1080,8 +1080,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc) static inline void usb_gadget_udc_set_speed(struct usb_udc *udc, enum usb_device_speed speed) { - if (udc->gadget->ops->udc_set_speed) - udc->gadget->ops->udc_set_speed(udc->gadget, speed); + if (udc->gadget->ops->udc_set_speed) { + enum usb_device_speed s; + + s = min(speed, udc->gadget->max_speed); + udc->gadget->ops->udc_set_speed(udc->gadget, s); + } } /** @@ -1154,11 +1158,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) - goto err1; - - ret = device_add(&gadget->dev); - if (ret) - goto err2; + goto err_put_gadget; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; @@ -1167,7 +1167,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, udc->dev.parent = parent; ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); if (ret) - goto err3; + goto err_put_udc; + + ret = device_add(&gadget->dev); + if (ret) + goto err_put_udc; udc->gadget = gadget; gadget->udc = udc; @@ -1177,7 +1181,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, ret = device_add(&udc->dev); if (ret) - goto err4; + goto err_unlist_udc; usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); udc->vbus = true; @@ -1185,27 +1189,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, /* pick up one of pending gadget drivers */ ret = check_pending_gadget_drivers(udc); if (ret) - goto err5; + goto err_del_udc; mutex_unlock(&udc_lock); return 0; -err5: + err_del_udc: device_del(&udc->dev); -err4: + err_unlist_udc: list_del(&udc->list); mutex_unlock(&udc_lock); -err3: - put_device(&udc->dev); device_del(&gadget->dev); -err2: - kfree(udc); + err_put_udc: + put_device(&udc->dev); -err1: + err_put_gadget: put_device(&gadget->dev); return ret; } diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 63a206122058..6b3e8adb64e6 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -254,7 +254,7 @@ #define USB3_EP0_SS_MAX_PACKET_SIZE 512 #define USB3_EP0_HSFS_MAX_PACKET_SIZE 64 #define USB3_EP0_BUF_SIZE 8 -#define USB3_MAX_NUM_PIPES 30 +#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */ #define USB3_WAIT_US 3 #define USB3_DMA_NUM_SETTING_AREA 4 /* diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index fa5692dec832..92b19721b595 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -637,14 +637,6 @@ config USB_UHCI_ASPEED bool default y if ARCH_ASPEED -config USB_UHCI_BIG_ENDIAN_MMIO - bool - default y if SPARC_LEON - -config USB_UHCI_BIG_ENDIAN_DESC - bool - default y if SPARC_LEON - config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" depends on OF_GPIO && QE_GPIO && QUICC_ENGINE diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index cbb9b8e12c3c..8c5a6fee4dfd 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) default: /* unknown */ break; } - temp = (cap >> 8) & 0xff; + offset = (cap >> 8) & 0xff; } } #endif diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index df169c8e7225..37ef2ac9cdae 100644 --- a/drivers/usb/host/ehci-hub.c +++ 
b/drivers/usb/host/ehci-hub.c @@ -787,12 +787,12 @@ static struct urb *request_single_step_set_feature_urb( atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); urb->setup_dma = dma_map_single( - hcd->self.controller, + hcd->self.sysdev, urb->setup_packet, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); urb->transfer_dma = dma_map_single( - hcd->self.controller, + hcd->self.sysdev, urb->transfer_buffer, urb->transfer_buffer_length, DMA_FROM_DEVICE); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 44924824fa41..1099465b27f0 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -73,6 +73,7 @@ static const char hcd_name [] = "ohci_hcd"; #define STATECHANGE_DELAY msecs_to_jiffies(300) #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) +#define IO_WATCHDOG_OFF 0xffffff00 #include "ohci.h" #include "pci-quirks.h" @@ -230,7 +231,7 @@ static int ohci_urb_enqueue ( } /* Start up the I/O watchdog timer, if it's not running */ - if (!timer_pending(&ohci->io_watchdog) && + if (ohci->prev_frame_no == IO_WATCHDOG_OFF && list_empty(&ohci->eds_in_use) && !(ohci->flags & OHCI_QUIRK_QEMU)) { ohci->prev_frame_no = ohci_frame_no(ohci); @@ -501,6 +502,7 @@ static int ohci_init (struct ohci_hcd *ohci) setup_timer(&ohci->io_watchdog, io_watchdog_func, (unsigned long) ohci); + ohci->prev_frame_no = IO_WATCHDOG_OFF; ohci->hcca = dma_alloc_coherent (hcd->self.controller, sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); @@ -730,7 +732,7 @@ static void io_watchdog_func(unsigned long _ohci) u32 head; struct ed *ed; struct td *td, *td_start, *td_next; - unsigned frame_no; + unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF; unsigned long flags; spin_lock_irqsave(&ohci->lock, flags); @@ -835,7 +837,7 @@ static void io_watchdog_func(unsigned long _ohci) } } if (!list_empty(&ohci->eds_in_use)) { - ohci->prev_frame_no = frame_no; + prev_frame_no = frame_no; ohci->prev_wdh_cnt = ohci->wdh_cnt; ohci->prev_donehead = ohci_readl(ohci, &ohci->regs->donehead); @@ -845,6 +847,7 @@ static void io_watchdog_func(unsigned long _ohci) } done: + ohci->prev_frame_no = prev_frame_no; spin_unlock_irqrestore(&ohci->lock, flags); } @@ -973,6 +976,7 @@ static void ohci_stop (struct usb_hcd *hcd) if (quirk_nec(ohci)) flush_work(&ohci->nec_work); del_timer_sync(&ohci->io_watchdog); + ohci->prev_frame_no = IO_WATCHDOG_OFF; ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); ohci_usb_reset(ohci); diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 248eb7702463..aca57bcb9afe 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -310,8 +310,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd) rc = ohci_rh_suspend (ohci, 0); spin_unlock_irq (&ohci->lock); - if (rc == 0) + if (rc == 0) { del_timer_sync(&ohci->io_watchdog); + ohci->prev_frame_no = IO_WATCHDOG_OFF; + } return rc; } diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index 641fed609911..24edb7674710 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -1018,6 +1018,8 @@ static void finish_unlinks(struct ohci_hcd *ohci) * have modified this list. normally it's just prepending * entries (which we'd ignore), but paranoia won't hurt. */ + *last = ed->ed_next; + ed->ed_next = NULL; modified = 0; /* unlink urbs as requested, but rescan the list after @@ -1076,21 +1078,22 @@ static void finish_unlinks(struct ohci_hcd *ohci) goto rescan_this; /* - * If no TDs are queued, take ED off the ed_rm_list. + * If no TDs are queued, ED is now idle. 
* Otherwise, if the HC is running, reschedule. - * If not, leave it on the list for further dequeues. + * If the HC isn't running, add ED back to the + * start of the list for later processing. */ if (list_empty(&ed->td_list)) { - *last = ed->ed_next; - ed->ed_next = NULL; ed->state = ED_IDLE; list_del(&ed->in_use_list); } else if (ohci->rh_state == OHCI_RH_RUNNING) { - *last = ed->ed_next; - ed->ed_next = NULL; ed_schedule(ohci, ed); } else { - last = &ed->ed_next; + ed->ed_next = ohci->ed_rm_list; + ohci->ed_rm_list = ed; + /* Don't loop on the same ED */ + if (last == &ohci->ed_rm_list) + last = &ed->ed_next; } if (modified) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 6dda3623a276..fd4643207ae8 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "pci-quirks.h" #include "xhci-ext-caps.h" @@ -1091,9 +1092,31 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); hc_init: - if (pdev->vendor == PCI_VENDOR_ID_INTEL) + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { usb_enable_intel_xhci_ports(pdev); + /* + * Initialize the internal mux that shares a port between USB + * Device Controller and xHCI on platforms that have it. + */ +#define XHCI_INTEL_VENDOR_CAPS 192 + ext_cap_offset = xhci_find_next_ext_cap(base, + XHCI_HCC_PARAMS_OFFSET, + XHCI_INTEL_VENDOR_CAPS); + if (ext_cap_offset) { + struct intel_usb_mux *mux; + struct resource r; + + r.start = pci_resource_start(pdev, 0) + 0x80d8; + r.end = r.start + 8; + r.flags = IORESOURCE_MEM; + + mux = intel_usb_mux_register(&pdev->dev, &r); + if (IS_ERR(mux)) + dev_err(&pdev->dev, "failed to register mux\n"); + } + } + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); /* Wait for the host controller to be ready before writing any diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a2336deb5e36..9762333d8d7f 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -634,7 +634,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, xhci_dbg(xhci, "Disable all slots\n"); spin_unlock_irqrestore(&xhci->lock, *flags); for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { - retval = xhci_disable_slot(xhci, NULL, i); + if (!xhci->devs[i]) + continue; + + retval = xhci_disable_slot(xhci, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. 
Enter test mode anyway\n", i, retval); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 2a82c927ded2..ccdc971283d0 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -947,6 +947,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) if (!vdev) return; + if (vdev->real_port == 0 || + vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { + xhci_dbg(xhci, "Bad vdev->real_port.\n"); + goto out; + } + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { /* is this a hub device that added a tt_info to the tts list */ @@ -960,6 +966,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) } } } +out: /* we are now at a leaf device */ xhci_free_virt_device(xhci, slot_id); } @@ -976,10 +983,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, return 0; } - xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); - if (!xhci->devs[slot_id]) + dev = kzalloc(sizeof(*dev), flags); + if (!dev) return 0; - dev = xhci->devs[slot_id]; /* Allocate the (output) device context that will be used in the HC. */ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); @@ -1020,9 +1026,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, trace_xhci_alloc_virt_device(dev); + xhci->devs[slot_id] = dev; + return 1; fail: - xhci_free_virt_device(xhci, slot_id); + + if (dev->in_ctx) + xhci_free_container_ctx(xhci, dev->in_ctx); + if (dev->out_ctx) + xhci_free_container_ctx(xhci, dev->out_ctx); + kfree(dev); + return 0; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 76f392954733..370c0c7b67b3 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -24,6 +24,9 @@ #include #include #include +#include +#include +#include #include "xhci.h" #include "xhci-trace.h" @@ -54,6 +57,8 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define XHCI_INTEL_VENDOR_CAPS 192 + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 static const char hcd_name[] = "xhci_hcd"; @@ -69,11 +74,62 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { /* called after powerup, by probe or system-pm "wakeup" */ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev) { + struct usb_hcd *hcd; + int retval; + /* * TODO: Implement finding debug ports later. * TODO: see if there are any quirks that need to be added to handle * new extended capabilities. */ + retval = XHCI_HCC_EXT_CAPS(readl(&xhci->cap_regs->hcc_params)); + retval = xhci_find_next_ext_cap(&xhci->cap_regs->hc_capbase, + retval << 2, + XHCI_INTEL_VENDOR_CAPS); + /* If This capbility is found, register host on PHY for OTG purpose */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL && retval) { + hcd = xhci_to_hcd(xhci); + + hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2); + + if (IS_ERR_OR_NULL(hcd->usb_phy)) { + /* Unable to get phy, very likely intel_usb_dr_phy + * platform device not yet allocated. Possible + * race with another drivers. 
+ */ + int ret; + xhci->usb2_phy = platform_device_alloc( + "intel_usb_dr_phy", 0); + if (xhci->usb2_phy) { + xhci->usb2_phy->dev.parent = &pdev->dev; + ret = platform_device_add(xhci->usb2_phy); + if (ret) { + platform_device_put(xhci->usb2_phy); + return ret; + } + } else { + /* In case of a race and another driver allocate + * intel_usb_dr_phy platform device earlier, check + * whether phy is already available. If not return error. + */ + hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2); + if (IS_ERR_OR_NULL(hcd->usb_phy)) + return -ENOMEM; + } + } + + hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2); + + if (!IS_ERR_OR_NULL(hcd->usb_phy)) { + retval = otg_set_host(hcd->usb_phy->otg, &hcd->self); + if (retval) + usb_put_phy(hcd->usb_phy); + } else { + xhci_dbg(xhci, "No USB2 PHY transceiver found\n"); + hcd->usb_phy = NULL; + } + } + /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */ if (!pci_set_mwi(pdev)) @@ -134,6 +190,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) + xhci->quirks |= XHCI_SUSPEND_DELAY; + if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; @@ -189,6 +248,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + pdev->device == 0x0014) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) xhci->quirks |= XHCI_RESET_ON_RESUME; @@ -336,6 +398,8 @@ static void xhci_pci_remove(struct pci_dev *dev) xhci = hcd_to_xhci(pci_get_drvdata(dev)); xhci->xhc_state |= XHCI_STATE_REMOVING; + if(xhci->usb2_phy) + platform_device_unregister(xhci->usb2_phy); if (xhci->shared_hcd) { usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 198bc188ab25..97f23cc31f4c 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -86,6 +86,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = { .soc_id = "r8a7796", .data = (void *)RCAR_XHCI_FIRMWARE_V3, }, + { + .soc_id = "r8a77965", + .data = (void *)RCAR_XHCI_FIRMWARE_V3, + }, { /* sentinel */ }, }; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 82c746e2d85c..6996235e34a9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2486,12 +2486,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, */ if (list_empty(&ep_ring->td_list)) { /* - * A stopped endpoint may generate an extra completion - * event if the device was suspended. Don't print - * warnings. + * Don't print wanings if it's due to a stopped endpoint + * generating an extra completion event if the device + * was suspended. Or, a event for the last TRB of a + * short TD we already got a short event for. + * The short TD is already removed from the TD list. 
*/ + if (!(trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { + trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + ep_ring->last_td_was_short)) { xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); @@ -3117,7 +3121,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, { u32 maxp, total_packet_count; - /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ + /* MTK xHCI 0.96 contains some features from 1.0 */ if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) return ((td_total_len - transferred) >> 10); @@ -3126,8 +3130,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, trb_buff_len == td_total_len) return 0; - /* for MTK xHCI, TD size doesn't include this TRB */ - if (xhci->quirks & XHCI_MTK_HOST) + /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */ + if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) trb_buff_len = 0; maxp = usb_endpoint_maxp(&urb->ep->desc); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 51535ba2bcd4..bcf315b32bff 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -887,6 +887,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); del_timer_sync(&xhci->shared_hcd->rh_timer); + if (xhci->quirks & XHCI_SUSPEND_DELAY) + usleep_range(1000, 1500); + spin_lock_irq(&xhci->lock); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); @@ -3520,11 +3523,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; int i, ret; - struct xhci_command *command; - - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); - if (!command) - return; #ifndef CONFIG_USB_DEFAULT_PERSIST /* @@ -3540,10 +3538,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) /* If the host is halted due to driver unload, we still need to free the * device. */ - if (ret <= 0 && ret != -ENODEV) { - kfree(command); + if (ret <= 0 && ret != -ENODEV) return; - } virt_dev = xhci->devs[udev->slot_id]; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); @@ -3555,26 +3551,21 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } - xhci_disable_slot(xhci, command, udev->slot_id); + xhci_disable_slot(xhci, udev->slot_id); /* * Event command completion handler will free any data structures * associated with the slot. XXX Can free sleep? 
*/ } -int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command, - u32 slot_id) +int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) { + struct xhci_command *command; unsigned long flags; u32 state; int ret = 0; - struct xhci_virt_device *virt_dev; - virt_dev = xhci->devs[slot_id]; - if (!virt_dev) - return -EINVAL; - if (!command) - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); if (!command) return -ENOMEM; @@ -3583,17 +3574,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command, state = readl(&xhci->op_regs->status); if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_free_virt_device(xhci, slot_id); spin_unlock_irqrestore(&xhci->lock, flags); kfree(command); - return ret; + return -ENODEV; } ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, slot_id); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + kfree(command); return ret; } xhci_ring_cmd_db(xhci); @@ -3668,6 +3658,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) return 0; } + xhci_free_command(xhci, command); + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { spin_lock_irqsave(&xhci->lock, flags); ret = xhci_reserve_host_control_ep_resources(xhci); @@ -3703,18 +3695,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) pm_runtime_get_noresume(hcd->self.controller); #endif - - xhci_free_command(xhci, command); /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; disable_slot: - /* Disable slot, if we can do it without mem alloc */ - kfree(command->completion); - command->completion = NULL; - command->status = 0; - return xhci_disable_slot(xhci, command, udev->slot_id); + return xhci_disable_slot(xhci, udev->slot_id); } /* diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2b48aa4f6b76..d2dc71e6cab2 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -728,11 +728,12 @@ struct xhci_ep_ctx { /* bits 10:14 are Max Primary Streams */ /* bit 15 is Linear Stream Array */ /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p) (((p) & 0xff) << 16) -#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) -#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) -#define EP_MAXPSTREAMS_MASK (0x1f << 10) -#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +#define EP_INTERVAL(p) (((p) & 0xff) << 16) +#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) +#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) +#define EP_MAXPSTREAMS_MASK (0x1f << 10) +#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10) /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ #define EP_HAS_LSA (1 << 15) /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ @@ -1830,6 +1831,7 @@ struct xhci_hcd { #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) /* Reserved. 
It was XHCI_U2_DISABLE_WAKE */ #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) +#define XHCI_SUSPEND_DELAY (1 << 30) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1856,6 +1858,7 @@ struct xhci_hcd { struct timer_list comp_mode_recovery_timer; u32 port_status_u0; u16 test_mode; + struct platform_device *usb2_phy; /* Compliance Mode Timer Triggered every 2 seconds */ #define COMP_MODE_RCVRY_MSECS 2000 @@ -2012,8 +2015,7 @@ int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); -int xhci_disable_slot(struct xhci_hcd *xhci, - struct xhci_command *command, u32 slot_id); +int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); int xhci_resume(struct xhci_hcd *xhci, bool hibernated); @@ -2539,21 +2541,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq, u8 burst; u8 cerr; u8 mult; - u8 lsa; - u8 hid; + + bool lsa; + bool hid; esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); ep_state = info & EP_STATE_MASK; - max_pstr = info & EP_MAXPSTREAMS_MASK; + max_pstr = CTX_TO_EP_MAXPSTREAMS(info); interval = CTX_TO_EP_INTERVAL(info); mult = CTX_TO_EP_MULT(info) + 1; - lsa = info & EP_HAS_LSA; + lsa = !!(info & EP_HAS_LSA); cerr = (info2 & (3 << 1)) >> 1; ep_type = CTX_TO_EP_TYPE(info2); - hid = info2 & (1 << 7); + hid = !!(info2 & (1 << 7)); burst = CTX_TO_MAX_BURST(info2); maxp = MAX_PACKET_DECODED(info2); diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 680bddb3ce05..6635a3c990f6 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -46,6 +46,9 @@ #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ +#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */ +#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */ +#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */ #define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ #define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ #define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ @@ -88,6 +91,9 @@ static const struct usb_device_id ld_usb_table[] = { { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 8e7737d7ac0a..03be5d574f23 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c @@ -292,6 +292,8 @@ static int usb3503_probe(struct usb3503 *hub) if (gpio_is_valid(hub->gpio_reset)) { 
err = devm_gpio_request_one(dev, hub->gpio_reset, GPIOF_OUT_INIT_LOW, "usb3503 reset"); + /* Datasheet defines a hardware reset to be at least 100us */ + usleep_range(100, 10000); if (err) { dev_err(dev, "unable to request GPIO %d as reset pin (%d)\n", diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f6ae753ab99b..f932f40302df 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg break; case MON_IOCQ_RING_SIZE: + mutex_lock(&rp->fetch_lock); ret = rp->b_size; + mutex_unlock(&rp->fetch_lock); break; case MON_IOCT_RING_SIZE: @@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf) unsigned long offset, chunk_idx; struct page *pageptr; + mutex_lock(&rp->fetch_lock); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) + if (offset >= rp->b_size) { + mutex_unlock(&rp->fetch_lock); return VM_FAULT_SIGBUS; + } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); + mutex_unlock(&rp->fetch_lock); vmf->page = pageptr; return 0; } diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index f5e1bb5e5217..984f7e12a6a5 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -85,6 +85,8 @@ struct mon_reader_text { wait_queue_head_t wait; int printf_size; + size_t printf_offset; + size_t printf_togo; char *printf_buf; struct mutex printf_lock; @@ -376,75 +378,103 @@ static int mon_text_open(struct inode *inode, struct file *file) return rc; } -/* - * For simplicity, we read one record in one system call and throw out - * what does not fit. This means that the following does not work: - * dd if=/dbg/usbmon/0t bs=10 - * Also, we do not allow seeks and do not bother advancing the offset. - */ +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, + char __user * const buf, const size_t nbytes) +{ + const size_t togo = min(nbytes, rp->printf_togo); + + if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) + return -EFAULT; + rp->printf_togo -= togo; + rp->printf_offset += togo; + return togo; +} + +/* ppos is not advanced since the llseek operation is not permitted. 
*/ static ssize_t mon_text_read_t(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - ep = mon_text_read_wait(rp, file); - if (IS_ERR(ep)) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - - mon_text_read_head_t(rp, &ptr, ep); - mon_text_read_statset(rp, &ptr, ep); - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_t(rp, &ptr, ep); + mon_text_read_statset(rp, &ptr, ep); + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); + } + + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } +/* ppos is not advanced since the llseek operation is not permitted. */ static ssize_t mon_text_read_u(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - ep = mon_text_read_wait(rp, file); - if (IS_ERR(ep)) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - mon_text_read_head_u(rp, &ptr, ep); - if (ep->type == 'E') { - mon_text_read_statset(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { - mon_text_read_isostat(rp, &ptr, ep); - mon_text_read_isodesc(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { - mon_text_read_intstat(rp, &ptr, ep); - } else { - mon_text_read_statset(rp, &ptr, ep); + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_u(rp, &ptr, ep); + if (ep->type == 'E') { + mon_text_read_statset(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { + mon_text_read_isostat(rp, &ptr, ep); + mon_text_read_isodesc(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { + mon_text_read_intstat(rp, &ptr, ep); + } else { + mon_text_read_statset(rp, &ptr, ep); + } + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); } - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, diff --git a/drivers/usb/mtu3/mtu3_core.c 
b/drivers/usb/mtu3/mtu3_core.c index 99c65b0788ff..947579842ad7 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -774,9 +774,9 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb) return -ENOMEM; mtu->irq = platform_get_irq(pdev, 0); - if (mtu->irq <= 0) { + if (mtu->irq < 0) { dev_err(dev, "fail to get irq number\n"); - return -ENODEV; + return mtu->irq; } dev_info(dev, "irq %d\n", mtu->irq); diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index df88123274ca..972bf4210189 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -305,7 +305,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); - } else { + } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) { + /* + * When a babble condition happens, the drvvbus interrupt + * is also generated. Ignore this drvvbus interrupt + * and let the babble interrupt handler recover the + * controller; otherwise, the host-mode flag is lost + * due to the MUSB_DEV_MODE() call below and the babble + * recovery logic will not be called. + */ musb->is_active = 0; MUSB_DEV_MODE(musb); otg->default_a = 0; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index b17450a59882..b5cc08d8aa22 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -418,13 +418,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, } } - /* - * The pipe must be broken if current urb->status is set, so don't - * start next urb. - * TODO: to minimize the risk of regression, only check urb->status - * for RX, until we have a test case to understand the behavior of TX. - */ - if ((!status || !is_in) && qh && qh->is_ready) { + if (qh != NULL && qh->is_ready) { musb_dbg(musb, "... next ep%d %cX urb %p", hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); musb_start_urb(musb, is_in, qh); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index aff702c0eb9f..8d3cf2c6dd72 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -7,6 +7,14 @@ config USB_PHY select EXTCON def_bool n +config USB_OTG_WAKELOCK + bool "Hold a wakelock when USB connected" + depends on PM_WAKELOCKS + select USB_OTG_UTILS + help + Select this to automatically hold a wakelock when USB is + connected, preventing suspend. + # # USB Transceiver Drivers # @@ -202,4 +210,21 @@ config USB_ULPI_VIEWPORT Provides read/write operations to the ULPI phy register set for controllers with a viewport register (e.g. Chipidea/ARC controllers). +config USB_INTEL_DUAL_ROLE_PHY + tristate "Intel Dual Role Transceiver Driver" + depends on USB_OTG_FSM && PM + depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' + help + Enable this to support the USB Dual Role transceiver on Broxton/Cherrytrail + platforms. + +config DUAL_ROLE_USB_INTF + bool "Generic DUAL ROLE sysfs interface" + depends on SYSFS && USB_PHY + help + A generic sysfs interface to track and change the state of + dual role usb phys. The usb phy drivers can register to + this interface to expose its capabilities to userspace, + thereby allowing userspace to change the port mode.
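A rough illustration of the sysfs interface this option describes, based on the class-dual-role.c code added later in this patch; the instance name "otg_default" is only a placeholder (the real name comes from the registering phy driver's descriptor):

  # query what the port supports and its current state
  cat /sys/class/dual_role_usb/otg_default/supported_modes   # "ufp dfp", "dfp" or "ufp"
  cat /sys/class/dual_role_usb/otg_default/mode               # "ufp", "dfp" or "none"
  cat /sys/class/dual_role_usb/otg_default/power_role         # "source", "sink" or "none"
  cat /sys/class/dual_role_usb/otg_default/data_role          # "host", "device" or "none"
  # request a mode switch; accepted only if the driver marks the property writeable
  echo dfp > /sys/class/dual_role_usb/otg_default/mode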
+ endmenu diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index 0c40ccc90631..07bf3e6eae52 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -4,6 +4,8 @@ # obj-$(CONFIG_USB_PHY) += phy.o obj-$(CONFIG_OF) += of.o +obj-$(CONFIG_USB_OTG_WAKELOCK) += otg-wakelock.o +obj-$(CONFIG_DUAL_ROLE_USB_INTF) += class-dual-role.o # transceiver drivers, keep the list sorted @@ -26,3 +28,4 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o +obj-$(CONFIG_USB_INTEL_DUAL_ROLE_PHY) += phy-intel-dualrole.o diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c new file mode 100644 index 000000000000..51fcb545a9d5 --- /dev/null +++ b/drivers/usb/phy/class-dual-role.c @@ -0,0 +1,529 @@ +/* + * class-dual-role.c + * + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DUAL_ROLE_NOTIFICATION_TIMEOUT 2000 + +static ssize_t dual_role_store_property(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t dual_role_show_property(struct device *dev, + struct device_attribute *attr, + char *buf); + +#define DUAL_ROLE_ATTR(_name) \ +{ \ + .attr = { .name = #_name }, \ + .show = dual_role_show_property, \ + .store = dual_role_store_property, \ +} + +static struct device_attribute dual_role_attrs[] = { + DUAL_ROLE_ATTR(supported_modes), + DUAL_ROLE_ATTR(mode), + DUAL_ROLE_ATTR(power_role), + DUAL_ROLE_ATTR(data_role), + DUAL_ROLE_ATTR(powers_vconn), +}; + +struct class *dual_role_class; +EXPORT_SYMBOL_GPL(dual_role_class); + +static struct device_type dual_role_dev_type; + +static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper) +{ + char *ret, *ustr; + + ustr = ret = kmalloc(strlen(str) + 1, gfp); + + if (!ret) + return NULL; + + while (*str) + *ustr++ = to_upper ? 
toupper(*str++) : tolower(*str++); + + *ustr = 0; + + return ret; +} + +static void dual_role_changed_work(struct work_struct *work) +{ + struct dual_role_phy_instance *dual_role = + container_of(work, struct dual_role_phy_instance, + changed_work); + + dev_dbg(&dual_role->dev, "%s\n", __func__); + kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE); +} + +void dual_role_instance_changed(struct dual_role_phy_instance *dual_role) +{ + dev_dbg(&dual_role->dev, "%s\n", __func__); + pm_wakeup_event(&dual_role->dev, DUAL_ROLE_NOTIFICATION_TIMEOUT); + schedule_work(&dual_role->changed_work); +} +EXPORT_SYMBOL_GPL(dual_role_instance_changed); + +int dual_role_get_property(struct dual_role_phy_instance *dual_role, + enum dual_role_property prop, + unsigned int *val) +{ + return dual_role->desc->get_property(dual_role, prop, val); +} +EXPORT_SYMBOL_GPL(dual_role_get_property); + +int dual_role_set_property(struct dual_role_phy_instance *dual_role, + enum dual_role_property prop, + const unsigned int *val) +{ + if (!dual_role->desc->set_property) + return -ENODEV; + + return dual_role->desc->set_property(dual_role, prop, val); +} +EXPORT_SYMBOL_GPL(dual_role_set_property); + +int dual_role_property_is_writeable(struct dual_role_phy_instance *dual_role, + enum dual_role_property prop) +{ + if (!dual_role->desc->property_is_writeable) + return -ENODEV; + + return dual_role->desc->property_is_writeable(dual_role, prop); +} +EXPORT_SYMBOL_GPL(dual_role_property_is_writeable); + +static void dual_role_dev_release(struct device *dev) +{ + struct dual_role_phy_instance *dual_role = + container_of(dev, struct dual_role_phy_instance, dev); + pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + kfree(dual_role); +} + +static struct dual_role_phy_instance *__must_check +__dual_role_register(struct device *parent, + const struct dual_role_phy_desc *desc) +{ + struct device *dev; + struct dual_role_phy_instance *dual_role; + int rc; + + dual_role = kzalloc(sizeof(*dual_role), GFP_KERNEL); + if (!dual_role) + return ERR_PTR(-ENOMEM); + + dev = &dual_role->dev; + + device_initialize(dev); + + dev->class = dual_role_class; + dev->type = &dual_role_dev_type; + dev->parent = parent; + dev->release = dual_role_dev_release; + dev_set_drvdata(dev, dual_role); + dual_role->desc = desc; + + rc = dev_set_name(dev, "%s", desc->name); + if (rc) + goto dev_set_name_failed; + + INIT_WORK(&dual_role->changed_work, dual_role_changed_work); + + rc = device_init_wakeup(dev, true); + if (rc) + goto wakeup_init_failed; + + rc = device_add(dev); + if (rc) + goto device_add_failed; + + dual_role_instance_changed(dual_role); + + return dual_role; + +device_add_failed: + device_init_wakeup(dev, false); +wakeup_init_failed: +dev_set_name_failed: + put_device(dev); + kfree(dual_role); + + return ERR_PTR(rc); +} + +static void dual_role_instance_unregister(struct dual_role_phy_instance + *dual_role) +{ + cancel_work_sync(&dual_role->changed_work); + device_init_wakeup(&dual_role->dev, false); + device_unregister(&dual_role->dev); +} + +static void devm_dual_role_release(struct device *dev, void *res) +{ + struct dual_role_phy_instance **dual_role = res; + + dual_role_instance_unregister(*dual_role); +} + +struct dual_role_phy_instance *__must_check +devm_dual_role_instance_register(struct device *parent, + const struct dual_role_phy_desc *desc) +{ + struct dual_role_phy_instance **ptr, *dual_role; + + ptr = devres_alloc(devm_dual_role_release, sizeof(*ptr), GFP_KERNEL); + + if (!ptr) + return ERR_PTR(-ENOMEM); + dual_role = 
__dual_role_register(parent, desc); + if (IS_ERR(dual_role)) { + devres_free(ptr); + } else { + *ptr = dual_role; + devres_add(parent, ptr); + } + return dual_role; +} +EXPORT_SYMBOL_GPL(devm_dual_role_instance_register); + +static int devm_dual_role_match(struct device *dev, void *res, void *data) +{ + struct dual_role_phy_instance **r = res; + + if (WARN_ON(!r || !*r)) + return 0; + + return *r == data; +} + +void devm_dual_role_instance_unregister(struct device *dev, + struct dual_role_phy_instance + *dual_role) +{ + int rc; + + rc = devres_release(dev, devm_dual_role_release, + devm_dual_role_match, dual_role); + WARN_ON(rc); +} +EXPORT_SYMBOL_GPL(devm_dual_role_instance_unregister); + +void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role) +{ + return dual_role->drv_data; +} +EXPORT_SYMBOL_GPL(dual_role_get_drvdata); + +/***************** Device attribute functions **************************/ + +/* port type */ +static char *supported_modes_text[] = { + "ufp dfp", "dfp", "ufp" +}; + +/* current mode */ +static char *mode_text[] = { + "ufp", "dfp", "none" +}; + +/* Power role */ +static char *pr_text[] = { + "source", "sink", "none" +}; + +/* Data role */ +static char *dr_text[] = { + "host", "device", "none" +}; + +/* Vconn supply */ +static char *vconn_supply_text[] = { + "n", "y" +}; + +static ssize_t dual_role_show_property(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + const ptrdiff_t off = attr - dual_role_attrs; + unsigned int value; + + if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) { + value = dual_role->desc->supported_modes; + } else { + ret = dual_role_get_property(dual_role, off, &value); + + if (ret < 0) { + if (ret == -ENODATA) + dev_dbg(dev, + "driver has no data for `%s' property\n", + attr->attr.name); + else if (ret != -ENODEV) + dev_err(dev, + "driver failed to report `%s' property: %zd\n", + attr->attr.name, ret); + return ret; + } + } + + if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) { + BUILD_BUG_ON(DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL != + ARRAY_SIZE(supported_modes_text)); + if (value < DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + supported_modes_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_MODE) { + BUILD_BUG_ON(DUAL_ROLE_PROP_MODE_TOTAL != + ARRAY_SIZE(mode_text)); + if (value < DUAL_ROLE_PROP_MODE_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + mode_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_PR) { + BUILD_BUG_ON(DUAL_ROLE_PROP_PR_TOTAL != ARRAY_SIZE(pr_text)); + if (value < DUAL_ROLE_PROP_PR_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + pr_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_DR) { + BUILD_BUG_ON(DUAL_ROLE_PROP_DR_TOTAL != ARRAY_SIZE(dr_text)); + if (value < DUAL_ROLE_PROP_DR_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + dr_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_VCONN_SUPPLY) { + BUILD_BUG_ON(DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL != + ARRAY_SIZE(vconn_supply_text)); + if (value < DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + vconn_supply_text[value]); + else + return -EIO; + } else + return -EIO; +} + +static ssize_t dual_role_store_property(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + ssize_t ret; + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + const ptrdiff_t off = attr - 
dual_role_attrs; + unsigned int value; + int total, i; + char *dup_buf, **text_array; + bool result = false; + + dup_buf = kstrdupcase(buf, GFP_KERNEL, false); + switch (off) { + case DUAL_ROLE_PROP_MODE: + total = DUAL_ROLE_PROP_MODE_TOTAL; + text_array = mode_text; + break; + case DUAL_ROLE_PROP_PR: + total = DUAL_ROLE_PROP_PR_TOTAL; + text_array = pr_text; + break; + case DUAL_ROLE_PROP_DR: + total = DUAL_ROLE_PROP_DR_TOTAL; + text_array = dr_text; + break; + case DUAL_ROLE_PROP_VCONN_SUPPLY: + ret = strtobool(dup_buf, &result); + value = result; + if (!ret) + goto setprop; + default: + ret = -EINVAL; + goto error; + } + + for (i = 0; i <= total; i++) { + if (i == total) { + ret = -ENOTSUPP; + goto error; + } + if (!strncmp(*(text_array + i), dup_buf, + strlen(*(text_array + i)))) { + value = i; + break; + } + } + +setprop: + ret = dual_role->desc->set_property(dual_role, off, &value); + +error: + kfree(dup_buf); + + if (ret < 0) + return ret; + + return count; +} + +static umode_t dual_role_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int attrno) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + umode_t mode = S_IRUSR | S_IRGRP | S_IROTH; + int i; + + if (attrno == DUAL_ROLE_PROP_SUPPORTED_MODES) + return mode; + + for (i = 0; i < dual_role->desc->num_properties; i++) { + int property = dual_role->desc->properties[i]; + + if (property == attrno) { + if (dual_role->desc->property_is_writeable && + dual_role_property_is_writeable(dual_role, property) + > 0) + mode |= S_IWUSR; + + return mode; + } + } + + return 0; +} + +static struct attribute *__dual_role_attrs[ARRAY_SIZE(dual_role_attrs) + 1]; + +static struct attribute_group dual_role_attr_group = { + .attrs = __dual_role_attrs, + .is_visible = dual_role_attr_is_visible, +}; + +static const struct attribute_group *dual_role_attr_groups[] = { + &dual_role_attr_group, + NULL, +}; + +void dual_role_init_attrs(struct device_type *dev_type) +{ + int i; + + dev_type->groups = dual_role_attr_groups; + + for (i = 0; i < ARRAY_SIZE(dual_role_attrs); i++) + __dual_role_attrs[i] = &dual_role_attrs[i].attr; +} + +int dual_role_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + int ret = 0, j; + char *prop_buf; + char *attrname; + + dev_dbg(dev, "uevent\n"); + + if (!dual_role || !dual_role->desc) { + dev_dbg(dev, "No dual_role phy yet\n"); + return ret; + } + + dev_dbg(dev, "DUAL_ROLE_NAME=%s\n", dual_role->desc->name); + + ret = add_uevent_var(env, "DUAL_ROLE_NAME=%s", dual_role->desc->name); + if (ret) + return ret; + + prop_buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!prop_buf) + return -ENOMEM; + + for (j = 0; j < dual_role->desc->num_properties; j++) { + struct device_attribute *attr; + char *line; + + attr = &dual_role_attrs[dual_role->desc->properties[j]]; + + ret = dual_role_show_property(dev, attr, prop_buf); + if (ret == -ENODEV || ret == -ENODATA) { + ret = 0; + continue; + } + + if (ret < 0) + goto out; + line = strnchr(prop_buf, PAGE_SIZE, '\n'); + if (line) + *line = 0; + + attrname = kstrdupcase(attr->attr.name, GFP_KERNEL, true); + if (!attrname) + ret = -ENOMEM; + + dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); + + ret = add_uevent_var(env, "DUAL_ROLE_%s=%s", attrname, + prop_buf); + kfree(attrname); + if (ret) + goto out; + } + +out: + free_page((unsigned long)prop_buf); + + return ret; +} + +/******************* Module Init 
***********************************/ + +static int __init dual_role_class_init(void) +{ + dual_role_class = class_create(THIS_MODULE, "dual_role_usb"); + + if (IS_ERR(dual_role_class)) + return PTR_ERR(dual_role_class); + + dual_role_class->dev_uevent = dual_role_uevent; + dual_role_init_attrs(&dual_role_dev_type); + + return 0; +} + +static void __exit dual_role_class_exit(void) +{ + class_destroy(dual_role_class); +} + +subsys_initcall(dual_role_class_init); +module_exit(dual_role_class_exit); diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c new file mode 100644 index 000000000000..ecd741027f53 --- /dev/null +++ b/drivers/usb/phy/otg-wakelock.c @@ -0,0 +1,170 @@ +/* + * otg-wakelock.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define TEMPORARY_HOLD_TIME 2000 + +static bool enabled = true; +static struct usb_phy *otgwl_xceiv; +static struct notifier_block otgwl_nb; + +/* + * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the + * held field is updated to match. + */ + +static DEFINE_SPINLOCK(otgwl_spinlock); + +/* + * Only one lock, but since these 3 fields are associated with each other... + */ + +struct otgwl_lock { + char name[40]; + struct wakeup_source wakesrc; + bool held; +}; + +/* + * VBUS present lock. Also used as a timed lock on charger + * connect/disconnect and USB host disconnect, to allow the system + * to react to the change in power. 
+ */ + +static struct otgwl_lock vbus_lock; + +static void otgwl_hold(struct otgwl_lock *lock) +{ + if (!lock->held) { + __pm_stay_awake(&lock->wakesrc); + lock->held = true; + } +} + +static void otgwl_temporary_hold(struct otgwl_lock *lock) +{ + __pm_wakeup_event(&lock->wakesrc, TEMPORARY_HOLD_TIME); + lock->held = false; +} + +static void otgwl_drop(struct otgwl_lock *lock) +{ + if (lock->held) { + __pm_relax(&lock->wakesrc); + lock->held = false; + } +} + +static void otgwl_handle_event(unsigned long event) +{ + unsigned long irqflags; + + spin_lock_irqsave(&otgwl_spinlock, irqflags); + + if (!enabled) { + otgwl_drop(&vbus_lock); + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); + return; + } + + switch (event) { + case USB_EVENT_VBUS: + case USB_EVENT_ENUMERATED: + otgwl_hold(&vbus_lock); + break; + + case USB_EVENT_NONE: + case USB_EVENT_ID: + case USB_EVENT_CHARGER: + otgwl_temporary_hold(&vbus_lock); + break; + + default: + break; + } + + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); +} + +static int otgwl_otg_notifications(struct notifier_block *nb, + unsigned long event, void *unused) +{ + otgwl_handle_event(event); + return NOTIFY_OK; +} + +static int set_enabled(const char *val, const struct kernel_param *kp) +{ + int rv = param_set_bool(val, kp); + + if (rv) + return rv; + + if (otgwl_xceiv) + otgwl_handle_event(otgwl_xceiv->last_event); + + return 0; +} + +static struct kernel_param_ops enabled_param_ops = { + .set = set_enabled, + .get = param_get_bool, +}; + +module_param_cb(enabled, &enabled_param_ops, &enabled, 0644); +MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present"); + +static int __init otg_wakelock_init(void) +{ + int ret; + struct usb_phy *phy; + + phy = usb_get_phy(USB_PHY_TYPE_USB2); + + if (IS_ERR(phy)) { + pr_err("%s: No USB transceiver found\n", __func__); + return PTR_ERR(phy); + } + otgwl_xceiv = phy; + + snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s", + dev_name(otgwl_xceiv->dev)); + wakeup_source_init(&vbus_lock.wakesrc, vbus_lock.name); + + otgwl_nb.notifier_call = otgwl_otg_notifications; + ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb); + + if (ret) { + pr_err("%s: usb_register_notifier on transceiver %s" + " failed\n", __func__, + dev_name(otgwl_xceiv->dev)); + otgwl_xceiv = NULL; + wakeup_source_trash(&vbus_lock.wakesrc); + return ret; + } + + otgwl_handle_event(otgwl_xceiv->last_event); + return ret; +} + +late_initcall(otg_wakelock_init); diff --git a/drivers/usb/phy/phy-intel-dualrole.c b/drivers/usb/phy/phy-intel-dualrole.c new file mode 100644 index 000000000000..0eaaf8e9e738 --- /dev/null +++ b/drivers/usb/phy/phy-intel-dualrole.c @@ -0,0 +1,409 @@ +/* + * Intel USB Dual Role transceiver driver + * + * This driver is based on Cherrytrail OTG driver written by + * Wu, Hao + * + * Copyright (C) 2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "../host/xhci.h" + +struct intel_dr_phy { + struct usb_phy phy; + struct otg_fsm fsm; + struct notifier_block nb; + struct extcon_dev *edev; + struct delayed_work mux_work; +}; + +static struct intel_dr_phy *intel_phy_dev; + +static int intel_usb_mux_update(struct intel_dr_phy *intel_phy, + enum phy_mode mode) +{ + struct usb_bus *host = intel_phy->phy.otg->host; + struct usb_gadget *gadget = intel_phy->phy.otg->gadget; + + if (!host || !gadget || !gadget->dev.parent || !intel_phy->edev) + return -ENODEV; + + /* PHY is shared between the host and device controller, so both need + * to be in D0 before making any PHY state transition. */ + pm_runtime_get_sync(host->controller); + pm_runtime_get_sync(gadget->dev.parent); + + if (mode == PHY_MODE_USB_HOST) + extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 1); + else if (mode == PHY_MODE_USB_DEVICE) + extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 0); + + pm_runtime_put(gadget->dev.parent); + pm_runtime_put(host->controller); + + return 0; +} + +static int intel_dr_phy_start_host(struct otg_fsm *fsm, int on) +{ + struct intel_dr_phy *intel_phy; + + if (!fsm || !fsm->otg) + return -ENODEV; + + intel_phy = container_of(fsm->otg->usb_phy, struct intel_dr_phy, phy); + + if (!fsm->otg->host) + return -ENODEV; + + /* Just switch the mux to host path */ + return intel_usb_mux_update(intel_phy, PHY_MODE_USB_HOST); +} + +static int intel_dr_phy_start_gadget(struct otg_fsm *fsm, int on) +{ + struct intel_dr_phy *intel_phy; + + if (!fsm || !fsm->otg) + return -ENODEV; + + intel_phy = container_of(fsm->otg->usb_phy, struct intel_dr_phy, phy); + + if (!fsm->otg->gadget) + return -ENODEV; + + /* Just switch the mux to device mode */ + return intel_usb_mux_update(intel_phy, PHY_MODE_USB_DEVICE); +} + +static int intel_dr_phy_set_host(struct usb_otg *otg, struct usb_bus *host) +{ + struct intel_dr_phy *intel_phy; + + if (!otg || !host) + return -ENODEV; + + intel_phy = container_of(otg->usb_phy, struct intel_dr_phy, phy); + + otg->host = host; + + intel_phy->fsm.a_bus_drop = 0; + intel_phy->fsm.a_bus_req = 0; + + if (intel_phy->phy.otg->gadget) + otg_statemachine(&intel_phy->fsm); + + return 0; +} + +static int intel_dr_phy_set_peripheral(struct usb_otg *otg, + struct usb_gadget *gadget) +{ + struct intel_dr_phy *intel_phy; + + if (!otg || !gadget) + return -ENODEV; + + intel_phy = container_of(otg->usb_phy, struct intel_dr_phy, phy); + + otg->gadget = gadget; + + intel_phy->fsm.b_bus_req = 1; + + /* if host is registered already then kick the state machine.
+ * Only trigger the mode switch once both host and device are + * registered */ + if (intel_phy->phy.otg->host) + otg_statemachine(&intel_phy->fsm); + + return 0; +} + +static int intel_dr_phy_handle_notification(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct usb_bus *host; + struct usb_gadget *gadget; + int state; + + if (!intel_phy_dev) + return NOTIFY_BAD; + + host = intel_phy_dev->phy.otg->host; + gadget = intel_phy_dev->phy.otg->gadget; + + switch (event) { + case USB_EVENT_VBUS: + if (intel_phy_dev->fsm.id) + intel_phy_dev->fsm.b_sess_vld = 1; + state = NOTIFY_OK; + break; + case USB_EVENT_ID: + intel_phy_dev->fsm.id = 0; + state = NOTIFY_OK; + break; + case USB_EVENT_NONE: + if (intel_phy_dev->fsm.id == 0) + intel_phy_dev->fsm.id = 1; + else if (intel_phy_dev->fsm.b_sess_vld) + intel_phy_dev->fsm.b_sess_vld = 0; + else + dev_err(intel_phy_dev->phy.dev, "USB_EVENT_NONE?\n"); + state = NOTIFY_OK; + break; + default: + dev_info(intel_phy_dev->phy.dev, "unknown notification\n"); + state = NOTIFY_DONE; + break; + } + + /* + * Don't kick the state machine if host or device controller + * were never registered before. Just wait to kick it when + * set_host or set_peripheral. + * */ + + if (host && gadget) + otg_statemachine(&intel_phy_dev->fsm); + + return state; +} + + +static struct otg_fsm_ops intel_dr_phy_fsm_ops = { + .start_host = intel_dr_phy_start_host, + .start_gadget = intel_dr_phy_start_gadget, +}; + +static ssize_t show_mux_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct otg_fsm *fsm = &intel_phy_dev->fsm; + + if (fsm->id && fsm->a_vbus_vld) + return sprintf(buf, "peripheral state\n"); + else if (!fsm->id && !fsm->a_vbus_vld) + return sprintf(buf, "host state\n"); + else + return sprintf(buf, "unknown state\n"); + +} + +static ssize_t store_mux_state(struct device *_dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct otg_fsm *fsm; + + if (!intel_phy_dev) + return -EINVAL; + + fsm = &intel_phy_dev->fsm; + + if (count != 2) + return -EINVAL; + + switch (buf[0]) { + case 'D': + case 'd': + case 'P': + case 'p': + case 'a': + case 'A': + case '1': + dev_info(intel_phy_dev->phy.dev, "p: set PERIPHERAL mode\n"); + /* disable host mode */ + dev_info(intel_phy_dev->phy.dev, "ID = 1\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_NONE, NULL); + /* enable device mode */ + dev_info(intel_phy_dev->phy.dev, "VBUS = 1\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_VBUS, NULL); + return count; + case 'H': + case 'h': + case 'b': + case 'B': + case '0': + dev_info(intel_phy_dev->phy.dev, "h: set HOST mode\n"); + /* disable device mode */ + dev_info(intel_phy_dev->phy.dev, "VBUS = 0\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_NONE, NULL); + /* enable host mode */ + dev_info(intel_phy_dev->phy.dev, "ID = 0\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_ID, NULL); + return count; + default: + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR(mux_state, S_IRWXU|S_IRWXG, show_mux_state, store_mux_state); + +static int intel_usb_mux_init(struct intel_dr_phy *intel_phy) +{ + if (!intel_phy) + return -ENODEV; + + intel_phy->edev = extcon_get_extcon_dev("intel_usb_mux"); + if (!intel_phy->edev) { + dev_err(intel_phy->phy.dev, "intel mux device not ready\n"); + return -ENODEV; + } + + /* first switch the mux to host mode to force the host + * enumerate the port */ + 
extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 1); + msleep(10); + extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 0); + + return 0; +} + +static void intel_usb_mux_delayed_init(struct work_struct *work) +{ + struct intel_dr_phy *intel_phy = container_of(work, + struct intel_dr_phy, + mux_work.work); + static int retry_count = 0; + + if (intel_usb_mux_init(intel_phy) && retry_count < 10) { + retry_count++; + schedule_delayed_work(&intel_phy->mux_work, HZ); + } +} + +static int intel_dr_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct intel_dr_phy *intel_phy; + int err; + + intel_phy = devm_kzalloc(dev, sizeof(*intel_phy), GFP_KERNEL); + if (!intel_phy) + return -ENOMEM; + + intel_phy->phy.otg = devm_kzalloc(dev, sizeof(*intel_phy->phy.otg), + GFP_KERNEL); + if (!intel_phy->phy.otg) + return -ENOMEM; + + INIT_DELAYED_WORK(&intel_phy->mux_work, intel_usb_mux_delayed_init); + + if (intel_usb_mux_init(intel_phy)) { + dev_warn(dev, "mux is not available, so delayed mux_init"); + schedule_delayed_work(&intel_phy->mux_work, HZ); + } + + /* initialize fsm ops */ + intel_phy->fsm.ops = &intel_dr_phy_fsm_ops; + intel_phy->fsm.otg = intel_phy->phy.otg; + /* default device mode */ + intel_phy->fsm.id = 1; + intel_phy->fsm.b_sess_vld = 1; + mutex_init(&intel_phy->fsm.lock); + + /* initialize the phy structure */ + intel_phy->phy.label = "intel_usb_dr_phy"; + intel_phy->phy.dev = &pdev->dev; + intel_phy->phy.type = USB_PHY_TYPE_USB2; + /* initialize the otg structure */ + intel_phy->phy.otg->state = OTG_STATE_UNDEFINED; + intel_phy->phy.otg->usb_phy = &intel_phy->phy; + intel_phy->phy.otg->set_host = intel_dr_phy_set_host; + intel_phy->phy.otg->set_peripheral = intel_dr_phy_set_peripheral; + /* No support for ADP, HNP and SRP */ + intel_phy->phy.otg->start_hnp = NULL; + intel_phy->phy.otg->start_srp = NULL; + + intel_phy_dev = intel_phy; + + err = usb_add_phy_dev(&intel_phy->phy); + if (err) { + dev_err(&pdev->dev, "can't register intel_dr_phy, err: %d\n", + err); + return err; + } + + intel_phy->nb.notifier_call = intel_dr_phy_handle_notification; + usb_register_notifier(&intel_phy->phy, &intel_phy->nb); + + platform_set_drvdata(pdev, intel_phy); + + err = device_create_file(&pdev->dev, &dev_attr_mux_state); + if (err) { + dev_err(&pdev->dev, "failed to create mux_state sysfs attribute\n"); + usb_remove_phy(&intel_phy->phy); + return err; + } + + otg_statemachine(&intel_phy->fsm); + + return 0; +} + +static int intel_dr_phy_remove(struct platform_device *pdev) +{ + struct intel_dr_phy *intel_phy = platform_get_drvdata(pdev); + + usb_remove_phy(&intel_phy->phy); + + return 0; +} + +static struct platform_driver intel_dr_phy_driver = { + .probe = intel_dr_phy_probe, + .remove = intel_dr_phy_remove, + .driver = { + .name = "intel_usb_dr_phy", + }, +}; + +static int __init intel_dr_phy_init(void) +{ + return platform_driver_register(&intel_dr_phy_driver); +} +subsys_initcall(intel_dr_phy_init); + +static void __exit intel_dr_phy_exit(void) +{ + platform_driver_unregister(&intel_dr_phy_driver); +} +module_exit(intel_dr_phy_exit); + +MODULE_ALIAS("platform:intel_usb_dr_phy"); +MODULE_DESCRIPTION("Intel USB Dual Role Transceiver driver"); +MODULE_AUTHOR("Hao Wu"); +MODULE_AUTHOR("Sathya Kuppuswamy "); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 8babd318c0ed..1ec00eae339a 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct
platform_device *pdev) tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable); if (IS_ERR(tu->extcon)) { dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); - return -ENOMEM; + ret = PTR_ERR(tu->extcon); + goto err_disable_clk; } ret = devm_extcon_dev_register(&pdev->dev, tu->extcon); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 50285b01da92..5d369b38868a 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -998,6 +998,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) goto usbhsf_pio_prepare_pop; + /* return at this time if the pipe is running */ + if (usbhs_pipe_is_running(pipe)) + return 0; + usbhs_pipe_config_change_bfre(pipe, 1); ret = usbhsf_fifo_select(pipe, fifo, 0); @@ -1188,6 +1192,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt, usbhsf_fifo_clear(pipe, fifo); pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); + usbhs_pipe_running(pipe, 0); usbhsf_dma_stop(pipe, fifo); usbhsf_dma_unmap(pkt); usbhsf_fifo_unselect(pipe, pipe->fifo); diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index a8d5f2e4878d..c66b93664d54 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE - Google USB serial devices - HP4x calculators - a number of Motorola phones + - Motorola Tetra devices - Novatel Wireless GPS receivers - Siemens USB/MPI adapter. - ViVOtech ViVOpay USB device. diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 412f812522ee..2836acf73a07 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ + { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ @@ -157,6 +158,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ @@ -177,6 +179,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 49d1b2d4606d..a2a5232751cb 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -773,6 +773,7 @@ static 
const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, + { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, @@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, @@ -1017,6 +1019,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, + { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4faa09fe308c..975d02666c5a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -914,9 +914,18 @@ #define ICPDAS_I7561U_PID 0x0104 #define ICPDAS_I7563U_PID 0x0105 +/* + * Airbus Defence and Space + */ +#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */ +#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */ + /* * RT Systems programming cables for various ham radios */ +/* This device uses the VID of FTDI */ +#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */ + #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ #define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ @@ -1435,6 +1444,12 @@ */ #define FTDI_CINTERION_MC55I_PID 0xA951 +/* + * Product: FirmwareHubEmulator + * Manufacturer: Harman Becker Automotive Systems + */ +#define FTDI_FHE_PID 0xA9A0 + /* * Product: Comet Caller ID decoder * Manufacturer: Crucible Technologies diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index b2f2e87aed94..91e7e3a166a5 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -138,6 +138,7 @@ struct garmin_data { __u8 privpkt[4*6]; spinlock_t lock; struct list_head pktlist; + struct usb_anchor write_urbs; }; @@ -905,13 +906,19 @@ static int garmin_init_session(struct usb_serial_port *port) sizeof(GARMIN_START_SESSION_REQ), 0); if (status < 0) - break; + goto err_kill_urbs; } if (status > 0) status = 0; } + return status; + +err_kill_urbs: + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); + usb_kill_urb(port->interrupt_in_urb); + return status; } @@ -930,7 +937,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) spin_unlock_irqrestore(&garmin_data_p->lock, flags); /* shutdown any bulk reads that might be going on */ - usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); if (garmin_data_p->state == STATE_RESET) @@ -953,7 +959,7 @@ static void garmin_close(struct usb_serial_port *port) /* shutdown our urbs */ usb_kill_urb(port->read_urb); - usb_kill_urb(port->write_urb); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); /* keep reset state so we know that we must start a new session */ if 
(garmin_data_p->state != STATE_RESET) @@ -1037,12 +1043,14 @@ static int garmin_write_bulk(struct usb_serial_port *port, } /* send it down the pipe */ + usb_anchor_urb(urb, &garmin_data_p->write_urbs); status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status); count = status; + usb_unanchor_urb(urb); kfree(buffer); } @@ -1401,9 +1409,16 @@ static int garmin_port_probe(struct usb_serial_port *port) garmin_data_p->state = 0; garmin_data_p->flags = 0; garmin_data_p->count = 0; + init_usb_anchor(&garmin_data_p->write_urbs); usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); + if (status) + goto err_free; + + return 0; +err_free: + kfree(garmin_data_p); return status; } @@ -1413,6 +1428,7 @@ static int garmin_port_remove(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); usb_kill_urb(port->interrupt_in_urb); del_timer_sync(&garmin_data_p->timer); kfree(garmin_data_p); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index bdf8bd814a9a..01f3ac7769f3 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2286,7 +2286,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port, /* something went wrong */ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", __func__, status); - usb_kill_urb(urb); usb_free_urb(urb); atomic_dec(&CmdUrbs); return status; diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c index 14511d6a7d44..3950d44b80f1 100644 --- a/drivers/usb/serial/metro-usb.c +++ b/drivers/usb/serial/metro-usb.c @@ -189,7 +189,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed submitting interrupt in urb, error code=%d\n", __func__, result); - goto exit; + return result; } /* Send activate cmd to device */ @@ -198,9 +198,14 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed to configure device, error code=%d\n", __func__, result); - goto exit; + goto err_kill_urb; } -exit: + + return 0; + +err_kill_urb: + usb_kill_urb(port->interrupt_in_urb); + return result; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ba672cf4e888..dcf78a498927 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -236,11 +236,14 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Qualcomm's vendor ID */ #define QUECTEL_PRODUCT_UC20 0x9003 #define QUECTEL_PRODUCT_UC15 0x9090 +/* These Yuga products use Qualcomm's vendor ID */ +#define YUGA_PRODUCT_CLM920_NC5 0x9625 #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_BG96 0x0296 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -282,6 +285,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 #define TELIT_PRODUCT_ME910 0x1100 +#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101 #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 @@ -379,6 +383,9 @@ static void option_instat_callback(struct urb *urb); #define 
FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 +/* Fujisoft products */ +#define FUJISOFT_PRODUCT_FS040U 0x9b02 + /* iBall 3.5G connect wireless modem */ #define IBALL_3_5G_CONNECT 0x9605 @@ -647,6 +654,11 @@ static const struct option_blacklist_info telit_me910_blacklist = { .reserved = BIT(1) | BIT(3), }; +static const struct option_blacklist_info telit_me910_dual_modem_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(3), +}; + static const struct option_blacklist_info telit_le910_blacklist = { .sendsetup = BIT(0), .reserved = BIT(1) | BIT(2), @@ -676,6 +688,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = { .reserved = BIT(4) | BIT(5), }; +static const struct option_blacklist_info yuga_clm920_nc5_blacklist = { + .reserved = BIT(1) | BIT(4), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -1180,11 +1196,16 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + /* Yuga products use Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), + .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1244,6 +1265,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), + .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1877,6 +1900,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), .driver_info = (kernel_ulong_t)&four_g_w100_blacklist }, + {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist}, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index a585b477415d..34c5a75f98a7 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -41,6 +41,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) }, { USB_DEVICE(PL2303_VENDOR_ID, 
PL2303_PRODUCT_ID_PHAROS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 3b5a15d1dc0d..123289085ee2 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -17,6 +17,7 @@ #define PL2303_PRODUCT_ID_DCU11 0x1234 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 +#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8 #define PL2303_PRODUCT_ID_ALDIGA 0x0611 #define PL2303_PRODUCT_ID_MMX 0x0612 #define PL2303_PRODUCT_ID_GPRS 0x0609 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index eb9928963a53..55a8fb25ce2b 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -148,6 +148,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ + {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ @@ -165,6 +166,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ + {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -345,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) break; case 2: dev_dbg(dev, "NMEA GPS interface found\n"); + sendsetup = true; break; case 3: dev_dbg(dev, "Modem port found\n"); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index e98b6e57b703..6aa7ff2c1cf7 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS); { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ DEVICE(moto_modem, MOTO_IDS); +/* Motorola Tetra driver */ +#define MOTOROLA_TETRA_IDS() \ + { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ +DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); + /* Novatel Wireless GPS driver */ #define NOVATEL_IDS() \ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &google_device, &vivopay_device, &moto_modem_device, + &motorola_tetra_device, &novatel_gps_device, &hp4x_device, &suunto_device, @@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = { GOOGLE_IDS(), VIVOPAY_IDS(), MOTO_IDS(), + MOTOROLA_TETRA_IDS(), NOVATEL_IDS(), HP4X_IDS(), SUUNTO_IDS(), diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 12f4c5a91e62..c593ca8800e5 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -34,13 +34,15 @@ static const struct usb_device_id id_table[] = { }; static const struct usb_device_id dbc_id_table[] = { - { 
USB_DEVICE(0x1d6b, 0x0004) }, + { USB_DEVICE(0x1d6b, 0x0010) }, + { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(0x0525, 0x127a) }, - { USB_DEVICE(0x1d6b, 0x0004) }, + { USB_DEVICE(0x1d6b, 0x0010) }, + { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table_combined); diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 1fcd758a961f..3734a25e09e5 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf, } } + /* All Seagate disk enclosures have broken ATA pass-through support */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) + flags |= US_FL_NO_ATA_1X; + usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 63cf981ed81c..25a281f876b5 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf) return 0; err = uas_configure_endpoints(devinfo); - if (err) { + if (err && err != -ENODEV) shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); - return 1; - } + /* we must unblock the host in every case lest we deadlock */ spin_lock_irqsave(shost->host_lock, flags); scsi_report_bus_reset(shost, 0); spin_unlock_irqrestore(shost->host_lock, flags); scsi_unblock_requests(shost); - return 0; + return err ? 1 : 0; } static int uas_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index eb06d88b41d6..52b3e6da0745 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2113,6 +2113,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA ), +/* Reported by David Kozub */ +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + "JMicron", + "JMS567", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + /* * Reported by Alexandre Oliva * JMicron responds to USN and several other SCSI ioctls with a @@ -2130,6 +2137,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA ), +/* Reported by Teijo Kinnunen */ +UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported-by George Cherian */ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, "JMicron", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index cde115359793..719ec68ae309 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -142,6 +142,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), +/* Reported-by: David Kozub */ +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + "JMicron", + "JMS567", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", @@ -149,6 +156,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Icenowy Zheng */ +UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999, + "Norelsys", + "NS1068X", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Takeo Nakayama */ 
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, "JMicron", diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c index e9c4e784a9cb..74607bb6cfc4 100644 --- a/drivers/usb/typec/typec_wcove.c +++ b/drivers/usb/typec/typec_wcove.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include /* Register offsets */ #define WCOVE_CHGRIRQ0 0x4e09 @@ -86,6 +88,7 @@ struct wcove_typec { struct typec_port *port; struct typec_capability cap; struct typec_partner *partner; + struct pci_dev *xhci_dev; }; enum wcove_typec_func { @@ -108,6 +111,18 @@ enum wcove_typec_role { static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49, 0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37); +static int wcove_access_xhci(struct wcove_typec *wcove, bool enable) +{ + if (wcove->xhci_dev) { + if (enable) { + pm_runtime_get_sync(&wcove->xhci_dev->dev); + } else { + pm_runtime_put(&wcove->xhci_dev->dev); + } + } + return 0; +} + static int wcove_typec_func(struct wcove_typec *wcove, enum wcove_typec_func func, int param) { @@ -205,7 +220,9 @@ static irqreturn_t wcove_typec_irq(int irq, void *data) WCOVE_ORIENTATION_NORMAL); /* This makes sure the device controller is disconnected */ + wcove_access_xhci(wcove, true); wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST); + wcove_access_xhci(wcove, false); /* Port to default role */ typec_set_data_role(wcove->port, TYPEC_DEVICE); @@ -257,11 +274,15 @@ static irqreturn_t wcove_typec_irq(int irq, void *data) } if (role == TYPEC_SINK) { + wcove_access_xhci(wcove, true); wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_DEVICE); + wcove_access_xhci(wcove, false); typec_set_data_role(wcove->port, TYPEC_DEVICE); typec_set_pwr_role(wcove->port, TYPEC_SINK); } else { + wcove_access_xhci(wcove, true); wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST); + wcove_access_xhci(wcove, false); typec_set_pwr_role(wcove->port, TYPEC_SOURCE); typec_set_data_role(wcove->port, TYPEC_HOST); } @@ -342,6 +363,8 @@ static int wcove_typec_probe(struct platform_device *pdev) regmap_write(wcove->regmap, USBC_IRQMASK2, val & ~USBC_IRQMASK2_ALL); platform_set_drvdata(pdev, wcove); + + wcove->xhci_dev = pci_get_class(PCI_CLASS_SERIAL_USB_XHCI, NULL); return 0; } diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index c653ce533430..b8915513fc84 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -87,6 +87,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, goto err; sdev->ud.tcp_socket = socket; + sdev->ud.sockfd = sockfd; spin_unlock_irq(&sdev->ud.lock); @@ -163,8 +164,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) * step 1? */ if (ud->tcp_socket) { - dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", - ud->tcp_socket); + dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } @@ -187,6 +187,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) if (ud->tcp_socket) { sockfd_put(ud->tcp_socket); ud->tcp_socket = NULL; + ud->sockfd = -1; } /* 3. 
free used data */ @@ -281,6 +282,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) sdev->ud.status = SDEV_ST_AVAILABLE; spin_lock_init(&sdev->ud.lock); sdev->ud.tcp_socket = NULL; + sdev->ud.sockfd = -1; INIT_LIST_HEAD(&sdev->priv_init); INIT_LIST_HEAD(&sdev->priv_tx); diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 7170404e8979..6968c906fa29 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -251,11 +251,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) struct stub_priv *priv; struct urb *urb; - dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); + dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); while ((priv = stub_priv_pop(sdev))) { urb = priv->urb; - dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); + dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", + priv->seqnum); usb_kill_urb(urb); kmem_cache_free(stub_priv_cache, priv); diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 191b176ffedf..5b807185f79e 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -225,9 +225,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, if (priv->seqnum != pdu->u.cmd_unlink.seqnum) continue; - dev_info(&priv->urb->dev->dev, "unlink urb %p\n", - priv->urb); - /* * This matched urb is not completed yet (i.e., be in * flight in usb hcd hardware/driver). Now we are @@ -266,8 +263,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, ret = usb_unlink_urb(priv->urb); if (ret != -EINPROGRESS) dev_err(&priv->urb->dev->dev, - "failed to unlink a urb %p, ret %d\n", - priv->urb, ret); + "failed to unlink a urb # %lu, ret %d\n", + priv->seqnum, ret); return 0; } @@ -336,23 +333,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, return priv; } -static int get_pipe(struct stub_device *sdev, int epnum, int dir) +static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) { struct usb_device *udev = sdev->udev; struct usb_host_endpoint *ep; struct usb_endpoint_descriptor *epd = NULL; + int epnum = pdu->base.ep; + int dir = pdu->base.direction; + + if (epnum < 0 || epnum > 15) + goto err_ret; if (dir == USBIP_DIR_IN) ep = udev->ep_in[epnum & 0x7f]; else ep = udev->ep_out[epnum & 0x7f]; - if (!ep) { - dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", - epnum); - BUG(); - } + if (!ep) + goto err_ret; epd = &ep->desc; + + /* validate transfer_buffer_length */ + if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { + dev_err(&sdev->udev->dev, + "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", + pdu->u.cmd_submit.transfer_buffer_length); + return -1; + } + if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); @@ -375,15 +383,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) } if (usb_endpoint_xfer_isoc(epd)) { + /* validate packet size and number of packets */ + unsigned int maxp, packets, bytes; + + maxp = usb_endpoint_maxp(epd); + maxp *= usb_endpoint_maxp_mult(epd); + bytes = pdu->u.cmd_submit.transfer_buffer_length; + packets = DIV_ROUND_UP(bytes, maxp); + + if (pdu->u.cmd_submit.number_of_packets < 0 || + pdu->u.cmd_submit.number_of_packets > packets) { + dev_err(&sdev->udev->dev, + "CMD_SUBMIT: isoc invalid num packets %d\n", + pdu->u.cmd_submit.number_of_packets); + return -1; + } if (dir == USBIP_DIR_OUT) return usb_sndisocpipe(udev, epnum); else return usb_rcvisocpipe(udev, epnum); } +err_ret: /* NOT REACHED */ - 
dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); - return 0; + dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); + return -1; } static void masking_bogus_flags(struct urb *urb) @@ -447,7 +471,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; struct usb_device *udev = sdev->udev; - int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); + int pipe = get_pipe(sdev, pdu); + + if (pipe == -1) + return; priv = stub_priv_alloc(sdev, pdu); if (!priv) @@ -466,7 +493,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, } /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0) { + if (pdu->u.cmd_submit.transfer_buffer_length > 0 && + pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) { priv->urb->transfer_buffer = kzalloc(pdu->u.cmd_submit.transfer_buffer_length, GFP_KERNEL); diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index be50cef645d8..96aa375b80d9 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -102,7 +102,7 @@ void stub_complete(struct urb *urb) /* link a urb to the queue of tx. */ spin_lock_irqsave(&sdev->priv_lock, flags); if (sdev->ud.tcp_socket == NULL) { - usbip_dbg_stub_tx("ignore urb for closed connection %p", urb); + usbip_dbg_stub_tx("ignore urb for closed connection\n"); /* It will be freed in stub_device_cleanup_urbs(). */ } else if (priv->unlinking) { stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); @@ -181,6 +181,13 @@ static int stub_send_ret_submit(struct stub_device *sdev) memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); + if (urb->actual_length > 0 && !urb->transfer_buffer) { + dev_err(&sdev->udev->dev, + "urb: actual_length %d transfer_buffer null\n", + urb->actual_length); + return -1; + } + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) iovnum = 2 + urb->number_of_packets; else @@ -197,8 +204,8 @@ static int stub_send_ret_submit(struct stub_device *sdev) /* 1. 
setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 2281f3562870..7f0d22131121 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -105,7 +105,7 @@ static void usbip_dump_usb_device(struct usb_device *udev) dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", udev->devnum, udev->devpath, usb_speed_string(udev->speed)); - pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); + pr_debug("tt hub ttport %d\n", udev->ttport); dev_dbg(dev, " "); for (i = 0; i < 16; i++) @@ -138,12 +138,8 @@ static void usbip_dump_usb_device(struct usb_device *udev) } pr_debug("\n"); - dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); - - dev_dbg(dev, - "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n", - &udev->descriptor, udev->config, - udev->actconfig, udev->rawdescriptors); + dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev), + udev->bus->bus_name); dev_dbg(dev, "have_langid %d, string_langid %d\n", udev->have_langid, udev->string_langid); @@ -251,9 +247,6 @@ void usbip_dump_urb(struct urb *urb) dev = &urb->dev->dev; - dev_dbg(dev, " urb :%p\n", urb); - dev_dbg(dev, " dev :%p\n", urb->dev); - usbip_dump_usb_device(urb->dev); dev_dbg(dev, " pipe :%08x ", urb->pipe); @@ -262,11 +255,9 @@ void usbip_dump_urb(struct urb *urb) dev_dbg(dev, " status :%d\n", urb->status); dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); - dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); dev_dbg(dev, " transfer_buffer_length:%d\n", urb->transfer_buffer_length); dev_dbg(dev, " actual_length :%d\n", urb->actual_length); - dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) usbip_dump_usb_ctrlrequest( @@ -276,8 +267,6 @@ void usbip_dump_urb(struct urb *urb) dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); dev_dbg(dev, " interval :%d\n", urb->interval); dev_dbg(dev, " error_count :%d\n", urb->error_count); - dev_dbg(dev, " context :%p\n", urb->context); - dev_dbg(dev, " complete :%p\n", urb->complete); } EXPORT_SYMBOL_GPL(usbip_dump_urb); @@ -331,26 +320,20 @@ int usbip_recv(struct socket *sock, void *buf, int size) struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; + if (!sock || !buf || !size) + return -EINVAL; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); usbip_dbg_xmit("enter\n"); - if (!sock || !buf || !size) { - pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, - size); - return -EINVAL; - } - do { - int sz = msg_data_left(&msg); + msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; result = sock_recvmsg(sock, &msg, MSG_WAITALL); - if (result <= 0) { - pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf + total, sz, result, total); + if (result <= 0) goto err; - } total += result; } while (msg_data_left(&msg)); diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 3050fc99a417..33737b612b1f 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -270,6 +270,7 @@ struct usbip_device { /* lock for status */ spinlock_t lock; + int sockfd; struct socket *tcp_socket; struct 
task_struct *tcp_rx; diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 11b9a22799cc..89858aeed647 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -670,9 +670,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct vhci_device *vdev; unsigned long flags; - usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", - hcd, urb, mem_flags); - if (portnum > VHCI_HC_PORTS) { pr_err("invalid port number %d\n", portnum); return -ENODEV; @@ -836,8 +833,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) struct vhci_device *vdev; unsigned long flags; - pr_info("dequeue a urb %p\n", urb); - spin_lock_irqsave(&vhci->lock, flags); priv = urb->hcpriv; @@ -865,7 +860,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* tcp connection is closed */ spin_lock(&vdev->priv_lock); - pr_info("device %p seems to be disconnected\n", vdev); list_del(&priv->list); kfree(priv); urb->hcpriv = NULL; @@ -877,8 +871,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) * vhci_rx will receive RET_UNLINK and give back the URB. * Otherwise, we give back it here. */ - pr_info("gives back urb %p\n", urb); - usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&vhci->lock, flags); @@ -906,8 +898,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) unlink->unlink_seqnum = priv->seqnum; - pr_info("device %p seems to be still connected\n", vdev); - /* send cmd_unlink and try to cancel the pending URB in the * peer */ list_add_tail(&unlink->list, &vdev->unlink_tx); @@ -989,7 +979,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) /* need this? see stub_dev.c */ if (ud->tcp_socket) { - pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); + pr_debug("shutdown tcp_socket %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } @@ -1008,6 +998,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) if (vdev->ud.tcp_socket) { sockfd_put(vdev->ud.tcp_socket); vdev->ud.tcp_socket = NULL; + vdev->ud.sockfd = -1; } pr_info("release socket\n"); @@ -1054,6 +1045,7 @@ static void vhci_device_reset(struct usbip_device *ud) if (ud->tcp_socket) { sockfd_put(ud->tcp_socket); ud->tcp_socket = NULL; + ud->sockfd = -1; } ud->status = VDEV_ST_NULL; @@ -1112,7 +1104,6 @@ static int hcd_name_to_id(const char *name) static int vhci_setup(struct usb_hcd *hcd) { struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller)); - hcd->self.sg_tablesize = ~0; if (usb_hcd_is_primary_hcd(hcd)) { vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd); vhci->vhci_hcd_hs->vhci = vhci; diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index ef2f2d5ca6b2..1343037d00f9 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) urb = priv->urb; status = urb->status; - usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n", - urb, priv, seqnum); + usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum); switch (status) { case -ENOENT: /* fall through */ case -ECONNRESET: - dev_info(&urb->dev->dev, - "urb %p was unlinked %ssynchronuously.\n", urb, - status == -ENOENT ? "" : "a"); + dev_dbg(&urb->dev->dev, + "urb seq# %u was unlinked %ssynchronuously\n", + seqnum, status == -ENOENT ? 
"" : "a"); break; case -EINPROGRESS: /* no info output */ break; default: - dev_info(&urb->dev->dev, - "urb %p may be in a error, status %d\n", urb, - status); + dev_dbg(&urb->dev->dev, + "urb seq# %u may be in a error, status %d\n", + seqnum, status); } list_del(&priv->list); @@ -81,8 +80,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, spin_unlock_irqrestore(&vdev->priv_lock, flags); if (!urb) { - pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); - pr_info("max seqnum %d\n", + pr_err("cannot find a urb of seqnum %u max seqnum %d\n", + pdu->base.seqnum, atomic_read(&vhci_hcd->seqnum)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; @@ -105,7 +104,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); spin_lock_irqsave(&vhci->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb); @@ -172,7 +171,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, pr_info("the urb (seqnum %d) was already given back\n", pdu->base.seqnum); } else { - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum); /* If unlink is successful, status is -ECONNRESET */ urb->status = pdu->u.ret_unlink.status; diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 1b9f60a22e0b..84df63e3130d 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -31,15 +31,20 @@ /* * output example: - * hub port sta spd dev socket local_busid - * hs 0000 004 000 00000000 c5a7bb80 1-2.3 + * hub port sta spd dev sockfd local_busid + * hs 0000 004 000 00000000 3 1-2.3 * ................................................ - * ss 0008 004 000 00000000 d8cee980 2-3.4 + * ss 0008 004 000 00000000 4 2-3.4 * ................................................ * - * IP address can be retrieved from a socket pointer address by looking - * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a - * port number and its peer IP address. + * Output includes socket fd instead of socket pointer address to avoid + * leaking kernel memory address in: + * /sys/devices/platform/vhci_hcd.0/status and in debug output. + * The socket pointer address is not used at the moment and it was made + * visible as a convenient way to find IP address from socket pointer + * address by looking up /proc/net/{tcp,tcp6}. As this opens a security + * hole, the change is made to use sockfd instead. + * */ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) { @@ -53,8 +58,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd if (vdev->ud.status == VDEV_ST_USED) { *out += sprintf(*out, "%03u %08x ", vdev->speed, vdev->devid); - *out += sprintf(*out, "%16p %s", - vdev->ud.tcp_socket, + *out += sprintf(*out, "%u %s", + vdev->ud.sockfd, dev_name(&vdev->udev->dev)); } else { @@ -174,7 +179,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr, char *s = out; /* - * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2. + * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, + * thus the * 2. 
*/ out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); return out - s; @@ -380,6 +386,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, vdev->devid = devid; vdev->speed = speed; + vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; vdev->ud.status = VDEV_ST_NOTASSIGNED; diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index 3e7878fe2fd4..a9a663a578b6 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -83,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) memset(&msg, 0, sizeof(msg)); memset(&iov, 0, sizeof(iov)); - usbip_dbg_vhci_tx("setup txdata urb %p\n", urb); + usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n", + priv->seqnum); /* 1. setup usbip_header */ setup_cmd_submit_pdu(&pdu_header, urb); diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c index e429b59f6f8a..d020e72b3122 100644 --- a/drivers/usb/usbip/vudc_rx.c +++ b/drivers/usb/usbip/vudc_rx.c @@ -132,6 +132,25 @@ static int v_recv_cmd_submit(struct vudc *udc, urb_p->new = 1; urb_p->seqnum = pdu->base.seqnum; + if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) { + /* validate packet size and number of packets */ + unsigned int maxp, packets, bytes; + + maxp = usb_endpoint_maxp(urb_p->ep->desc); + maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc); + bytes = pdu->u.cmd_submit.transfer_buffer_length; + packets = DIV_ROUND_UP(bytes, maxp); + + if (pdu->u.cmd_submit.number_of_packets < 0 || + pdu->u.cmd_submit.number_of_packets > packets) { + dev_err(&udc->gadget.dev, + "CMD_SUBMIT: isoc invalid num packets %d\n", + pdu->u.cmd_submit.number_of_packets); + ret = -EMSGSIZE; + goto free_urbp; + } + } + ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); if (ret) { usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 0f98f2c7475f..7efa374a4970 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -117,10 +117,14 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, if (rv != 0) return -EINVAL; + if (!udc) { + dev_err(dev, "no device"); + return -ENODEV; + } spin_lock_irqsave(&udc->lock, flags); /* Don't export what we don't have */ - if (!udc || !udc->driver || !udc->pullup) { - dev_err(dev, "no device or gadget not bound"); + if (!udc->driver || !udc->pullup) { + dev_err(dev, "gadget not bound"); ret = -ENODEV; goto unlock; } diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c index 234661782fa0..3ab4c86486a7 100644 --- a/drivers/usb/usbip/vudc_tx.c +++ b/drivers/usb/usbip/vudc_tx.c @@ -97,6 +97,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); + if (urb->actual_length > 0 && !urb->transfer_buffer) { + dev_err(&udc->gadget.dev, + "urb: actual_length %d transfer_buffer null\n", + urb->actual_length); + return -1; + } + if (urb_p->type == USB_ENDPOINT_XFER_ISOC) iovnum = 2 + urb->number_of_packets; else @@ -112,8 +119,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) /* 1. 
setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb_p); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; diff --git a/drivers/vbs/Kconfig b/drivers/vbs/Kconfig new file mode 100644 index 000000000000..c7a5c6ca25b8 --- /dev/null +++ b/drivers/vbs/Kconfig @@ -0,0 +1,30 @@ +# +# This Kconfig describes VBS for ACRN hypervisor +# +config VBS + bool "Enable VBS framework for ACRN hypervisor" + depends on ACRN + default n + ---help--- + This option is selected by any driver which needs to use + the Virtio Backend Service (VBS) framework on ACRN + hypervisor. + +config VBS_DEBUG + bool "ACRN VBS debugging" + depends on VBS != n + default n + ---help--- + This is an option for use by developers; most people should + say N here. This enables ACRN VBS debugging. + +config VBS_RNG + bool "ACRN VBS reference driver: virtio RNG" + depends on VBS != n + default n + ---help--- + Say M or * here to enable a VBS-K reference driver for ACRN + hypervisor, virtio RNG driver, to work with virtio-rng + frontend driver in guest. + The reference driver shows an example on how to use VBS-K + APIs. diff --git a/drivers/vbs/Makefile b/drivers/vbs/Makefile new file mode 100644 index 000000000000..85e1cc252197 --- /dev/null +++ b/drivers/vbs/Makefile @@ -0,0 +1,6 @@ +ccflags-$(CONFIG_VBS_DEBUG) := -DDEBUG + +obj-$(CONFIG_VBS) += vbs.o +obj-$(CONFIG_VBS) += vq.o + +obj-$(CONFIG_VBS_RNG) += vbs_rng.o diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c new file mode 100644 index 000000000000..f6445996f371 --- /dev/null +++ b/drivers/vbs/vbs.c @@ -0,0 +1,321 @@ +/* + * ACRN Project + * Virtio Backend Service (VBS) for ACRN hypervisor + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: Hao Li + * + * BSD LICENSE + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Hao Li + * Created Virtio Backend Service (VBS) framework: + * - VBS-K is a kernel-level virtio framework that can be used for + * virtio backend driver development for ACRN hypervisor. + * - VBS-K should be working with VBS-U (Virtio Backend Service in + * User) together, in order to connect with virtio frontend driver. + * - VBS-K mainly handles data plane part of a virtio backend driver, + * such as virtqueue parsing and processing, while VBS-U mainly + * hanldes control plane part. + */ + +#include +#include +#include +#include + +long virtio_dev_register(struct virtio_dev_info *dev) +{ + struct vm_info info; + int ret; + + pr_debug("vmid is %d\n", dev->_ctx.vmid); + + if (dev->dev_notify == NULL) { + pr_err("%s dev_notify empty!\n", dev->name); + goto err; + } + + /* + * dev->name is 32 chars while vhm only accepts 16 chars + * at most, so we make sure there will be a NULL + * terminator for the chars. + */ + dev->name[15] = '\0'; + dev->_ctx.vhm_client_id = + acrn_ioreq_create_client(dev->_ctx.vmid, + dev->dev_notify, + dev->name); + if (dev->_ctx.vhm_client_id < 0) { + pr_err("failed to create client of ioreq!\n"); + goto err; + } + + ret = acrn_ioreq_add_iorange(dev->_ctx.vhm_client_id, + dev->io_range_type ? REQ_MMIO : REQ_PORTIO, + dev->io_range_start, + dev->io_range_start + dev->io_range_len - 1); + if (ret < 0) { + pr_err("failed to add iorange to ioreq!\n"); + goto err; + } + + /* feed up max_cpu and req_buf */ + ret = vhm_get_vm_info(dev->_ctx.vmid, &info); + if (ret < 0) { + pr_err("failed in vhm_get_vm_info!\n"); + goto range_err; + } + dev->_ctx.max_vcpu = info.max_vcpu; + + dev->_ctx.req_buf = acrn_ioreq_get_reqbuf(dev->_ctx.vhm_client_id); + if (dev->_ctx.req_buf == NULL) { + pr_err("failed in ioreq_get_reqbuf!\n"); + goto range_err; + } + + acrn_ioreq_attach_client(dev->_ctx.vhm_client_id, 0); + + return 0; + +range_err: + acrn_ioreq_del_iorange(dev->_ctx.vhm_client_id, + dev->io_range_type ? REQ_MMIO : REQ_PORTIO, + dev->io_range_start, + dev->io_range_start + dev->io_range_len); + +err: + acrn_ioreq_destroy_client(dev->_ctx.vhm_client_id); + + return -EINVAL; +} + +long virtio_dev_deregister(struct virtio_dev_info *dev) +{ + acrn_ioreq_del_iorange(dev->_ctx.vhm_client_id, + dev->io_range_type ? 
REQ_MMIO : REQ_PORTIO, + dev->io_range_start, + dev->io_range_start + dev->io_range_len); + + acrn_ioreq_destroy_client(dev->_ctx.vhm_client_id); + + return 0; +} + +int virtio_vq_index_get(struct virtio_dev_info *dev, int req_cnt) +{ + int val = -1; + struct vhm_request *req; + int i; + + if (unlikely(req_cnt <= 0)) + return -EINVAL; + + if (dev == NULL) { + pr_err("%s: dev is NULL!\n", __func__); + return -EINVAL; + } + + for (i = 0; i < dev->_ctx.max_vcpu; i++) { + req = &dev->_ctx.req_buf[i]; + if (req->valid && req->processed == REQ_STATE_PROCESSING && + req->client == dev->_ctx.vhm_client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) { + /* currently we handle kick only, + * so read will return 0 + */ + pr_debug("%s: read request!\n", __func__); + if (dev->io_range_type == PIO_RANGE) + req->reqs.pio_request.value = 0; + else + req->reqs.mmio_request.value = 0; + } else { + pr_debug("%s: write request! type %d\n", + __func__, req->type); + if (dev->io_range_type == PIO_RANGE) + val = req->reqs.pio_request.value; + else + val = req->reqs.mmio_request.value; + } + req->processed = REQ_STATE_SUCCESS; + acrn_ioreq_complete_request(dev->_ctx.vhm_client_id, i); + } + } + + return val; +} + +static long virtio_vqs_info_set(struct virtio_dev_info *dev, + struct vbs_vqs_info __user *i) +{ + struct vbs_vqs_info info; + struct virtio_vq_info *vq; + int j; + + vq = dev->vqs; + + if (copy_from_user(&info, i, sizeof(struct vbs_vqs_info))) + return -EFAULT; + + /* setup struct virtio_vq_info based on info in struct vbs_vq_info */ + if (dev->nvq && dev->nvq != info.nvq) { + pr_err("Oops! dev's nvq != vqs's nvq. Not the same device?\n"); + return -EFAULT; + } + + for (j = 0; j < info.nvq; j++) { + vq->qsize = info.vqs[j].qsize; + vq->pfn = info.vqs[j].pfn; + vq->msix_idx = info.vqs[j].msix_idx; + vq->msix_addr = info.vqs[j].msix_addr; + vq->msix_data = info.vqs[j].msix_data; + + pr_debug("msix id %x, addr %llx, data %x\n", vq->msix_idx, + vq->msix_addr, vq->msix_data); + + virtio_vq_init(vq, vq->pfn); + + vq++; + } + + return 0; +} + +/* invoked by VBS-K device's ioctl routine */ +long virtio_vqs_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp) +{ + long ret; + + /* + * Currently we don't conduct ownership checking, + * but assuming caller would have device mutex. + */ + + switch (ioctl) { + case VBS_SET_VQ: + ret = virtio_vqs_info_set(dev, argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} +EXPORT_SYMBOL_GPL(virtio_vqs_ioctl); + +static long virtio_dev_info_set(struct virtio_dev_info *dev, + struct vbs_dev_info __user *i) +{ + struct vbs_dev_info info; + + if (copy_from_user(&info, i, sizeof(struct vbs_dev_info))) + return -EFAULT; + + /* setup struct virtio_dev_info based on info in vbs_dev_info */ + strncpy(dev->name, info.name, VBS_NAME_LEN); + dev->_ctx.vmid = info.vmid; + dev->nvq = info.nvq; + dev->negotiated_features = info.negotiated_features; + dev->io_range_start = info.pio_range_start; + dev->io_range_len = info.pio_range_len; + dev->io_range_type = PIO_RANGE; + + return 0; +} + +/* invoked by VBS-K device's ioctl routine */ +long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp) +{ + long ret; + + /* + * Currently we don't conduct ownership checking, + * but assuming caller would have device mutex. 
+ */ + + switch (ioctl) { + case VBS_SET_DEV: + ret = virtio_dev_info_set(dev, argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} +EXPORT_SYMBOL_GPL(virtio_dev_ioctl); + +/* called in VBS-K device's .open() */ +long virtio_dev_init(struct virtio_dev_info *dev, + struct virtio_vq_info *vqs, int nvq) +{ + int i; + + for (i = 0; i < nvq; i++) + virtio_vq_reset(&vqs[i]); + + return 0; +} +EXPORT_SYMBOL_GPL(virtio_dev_init); + +static int __init vbs_init(void) +{ + return 0; +} + +static void __exit vbs_exit(void) +{ +} + +module_init(vbs_init); +module_exit(vbs_exit); + +MODULE_VERSION("0.1"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_DESCRIPTION("Virtio Backend Service framework for ACRN hypervisor"); diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c new file mode 100644 index 000000000000..88f6108ca2e8 --- /dev/null +++ b/drivers/vbs/vbs_rng.c @@ -0,0 +1,494 @@ +/* + * ACRN Project + * Virtio Backend Service (VBS) for ACRN hypervisor + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: Hao Li + * + * BSD LICENSE + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Hao Li + * VBS-K Reference Driver: virtio-rng + * - Each VBS-K driver exports a char device to /dev/, e.g. 
/dev/vbs_rng; + * - Each VBS-K driver uses Virtqueue APIs to interact with the virtio + * frontend driver in guest; + * - Each VBS-K driver registers itelf as VHM (Virtio and Hypervisor + * service Module) client, which enables in-kernel handling of register + * access of virtio device; + * - Each VBS-K driver could maintain the connections, from VBS-U, in a + * list/table, so that it could serve multiple guests. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +enum { + VBS_K_RNG_VQ = 0, + VBS_K_RNG_VQ_MAX = 1, +}; + +#define VTRND_RINGSZ 64 + +/* VBS-K features if any */ +/* + *enum { + * VBS_K_RNG_FEATURES = VBS_K_FEATURES | + * (1ULL << VIRTIO_F_VERSION_1), + *}; + */ + +/** + * struct vbs_rng - Backend of virtio-rng based on VBS-K + * + * @dev : instance of struct virtio_dev_info + * @vqs : instances of struct virtio_vq_info + * @hwrng : device specific member + * @node : hashtable maintaining multiple connections + * from multiple guests/devices + */ +struct vbs_rng { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[VBS_K_RNG_VQ_MAX]; + /* Below could be device specific members */ + struct hwrng hwrng; + /* + * Each VBS-K module might serve multiple connections + * from multiple guests/device models/VBS-Us, so better + * to maintain the connections in a list, and here we + * use hashtable as an example. + */ + struct hlist_node node; +}; + +#define RNG_MAX_HASH_BITS 4 /* MAX is 2^4 */ +#define HASH_NAME vbs_rng_hash + +DECLARE_HASHTABLE(HASH_NAME, RNG_MAX_HASH_BITS); +static int vbs_rng_hash_initialized = 0; +static int vbs_rng_connection_cnt = 0; + +/* function declarations */ +static int handle_kick(int client_id, int req_cnt); +static void vbs_rng_reset(struct vbs_rng *rng); +static void vbs_rng_stop(struct vbs_rng *rng); +static void vbs_rng_flush(struct vbs_rng *rng); +#ifdef RUNTIME_CTRL +static int vbs_rng_enable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +static void vbs_rng_disable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +static void vbs_rng_stop_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +static void vbs_rng_flush_vq(struct vbs_rng *rng, int index); +#endif + +/* hash table related functions */ +static void vbs_rng_hash_init(void) +{ + if (vbs_rng_hash_initialized) + return; + + hash_init(HASH_NAME); + vbs_rng_hash_initialized = 1; +} + +static int vbs_rng_hash_add(struct vbs_rng *entry) +{ + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_add(HASH_NAME, &entry->node, virtio_dev_client_id(&entry->dev)); + return 0; +} + +static struct vbs_rng *vbs_rng_hash_find(int client_id) +{ + struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return NULL; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) + return entry; + + pr_err("Not found item matching client_id!\n"); + return NULL; +} + +static int vbs_rng_hash_del(int client_id) +{ + struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) { + hash_del(&entry->node); + return 0; + } + + pr_err("%s failed, not found matching client_id!\n", + __func__); + return -1; +} + +static int vbs_rng_hash_del_all(void) +{ + struct vbs_rng *entry; + int bkt; + + if 
(!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + hash_del(&entry->node); + + return 0; +} + +static void handle_vq_kick(struct vbs_rng *rng, int vq_idx) +{ + struct iovec iov; + struct vbs_rng *sc; + struct virtio_vq_info *vq; + int len; + uint16_t idx; + + pr_debug("%s: vq_idx %d\n", __func__, vq_idx); + + sc = rng; + + if (!sc) { + pr_err("rng is NULL! Cannot proceed!\n"); + return; + } + + vq = &(sc->vqs[vq_idx]); + + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + + /* device specific operations, for example: */ + /* len = read(sc->vrsc_fd, iov.iov_base, iov.iov_len); */ + pr_debug("iov base %p len %lx\n", iov.iov_base, iov.iov_len); + + /* let's generate some cool data... :-) */ + len = iov.iov_len; + + pr_debug("vtrnd: vtrnd_notify(): %d\r\n", len); + + /* + * Release this chain and handle more + */ + virtio_vq_relchain(vq, idx, len); + } + virtio_vq_endchains(vq, 1); /* Generate interrupt if appropriate. */ +} + +static int handle_kick(int client_id, int req_cnt) +{ + int val = -1; + struct vbs_rng *rng; + + if (unlikely(req_cnt <= 0)) + return -EINVAL; + + pr_debug("%s: handle kick!\n", __func__); + + rng = vbs_rng_hash_find(client_id); + if (rng == NULL) { + pr_err("%s: client %d not found!\n", + __func__, client_id); + return -EINVAL; + } + + val = virtio_vq_index_get(&rng->dev, req_cnt); + + if (val >= 0) + handle_vq_kick(rng, val); + + return 0; +} + +static int vbs_rng_open(struct inode *inode, struct file *f) +{ + struct vbs_rng *rng; + struct virtio_dev_info *dev; + struct virtio_vq_info *vqs; + int i; + + rng = kmalloc(sizeof(*rng), GFP_KERNEL); + if (rng == NULL) { + pr_err("Failed to allocate memory for vbs_rng!\n"); + return -ENOMEM; + } + + dev = &rng->dev; + strncpy(dev->name, "vbs_rng", VBS_NAME_LEN); + dev->dev_notify = handle_kick; + vqs = (struct virtio_vq_info *)&rng->vqs; + + for (i = 0; i < VBS_K_RNG_VQ_MAX; i++) { + vqs[i].dev = dev; + /* + * Currently relies on VHM to kick us, + * thus vq_notify not used + */ + vqs[i].vq_notify = NULL; + } + + /* link dev and vqs */ + dev->vqs = vqs; + + virtio_dev_init(dev, vqs, VBS_K_RNG_VQ_MAX); + + f->private_data = rng; + + /* init a hash table to maintain multi-connections */ + vbs_rng_hash_init(); + + return 0; +} + +static int vbs_rng_release(struct inode *inode, struct file *f) +{ + struct vbs_rng *rng = f->private_data; + int i; + + if (!rng) + pr_err("%s: UNLIKELY rng NULL!\n", + __func__); + + vbs_rng_stop(rng); + vbs_rng_flush(rng); + for (i = 0; i < VBS_K_RNG_VQ_MAX; i++) + virtio_vq_reset(&(rng->vqs[i])); + + /* device specific release */ + vbs_rng_reset(rng); + + pr_debug("vbs_rng_connection cnt is %d\n", + vbs_rng_connection_cnt); + + if (rng && vbs_rng_connection_cnt--) + vbs_rng_hash_del(virtio_dev_client_id(&rng->dev)); + if (!vbs_rng_connection_cnt) { + pr_debug("vbs_rng remove all hash entries\n"); + vbs_rng_hash_del_all(); + } + + kfree(rng); + + pr_debug("%s done\n", __func__); + return 0; +} + +static long vbs_rng_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct vbs_rng *rng = f->private_data; + void __user *argp = (void __user *)arg; + /*u64 __user *featurep = argp;*/ + /*u64 features;*/ + int r; + + switch (ioctl) { +/* + * case VHOST_GET_FEATURES: + * features = VHOST_NET_FEATURES; + * if (copy_to_user(featurep, &features, sizeof features)) + * return -EFAULT; + * return 0; + * case VHOST_SET_FEATURES: + * if (copy_from_user(&features, featurep, 
sizeof features)) + * return -EFAULT; + * if (features & ~VHOST_NET_FEATURES) + * return -EOPNOTSUPP; + * return vhost_net_set_features(n, features); + */ + case VBS_SET_VQ: + /* + * we handle this here because we want to register VHM client + * after handling VBS_K_SET_VQ request + */ + pr_debug("VBS_K_SET_VQ ioctl:\n"); + r = virtio_vqs_ioctl(&rng->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) { + pr_err("VBS_K_SET_VQ: virtio_vqs_ioctl failed!\n"); + return -EFAULT; + } + /* Register VHM client */ + if (virtio_dev_register(&rng->dev) < 0) { + pr_err("failed to register VHM client!\n"); + return -EFAULT; + } + /* Added to local hash table */ + if (vbs_rng_hash_add(rng) < 0) { + pr_err("failed to add to hashtable!\n"); + return -EFAULT; + } + /* Increment counter */ + vbs_rng_connection_cnt++; + return r; + default: + /*mutex_lock(&n->dev.mutex);*/ + pr_debug("VBS_K generic ioctls!\n"); + r = virtio_dev_ioctl(&rng->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + r = virtio_vqs_ioctl(&rng->dev, ioctl, argp); + else + vbs_rng_flush(rng); + /*mutex_unlock(&n->dev.mutex);*/ + return r; + } +} + +/* device specific function to cleanup itself */ +static void vbs_rng_reset(struct vbs_rng *rng) +{ +} + +/* device specific function */ +static void vbs_rng_stop(struct vbs_rng *rng) +{ + virtio_dev_deregister(&rng->dev); +} + +/* device specific function */ +static void vbs_rng_flush(struct vbs_rng *rng) +{ +} + +#ifdef RUNTIME_CTRL +/* device specific function */ +static int vbs_rng_enable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +{ + return 0; +} + +/* device specific function */ +static void vbs_rng_disable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void vbs_rng_stop_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void vbs_rng_flush_vq(struct vbs_rng *rng, int index) +{ +} + +static struct hwrng get_hwrng(struct vbs_rng *rng) +{ + return rng->hwrng; +} + +/* Set feature bits in kernel side device */ +static int vbs_rng_set_features(struct vbs_rng *rng, u64 features) +{ + return 0; +} +#endif + +static const struct file_operations vbs_rng_fops = { + .owner = THIS_MODULE, + .release = vbs_rng_release, + .unlocked_ioctl = vbs_rng_ioctl, + .open = vbs_rng_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_rng_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_rng", + .fops = &vbs_rng_fops, +}; + +static int vbs_rng_init(void) +{ + return misc_register(&vbs_rng_misc); +} +module_init(vbs_rng_init); + +static void vbs_rng_exit(void) +{ + misc_deregister(&vbs_rng_misc); +} +module_exit(vbs_rng_exit); + +MODULE_VERSION("0.1"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_DESCRIPTION("Virtio Backend Service reference driver on ACRN hypervisor"); diff --git a/drivers/vbs/vq.c b/drivers/vbs/vq.c new file mode 100644 index 000000000000..9f7a829c7a67 --- /dev/null +++ b/drivers/vbs/vq.c @@ -0,0 +1,395 @@ +/* + * ACRN Project + * Virtio Backend Service (VBS) for ACRN hypervisor + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: Hao Li + * + * BSD LICENSE + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Chris Torek + * Hao Li + * Created Virtqueue APIs for ACRN VBS framework: + * - VBS-K is a kernel-level virtio framework that can be used for + * virtio backend driver development for ACRN hypervisor. + * - Virtqueue APIs abstract away the details of the internal data + * structures of virtqueue, so that callers could easily access + * the data from guest through virtqueues. + */ + +#include +#include +#include +#include + +/* helper function for remote memory map */ +void * paddr_guest2host(struct ctx *ctx, uintptr_t gaddr, size_t len) +{ + return map_guest_phys(ctx->vmid, gaddr, len); +} + +/* + * helper function for vq_getchain(): + * record the i'th "real" descriptor. + */ +static inline void _vq_record(int i, volatile struct virtio_desc *vd, + struct ctx *ctx, struct iovec *iov, + int n_iov, uint16_t *flags) +{ + if (i >= n_iov) + return; + + iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len); + iov[i].iov_len = vd->len; + + if (flags != NULL) + flags[i] = vd->flags; +} + +/* + * Walk descriptor table and put requests into iovec. + * + * Examine the chain of descriptors starting at the "next one" to + * make sure that they describe a sensible request. If so, return + * the number of "real" descriptors that would be needed/used in + * acting on this request. This may be smaller than the number of + * available descriptors, e.g., if there are two available but + * they are two separate requests, this just returns 1. Or, it + * may be larger: if there are indirect descriptors involved, + * there may only be one descriptor available but it may be an + * indirect pointing to eight more. 
We return 8 in this case, + * i.e., we do not count the indirect descriptors, only the "real" + * ones. + * + * Basically, this vets the vd_flags and vd_next field of each + * descriptor and tells you how many are involved. Since some may + * be indirect, this also needs the vmctx (in the pci_vdev + * at vc->vc_pi) so that it can find indirect descriptors. + * + * As we process each descriptor, we copy and adjust it (guest to + * host address wise, also using the vmtctx) into the given iov[] + * array (of the given size). If the array overflows, we stop + * placing values into the array but keep processing descriptors, + * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1. + * So you, the caller, must not assume that iov[] is as big as the + * return value (you can process the same thing twice to allocate + * a larger iov array if needed, or supply a zero length to find + * out how much space is needed). + * + * If you want to verify the WRITE flag on each descriptor, pass a + * non-NULL "flags" pointer to an array of "uint16_t" of the same size + * as n_iov and we'll copy each vd_flags field after unwinding any + * indirects. + * + * If some descriptor(s) are invalid, this prints a diagnostic message + * and returns -1. If no descriptors are ready now it simply returns 0. + * + * You are assumed to have done a vq_ring_ready() if needed (note + * that vq_has_descs() does one). + */ +int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx, + struct iovec *iov, int n_iov, uint16_t *flags) +{ + int i; + unsigned int ndesc, n_indir; + unsigned int idx, next; + struct ctx *ctx; + struct virtio_dev_info *dev; + const char *name; + + volatile struct virtio_desc *vdir, *vindir, *vp; + + dev = vq->dev; + name = dev->name; + + /* + * Note: it's the responsibility of the guest not to + * update vq->vq_avail->va_idx until all of the descriptors + * the guest has written are valid (including all their + * vd_next fields and vd_flags). + * + * Compute (last_avail - va_idx) in integers mod 2**16. This is + * the number of descriptors the device has made available + * since the last time we updated vq->vq_last_avail. + * + * We just need to do the subtraction as an unsigned int, + * then trim off excess bits. + */ + idx = vq->last_avail; + ndesc = (uint16_t)((unsigned int)vq->avail->idx - idx); + + if (ndesc == 0) + return 0; + + if (ndesc > vq->qsize) { + /* XXX need better way to diagnose issues */ + pr_err("%s: ndesc (%u) out of range, driver confused?\r\n", + name, (unsigned int)ndesc); + return -1; + } + + /* + * Now count/parse "involved" descriptors starting from + * the head of the chain. + * + * To prevent loops, we could be more complicated and + * check whether we're re-visiting a previously visited + * index, but we just abort if the count gets excessive. 
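 *
 * Editor's note (illustrative addition, not part of the original patch):
 * each struct virtio_desc occupies 16 bytes (u64 addr, u32 len, u16 flags,
 * u16 next), so an indirect descriptor whose len field is, say, 48 points
 * at a table of 48 / 16 = 3 "real" descriptors. The indirect handling
 * below derives n_indir the same way and rejects any len that is zero or
 * not a multiple of 16 as an invalid indirect length.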
+ */ + ctx = &dev->_ctx; + *pidx = next = vq->avail->ring[idx & (vq->qsize - 1)]; + vq->last_avail++; + for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) { + if (next >= vq->qsize) { + pr_err("%s: descriptor index %u out of range, " + "driver confused?\r\n", name, next); + return -1; + } + vdir = &vq->desc[next]; + if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) { + _vq_record(i, vdir, ctx, iov, n_iov, flags); + i++; + } else if ((dev->negotiated_features & + VIRTIO_RING_F_INDIRECT_DESC) == 0) { + pr_err("%s: descriptor has forbidden INDIRECT flag, " + "driver confused?\r\n", name); + return -1; + } else { + n_indir = vdir->len / 16; + if ((vdir->len & 0xf) || n_indir == 0) { + pr_err("%s: invalid indir len 0x%x, " + "driver confused?\r\n", name, + (unsigned int)vdir->len); + return -1; + } + vindir = paddr_guest2host(ctx, vdir->addr, vdir->len); + /* + * Indirects start at the 0th, then follow + * their own embedded "next"s until those run + * out. Each one's indirect flag must be off + * (we don't really have to check, could just + * ignore errors...). + */ + next = 0; + for (;;) { + vp = &vindir[next]; + if (vp->flags & VRING_DESC_F_INDIRECT) { + pr_err("%s: indirect desc has INDIR flag," + " driver confused?\r\n", name); + return -1; + } + _vq_record(i, vp, ctx, iov, n_iov, flags); + if (++i > VQ_MAX_DESCRIPTORS) + goto loopy; + if ((vp->flags & VRING_DESC_F_NEXT) == 0) + break; + next = vp->next; + if (next >= n_indir) { + pr_err("%s: invalid next %u > %u, " + "driver confused?\r\n", + name, (unsigned int)next, n_indir); + return -1; + } + } + } + if ((vdir->flags & VRING_DESC_F_NEXT) == 0) + return i; + } +loopy: + pr_err("%s: descriptor loop? count > %d - driver confused?\r\n", + name, i); + return -1; +} + +/* + * Return the currently-first request chain back to the available queue. + * + * (This chain is the one you handled when you called vq_getchain() + * and used its positive return value.) + */ +void virtio_vq_retchain(struct virtio_vq_info *vq) +{ + vq->last_avail--; +} + +/* + * Return specified request chain to the guest, setting its I/O length + * to the provided value. + * + * (This chain is the one you handled when you called vq_getchain() + * and used its positive return value.) + */ +void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, + uint32_t iolen) +{ + uint16_t uidx, mask; + volatile struct vring_used *vuh; + volatile struct virtio_used *vue; + + /* + * Notes: + * - mask is N-1 where N is a power of 2 so computes x % N + * - vuh points to the "used" data shared with guest + * - vue points to the "used" ring entry we want to update + * - head is the same value we compute in vq_iovecs(). + * + * (I apologize for the two fields named vu_idx; the + * virtio spec calls the one that vue points to, "id"...) + */ + mask = vq->qsize - 1; + vuh = vq->used; + + uidx = vuh->idx; + vue = &vuh->ring[uidx++ & mask]; + vue->idx = idx; + vue->len = iolen; + vuh->idx = uidx; +} + +/* + * Driver has finished processing "available" chains and calling + * vq_relchain on each one. If driver used all the available + * chains, used_all should be set. + * + * If the "used" index moved we may need to inform the guest, i.e., + * deliver an interrupt. Even if the used index did NOT move we + * may need to deliver an interrupt, if the avail ring is empty and + * we are supposed to interrupt on empty. 
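 *
 * Editor's note (illustrative addition, not part of the original patch):
 * the EVENT_IDX branch below relies on unsigned 16-bit wrap-around
 * arithmetic. For example, with old_idx = 0xfffe, event_idx = 0xffff and
 * new_idx = 0x0001:
 *   (uint16_t)(new_idx - event_idx - 1) == 0x0001
 *   (uint16_t)(new_idx - old_idx)       == 0x0003
 * so 1 < 3 holds and an interrupt is raised, which is the desired result:
 * the used index crossed the guest's event threshold even though the raw
 * counters wrapped past zero.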
+ * + * Note that used_all_avail is provided by the caller because it's + * a snapshot of the ring state when he decided to finish interrupt + * processing -- it's possible that descriptors became available after + * that point. (It's also typically a constant 1/True as well.) + */ +void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail) +{ + struct virtio_dev_info *dev; + uint16_t event_idx, new_idx, old_idx; + int intr; + + /* + * Interrupt generation: if we're using EVENT_IDX, + * interrupt if we've crossed the event threshold. + * Otherwise interrupt is generated if we added "used" entries, + * but suppressed by VRING_AVAIL_F_NO_INTERRUPT. + * + * In any case, though, if NOTIFY_ON_EMPTY is set and the + * entire avail was processed, we need to interrupt always. + */ + dev = vq->dev; + old_idx = vq->save_used; + vq->save_used = new_idx = vq->used->idx; + if (used_all_avail && + (dev->negotiated_features & VIRTIO_F_NOTIFY_ON_EMPTY)) + intr = 1; + else if (dev->negotiated_features & VIRTIO_RING_F_EVENT_IDX) { + event_idx = VQ_USED_EVENT_IDX(vq); + /* + * This calculation is per docs and the kernel + * (see src/sys/dev/virtio/virtio_ring.h). + */ + intr = (uint16_t)(new_idx - event_idx - 1) < + (uint16_t)(new_idx - old_idx); + } else { + intr = new_idx != old_idx && + !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT); + } + if (intr) + virtio_vq_interrupt(dev, vq); +} + +/* + * Initialize the currently-selected virtqueue. + * The guest just gave us a page frame number, from which we can + * calculate the addresses of the queue. + */ +void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn) +{ + uint64_t phys; + size_t size; + char *base; + struct ctx *ctx; + + ctx = &vq->dev->_ctx; + + phys = (uint64_t)pfn << VRING_PAGE_BITS; + size = virtio_vq_ring_size(vq->qsize); + base = paddr_guest2host(ctx, phys, size); + + /* First page(s) are descriptors... */ + vq->desc = (struct virtio_desc *)base; + base += vq->qsize * sizeof(struct virtio_desc); + + /* ... immediately followed by "avail" ring (entirely uint16_t's) */ + vq->avail = (struct vring_avail *)base; + base += (2 + vq->qsize + 1) * sizeof(uint16_t); + + /* Then it's rounded up to the next page... */ + base = (char *)roundup2((uintptr_t)base, VRING_ALIGN); + + /* ... and the last page(s) are the used ring. */ + vq->used = (struct vring_used *)base; + + /* Mark queue as allocated, and start at 0 when we use it. */ + vq->flags = VQ_ALLOC; + vq->last_avail = 0; + vq->save_used = 0; +} + +/* reset one virtqueue, make it invalid */ +void virtio_vq_reset(struct virtio_vq_info *vq) +{ + if (!vq) { + pr_info("%s: vq is NULL!\n", __func__); + return; + } + + vq->pfn = 0; + vq->msix_idx = VIRTIO_MSI_NO_VECTOR; + vq->flags = 0; + vq->last_avail = 0; + vq->save_used = 0; +} diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 5628fe114347..91335e6de88a 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -849,11 +849,13 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm) /* * Allow writes to device control fields, except devctl_phantom, - * which could confuse IOMMU, and the ARI bit in devctl2, which + * which could confuse IOMMU, MPS, which can break communication + * with other physical devices, and the ARI bit in devctl2, which * is set at probe time. FLR gets virtualized via our writefn. 
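[Editorial aside] Returning to the EVENT_IDX branch in virtio_vq_endchains() above: the wrap-safe comparison can be verified standalone with a few sample indices (numbers chosen only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Interrupt only if the used index moved past the guest's
	 * used_event threshold since our last update; all arithmetic
	 * is mod 2**16, exactly as in virtio_vq_endchains().
	 */
	static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
	{
		return (uint16_t)(new_idx - event_idx - 1) <
		       (uint16_t)(new_idx - old_idx);
	}

	int main(void)
	{
		/* guest asked to be notified once the used index passes 10 */
		printf("%d\n", need_event(10, 11, 9));  /* 1: just crossed it   */
		printf("%d\n", need_event(10, 10, 9));  /* 0: not past it yet   */
		printf("%d\n", need_event(10, 12, 11)); /* 0: crossed earlier   */
		return 0;
	}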
*/ p_setw(perm, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM); + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD, + ~PCI_EXP_DEVCTL_PHANTOM); p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); return 0; } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 92155cce926d..fb4e6a7ee521 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, { struct page *page[1]; struct vm_area_struct *vma; + struct vm_area_struct *vmas[1]; int ret; if (mm == current->mm) { - ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), - page); + ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), + page, vmas); } else { unsigned int flags = 0; @@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, down_read(&mm->mmap_sem); ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, - NULL, NULL); + vmas, NULL); + /* + * The lifetime of a vaddr_get_pfn() page pin is + * userspace-controlled. In the fs-dax case this could + * lead to indefinite stalls in filesystem operations. + * Disallow attempts to pin fs-dax pages via this + * interface. + */ + if (ret > 0 && vma_is_fsdax(vmas[0])) { + ret = -EOPNOTSUPP; + put_page(page[0]); + } up_read(&mm->mmap_sem); } diff --git a/drivers/vhm/Kconfig b/drivers/vhm/Kconfig new file mode 100644 index 000000000000..e59b9487cd65 --- /dev/null +++ b/drivers/vhm/Kconfig @@ -0,0 +1,17 @@ +config ACRN_VHM + bool "Intel ACRN Hypervisor Virtio and Hypervisor service Module (VHM)" + depends on ACRN + depends on DMA_CMA + depends on PCI_MSI + depends on !VMAP_STACK + default n + ---help--- + This is the Virtio and Hypervisor service Module (VHM) for + Intel ACRN hypervisor. + + It is required for Service OS. + User OS doesn't need to have this config. + + Say Y for SOS and say N for UOS. + + If unsure, say N. diff --git a/drivers/vhm/Makefile b/drivers/vhm/Makefile new file mode 100644 index 000000000000..b4d58a92dcfd --- /dev/null +++ b/drivers/vhm/Makefile @@ -0,0 +1 @@ +obj-y += vhm_mm.o vhm_ioreq.o vhm_vm_mngt.o vhm_msi.o vhm_hypercall.o diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c new file mode 100644 index 000000000000..df87febaf60d --- /dev/null +++ b/drivers/vhm/vhm_hypercall.c @@ -0,0 +1,154 @@ +/* + * virtio and hyperviosr service module (VHM): hypercall wrap + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
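[Editorial aside] The fs-dax guard added to vaddr_get_pfn() in the vfio_iommu_type1.c hunk above follows a common "pin first, inspect the VMA, undo the pin if it cannot be held long-term" pattern; get_user_pages_longterm() applies the same policy internally on the current->mm path. A condensed, self-contained sketch of that pattern (helper name is invented; error handling outside the pin is omitted):

	/*
	 * Sketch: pin one user page for DMA, but refuse fs-dax backings,
	 * whose pin lifetime would otherwise be controlled by userspace
	 * and could stall filesystem operations indefinitely.
	 */
	static int demo_pin_user_page(struct mm_struct *mm, unsigned long vaddr,
				      unsigned int flags, struct page **page)
	{
		struct vm_area_struct *vmas[1];
		int ret;

		down_read(&mm->mmap_sem);
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
					    vmas, NULL);
		if (ret > 0 && vma_is_fsdax(vmas[0])) {
			put_page(page[0]);	/* undo the pin we just took */
			ret = -EOPNOTSUPP;
		}
		up_read(&mm->mmap_sem);
		return ret;
	}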
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include +#include +#include + +inline long hcall_get_api_version(unsigned long api_version) +{ + return acrn_hypercall1(HC_GET_API_VERSION, api_version); +} + +inline long hcall_create_vm(unsigned long vminfo) +{ + return acrn_hypercall1(HC_CREATE_VM, vminfo); +} + +inline long hcall_start_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_START_VM, vmid); +} + +inline long hcall_pause_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_PAUSE_VM, vmid); +} + +inline long hcall_destroy_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_DESTROY_VM, vmid); +} + +inline long hcall_setup_sbuf(unsigned long sbuf_head) +{ + return acrn_hypercall1(HC_SETUP_SBUF, sbuf_head); +} + +inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa) +{ + return acrn_hypercall2(HC_PM_GET_CPU_STATE, cmd, state_pa); +} + +inline long hcall_set_memmap(unsigned long vmid, unsigned long memmap) +{ + return acrn_hypercall2(HC_VM_SET_MEMMAP, vmid, memmap); +} + +inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) +{ + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); +} + +inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu) +{ + return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu); +} + +inline long hcall_assert_irqline(unsigned long vmid, unsigned long irq) +{ + return acrn_hypercall2(HC_ASSERT_IRQLINE, vmid, irq); +} + +inline long hcall_deassert_irqline(unsigned long vmid, unsigned long irq) +{ + return acrn_hypercall2(HC_DEASSERT_IRQLINE, vmid, irq); +} + +inline long hcall_pulse_irqline(unsigned long vmid, unsigned long irq) +{ + return acrn_hypercall2(HC_PULSE_IRQLINE, vmid, irq); +} + +inline long hcall_inject_msi(unsigned long vmid, unsigned long msi) +{ + return acrn_hypercall2(HC_INJECT_MSI, vmid, msi); +} + +inline long hcall_assign_ptdev(unsigned long vmid, unsigned long bdf) +{ + return acrn_hypercall2(HC_ASSIGN_PTDEV, vmid, bdf); +} + +inline long hcall_deassign_ptdev(unsigned long vmid, unsigned long bdf) +{ + return acrn_hypercall2(HC_DEASSIGN_PTDEV, vmid, bdf); +} + +inline long hcall_set_ptdev_intr_info(unsigned long vmid, unsigned long pt_irq) +{ + return acrn_hypercall2(HC_SET_PTDEV_INTR_INFO, vmid, pt_irq); +} + +inline long hcall_reset_ptdev_intr_info(unsigned long vmid, + unsigned long pt_irq) +{ 
+ return acrn_hypercall2(HC_RESET_PTDEV_INTR_INFO, vmid, pt_irq); +} + +inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msi) +{ + return acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vmid, msi); +} + +inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long addr) +{ + return acrn_hypercall2(HC_VM_GPA2HPA, vmid, addr); +} diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c new file mode 100644 index 000000000000..08826c575780 --- /dev/null +++ b/drivers/vhm/vhm_ioreq.c @@ -0,0 +1,914 @@ +/* + * virtio and hyperviosr service module (VHM): ioreq multi client feature + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Jason Chen CJ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ioreq_range { + struct list_head list; + uint32_t type; + long start; + long end; +}; + +struct ioreq_client { + /* client name */ + char name[16]; + /* client id */ + int id; + /* vm this client belongs to */ + unsigned long vmid; + /* list node for this ioreq_client */ + struct list_head list; + /* + * is this client fallback? 
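[Editorial aside] The hcall_* wrappers above all follow one convention: scalar arguments (vmid, irq number) are passed directly, while structured arguments are passed as the physical address of an SOS-kernel buffer. A hedged usage sketch, mirroring the calls made later in this patch (the demo function names are invented; a stack buffer is only safe if the hypervisor consumes it synchronously):

	/* Scalar argument: just the vmid. */
	static int demo_pause_vm(unsigned long vmid)
	{
		if (hcall_pause_vm(vmid) < 0)
			return -EFAULT;
		return 0;
	}

	/* Structured argument: hand over its physical address. */
	static int demo_set_memmap(unsigned long vmid, struct vm_set_memmap *map)
	{
		if (hcall_set_memmap(vmid, virt_to_phys(map)) < 0)
			return -EFAULT;
		return 0;
	}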
+ * there is only one fallback client in a vm - dm + * a fallback client shares IOReq buffer pages + * a fallback client handles all left IOReq not handled by other clients + * a fallback client does not need add io ranges + * a fallback client handles ioreq in its own context + */ + bool fallback; + + bool destroying; + bool kthread_exit; + + /* client covered io ranges - N/A for fallback client */ + struct list_head range_list; + spinlock_t range_lock; + + /* + * this req records the req number this client need handle + */ + atomic_t req; + + /* + * client ioreq handler: + * if client provides a handler, it means vhm need create a kthread + * to call the handler while there is ioreq. + * if client doesn't provide a handler, client should handle ioreq + * in its own context when calls acrn_ioreq_attach_client. + * + * NOTE: for fallback client, there is no ioreq handler. + */ + ioreq_handler_t handler; + bool vhm_create_kthread; + struct task_struct *thread; + wait_queue_head_t wq; + + /* pci bdf trap */ + bool trap_bdf; + int pci_bus; + int pci_dev; + int pci_func; +}; + +#define MAX_CLIENT 64 +static struct ioreq_client *clients[MAX_CLIENT]; +static DECLARE_BITMAP(client_bitmap, MAX_CLIENT); + +static void acrn_ioreq_notify_client(struct ioreq_client *client); + +static inline bool is_range_type(uint32_t type) +{ + return (type == REQ_MMIO || type == REQ_PORTIO || type == REQ_WP); +} + +static int alloc_client(void) +{ + struct ioreq_client *client; + int i; + + i = find_first_zero_bit(client_bitmap, MAX_CLIENT); + if (i >= MAX_CLIENT) + return -ENOMEM; + set_bit(i, client_bitmap); + + client = kzalloc(sizeof(struct ioreq_client), GFP_KERNEL); + if (!client) + return -ENOMEM; + client->id = i; + clients[i] = client; + + return i; +} + +static void free_client(int i) +{ + if (i < MAX_CLIENT && i >= 0) { + if (test_and_clear_bit(i, client_bitmap)) { + kfree(clients[i]); + clients[i] = NULL; + } + } +} + +int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, + char *name) +{ + struct vhm_vm *vm; + struct ioreq_client *client; + unsigned long flags; + int client_id; + + might_sleep(); + + vm = find_get_vm(vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", + vmid); + return -EINVAL; + } + if (unlikely(vm->req_buf == NULL)) { + pr_err("vhm-ioreq: vm[%ld]'s reqbuf is not ready\n", + vmid); + put_vm(vm); + return -EINVAL; + } + + client_id = alloc_client(); + if (unlikely(client_id < 0)) { + pr_err("vhm-ioreq: vm[%ld] failed to alloc ioreq " + "client id\n", vmid); + put_vm(vm); + return -EINVAL; + } + + client = clients[client_id]; + + if (handler) { + client->handler = handler; + client->vhm_create_kthread = true; + } + + client->vmid = vmid; + if (name) + strncpy(client->name, name, 16); + spin_lock_init(&client->range_lock); + INIT_LIST_HEAD(&client->range_list); + init_waitqueue_head(&client->wq); + + spin_lock_irqsave(&vm->ioreq_client_lock, flags); + list_add(&client->list, &vm->ioreq_client_list); + spin_unlock_irqrestore(&vm->ioreq_client_lock, flags); + + put_vm(vm); + + pr_info("vhm-ioreq: created ioreq client %d\n", client_id); + + return client_id; +} + +int acrn_ioreq_create_fallback_client(unsigned long vmid, char *name) +{ + struct vhm_vm *vm; + int client_id; + + vm = find_get_vm(vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", + vmid); + return -EINVAL; + } + + if (unlikely(vm->ioreq_fallback_client > 0)) { + pr_err("vhm-ioreq: there is already fallback " + "client exist 
for vm %ld\n", + vmid); + put_vm(vm); + return -EINVAL; + } + + client_id = acrn_ioreq_create_client(vmid, NULL, name); + if (unlikely(client_id < 0)) { + put_vm(vm); + return -EINVAL; + } + + clients[client_id]->fallback = true; + vm->ioreq_fallback_client = client_id; + + put_vm(vm); + + return client_id; +} + +static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client, + struct vhm_vm *vm) +{ + struct list_head *pos, *tmp; + unsigned long flags; + + /* blocking operation: notify client for cleanup + * if waitqueue not active, it means client is handling request, + * at that time, we need wait client finish its handling. + */ + while (!waitqueue_active(&client->wq) && !client->kthread_exit) + msleep(10); + client->destroying = true; + acrn_ioreq_notify_client(client); + + spin_lock_irqsave(&client->range_lock, flags); + list_for_each_safe(pos, tmp, &client->range_list) { + struct ioreq_range *range = + container_of(pos, struct ioreq_range, list); + list_del(&range->list); + kfree(range); + } + spin_unlock_irqrestore(&client->range_lock, flags); + + spin_lock_irqsave(&vm->ioreq_client_lock, flags); + list_del(&client->list); + spin_unlock_irqrestore(&vm->ioreq_client_lock, flags); + free_client(client->id); + + if (client->id == vm->ioreq_fallback_client) + vm->ioreq_fallback_client = -1; +} + +void acrn_ioreq_destroy_client(int client_id) +{ + struct vhm_vm *vm; + struct ioreq_client *client; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return; + } + + might_sleep(); + + vm = find_get_vm(client->vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", + client->vmid); + return; + } + + acrn_ioreq_destroy_client_pervm(client, vm); + + put_vm(vm); +} + +static void __attribute__((unused)) dump_iorange(struct ioreq_client *client) +{ + struct list_head *pos; + unsigned long flags; + + spin_lock_irqsave(&client->range_lock, flags); + list_for_each(pos, &client->range_list) { + struct ioreq_range *range = + container_of(pos, struct ioreq_range, list); + pr_debug("\tio range: type %d, start 0x%lx, " + "end 0x%lx\n", range->type, range->start, range->end); + } + spin_unlock_irqrestore(&client->range_lock, flags); +} + +/* + * NOTE: here just add iorange entry directly, no check for the overlap.. 
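[Editorial aside] Putting the client API above together, a non-fallback in-kernel backend would typically be wired up as sketched below. The handler prototype is approximated from how client->handler is invoked later in this file, and the client name, port range, and emulation body are invented for illustration:

	/* Hypothetical backend trapping guest port I/O 0x3f8-0x3ff. */
	static int demo_serial_handler(int client_id, unsigned long req_cnt)
	{
		struct vhm_request *req_buf = acrn_ioreq_get_reqbuf(client_id);

		if (!req_buf)
			return -EINVAL;
		/* ... emulate, then acrn_ioreq_complete_request() ... */
		return 0;
	}

	static int demo_setup(unsigned long vmid)
	{
		int client = acrn_ioreq_create_client(vmid, demo_serial_handler,
						      "demo-serial");
		if (client < 0)
			return client;

		acrn_ioreq_add_iorange(client, REQ_PORTIO, 0x3f8, 0x3ff);

		/* a handler was supplied, so this spawns the client kthread */
		return acrn_ioreq_attach_client(client, false);
	}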
+ * please client take care of it + */ +int acrn_ioreq_add_iorange(int client_id, uint32_t type, + long start, long end) +{ + struct ioreq_client *client; + struct ioreq_range *range; + unsigned long flags; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EFAULT; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EFAULT; + } + + if (end < start) { + pr_err("vhm-ioreq: end < start\n"); + return -EFAULT; + } + + might_sleep(); + + range = kzalloc(sizeof(struct ioreq_range), GFP_KERNEL); + if (!range) { + pr_err("vhm-ioreq: failed to alloc ioreq range\n"); + return -ENOMEM; + } + range->type = type; + range->start = start; + range->end = end; + + spin_lock_irqsave(&client->range_lock, flags); + list_add(&range->list, &client->range_list); + spin_unlock_irqrestore(&client->range_lock, flags); + + return 0; +} + +int acrn_ioreq_del_iorange(int client_id, uint32_t type, + long start, long end) +{ + struct ioreq_client *client; + struct ioreq_range *range; + struct list_head *pos, *tmp; + unsigned long flags; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EFAULT; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EFAULT; + } + + if (end < start) { + pr_err("vhm-ioreq: end < start\n"); + return -EFAULT; + } + + might_sleep(); + + spin_lock_irqsave(&client->range_lock, flags); + list_for_each_safe(pos, tmp, &client->range_list) { + range = container_of(pos, struct ioreq_range, list); + if (range->type == type) { + if (is_range_type(type)) { + if (start == range->start && + end == range->end) { + list_del(&range->list); + kfree(range); + break; + } + } else { + list_del(&range->list); + kfree(range); + break; + } + } + } + spin_unlock_irqrestore(&client->range_lock, flags); + + return 0; +} + +static inline bool is_destroying(struct ioreq_client *client) +{ + if (client) + return client->destroying; + else + return true; +} + +static inline bool has_pending_request(struct ioreq_client *client) +{ + if (client) + return (atomic_read(&client->req) > 0); + else + return false; +} + +struct vhm_request *acrn_ioreq_get_reqbuf(int client_id) +{ + struct ioreq_client *client; + struct vhm_vm *vm; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return NULL; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return NULL; + } + vm = find_get_vm(client->vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", + client->vmid); + return NULL; + } + + if (vm->req_buf == NULL) { + pr_warn("vhm-ioreq: the req buf page not ready yet " + "for vmid %ld\n", client->vmid); + } + put_vm(vm); + return (struct vhm_request *)vm->req_buf; +} + +static int ioreq_client_thread(void *data) +{ + struct ioreq_client *client; + int ret, client_id = (unsigned long)data; + + while (1) { + client = clients[client_id]; + if (is_destroying(client)) { + pr_info("vhm-ioreq: client destroying->stop thread\n"); + break; + } + if (has_pending_request(client)) { + if (client->handler) { + ret = client->handler(client->id, + client->req.counter); + if (ret < 0) + BUG(); + } else { + pr_err("vhm-ioreq: no ioreq handler\n"); + break; + } + } else + wait_event_freezable(client->wq, + (has_pending_request(client) || 
+ is_destroying(client))); + } + + return 0; +} + +int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop) +{ + struct ioreq_client *client; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EFAULT; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EFAULT; + } + + if (client->vhm_create_kthread) { + if (client->thread) { + pr_warn("vhm-ioreq: kthread already exist" + " for client %s\n", client->name); + return 0; + } + client->thread = kthread_run(ioreq_client_thread, + (void *)(unsigned long)client_id, + "ioreq_client[%ld]:%s", + client->vmid, client->name); + if (IS_ERR(client->thread)) { + pr_err("vhm-ioreq: failed to run kthread " + "for client %s\n", client->name); + return -ENOMEM; + } + } else { + might_sleep(); + + if (check_kthread_stop) { + wait_event_freezable(client->wq, + (kthread_should_stop() || + has_pending_request(client) || + is_destroying(client))); + if (kthread_should_stop()) + client->kthread_exit = true; + } else { + wait_event_freezable(client->wq, + (has_pending_request(client) || + is_destroying(client))); + } + + if (is_destroying(client)) + return 1; + } + + return 0; +} + +void acrn_ioreq_intercept_bdf(int client_id, int bus, int dev, int func) +{ + struct ioreq_client *client; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return; + } + client->trap_bdf = true; + client->pci_bus = bus; + client->pci_dev = dev; + client->pci_func = func; +} + +void acrn_ioreq_unintercept_bdf(int client_id) +{ + struct ioreq_client *client; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return; + } + client->trap_bdf = false; + client->pci_bus = -1; + client->pci_dev = -1; + client->pci_func = -1; +} + +static void acrn_ioreq_notify_client(struct ioreq_client *client) +{ + /* if client thread is in waitqueue, wake up it */ + if (waitqueue_active(&client->wq)) + wake_up_interruptible(&client->wq); +} + +static bool req_in_range(struct ioreq_range *range, struct vhm_request *req) +{ + bool ret = false; + + if (range->type == req->type) { + switch (req->type) { + case REQ_MMIO: + case REQ_WP: + { + if (req->reqs.mmio_request.address >= range->start && + (req->reqs.mmio_request.address + + req->reqs.mmio_request.size - 1) <= range->end) + ret = true; + break; + } + case REQ_PORTIO: { + if (req->reqs.pio_request.address >= range->start && + (req->reqs.pio_request.address + + req->reqs.pio_request.size - 1) <= range->end) + ret = true; + break; + } + + default: + ret = false; + break; + } + } + + return ret; +} + +static bool is_cfg_addr(struct vhm_request *req) +{ + return (req->type == REQ_PORTIO && + (req->reqs.pio_request.address >= 0xcf8 && + req->reqs.pio_request.address < 0xcf8+4)); +} + +static bool is_cfg_data(struct vhm_request *req) +{ + return (req->type == REQ_PORTIO && + (req->reqs.pio_request.address >= 0xcfc && + req->reqs.pio_request.address < 0xcfc+4)); +} + +static int cached_bus; +static int cached_dev; +static int cached_func; +static int cached_reg; +static int cached_enable; +#define PCI_REGMAX 255 /* highest supported config register addr.*/ +#define 
PCI_FUNCMAX 7 /* highest supported function number */ +#define PCI_SLOTMAX 31 /* highest supported slot number */ +#define PCI_BUSMAX 255 /* highest supported bus number */ +#define CONF1_ENABLE 0x80000000ul +static int handle_cf8cfc(struct vhm_vm *vm, struct vhm_request *req, int vcpu) +{ + int req_handled = 0; + + /*XXX: like DM, assume cfg address write is size 4 */ + if (is_cfg_addr(req)) { + if (req->reqs.pio_request.direction == REQUEST_WRITE) { + if (req->reqs.pio_request.size == 4) { + int value = req->reqs.pio_request.value; + + cached_bus = (value >> 16) & PCI_BUSMAX; + cached_dev = (value >> 11) & PCI_SLOTMAX; + cached_func = (value >> 8) & PCI_FUNCMAX; + cached_reg = value & PCI_REGMAX; + cached_enable = + (value & CONF1_ENABLE) == CONF1_ENABLE; + req_handled = 1; + } + } else { + if (req->reqs.pio_request.size == 4) { + req->reqs.pio_request.value = + (cached_bus << 16) | + (cached_dev << 11) | (cached_func << 8) + | cached_reg; + if (cached_enable) + req->reqs.pio_request.value |= + CONF1_ENABLE; + req_handled = 1; + } + } + } else if (is_cfg_data(req)) { + if (!cached_enable) { + if (req->reqs.pio_request.direction == REQUEST_READ) + req->reqs.pio_request.value = 0xffffffff; + req_handled = 1; + } else { + /* pci request is same as io request at top */ + int offset = req->reqs.pio_request.address - 0xcfc; + + req->type = REQ_PCICFG; + req->reqs.pci_request.bus = cached_bus; + req->reqs.pci_request.dev = cached_dev; + req->reqs.pci_request.func = cached_func; + req->reqs.pci_request.reg = cached_reg + offset; + } + } + + if (req_handled) { + req->processed = REQ_STATE_SUCCESS; + if (hcall_notify_req_finish(vm->vmid, vcpu) < 0) { + pr_err("vhm-ioreq: failed to " + "notify request finished !\n"); + return -EFAULT; + } + } + + return req_handled; +} + +static bool bdf_match(struct ioreq_client *client) +{ + return (client->trap_bdf && + client->pci_bus == cached_bus && + client->pci_dev == cached_dev && + client->pci_func == cached_func); +} + +static struct ioreq_client *acrn_ioreq_find_client_by_request(struct vhm_vm *vm, + struct vhm_request *req) +{ + struct list_head *pos, *range_pos; + struct ioreq_client *client; + struct ioreq_client *target_client = NULL, *fallback_client = NULL; + struct ioreq_range *range; + bool found = false; + + spin_lock(&vm->ioreq_client_lock); + list_for_each(pos, &vm->ioreq_client_list) { + client = container_of(pos, struct ioreq_client, list); + + if (client->fallback) { + fallback_client = client; + continue; + } + + if (req->type == REQ_PCICFG) { + if (bdf_match(client)) { /* bdf match client */ + target_client = client; + break; + } else /* other or fallback client */ + continue; + } + + spin_lock(&client->range_lock); + list_for_each(range_pos, &client->range_list) { + range = + container_of(range_pos, struct ioreq_range, list); + if (req_in_range(range, req)) { + found = true; + target_client = client; + break; + } + } + spin_unlock(&client->range_lock); + + if (found) + break; + } + spin_unlock(&vm->ioreq_client_lock); + + if (target_client) + return target_client; + + if (fallback_client) + return fallback_client; + + return NULL; +} + +int acrn_ioreq_distribute_request(struct vhm_vm *vm) +{ + struct vhm_request *req; + struct list_head *pos; + struct ioreq_client *client; + int i; + + /* TODO: replace VHM_REQUEST_MAX with vcpu num get at runtime */ + for (i = 0; i < VHM_REQUEST_MAX; i++) { + req = vm->req_buf->req_queue + i; + if (req->valid && (req->processed == REQ_STATE_PENDING)) { + if (handle_cf8cfc(vm, req, i)) + continue; + 
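[Editorial aside] handle_cf8cfc() above caches the pieces of the classic CONF1 address written to port 0xcf8; the bit layout it decodes can be checked standalone (address value chosen only for illustration):

	#include <stdint.h>
	#include <assert.h>

	#define CONF1_ENABLE 0x80000000u

	int main(void)
	{
		/* enable | bus 0 | dev 3 | func 0 | reg 0x10 (BAR0) */
		uint32_t addr = CONF1_ENABLE | (0u << 16) | (3u << 11) |
				(0u << 8) | 0x10u;

		assert(((addr >> 16) & 255) == 0);	/* bus  (PCI_BUSMAX)  */
		assert(((addr >> 11) & 31) == 3);	/* dev  (PCI_SLOTMAX) */
		assert(((addr >> 8) & 7) == 0);		/* func (PCI_FUNCMAX) */
		assert((addr & 255) == 0x10);		/* reg  (PCI_REGMAX)  */
		assert((addr & CONF1_ENABLE) == CONF1_ENABLE);
		return 0;
	}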
client = acrn_ioreq_find_client_by_request(vm, req); + if (client == NULL) { + pr_err("vhm-ioreq: failed to " + "find ioreq client -> " + "BUG\n"); + BUG(); + } else { + req->processed = REQ_STATE_PROCESSING; + req->client = client->id; + atomic_inc(&client->req); + } + } + } + + spin_lock(&vm->ioreq_client_lock); + list_for_each(pos, &vm->ioreq_client_list) { + client = container_of(pos, struct ioreq_client, list); + if (has_pending_request(client)) + acrn_ioreq_notify_client(client); + } + spin_unlock(&vm->ioreq_client_lock); + + return 0; +} + +int acrn_ioreq_complete_request(int client_id, uint64_t vcpu) +{ + struct ioreq_client *client; + int ret; + + if (client_id < 0 || client_id >= MAX_CLIENT) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EINVAL; + } + client = clients[client_id]; + if (!client) { + pr_err("vhm-ioreq: no client for id %d\n", client_id); + return -EINVAL; + } + + atomic_dec(&client->req); + ret = hcall_notify_req_finish(client->vmid, vcpu); + if (ret < 0) { + pr_err("vhm-ioreq: failed to notify request finished !\n"); + return -EFAULT; + } + + return 0; +} + +unsigned int vhm_dev_poll(struct file *filep, poll_table *wait) +{ + struct vhm_vm *vm = filep->private_data; + struct ioreq_client *fallback_client; + unsigned int ret = 0; + + if (vm == NULL || vm->req_buf == NULL || + vm->ioreq_fallback_client <= 0) { + pr_err("vhm: invalid VM !\n"); + ret = POLLERR; + return ret; + } + + fallback_client = clients[vm->ioreq_fallback_client]; + if (!fallback_client) { + pr_err("vhm-ioreq: no client for id %d\n", + vm->ioreq_fallback_client); + return -EINVAL; + } + + poll_wait(filep, &fallback_client->wq, wait); + if (has_pending_request(fallback_client) || + is_destroying(fallback_client)) + ret = POLLIN | POLLRDNORM; + + return ret; +} + +int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma) +{ + struct acrn_set_ioreq_buffer set_buffer; + struct page *page; + int ret; + + if (vm->req_buf) + BUG(); + + ret = get_user_pages_fast(vma, 1, 1, &page); + if (unlikely(ret != 1) || (page == NULL)) { + pr_err("vhm-ioreq: failed to pin request buffer!\n"); + return -ENOMEM; + } + + vm->req_buf = page_address(page); + vm->pg = page; + + set_buffer.req_buf = page_to_phys(page); + + ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(&set_buffer)); + if (ret < 0) { + pr_err("vhm-ioreq: failed to set request buffer !\n"); + return -EFAULT; + } + + /* reserve 0, let client_id start from 1 */ + set_bit(0, client_bitmap); + + pr_info("vhm-ioreq: init request buffer @ %p!\n", + vm->req_buf); + + return 0; +} + +void acrn_ioreq_free(struct vhm_vm *vm) +{ + struct list_head *pos, *tmp; + + list_for_each_safe(pos, tmp, &vm->ioreq_client_list) { + struct ioreq_client *client = + container_of(pos, struct ioreq_client, list); + acrn_ioreq_destroy_client_pervm(client, vm); + } + + if (vm->req_buf && vm->pg) { + put_page(vm->pg); + vm->pg = NULL; + vm->req_buf = NULL; + } +} diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c new file mode 100644 index 000000000000..be6a47afad9a --- /dev/null +++ b/drivers/vhm/vhm_mm.c @@ -0,0 +1,424 @@ +/* + * virtio and hyperviosr service module (VHM): memory map + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. 
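[Editorial aside] A hedged sketch of what a client handler body might do with the requests that acrn_ioreq_distribute_request() above routes to it. The state transitions are inferred from handle_cf8cfc() and the distribution loop, and the sketch assumes the request queue starts at the beginning of the shared page, as the cast in acrn_ioreq_get_reqbuf() suggests:

	static int demo_handle_requests(int client_id)
	{
		struct vhm_request *req_buf = acrn_ioreq_get_reqbuf(client_id);
		int vcpu;

		if (!req_buf)
			return -EINVAL;

		for (vcpu = 0; vcpu < VHM_REQUEST_MAX; vcpu++) {
			struct vhm_request *req = &req_buf[vcpu];

			if (!req->valid || req->client != client_id ||
			    req->processed != REQ_STATE_PROCESSING)
				continue;

			/* ... emulate req->reqs.pio_request / mmio_request ... */

			req->processed = REQ_STATE_SUCCESS;
			acrn_ioreq_complete_request(client_id, vcpu);
		}
		return 0;
	}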
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Jason Zeng + * Jason Chen CJ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct guest_memseg { + struct list_head list; + u64 vm0_gpa; + size_t len; + u64 gpa; + long vma_count; +}; + +static u64 _alloc_memblk(struct device *dev, size_t len) +{ + unsigned int count; + struct page *page; + + if (!PAGE_ALIGNED(len)) { + pr_warn("alloc size of memblk must be page aligned\n"); + return 0ULL; + } + + count = PAGE_ALIGN(len) >> PAGE_SHIFT; + page = dma_alloc_from_contiguous(dev, count, 1, GFP_KERNEL); + if (page) + return page_to_phys(page); + else + return 0ULL; +} + +static bool _free_memblk(struct device *dev, u64 vm0_gpa, size_t len) +{ + unsigned int count = PAGE_ALIGN(len) >> PAGE_SHIFT; + struct page *page = pfn_to_page(vm0_gpa >> PAGE_SHIFT); + + return dma_release_from_contiguous(dev, page, count); +} + +int alloc_guest_memseg(struct vhm_vm *vm, struct vm_memseg *memseg) +{ + struct guest_memseg *seg; + u64 vm0_gpa; + int max_gfn; + + seg = kzalloc(sizeof(struct guest_memseg), GFP_KERNEL); + if (seg == NULL) + return -ENOMEM; + + vm0_gpa = _alloc_memblk(vm->dev, memseg->len); + if (vm0_gpa == 0ULL) { + kfree(seg); + return -ENOMEM; + } + + seg->vm0_gpa = vm0_gpa; + seg->len = memseg->len; + seg->gpa = memseg->gpa; + + max_gfn = (seg->gpa + seg->len) >> PAGE_SHIFT; + if (vm->max_gfn < max_gfn) + vm->max_gfn = max_gfn; + + pr_info("VHM: alloc memseg with len=0x%lx, vm0_gpa=0x%llx," + " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", + seg->len, seg->vm0_gpa, seg->gpa, vm->max_gfn); + + seg->vma_count = 0; + mutex_lock(&vm->seg_lock); + list_add(&seg->list, &vm->memseg_list); + mutex_unlock(&vm->seg_lock); + + return 0; +} + +static int _mem_set_memmap(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right, + unsigned int type) +{ + struct vm_set_memmap set_memmap; + + set_memmap.type = type; + set_memmap.remote_gpa = guest_gpa; + set_memmap.vm0_gpa = host_gpa; + set_memmap.length = len; + set_memmap.prot = ((mem_type & MEM_TYPE_MASK) | + (mem_access_right & MEM_ACCESS_RIGHT_MASK)); + + /* hypercall to notify hv the guest EPT setting*/ + if (hcall_set_memmap(vmid, + virt_to_phys(&set_memmap)) < 0) { + pr_err("vhm: failed to set memmap %ld!\n", vmid); + return -EFAULT; + } + + pr_debug("VHM: set ept for mem map[type=0x%x, host_gpa=0x%lx," + "guest_gpa=0x%lx,len=0x%lx, prot=0x%x]\n", + type, host_gpa, guest_gpa, len, set_memmap.prot); + + return 0; +} + +int set_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned mem_access_right) +{ + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, + mem_type, mem_access_right, MAP_MMIO); +} + +int unset_mmio_map(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len) +{ + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, + 0, 0, MAP_UNMAP); +} + +int update_memmap_attr(unsigned long vmid, unsigned long guest_gpa, + unsigned long host_gpa, unsigned long len, + unsigned int mem_type, unsigned int mem_access_right) +{ + return _mem_set_memmap(vmid, guest_gpa, host_gpa, len, + mem_type, mem_access_right, MAP_MEM); +} + +int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) +{ + struct guest_memseg *seg = NULL; + unsigned int type; + unsigned int 
mem_type, mem_access_right; + unsigned long guest_gpa, host_gpa; + + mutex_lock(&vm->seg_lock); + + if (memmap->type == VM_SYSMEM) { + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa == memmap->gpa + && seg->len == memmap->len) + break; + } + if (&seg->list == &vm->memseg_list) { + mutex_unlock(&vm->seg_lock); + return -EINVAL; + } + guest_gpa = seg->gpa; + host_gpa = seg->vm0_gpa; + mem_type = MEM_TYPE_WB; + mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); + type = MAP_MEM; + } else { + guest_gpa = memmap->gpa; + host_gpa = acrn_hpa2gpa(memmap->hpa); + mem_type = MEM_TYPE_UC; + mem_access_right = (memmap->prot & MEM_ACCESS_RIGHT_MASK); + type = MAP_MMIO; + } + + if (_mem_set_memmap(vm->vmid, guest_gpa, host_gpa, memmap->len, + mem_type, mem_access_right, type) < 0) { + pr_err("vhm: failed to set memmap %ld!\n", vm->vmid); + mutex_unlock(&vm->seg_lock); + return -EFAULT; + } + + mutex_unlock(&vm->seg_lock); + + return 0; +} + +void free_guest_mem(struct vhm_vm *vm) +{ + struct guest_memseg *seg; + + mutex_lock(&vm->seg_lock); + while (!list_empty(&vm->memseg_list)) { + seg = list_first_entry(&vm->memseg_list, + struct guest_memseg, list); + if (!_free_memblk(vm->dev, seg->vm0_gpa, seg->len)) + pr_warn("failed to free memblk\n"); + list_del(&seg->list); + kfree(seg); + } + mutex_unlock(&vm->seg_lock); +} + +int check_guest_mem(struct vhm_vm *vm) +{ + struct guest_memseg *seg; + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->vma_count == 0) + continue; + + mutex_unlock(&vm->seg_lock); + return -EAGAIN; + } + mutex_unlock(&vm->seg_lock); + return 0; +} + +static void guest_vm_open(struct vm_area_struct *vma) +{ + struct vhm_vm *vm = vma->vm_file->private_data; + struct guest_memseg *seg = vma->vm_private_data; + + mutex_lock(&vm->seg_lock); + seg->vma_count++; + mutex_unlock(&vm->seg_lock); +} + +static void guest_vm_close(struct vm_area_struct *vma) +{ + struct vhm_vm *vm = vma->vm_file->private_data; + struct guest_memseg *seg = vma->vm_private_data; + + mutex_lock(&vm->seg_lock); + seg->vma_count--; + BUG_ON(seg->vma_count < 0); + mutex_unlock(&vm->seg_lock); +} + +static const struct vm_operations_struct guest_vm_ops = { + .open = guest_vm_open, + .close = guest_vm_close, +}; + +static int do_mmap_guest(struct file *file, + struct vm_area_struct *vma, struct guest_memseg *seg) +{ + struct page *page; + size_t size = seg->len; + unsigned long pfn; + unsigned long start_addr; + + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTCOPY; + pfn = seg->vm0_gpa >> PAGE_SHIFT; + start_addr = vma->vm_start; + while (size > 0) { + page = pfn_to_page(pfn); + if (vm_insert_page(vma, start_addr, page)) + return -EINVAL; + size -= PAGE_SIZE; + start_addr += PAGE_SIZE; + pfn++; + } + seg->vma_count++; + vma->vm_ops = &guest_vm_ops; + vma->vm_private_data = (void *)seg; + + pr_info("VHM: mmap for memseg [seg vm0_gpa=0x%llx, gpa=0x%llx] " + "to start addr 0x%lx\n", + seg->vm0_gpa, seg->gpa, start_addr); + + return 0; +} + +int vhm_dev_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct vhm_vm *vm = file->private_data; + struct guest_memseg *seg; + u64 offset = vma->vm_pgoff << PAGE_SHIFT; + size_t len = vma->vm_end - vma->vm_start; + int ret; + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa != offset || seg->len != len) + continue; + + ret = do_mmap_guest(file, vma, seg); + mutex_unlock(&vm->seg_lock); + return ret; + } + mutex_unlock(&vm->seg_lock); + return -EINVAL; 
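[Editorial aside] _mem_set_memmap() above packs the EPT attributes as (mem_type | access_right) before the hypercall. A hedged sketch of how a caller might expose a passthrough MMIO window with set_mmio_map(); the guest address, length, and MEM_ACCESS_RW are placeholders (only MEM_ACCESS_RIGHT_MASK is visible in this patch):

	/* Sketch: expose one 4 KiB device MMIO page at a guest physical
	 * address of the caller's choosing.
	 */
	static int demo_map_mmio(unsigned long vmid, unsigned long host_gpa)
	{
		return set_mmio_map(vmid,
				    0xd0000000UL,	/* guest_gpa (example)   */
				    host_gpa,		/* vm0 (SOS) gpa         */
				    0x1000UL,		/* length                */
				    MEM_TYPE_UC,	/* uncached device mem   */
				    MEM_ACCESS_RW);	/* placeholder rights    */
	}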
+} + +static void *do_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) +{ + struct guest_memseg *seg; + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa > guest_phys || + guest_phys >= seg->gpa + seg->len) + continue; + + if (guest_phys + size > seg->gpa + seg->len) { + mutex_unlock(&vm->seg_lock); + return NULL; + } + + mutex_unlock(&vm->seg_lock); + return phys_to_virt(seg->vm0_gpa + guest_phys - seg->gpa); + } + mutex_unlock(&vm->seg_lock); + return NULL; +} + +void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) +{ + struct vhm_vm *vm; + void *ret; + + vm = find_get_vm(vmid); + if (vm == NULL) + return NULL; + + ret = do_map_guest_phys(vm, guest_phys, size); + + put_vm(vm); + + return ret; +} +EXPORT_SYMBOL(map_guest_phys); + +static int do_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys) +{ + struct guest_memseg *seg; + + mutex_lock(&vm->seg_lock); + list_for_each_entry(seg, &vm->memseg_list, list) { + if (seg->gpa <= guest_phys && + guest_phys < seg->gpa + seg->len) { + mutex_unlock(&vm->seg_lock); + return 0; + } + } + mutex_unlock(&vm->seg_lock); + + return -ESRCH; +} + +int unmap_guest_phys(unsigned long vmid, u64 guest_phys) +{ + struct vhm_vm *vm; + int ret; + + vm = find_get_vm(vmid); + if (vm == NULL) { + pr_warn("vm_list corrupted\n"); + return -ESRCH; + } + + ret = do_unmap_guest_phys(vm, guest_phys); + put_vm(vm); + return ret; +} +EXPORT_SYMBOL(unmap_guest_phys); diff --git a/drivers/vhm/vhm_msi.c b/drivers/vhm/vhm_msi.c new file mode 100644 index 000000000000..73affd60fc46 --- /dev/null +++ b/drivers/vhm/vhm_msi.c @@ -0,0 +1,135 @@ +/* + * virtio and hyperviosr service module (VHM): msi paravirt + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
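[Editorial aside] map_guest_phys()/unmap_guest_phys() above give other SOS drivers a kernel-virtual view of UOS memory. A short usage sketch (the value being read and the helper name are hypothetical):

	/* Sketch: peek at a guest-resident word, then drop the mapping. */
	static int demo_read_guest_u32(unsigned long vmid, u64 guest_phys, u32 *out)
	{
		u32 *va = map_guest_phys(vmid, guest_phys, sizeof(*va));

		if (!va)
			return -EINVAL;

		*out = *va;		/* direct access, no copy needed */
		unmap_guest_phys(vmid, guest_phys);
		return 0;
	}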
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Jason Chen CJ + * + */ + +#include +#include +#include +#include + +#include "../pci/pci.h" + +static struct msi_msg acrn_notify_msix_remap(struct msi_desc *entry, + struct msi_msg *msg) +{ + volatile struct acrn_vm_pci_msix_remap notify; + struct pci_dev *dev = msi_desc_to_pci_dev(entry); + struct msi_msg remapped_msg = *msg; + u16 msgctl; + int ret; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); + + notify.msi_ctl = msgctl; + notify.virt_bdf = (dev->bus->number << 8) | dev->devfn; + notify.msi_addr = msg->address_hi; + notify.msi_addr <<= 32; + notify.msi_addr |= msg->address_lo; + notify.msi_data = msg->data; + notify.msix = !!entry->msi_attrib.is_msix; + + if (notify.msix) + notify.msix_entry_index = entry->msi_attrib.entry_nr; + else + notify.msix_entry_index = 0; + + ret = hcall_remap_pci_msix(0, virt_to_phys(¬ify)); + if (ret < 0) + dev_err(&dev->dev, "Failed to notify MSI/x change to HV\n"); + else { + remapped_msg.address_hi = (unsigned int)(notify.msi_addr >> 32); + remapped_msg.address_lo = (unsigned int)notify.msi_addr; + remapped_msg.data = notify.msi_data; + } + return remapped_msg; +} + +void acrn_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +{ + struct pci_dev *dev = msi_desc_to_pci_dev(entry); + struct msi_msg fmsg; + + if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { + /* Don't touch the hardware now */ + } else if (entry->msi_attrib.is_msix) { + void __iomem *base = pci_msix_desc_addr(entry); + + fmsg = acrn_notify_msix_remap(entry, msg); + + writel(fmsg.address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); + writel(fmsg.address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(fmsg.data, base + PCI_MSIX_ENTRY_DATA); + } else { + int pos = dev->msi_cap; + u16 msgctl; + + fmsg = acrn_notify_msix_remap(entry, msg); + + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); + msgctl &= ~PCI_MSI_FLAGS_QSIZE; + msgctl |= entry->msi_attrib.multiple << 4; + pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl); + + pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, + fmsg.address_lo); + if (entry->msi_attrib.is_64) { + pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, + fmsg.address_hi); + pci_write_config_word(dev, pos + PCI_MSI_DATA_64, + fmsg.data); + } else { + pci_write_config_word(dev, pos + PCI_MSI_DATA_32, + fmsg.data); + } + } + entry->msg = *msg; +} diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c new file mode 100644 index 000000000000..8f1a00777dd4 --- /dev/null +++ b/drivers/vhm/vhm_vm_mngt.c @@ -0,0 +1,162 @@ +/* + * virtio and hyperviosr service module (VHM): vm management + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Liang Ding + * Jason Zeng + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +LIST_HEAD(vhm_vm_list); +DEFINE_MUTEX(vhm_vm_list_lock); + +struct vhm_vm *find_get_vm(unsigned long vmid) +{ + struct vhm_vm *vm; + + mutex_lock(&vhm_vm_list_lock); + list_for_each_entry(vm, &vhm_vm_list, list) { + if (vm->vmid == vmid) { + vm->refcnt++; + mutex_unlock(&vhm_vm_list_lock); + return vm; + } + } + mutex_unlock(&vhm_vm_list_lock); + return NULL; +} + +void put_vm(struct vhm_vm *vm) +{ + mutex_lock(&vhm_vm_list_lock); + vm->refcnt--; + if (vm->refcnt == 0) { + list_del(&vm->list); + free_guest_mem(vm); + acrn_ioreq_free(vm); + kfree(vm); + pr_info("vhm: freed vm\n"); + } + mutex_unlock(&vhm_vm_list_lock); +} + +int vhm_get_vm_info(unsigned long vmid, struct vm_info *info) +{ + struct vhm_vm *vm; + + vm = find_get_vm(vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm: failed to find vm from vmid %ld\n", + vmid); + return -EINVAL; + } + /*TODO: hardcode max_vcpu here, should be fixed by getting at runtime */ + info->max_vcpu = 4; + info->max_gfn = vm->max_gfn; + put_vm(vm); + return 0; +} + +int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned long msi_data) +{ + struct acrn_msi_entry msi; + int ret; + + /* msi_addr: addr[19:12] with dest vcpu id */ + /* msi_data: data[7:0] with vector */ + msi.msi_addr = msi_addr; + msi.msi_data = msi_data; + ret = hcall_inject_msi(vmid, virt_to_phys(&msi)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + return 0; +} + +unsigned long vhm_vm_gpa2hpa(unsigned long vmid, unsigned long gpa) +{ + struct vm_gpa2hpa gpa2hpa; + int ret; + + gpa2hpa.gpa = gpa; + gpa2hpa.hpa = -1UL; /* Init value as invalid gpa */ + ret = hcall_vm_gpa2hpa(vmid, virt_to_phys(&gpa2hpa)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + mb(); + return gpa2hpa.hpa; +} + +void vm_list_add(struct list_head *list) +{ + list_add(list, &vhm_vm_list); +} + +void vm_mutex_lock(struct mutex *mlock) +{ + mutex_lock(mlock); +} + +void vm_mutex_unlock(struct mutex *mlock) +{ + mutex_unlock(mlock); +} diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 58585ec8699e..b0d606b2d06c 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -622,7 +622,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) if (!len && vq->busyloop_timeout) { /* Both tx vq and rx socket were polled here */ - mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, 1); vhost_disable_notify(&net->dev, vq); preempt_disable(); @@ -755,7 +755,7 @@ static void handle_rx(struct vhost_net *net) struct iov_iter fixup; __virtio16 num_buffers; - mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, 0); sock = vq->private_data; if (!sock) goto out; @@ -782,16 +782,6 @@ static void handle_rx(struct vhost_net *net) /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) goto out; - if (nvq->rx_array) - msg.msg_control = vhost_net_buf_consume(&nvq->rxq); - /* On overrun, truncate and discard */ - if (unlikely(headcount > UIO_MAXIOV)) { - iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); - err = sock->ops->recvmsg(sock, &msg, - 1, MSG_DONTWAIT | MSG_TRUNC); - pr_debug("Discarded rx packet: len %zd\n", sock_len); - continue; - } /* OK, now we need to know about added descriptors. */ if (!headcount) { if (unlikely(vhost_enable_notify(&net->dev, vq))) { @@ -804,6 +794,16 @@ static void handle_rx(struct vhost_net *net) * they refilled. 
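[Editorial aside] Every lookup in vhm_vm_mngt.c above pairs find_get_vm() with put_vm(); a minimal sketch of the pattern a new caller would follow (the query itself is invented for illustration):

	/* The reference taken by find_get_vm() must always be dropped,
	 * including on early-error paths, or the VM can never be freed.
	 */
	static int demo_query_max_gfn(unsigned long vmid, int *max_gfn)
	{
		struct vhm_vm *vm = find_get_vm(vmid);

		if (!vm)
			return -EINVAL;

		*max_gfn = vm->max_gfn;
		put_vm(vm);		/* drops refcnt; frees the VM at zero */
		return 0;
	}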
*/ goto out; } + if (nvq->rx_array) + msg.msg_control = vhost_net_buf_consume(&nvq->rxq); + /* On overrun, truncate and discard */ + if (unlikely(headcount > UIO_MAXIOV)) { + iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); + err = sock->ops->recvmsg(sock, &msg, + 1, MSG_DONTWAIT | MSG_TRUNC); + pr_debug("Discarded rx packet: len %zd\n", sock_len); + continue; + } /* We don't need to be notified again. */ iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); fixup = msg.msg_iter; @@ -1212,6 +1212,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) } vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); + vhost_dev_stop(&n->dev); vhost_dev_reset_owner(&n->dev, umem); vhost_net_vq_reset(n); done: diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af5..e47c5bc3ddca 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -688,6 +688,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, struct scatterlist *sg, int sg_count) { size_t off = iter->iov_offset; + struct scatterlist *p = sg; int i, ret; for (i = 0; i < iter->nr_segs; i++) { @@ -696,8 +697,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); if (ret < 0) { - for (i = 0; i < sg_count; i++) { - struct page *page = sg_page(&sg[i]); + while (p < sg) { + struct page *page = sg_page(p++); if (page) put_page(page); } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d6dbb28245e6..8e3ca4400766 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -213,8 +213,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); if (mask & POLLERR) { - if (poll->wqh) - remove_wait_queue(poll->wqh, &poll->wait); + vhost_poll_stop(poll); ret = -EINVAL; } @@ -757,7 +756,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, struct iov_iter t; void __user *uaddr = vhost_vq_meta_fetch(vq, (u64)(uintptr_t)to, size, - VHOST_ADDR_DESC); + VHOST_ADDR_USED); if (uaddr) return __copy_to_user(uaddr, from, size); @@ -904,7 +903,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d) { int i = 0; for (i = 0; i < d->nvqs; ++i) - mutex_lock(&d->vqs[i]->mutex); + mutex_lock_nested(&d->vqs[i]->mutex, i); } static void vhost_dev_unlock_vqs(struct vhost_dev *d) @@ -1253,14 +1252,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, /* Caller should have vq mutex and device mutex */ int vhost_vq_access_ok(struct vhost_virtqueue *vq) { - if (vq->iotlb) { - /* When device IOTLB was used, the access validation - * will be validated during prefetching. 
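[Editorial aside] The vhost_scsi_iov_to_sgl() change above narrows the error-path cleanup from "all sg_count entries" to "only the entries actually mapped so far". The same pattern in isolation (a sketch; 'fill' stands in for vhost_scsi_map_to_sgl() and returns how many entries it populated):

	static int demo_fill_sgl(struct scatterlist *sg, int nr_segs,
				 int (*fill)(struct scatterlist *sg, int seg))
	{
		struct scatterlist *p = sg;	/* first entry we own */
		int i, ret;

		for (i = 0; i < nr_segs; i++) {
			ret = fill(sg, i);
			if (ret < 0)
				goto unwind;
			sg += ret;		/* 'ret' entries were populated */
		}
		return sg - p;

	unwind:
		while (p < sg) {		/* release only populated entries */
			struct page *page = sg_page(p++);

			if (page)
				put_page(page);
		}
		return ret;
	}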
- */ + if (!vq_log_access_ok(vq, vq->log_base)) + return 0; + + /* Access validation occurs at prefetch time with IOTLB */ + if (vq->iotlb) return 1; - } - return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) && - vq_log_access_ok(vq, vq->log_base); + + return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index d7c239ea3d09..f5574060f9c8 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data) struct spi_message msg; struct spi_transfer xfer = { .len = 1, - .cs_change = 1, + .cs_change = 0, .tx_buf = lcd->buf, }; diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 9bd17682655a..1c2289ddd555 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) { unsigned int lth = pb->lth_brightness; - int duty_cycle; + u64 duty_cycle; if (pb->levels) duty_cycle = pb->levels[brightness]; else duty_cycle = brightness; - return (duty_cycle * (pb->period - lth) / pb->scale) + lth; + duty_cycle *= pb->period - lth; + do_div(duty_cycle, pb->scale); + + return duty_cycle + lth; } static int pwm_backlight_update_status(struct backlight_device *bl) diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index eab1f842f9c0..e4bd63e9db6b 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi) spi_message_init(m); - x->cs_change = 1; + x->cs_change = 0; x->tx_buf = &lcd->buf[0]; spi_message_add_tail(x, m); diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 6a41ea92737a..4dc5ee8debeb 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) struct spi_message msg; struct spi_transfer xfer = { .len = 1, - .cs_change = 1, + .cs_change = 0, .tx_buf = buf, }; diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c index 9269d5685239..b90ef96e43d6 100644 --- a/drivers/video/console/dummycon.c +++ b/drivers/video/console/dummycon.c @@ -67,7 +67,6 @@ const struct consw dummy_con = { .con_switch = DUMMY, .con_blank = DUMMY, .con_font_set = DUMMY, - .con_font_get = DUMMY, .con_font_default = DUMMY, .con_font_copy = DUMMY, }; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 445b1dc5d441..a17ba1465815 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -422,7 +422,10 @@ static const char *vgacon_startup(void) vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = - { .name = "ega", .start = 0x3B0, .end = 0x3BF }; + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; @@ -430,9 +433,15 @@ static const char *vgacon_startup(void) &ega_console_resource); } else { static struct resource mda1_console_resource = - { .name = "mda", .start = 0x3B0, .end = 0x3BB }; + { .name = "mda", + .flags = 
IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BB }; static struct resource mda2_console_resource = - { .name = "mda", .start = 0x3BF, .end = 0x3BF }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3BF, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; @@ -454,15 +463,21 @@ static const char *vgacon_startup(void) vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { - static struct resource ega_console_resource - = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; + static struct resource ega_console_resource = + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { - static struct resource vga_console_resource - = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; + static struct resource vga_console_resource = + { .name = "vga+", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, @@ -494,7 +509,10 @@ static const char *vgacon_startup(void) } } else { static struct resource cga_console_resource = - { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; + { .name = "cga", + .flags = IORESOURCE_IO, + .start = 0x3D4, + .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index e06358da4b99..3dee267d7c75 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) goto put_display_node; } - timings_np = of_find_node_by_name(display_np, "display-timings"); + timings_np = of_get_child_by_name(display_np, "display-timings"); if (!timings_np) { dev_err(dev, "failed to find display-timings node\n"); ret = -ENODEV; @@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) fb_add_videomode(&fb_vm, &info->modelist); } + /* + * FIXME: Make sure we are not referencing any fields in display_np + * and timings_np and drop our references to them before returning to + * avoid leaking the nodes on probe deferral and driver unbind. 
+ */ + return 0; put_timings_node: diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 5f04b4096c42..6c542d0ca076 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1681,8 +1681,10 @@ static int au1200fb_drv_probe(struct platform_device *dev) fbi = framebuffer_alloc(sizeof(struct au1200fb_device), &dev->dev); - if (!fbi) + if (!fbi) { + ret = -ENOMEM; goto failed; + } _au1200fb_infos[plane] = fbi; fbdev = fbi->par; @@ -1701,7 +1703,8 @@ static int au1200fb_drv_probe(struct platform_device *dev) if (!fbdev->fb_mem) { print_err("fail to allocate frambuffer (size: %dK))", fbdev->fb_len / 1024); - return -ENOMEM; + ret = -ENOMEM; + goto failed; } /* diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h index 6026c60fc100..261522fabdac 100644 --- a/drivers/video/fbdev/controlfb.h +++ b/drivers/video/fbdev/controlfb.h @@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = { {{ 1, 2}}, /* 1152x870, 75Hz */ {{ 0, 1}}, /* 1280x960, 75Hz */ {{ 0, 1}}, /* 1280x1024, 75Hz */ + {{ 1, 2}}, /* 1152x768, 60Hz */ + {{ 0, 1}}, /* 1600x1024, 60Hz */ }; diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 6082f653c68a..67773e8bbb95 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info) int timeout = 1000; /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ - if (cpu_data(0).x86_mask == 1) { + if (cpu_data(0).x86_stepping == 1) { pll_table = gx_pll_table_14MHz; pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); } else { diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 7f6c9e6cfc6c..1e56b50e4082 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -26,6 +26,7 @@ #include #include #include +#include enum { FB_GET_WIDTH = 0x00, @@ -234,7 +235,7 @@ static int goldfish_fb_probe(struct platform_device *pdev) fb->fb.var.activate = FB_ACTIVATE_NOW; fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT); fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH); - fb->fb.var.pixclock = 10000; + fb->fb.var.pixclock = 0; fb->fb.var.red.offset = 11; fb->fb.var.red.length = 5; @@ -304,12 +305,25 @@ static int goldfish_fb_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_fb_of_match[] = { + { .compatible = "google,goldfish-fb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_fb_of_match); + +static const struct acpi_device_id goldfish_fb_acpi_match[] = { + { "GFSH0004", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match); static struct platform_driver goldfish_fb_driver = { .probe = goldfish_fb_probe, .remove = goldfish_fb_remove, .driver = { - .name = "goldfish_fb" + .name = "goldfish_fb", + .of_match_table = goldfish_fb_of_match, + .acpi_match_table = ACPI_PTR(goldfish_fb_acpi_match), } }; diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c index a0f496049db7..3a6bb6561ba0 100644 --- a/drivers/video/fbdev/mmp/core.c +++ b/drivers/video/fbdev/mmp/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include